diff --git a/go.mod b/go.mod index 831ae449..7c5a2f68 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,11 @@ module github.com/bitrise-io/bitrise-webhooks -go 1.21 +go 1.22.0 + +toolchain go1.23.2 require ( - cloud.google.com/go/pubsub v1.42.0 + cloud.google.com/go/pubsub v1.45.1 github.com/bitrise-io/api-utils v0.0.0-20211025122143-6499571b8433 github.com/bitrise-io/envman v0.0.0-20240730123632-8066eeb61599 github.com/go-playground/webhooks/v6 v6.4.0 @@ -11,23 +13,23 @@ require ( github.com/gorilla/mux v1.8.1 github.com/pkg/errors v0.9.1 github.com/stretchr/testify v1.9.0 - github.com/xanzy/go-gitlab v0.108.0 + github.com/xanzy/go-gitlab v0.112.0 go.uber.org/zap v1.27.0 - google.golang.org/api v0.195.0 - gopkg.in/DataDog/dd-trace-go.v1 v1.67.0 + google.golang.org/api v0.203.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.69.0 ) require ( - cloud.google.com/go v0.115.1 // indirect - cloud.google.com/go/auth v0.9.1 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/auth v0.9.9 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.13 // indirect - github.com/DataDog/appsec-internal-go v1.7.0 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/iam v1.2.1 // indirect + github.com/DataDog/appsec-internal-go v1.8.0 // indirect github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1 // indirect - github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1 // indirect + github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 // indirect github.com/DataDog/datadog-go/v5 v5.5.0 // indirect - github.com/DataDog/go-libddwaf/v3 v3.3.0 // indirect + github.com/DataDog/go-libddwaf/v3 v3.4.0 // indirect github.com/DataDog/go-sqllexer v0.0.11 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/sketches-go v1.4.5 // indirect @@ -46,7 +48,7 @@ require ( github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect @@ -55,32 +57,32 @@ require ( github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/outcaste-io/ristretto v0.2.3 // indirect - github.com/philhofer/fwd v1.1.2 // indirect + github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect - github.com/tinylib/msgp v1.1.9 // indirect + github.com/tinylib/msgp v1.2.1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect 
+ go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.26.0 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/mod v0.18.0 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/time v0.6.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.7.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect - google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index a091f5d6..7ec7d1ce 100644 --- a/go.sum +++ b/go.sum @@ -1,31 +1,31 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= -cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= -cloud.google.com/go/auth v0.9.1 h1:+pMtLEV2k0AXKvs/tGZojuj6QaioxfUjOpMsG5Gtx+w= -cloud.google.com/go/auth v0.9.1/go.mod h1:Sw8ocT5mhhXxFklyhT12Eiy0ed6tTrPMCJjSI8KhYLk= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ= +cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= -cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= -cloud.google.com/go/kms v1.18.5 h1:75LSlVs60hyHK3ubs2OHd4sE63OAMcM2BdSJc2bkuM4= -cloud.google.com/go/kms v1.18.5/go.mod h1:yXunGUGzabH8rjUPImp2ndHiGolHeWJJ0LODLedicIY= -cloud.google.com/go/longrunning v0.5.12 h1:5LqSIdERr71CqfUsFlJdBpOkBH8FBCFD7P1nTWy3TYE= -cloud.google.com/go/longrunning v0.5.12/go.mod h1:S5hMV8CDJ6r50t2ubVJSKQVv5u0rmik5//KgLO3k4lU= -cloud.google.com/go/pubsub v1.42.0 h1:PVTbzorLryFL5ue8esTS2BfehUs0ahyNOY9qcd+HMOs= -cloud.google.com/go/pubsub v1.42.0/go.mod 
h1:KADJ6s4MbTwhXmse/50SebEhE4SmUwHi48z3/dHar1Y= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/kms v1.20.0 h1:uKUvjGqbBlI96xGE669hcVnEMw1Px/Mvfa62dhM5UrY= +cloud.google.com/go/kms v1.20.0/go.mod h1:/dMbFF1tLLFnQV44AoI2GlotbjowyUfgVwezxW291fM= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= +cloud.google.com/go/pubsub v1.45.1 h1:ZC/UzYcrmK12THWn1P72z+Pnp2vu/zCZRXyhAfP1hJY= +cloud.google.com/go/pubsub v1.45.1/go.mod h1:3bn7fTmzZFwaUjllitv1WlsNMkqBgGUb3UdMhI54eCc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/appsec-internal-go v1.7.0 h1:iKRNLih83dJeVya3IoUfK+6HLD/hQsIbyBlfvLmAeb0= -github.com/DataDog/appsec-internal-go v1.7.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g= +github.com/DataDog/appsec-internal-go v1.8.0 h1:1Tfn3LEogntRqZtf88twSApOCAAO3V+NILYhuQIo4J4= +github.com/DataDog/appsec-internal-go v1.8.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g= github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1 h1:/oxF4p/4XUGNpNw2TE7vDu/pJV3elEAZ+jES0/MWtiI= github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1/go.mod h1:AVPQWekk3h9AOC7+plBlNB68Sy6UIGFoMMVUDeSoNoI= -github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1 h1:mmkGuCHBFuDBpuwNMcqtY1x1I2fCaPH2Br4xPAAjbkM= -github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1/go.mod h1:JhAilx32dkIgoDkFXquCTfaWDsAOfe+vfBaxbiZoPI0= +github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 h1:LplNAmMgZvGU7kKA0+4c1xWOjz828xweW5TCi8Mw9Q0= +github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0/go.mod h1:4Vo3SJ24uzfKHUHLoFa8t8o+LH+7TCQ7sPcZDtOpSP4= github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-libddwaf/v3 v3.3.0 h1:jS72fuQpFgJZEdEJDmHJCPAgNTEMZoz1EUvimPUOiJ4= -github.com/DataDog/go-libddwaf/v3 v3.3.0/go.mod h1:Bz/0JkpGf689mzbUjKJeheJINqsyyhM8p9PDuHdK2Ec= +github.com/DataDog/go-libddwaf/v3 v3.4.0 h1:NJ2W2vhYaOm1OWr1LJCbdgp7ezG/XLJcQKBmjFwhSuM= +github.com/DataDog/go-libddwaf/v3 v3.4.0/go.mod h1:n98d9nZ1gzenRSk53wz8l6d34ikxS+hs62A31Fqmyi4= github.com/DataDog/go-sqllexer v0.0.11 h1:OfPBjmayreblOXreszbrOTICNZ3qWrA6Bg4sypvxpbw= github.com/DataDog/go-sqllexer v0.0.11/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= @@ -79,8 +79,6 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ= -github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -132,8 +130,8 @@ github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= @@ -162,6 +160,8 @@ github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkr github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/justinas/alice v1.2.0/go.mod h1:fN5HRH/reO/zrUflLfTN43t3vXvKzvZIENsNEe7i7qA= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= @@ -188,8 +188,8 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0= github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= -github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= -github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= +github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 h1:jYi87L8j62qkXzaYHAQAhEapgukhenIMZRBKTNRLHJ4= +github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -227,27 +227,27 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tinylib/msgp v1.1.9 
h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU= -github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k= -github.com/xanzy/go-gitlab v0.108.0 h1:IEvEUWFR5G1seslRhJ8gC//INiIUqYXuSUoBd7/gFKE= -github.com/xanzy/go-gitlab v0.108.0/go.mod h1:wKNKh3GkYDMOsGmnfuX+ITCmDuSDWFO0G+C4AygL9RY= +github.com/tinylib/msgp v1.2.1 h1:6ypy2qcCznxpP4hpORzhtXyTqrBs7cfM9MCCWY8zsmU= +github.com/tinylib/msgp v1.2.1/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro= +github.com/xanzy/go-gitlab v0.112.0 h1:6Z0cqEooCvBMfBIHw+CgO4AKGRV8na/9781xOb0+DKw= +github.com/xanzy/go-gitlab v0.112.0/go.mod h1:wKNKh3GkYDMOsGmnfuX+ITCmDuSDWFO0G+C4AygL9RY= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.einride.tech/aip v0.67.1 h1:d/4TW92OxXBngkSOwWS2CH5rez869KpKMaN44mdxkFI= -go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/XI= +go.einride.tech/aip v0.68.0 h1:4seM66oLzTpz50u4K1zlJyOXQ3tCzcJN7I22tKkjipw= +go.einride.tech/aip v0.68.0/go.mod h1:7y9FF8VtPWqpxuAxl0KQWqaULxW4zFIesD6zF5RIHHg= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/trace v1.29.0 
h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -271,8 +271,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -280,8 +280,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -292,11 +292,11 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 
h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -317,17 +317,17 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -338,32 +338,34 @@ golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= -google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= +google.golang.org/api v0.203.0 h1:SrEeuwU3S11Wlscsn+LA1kb/Y5xT8uggJSkIhD08NAU= +google.golang.org/api v0.203.0/go.mod h1:BuOVyCSYEPwJb3npWvDnNmFI92f3GeRnHNkETneT3SI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok= -google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c/go.mod h1:2rC5OendXvZ8wGEo/cSLheztrZDZaSoHanUcd1xtZnw= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c h1:Kqjm4WpoWvwhMPcrAczoTyMySQmYa9Wy2iL6Con4zn8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 h1:Df6WuGvthPzc+JiQ/G+m+sNX24kc0aTBqoDN/0yyykE= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53/go.mod h1:fheguH3Am2dGp1LfXkrvwqC/KlFq8F0nLq3LryOMrrE= +google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= +google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -373,10 +375,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/DataDog/dd-trace-go.v1 v1.67.0 h1:3Cb46zyKIlEWac21tvDF2O4KyMlOHQxrQkyiaUpdwM0= -gopkg.in/DataDog/dd-trace-go.v1 v1.67.0/go.mod h1:6DdiJPKOeJfZyd/IUGCAd5elY8qPGkztK6wbYYsMjag= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/DataDog/dd-trace-go.v1 v1.69.0 h1:zSY6DDsFRMQDNQYKWCv/AEwJXoPpDf1FfMyw7I1B7M8= +gopkg.in/DataDog/dd-trace-go.v1 v1.69.0/go.mod h1:U9AOeBHNAL95JXcd/SPf4a7O5GNeF/yD13sJtli/yaU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -391,11 +393,23 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= +lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q= +modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y= +modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0= +modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI= modernc.org/libc v1.37.6 h1:orZH3c5wmhIQFTXF+Nt+eeauyd+ZIt2BX6ARe+kD+aw= modernc.org/libc v1.37.6/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE= modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= +modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ= modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= +modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= 
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index ea6df0ca..5b6a2a5b 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,71 @@ # Changelog +## [0.9.9](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.8...auth/v0.9.9) (2024-10-22) + + +### Bug Fixes + +* **auth:** Fallback cert lookups for missing files ([#11013](https://github.com/googleapis/google-cloud-go/issues/11013)) ([bd76695](https://github.com/googleapis/google-cloud-go/commit/bd766957ec238b7c40ddbabb369e612dc9b07313)), refs [#10844](https://github.com/googleapis/google-cloud-go/issues/10844) +* **auth:** Replace MDS endpoint universe_domain with universe-domain ([#11000](https://github.com/googleapis/google-cloud-go/issues/11000)) ([6a1586f](https://github.com/googleapis/google-cloud-go/commit/6a1586f2ce9974684affaea84e7b629313b4d114)) + +## [0.9.8](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.7...auth/v0.9.8) (2024-10-09) + + +### Bug Fixes + +* **auth:** Restore OpenTelemetry handling in transports ([#10968](https://github.com/googleapis/google-cloud-go/issues/10968)) ([08c6d04](https://github.com/googleapis/google-cloud-go/commit/08c6d04901c1a20e219b2d86df41dbaa6d7d7b55)), refs [#10962](https://github.com/googleapis/google-cloud-go/issues/10962) +* **auth:** Try talk to plaintext S2A if credentials can not be found for mTLS-S2A ([#10941](https://github.com/googleapis/google-cloud-go/issues/10941)) ([0f0bf2d](https://github.com/googleapis/google-cloud-go/commit/0f0bf2d18c97dd8b65bcf0099f0802b5631c6287)) + +## [0.9.7](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.6...auth/v0.9.7) (2024-10-01) + + +### Bug Fixes + +* **auth:** Restore support for non-default service accounts for DirectPath ([#10937](https://github.com/googleapis/google-cloud-go/issues/10937)) ([a38650e](https://github.com/googleapis/google-cloud-go/commit/a38650edbf420223077498cafa537aec74b37aad)), refs [#10907](https://github.com/googleapis/google-cloud-go/issues/10907) + +## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30) + + +### Bug Fixes + +* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8)) + +## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25) + + +### Bug Fixes + +* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2)) +* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1)) +* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350)) + +## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11) + + +### Bug Fixes + +* 
**auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6)) + +## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03) + + +### Bug Fixes + +* **auth:** Choose quota project envvar over file when both present ([#10807](https://github.com/googleapis/google-cloud-go/issues/10807)) ([2d8dd77](https://github.com/googleapis/google-cloud-go/commit/2d8dd7700eff92d4b95027be55e26e1e7aa79181)), refs [#10804](https://github.com/googleapis/google-cloud-go/issues/10804) + +## [0.9.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.1...auth/v0.9.2) (2024-08-30) + + +### Bug Fixes + +* **auth:** Handle non-Transport DefaultTransport ([#10733](https://github.com/googleapis/google-cloud-go/issues/10733)) ([98d91dc](https://github.com/googleapis/google-cloud-go/commit/98d91dc8316b247498fab41ab35e57a0446fe556)), refs [#10742](https://github.com/googleapis/google-cloud-go/issues/10742) +* **auth:** Make sure quota option takes precedence over env/file ([#10797](https://github.com/googleapis/google-cloud-go/issues/10797)) ([f1b050d](https://github.com/googleapis/google-cloud-go/commit/f1b050d56d804b245cab048c2980d32b0eaceb4e)), refs [#10795](https://github.com/googleapis/google-cloud-go/issues/10795) + + +### Documentation + +* **auth:** Fix Go doc comment link ([#10751](https://github.com/googleapis/google-cloud-go/issues/10751)) ([015acfa](https://github.com/googleapis/google-cloud-go/commit/015acfab4d172650928bb1119bc2cd6307b9a437)) + ## [0.9.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.0...auth/v0.9.1) (2024-08-22) diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md index 36de276a..6fe4f076 100644 --- a/vendor/cloud.google.com/go/auth/README.md +++ b/vendor/cloud.google.com/go/auth/README.md @@ -1,4 +1,40 @@ -# auth +# Google Auth Library for Go -This module is currently EXPERIMENTAL and under active development. It is not -yet intended to be used. +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/auth.svg)](https://pkg.go.dev/cloud.google.com/go/auth) + +## Install + +``` bash +go get cloud.google.com/go/auth@latest +``` + +## Usage + +The most common way this library is used is transitively, by default, from any +of our Go client libraries. + +### Notable use-cases + +- To create a credential directly please see examples in the + [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials) + package. +- To create a authenticated HTTP client please see examples in the + [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport) + package. +- To create a authenticated gRPC connection please see examples in the + [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport) + package. +- To create an ID token please see examples in the + [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken) + package. + +## Contributing + +Contributions are welcome. Please, see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. 
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index 2eb78d7b..314bd292 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -12,6 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package auth provides utilities for managing Google Cloud credentials, +// including functionality for creating, caching, and refreshing OAuth2 tokens. +// It offers customizable options for different OAuth2 flows, such as 2-legged +// (2LO) and 3-legged (3LO) OAuth, along with support for PKCE and automatic +// token management. package auth import ( @@ -130,7 +135,9 @@ func (t *Token) isEmpty() bool { } // Credentials holds Google credentials, including -// [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials). +// [Application Default Credentials]. +// +// [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials type Credentials struct { json []byte projectID CredentialsPropertyProvider @@ -321,7 +328,9 @@ func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, err defer c.mu.Unlock() return c.cachedToken, nil case stale: - c.tokenAsync(ctx) + // Call tokenAsync with a new Context because the user-provided context + // may have a short timeout incompatible with async token refresh. + c.tokenAsync(context.Background()) // Return the stale token immediately to not block customer requests to Cloud services. c.mu.Lock() defer c.mu.Unlock() @@ -336,13 +345,14 @@ func (c *cachedTokenProvider) tokenState() tokenState { c.mu.Lock() defer c.mu.Unlock() t := c.cachedToken + now := timeNow() if t == nil || t.Value == "" { return invalid } else if t.Expiry.IsZero() { return fresh - } else if timeNow().After(t.Expiry.Round(0)) { + } else if now.After(t.Expiry.Round(0)) { return invalid - } else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) { + } else if now.After(t.Expiry.Round(0).Add(-c.expireEarly)) { return stale } return fresh diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go index cce62241..010afc37 100644 --- a/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -98,8 +98,8 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { if OnGCE() { return auth.NewCredentials(&auth.CredentialsOptions{ TokenProvider: computeTokenProvider(opts), - ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { - return metadata.ProjectID() + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return metadata.ProjectIDWithContext(ctx) }), UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, }), nil diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go index b426e16d..6591b181 100644 --- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -33,7 +33,7 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { return nil, err } - var projectID, quotaProjectID, 
universeDomain string + var projectID, universeDomain string var tp auth.TokenProvider switch fileType { case credsfile.ServiceAccountKey: @@ -56,7 +56,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = f.UniverseDomain case credsfile.ExternalAccountKey: f, err := credsfile.ParseExternalAccount(b) @@ -67,7 +66,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) case credsfile.ExternalAccountAuthorizedUserKey: f, err := credsfile.ParseExternalAccountAuthorizedUser(b) @@ -78,7 +76,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = f.UniverseDomain case credsfile.ImpersonatedServiceAccountKey: f, err := credsfile.ParseImpersonatedServiceAccount(b) @@ -108,9 +105,9 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ ExpireEarly: opts.EarlyTokenRefresh, }), - JSON: b, - ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), - QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID), + JSON: b, + ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), + // TODO(codyoss): only set quota project here if there was a user override UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain), }), nil } @@ -127,8 +124,14 @@ func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string } func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) if opts.UseSelfSignedJWT { return configureSelfSignedJWT(f, opts) + } else if ud != "" && ud != internalauth.DefaultUniverseDomain { + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs. 
+ opts.UseSelfSignedJWT = true + return configureSelfSignedJWT(f, opts) } opts2LO := &auth.Options2LO{ Email: f.ClientEmail, diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go index a34f6b06..d8b5d4fd 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -94,32 +94,30 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) if sp.RegionalCredVerificationURL == "" { sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL } - if sp.requestSigner == nil { - headers := make(map[string]string) - if sp.shouldUseMetadataServer() { - awsSessionToken, err := sp.getAWSSessionToken(ctx) - if err != nil { - return "", err - } - - if awsSessionToken != "" { - headers[awsIMDSv2SessionTokenHeader] = awsSessionToken - } - } - - awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + headers := make(map[string]string) + if sp.shouldUseMetadataServer() { + awsSessionToken, err := sp.getAWSSessionToken(ctx) if err != nil { return "", err } - if sp.region, err = sp.getRegion(ctx, headers); err != nil { - return "", err - } - sp.requestSigner = &awsRequestSigner{ - RegionName: sp.region, - AwsSecurityCredentials: awsSecurityCredentials, + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken } } + awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + if err != nil { + return "", err + } + if sp.region, err = sp.getRegion(ctx, headers); err != nil { + return "", err + } + sp.requestSigner = &awsRequestSigner{ + RegionName: sp.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + // Generate the signed request to AWS STS GetCallerIdentity API. // Use the required regional endpoint. Otherwise, the request will fail. req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go index b62a8ae4..6ae29de6 100644 --- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -17,6 +17,7 @@ package credentials import ( "context" "crypto/rsa" + "errors" "fmt" "strings" "time" @@ -35,6 +36,9 @@ var ( // configureSelfSignedJWT uses the private key in the service account to create // a JWT without making a network call. 
func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if len(opts.scopes()) == 0 && opts.Audience == "" { + return nil, errors.New("credentials: both scopes and audience are empty") + } pk, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, fmt.Errorf("credentials: could not parse key: %w", err) diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go index efc91c2b..8696df14 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -22,7 +22,7 @@ import ( "strings" "cloud.google.com/go/auth" - "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/auth/internal/compute" "google.golang.org/grpc" grpcgoogle "google.golang.org/grpc/credentials/google" ) @@ -55,7 +55,7 @@ func checkDirectPathEndPoint(endpoint string) bool { return true } -func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool { +func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool { if tp == nil { return false } @@ -66,6 +66,9 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool if tok == nil { return false } + if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath { + return true + } if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" { return false } @@ -91,7 +94,7 @@ func isDirectPathXdsUsed(o *Options) bool { // configuration allows the use of direct path. If it does not the provided // grpcOpts and endpoint are returned. func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) { - if isDirectPathEnabled(endpoint, opts) && metadata.OnGCE() && isTokenProviderDirectPathCompatible(creds, opts) { + if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) { // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. grpcOpts = []grpc.DialOption{ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index 0442a593..42d4cbe3 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package grpctransport provides functionality for managing gRPC client +// connections to Google Cloud services. 
package grpctransport import ( @@ -20,15 +22,19 @@ import ( "errors" "fmt" "net/http" + "os" + "sync" "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" "go.opencensus.io/plugin/ocgrpc" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" grpccreds "google.golang.org/grpc/credentials" grpcinsecure "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/stats" ) const ( @@ -38,7 +44,7 @@ const ( // Check env to decide if using google-c2p resolver for DirectPath traffic. enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" - quotaProjectHeaderKey = "X-Goog-User-Project" + quotaProjectHeaderKey = "X-goog-user-project" ) var ( @@ -46,6 +52,27 @@ var ( timeoutDialerOption grpc.DialOption ) +// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across +// all dial connections to avoid the memory leak documented in +// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226 +// +// TODO: When this module depends on a version of otelgrpc containing the fix, +// replace this singleton with inline usage for simplicity. +// The fix should be in https://github.com/open-telemetry/opentelemetry-go/pull/5797. +var ( + initOtelStatsHandlerOnce sync.Once + otelStatsHandler stats.Handler +) + +// otelGRPCStatsHandler returns singleton otelStatsHandler for reuse across all +// dial connections. +func otelGRPCStatsHandler() stats.Handler { + initOtelStatsHandlerOnce.Do(func() { + otelStatsHandler = otelgrpc.NewClientHandler() + }) + return otelStatsHandler +} + // ClientCertProvider is a function that returns a TLS client certificate to be // used when opening TLS connections. It follows the same semantics as // [crypto/tls.Config.GetClientCertificate]. @@ -271,7 +298,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er if metadata == nil { metadata = make(map[string]string, 1) } - metadata[quotaProjectHeaderKey] = qp + // Don't overwrite user specified quota + if _, ok := metadata[quotaProjectHeaderKey]; !ok { + metadata[quotaProjectHeaderKey] = qp + } } grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(&grpcCredentialsProvider{ @@ -289,9 +319,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. grpcOpts = addOCStatsHandler(grpcOpts, opts) + grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts) grpcOpts = append(grpcOpts, opts.GRPCDialOpts...) - return grpc.DialContext(ctx, endpoint, grpcOpts...) + return grpc.NewClient(endpoint, grpcOpts...) } // grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. @@ -325,15 +356,23 @@ type grpcCredentialsProvider struct { clientUniverseDomain string } -// getClientUniverseDomain returns the default service domain for a given Cloud universe. -// The default value is "googleapis.com". This is the universe domain -// configured for the client, which will be compared to the universe domain -// that is separately configured for the credentials. +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". 
+// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. func (c *grpcCredentialsProvider) getClientUniverseDomain() string { - if c.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if c.clientUniverseDomain != "" { + return c.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return c.clientUniverseDomain + return internal.DefaultUniverseDomain } func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { @@ -384,3 +423,10 @@ func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOpt } return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) } + +func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption { + if opts.DisableTelemetry { + return dialOpts + } + return append(dialOpts, grpc.WithStatsHandler(otelGRPCStatsHandler())) +} diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go index 969c8d4d..30fedf95 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package httptransport provides functionality for managing HTTP client +// connections to Google Cloud services. package httptransport import ( diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go index 07eea474..63498ee7 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/transport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -19,6 +19,7 @@ import ( "crypto/tls" "net" "net/http" + "os" "time" "cloud.google.com/go/auth" @@ -27,11 +28,12 @@ import ( "cloud.google.com/go/auth/internal/transport" "cloud.google.com/go/auth/internal/transport/cert" "go.opencensus.io/plugin/ochttp" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" ) const ( - quotaProjectHeaderKey = "X-Goog-User-Project" + quotaProjectHeaderKey = "X-goog-user-project" ) func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) { @@ -41,6 +43,9 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err headers: headers, } var trans http.RoundTripper = ht + // Give OpenTelemetry precedence over OpenCensus in case user configuration + // causes both to write the same header (`X-Cloud-Trace-Context`). + trans = addOpenTelemetryTransport(trans, opts) trans = addOCTransport(trans, opts) switch { case opts.DisableAuthentication: @@ -76,7 +81,10 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err if headers == nil { headers = make(map[string][]string, 1) } - headers.Set(quotaProjectHeaderKey, qp) + // Don't overwrite user specified quota + if v := headers.Get(quotaProjectHeaderKey); v == "" { + headers.Set(quotaProjectHeaderKey, qp) + } } creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil) trans = &authTransport{ @@ -94,7 +102,11 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err // http.DefaultTransport. // If TLSCertificate is available, set TLSClientConfig as well. 
func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { - trans := http.DefaultTransport.(*http.Transport).Clone() + defaultTransport, ok := http.DefaultTransport.(*http.Transport) + if !ok { + defaultTransport = transport.BaseTransport() + } + trans := defaultTransport.Clone() trans.MaxIdleConnsPerHost = 100 if clientCertSource != nil { @@ -155,6 +167,13 @@ func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) { return rt.RoundTrip(&newReq) } +func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { + if opts.DisableTelemetry { + return trans + } + return otelhttp.NewTransport(trans) +} + func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { if opts.DisableTelemetry { return trans @@ -171,13 +190,23 @@ type authTransport struct { clientUniverseDomain string } -// getClientUniverseDomain returns the universe domain configured for the client. -// The default value is "googleapis.com". +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. func (t *authTransport) getClientUniverseDomain() string { - if t.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if t.clientUniverseDomain != "" { + return t.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return t.clientUniverseDomain + return internal.DefaultUniverseDomain } // RoundTrip authorizes and authenticates the request with an diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go new file mode 100644 index 00000000..651bd61f --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go @@ -0,0 +1,66 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import ( + "log" + "runtime" + "strings" + "sync" +) + +var ( + vmOnGCEOnce sync.Once + vmOnGCE bool +) + +// OnComputeEngine returns whether the client is running on GCE. +// +// This is a copy of the gRPC internal googlecloud.OnGCE() func at: +// https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go +// The functionality is similar to the metadata.OnGCE() func at: +// https://github.com/xmenxk/google-cloud-go/blob/main/compute/metadata/metadata.go +// +// The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server. +// In particular, OnComputeEngine() will return false on Serverless. 
+func OnComputeEngine() bool { + vmOnGCEOnce.Do(func() { + mf, err := manufacturer() + if err != nil { + log.Printf("Failed to read manufacturer, vmOnGCE=false: %v", err) + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) + }) + return vmOnGCE +} + +// isRunningOnGCE checks whether the local system, without doing a network request, is +// running on GCP. +func isRunningOnGCE(manufacturer []byte, goos string) bool { + name := string(manufacturer) + switch goos { + case "linux": + name = strings.TrimSpace(name) + return name == "Google" || name == "Google Compute Engine" + case "windows": + name = strings.Replace(name, " ", "", -1) + name = strings.Replace(name, "\n", "", -1) + name = strings.Replace(name, "\r", "", -1) + return name == "Google" + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go new file mode 100644 index 00000000..af490bf4 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go @@ -0,0 +1,22 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go new file mode 100644 index 00000000..d92178df --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go @@ -0,0 +1,23 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import "os" + +const linuxProductNameFile = "/sys/class/dmi/id/product_name" + +func manufacturer() ([]byte, error) { + return os.ReadFile(linuxProductNameFile) +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go new file mode 100644 index 00000000..16be9df3 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go @@ -0,0 +1,46 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index 4308345e..d8c16119 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -38,8 +38,11 @@ const ( // QuotaProjectEnvVar is the environment variable for setting the quota // project. QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" - projectEnvVar = "GOOGLE_CLOUD_PROJECT" - maxBodySize = 1 << 20 + // UniverseDomainEnvVar is the environment variable for setting the default + // service domain for a given Cloud universe. + UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN" + projectEnvVar = "GOOGLE_CLOUD_PROJECT" + maxBodySize = 1 << 20 // DefaultUniverseDomain is the default value for universe domain. // Universe domain is the default service domain for a given Cloud universe. 
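// Editor's note: the following is an illustrative, self-contained sketch and is
// not part of the vendored diff. It shows the universe-domain precedence that the
// changes above introduce: an explicit client-configured universe domain wins,
// then the new GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable, then the
// "googleapis.com" default. The helper name resolveUniverseDomain is hypothetical
// and is not an API exported by this module.
package main

import (
	"fmt"
	"os"
)

const defaultUniverseDomain = "googleapis.com"

// resolveUniverseDomain mirrors the precedence applied by the updated
// getClientUniverseDomain implementations in grpctransport and httptransport:
//  1. a non-empty universe domain configured on the client,
//  2. a non-empty GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable,
//  3. the default "googleapis.com".
func resolveUniverseDomain(clientUniverseDomain string) string {
	if clientUniverseDomain != "" {
		return clientUniverseDomain
	}
	if envUD := os.Getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN"); envUD != "" {
		return envUD
	}
	return defaultUniverseDomain
}

func main() {
	// With no client option and no environment variable set, the default wins.
	fmt.Println(resolveUniverseDomain("")) // googleapis.com
}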
@@ -197,7 +200,7 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) { ctx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() - return metadata.GetWithContext(ctx, "universe/universe_domain") + return metadata.GetWithContext(ctx, "universe/universe-domain") } func getMetadataUniverseDomain(ctx context.Context) (string, error) { diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go index 26e037c1..f606888f 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -133,7 +133,11 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) if err != nil { log.Printf("Loading MTLS MDS credentials failed: %v", err) - return defaultTransportCreds, config.endpoint, nil + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return defaultTransportCreds, config.endpoint, nil + } } } else if config.s2aAddress != "" { s2aAddr = config.s2aAddress @@ -177,7 +181,11 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) if err != nil { log.Printf("Loading MTLS MDS credentials failed: %v", err) - return config.clientCertSource, nil, nil + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return config.clientCertSource, nil, nil + } } } else if config.s2aAddress != "" { s2aAddr = config.s2aAddress diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go index 36651591..6c954ae1 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go @@ -16,7 +16,6 @@ package cert import ( "crypto/tls" - "errors" "github.com/googleapis/enterprise-certificate-proxy/client" ) @@ -37,10 +36,9 @@ type ecpSource struct { func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) { key, err := client.Cred(configFilePath) if err != nil { - if errors.Is(err, client.ErrCredUnavailable) { - return nil, errSourceUnavailable - } - return nil, err + // TODO(codyoss): once this is fixed upstream can handle this error a + // little better here. But be safe for now and assume unavailable. + return nil, errSourceUnavailable } return (&ecpSource{ diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go index 3227aba2..738cb216 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go @@ -62,11 +62,11 @@ func NewSecureConnectProvider(configFilePath string) (Provider, error) { file, err := os.ReadFile(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - // Config file missing means Secure Connect is not supported. - return nil, errSourceUnavailable - } - return nil, err + // Config file missing means Secure Connect is not supported. + // There are non-os.ErrNotExist errors that may be returned. + // (e.g. 
if the home directory is /dev/null, *nix systems will + // return ENOTDIR instead of ENOENT) + return nil, errSourceUnavailable } var metadata secureConnectMetadata diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go index e8675bf8..347aaced 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -82,10 +82,7 @@ func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) func getCertAndKeyFiles(configFilePath string) (string, string, error) { jsonFile, err := os.Open(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - return "", "", errSourceUnavailable - } - return "", "", err + return "", "", errSourceUnavailable } byteValue, err := io.ReadAll(jsonFile) diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go index 4df73edc..37894bfc 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -15,6 +15,7 @@ package transport import ( + "context" "encoding/json" "fmt" "log" @@ -84,7 +85,7 @@ func getMetadataMTLSAutoConfig() { } var httpGetMetadataMTLSConfig = func() (string, error) { - return metadata.Get(configEndpointSuffix) + return metadata.GetWithContext(context.Background(), configEndpointSuffix) } func queryConfig() (*mtlsConfig, error) { diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go index 718a6b17..cc586ec5 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -81,12 +81,14 @@ func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain stri // DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS. func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client { - trans := baseTransport() + trans := BaseTransport() trans.TLSClientConfig = tlsConfig return &http.Client{Transport: trans} } -func baseTransport() *http.Transport { +// BaseTransport returns a default [http.Transport] which can be used if +// [http.DefaultTransport] has been overwritten. 
+func BaseTransport() *http.Transport { return &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 9594e1e2..da7db19b 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,19 @@ # Changes +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20) + + +### Bug Fixes + +* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f)) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12) + + +### Bug Fixes + +* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd)) + ## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 345080b7..c160b478 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -456,6 +456,9 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string code = res.StatusCode } if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { + if res != nil && res.Body != nil { + res.Body.Close() + } if err := sleep(ctx, delay); err != nil { return "", "", err } diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go index bb412f89..2e53f012 100644 --- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go @@ -17,10 +17,15 @@ package metadata -import "syscall" +import ( + "errors" + "syscall" +) func init() { // Initialize syscallRetryable to return true on transient socket-level // errors. These errors are specific to Linux. 
- syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } + syscallRetryable = func(err error) bool { + return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED) + } } diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 63d8364f..498a15a5 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,20 @@ # Changes +## [1.2.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.0...iam/v1.2.1) (2024-09-12) + + +### Bug Fixes + +* **iam:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) + +## [1.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.13...iam/v1.2.0) (2024-08-20) + + +### Features + +* **iam:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18)) + ## [1.1.13](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.12...iam/v1.1.13) (2024-08-08) diff --git a/vendor/cloud.google.com/go/pubsub/CHANGES.md b/vendor/cloud.google.com/go/pubsub/CHANGES.md index 64aeb66a..11038cca 100644 --- a/vendor/cloud.google.com/go/pubsub/CHANGES.md +++ b/vendor/cloud.google.com/go/pubsub/CHANGES.md @@ -1,5 +1,55 @@ # Changes +## [1.45.1](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.45.0...pubsub/v1.45.1) (2024-10-24) + + +### Bug Fixes + +* **pubsub:** Update google.golang.org/api to v0.203.0 ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e)) +* **pubsub:** WARNING: On approximately Dec 1, 2024, an update to Protobuf will change service registration function signatures to use an interface instead of a concrete type in generated .pb.go files. This change is expected to affect very few if any users of this client library. For more information, see https://togithub.com/googleapis/google-cloud-go/issues/11020. 
([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e)) + + +### Documentation + +* **pubsub:** Add doc links to top level package doc ([#11029](https://github.com/googleapis/google-cloud-go/issues/11029)) ([fe2ec56](https://github.com/googleapis/google-cloud-go/commit/fe2ec569029d2052885063b6fca90e1a27424b4e)) + +## [1.45.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.44.0...pubsub/v1.45.0) (2024-10-22) + + +### Features + +* **pubsub:** Add IngestionFailureEvent to the external proto ([f0b05e2](https://github.com/googleapis/google-cloud-go/commit/f0b05e260435d5e8889b9a0ca0ab215fcde169ab)) +* **pubsub:** Add support for ingestion platform logging settings ([#10969](https://github.com/googleapis/google-cloud-go/issues/10969)) ([c60241f](https://github.com/googleapis/google-cloud-go/commit/c60241f46db2b021d799f621851a352f2baec96e)) + +## [1.44.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.43.0...pubsub/v1.44.0) (2024-10-08) + + +### Features + +* **pubsub:** Add ingestion Cloud Storage fields and Platform Logging fields to Topic ([7250d71](https://github.com/googleapis/google-cloud-go/commit/7250d714a638dcd5df3fbe0e91c5f1250c3f80f9)) +* **pubsub:** Add support for cloud storage ingestion topics ([#10959](https://github.com/googleapis/google-cloud-go/issues/10959)) ([1a11675](https://github.com/googleapis/google-cloud-go/commit/1a116759ce0d25fdcb5776bf73c52408ae1ec985)) +* **pubsub:** Return listing information for subscriptions created via Analytics Hub ([fdb4ea9](https://github.com/googleapis/google-cloud-go/commit/fdb4ea99189657880e5f0e0dce16bef1c3aa0d2f)) + + +### Documentation + +* **pubsub:** Update documentation for 31 day subscription message retention ([#10845](https://github.com/googleapis/google-cloud-go/issues/10845)) ([9b4b2fa](https://github.com/googleapis/google-cloud-go/commit/9b4b2fa87864906aeae3a8fda460466f951bc6c9)) + +## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.42.0...pubsub/v1.43.0) (2024-09-09) + + +### Features + +* **pubsub:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18)) +* **pubsub:** Allow trace extraction from protobuf message ([#10827](https://github.com/googleapis/google-cloud-go/issues/10827)) ([caa826c](https://github.com/googleapis/google-cloud-go/commit/caa826cea826473ebf4c806b57b0c3b0a2f0f365)) + + +### Bug Fixes + +* **pubsub:** Add attributes before startSpan ([#10800](https://github.com/googleapis/google-cloud-go/issues/10800)) ([48addbf](https://github.com/googleapis/google-cloud-go/commit/48addbff725ee2bb226ce0ab926415c27fd4ffad)) +* **pubsub:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) +* **pubsub:** Close grpc streams on retry ([#10624](https://github.com/googleapis/google-cloud-go/issues/10624)) ([79a0e11](https://github.com/googleapis/google-cloud-go/commit/79a0e118c88190cbe1b56250a75b67bd98b0d7f2)) + ## [1.42.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.41.0...pubsub/v1.42.0) (2024-08-19) diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary_go123.go b/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary_go123.go new file mode 100644 index 00000000..c7a04ffb --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary_go123.go @@ -0,0 +1,56 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package pubsub + +import ( + "iter" + + pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb" + "github.com/googleapis/gax-go/v2/iterator" +) + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SchemaIterator) All() iter.Seq2[*pubsubpb.Schema, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SnapshotIterator) All() iter.Seq2[*pubsubpb.Snapshot, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *StringIterator) All() iter.Seq2[string, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SubscriptionIterator) All() iter.Seq2[*pubsubpb.Subscription, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TopicIterator) All() iter.Seq2[*pubsubpb.Topic, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go index 03ac865c..cae0b96f 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go @@ -68,6 +68,7 @@ func defaultPublisherGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -587,6 +588,7 @@ func defaultPublisherRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go index b266b23b..f18defab 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/pubsubpb/pubsub.pb.go @@ -119,6 +119,145 @@ func (IngestionDataSourceSettings_AwsKinesis_State) EnumDescriptor() ([]byte, [] return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 0, 0} } +// Possible states for ingestion from Cloud Storage. +type IngestionDataSourceSettings_CloudStorage_State int32 + +const ( + // Default value. 
This value is unused. + IngestionDataSourceSettings_CloudStorage_STATE_UNSPECIFIED IngestionDataSourceSettings_CloudStorage_State = 0 + // Ingestion is active. + IngestionDataSourceSettings_CloudStorage_ACTIVE IngestionDataSourceSettings_CloudStorage_State = 1 + // Permission denied encountered while calling the Cloud Storage API. This + // can happen if the Pub/Sub SA has not been granted the + // [appropriate + // permissions](https://cloud.google.com/storage/docs/access-control/iam-permissions): + // - storage.objects.list: to list the objects in a bucket. + // - storage.objects.get: to read the objects in a bucket. + // - storage.buckets.get: to verify the bucket exists. + IngestionDataSourceSettings_CloudStorage_CLOUD_STORAGE_PERMISSION_DENIED IngestionDataSourceSettings_CloudStorage_State = 2 + // Permission denied encountered while publishing to the topic. This can + // happen if the Pub/Sub SA has not been granted the [appropriate publish + // permissions](https://cloud.google.com/pubsub/docs/access-control#pubsub.publisher) + IngestionDataSourceSettings_CloudStorage_PUBLISH_PERMISSION_DENIED IngestionDataSourceSettings_CloudStorage_State = 3 + // The provided Cloud Storage bucket doesn't exist. + IngestionDataSourceSettings_CloudStorage_BUCKET_NOT_FOUND IngestionDataSourceSettings_CloudStorage_State = 4 + // The Cloud Storage bucket has too many objects, ingestion will be + // paused. + IngestionDataSourceSettings_CloudStorage_TOO_MANY_OBJECTS IngestionDataSourceSettings_CloudStorage_State = 5 +) + +// Enum value maps for IngestionDataSourceSettings_CloudStorage_State. +var ( + IngestionDataSourceSettings_CloudStorage_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "ACTIVE", + 2: "CLOUD_STORAGE_PERMISSION_DENIED", + 3: "PUBLISH_PERMISSION_DENIED", + 4: "BUCKET_NOT_FOUND", + 5: "TOO_MANY_OBJECTS", + } + IngestionDataSourceSettings_CloudStorage_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "ACTIVE": 1, + "CLOUD_STORAGE_PERMISSION_DENIED": 2, + "PUBLISH_PERMISSION_DENIED": 3, + "BUCKET_NOT_FOUND": 4, + "TOO_MANY_OBJECTS": 5, + } +) + +func (x IngestionDataSourceSettings_CloudStorage_State) Enum() *IngestionDataSourceSettings_CloudStorage_State { + p := new(IngestionDataSourceSettings_CloudStorage_State) + *p = x + return p +} + +func (x IngestionDataSourceSettings_CloudStorage_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (IngestionDataSourceSettings_CloudStorage_State) Descriptor() protoreflect.EnumDescriptor { + return file_google_pubsub_v1_pubsub_proto_enumTypes[1].Descriptor() +} + +func (IngestionDataSourceSettings_CloudStorage_State) Type() protoreflect.EnumType { + return &file_google_pubsub_v1_pubsub_proto_enumTypes[1] +} + +func (x IngestionDataSourceSettings_CloudStorage_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use IngestionDataSourceSettings_CloudStorage_State.Descriptor instead. +func (IngestionDataSourceSettings_CloudStorage_State) EnumDescriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1, 0} +} + +// Severity levels of Platform Logs. +type PlatformLogsSettings_Severity int32 + +const ( + // Default value. Logs level is unspecified. Logs will be disabled. + PlatformLogsSettings_SEVERITY_UNSPECIFIED PlatformLogsSettings_Severity = 0 + // Logs will be disabled. 
+ PlatformLogsSettings_DISABLED PlatformLogsSettings_Severity = 1 + // Debug logs and higher-severity logs will be written. + PlatformLogsSettings_DEBUG PlatformLogsSettings_Severity = 2 + // Info logs and higher-severity logs will be written. + PlatformLogsSettings_INFO PlatformLogsSettings_Severity = 3 + // Warning logs and higher-severity logs will be written. + PlatformLogsSettings_WARNING PlatformLogsSettings_Severity = 4 + // Only error logs will be written. + PlatformLogsSettings_ERROR PlatformLogsSettings_Severity = 5 +) + +// Enum value maps for PlatformLogsSettings_Severity. +var ( + PlatformLogsSettings_Severity_name = map[int32]string{ + 0: "SEVERITY_UNSPECIFIED", + 1: "DISABLED", + 2: "DEBUG", + 3: "INFO", + 4: "WARNING", + 5: "ERROR", + } + PlatformLogsSettings_Severity_value = map[string]int32{ + "SEVERITY_UNSPECIFIED": 0, + "DISABLED": 1, + "DEBUG": 2, + "INFO": 3, + "WARNING": 4, + "ERROR": 5, + } +) + +func (x PlatformLogsSettings_Severity) Enum() *PlatformLogsSettings_Severity { + p := new(PlatformLogsSettings_Severity) + *p = x + return p +} + +func (x PlatformLogsSettings_Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PlatformLogsSettings_Severity) Descriptor() protoreflect.EnumDescriptor { + return file_google_pubsub_v1_pubsub_proto_enumTypes[2].Descriptor() +} + +func (PlatformLogsSettings_Severity) Type() protoreflect.EnumType { + return &file_google_pubsub_v1_pubsub_proto_enumTypes[2] +} + +func (x PlatformLogsSettings_Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PlatformLogsSettings_Severity.Descriptor instead. +func (PlatformLogsSettings_Severity) EnumDescriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{3, 0} +} + // The state of the topic. type Topic_State int32 @@ -158,11 +297,11 @@ func (x Topic_State) String() string { } func (Topic_State) Descriptor() protoreflect.EnumDescriptor { - return file_google_pubsub_v1_pubsub_proto_enumTypes[1].Descriptor() + return file_google_pubsub_v1_pubsub_proto_enumTypes[3].Descriptor() } func (Topic_State) Type() protoreflect.EnumType { - return &file_google_pubsub_v1_pubsub_proto_enumTypes[1] + return &file_google_pubsub_v1_pubsub_proto_enumTypes[3] } func (x Topic_State) Number() protoreflect.EnumNumber { @@ -171,7 +310,7 @@ func (x Topic_State) Number() protoreflect.EnumNumber { // Deprecated: Use Topic_State.Descriptor instead. func (Topic_State) EnumDescriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{3, 0} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{5, 0} } // Possible states for a subscription. @@ -213,11 +352,11 @@ func (x Subscription_State) String() string { } func (Subscription_State) Descriptor() protoreflect.EnumDescriptor { - return file_google_pubsub_v1_pubsub_proto_enumTypes[2].Descriptor() + return file_google_pubsub_v1_pubsub_proto_enumTypes[4].Descriptor() } func (Subscription_State) Type() protoreflect.EnumType { - return &file_google_pubsub_v1_pubsub_proto_enumTypes[2] + return &file_google_pubsub_v1_pubsub_proto_enumTypes[4] } func (x Subscription_State) Number() protoreflect.EnumNumber { @@ -226,7 +365,7 @@ func (x Subscription_State) Number() protoreflect.EnumNumber { // Deprecated: Use Subscription_State.Descriptor instead. 
func (Subscription_State) EnumDescriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{18, 0} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{20, 0} } // Possible states for a BigQuery subscription. @@ -284,11 +423,11 @@ func (x BigQueryConfig_State) String() string { } func (BigQueryConfig_State) Descriptor() protoreflect.EnumDescriptor { - return file_google_pubsub_v1_pubsub_proto_enumTypes[3].Descriptor() + return file_google_pubsub_v1_pubsub_proto_enumTypes[5].Descriptor() } func (BigQueryConfig_State) Type() protoreflect.EnumType { - return &file_google_pubsub_v1_pubsub_proto_enumTypes[3] + return &file_google_pubsub_v1_pubsub_proto_enumTypes[5] } func (x BigQueryConfig_State) Number() protoreflect.EnumNumber { @@ -297,7 +436,7 @@ func (x BigQueryConfig_State) Number() protoreflect.EnumNumber { // Deprecated: Use BigQueryConfig_State.Descriptor instead. func (BigQueryConfig_State) EnumDescriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{23, 0} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{25, 0} } // Possible states for a Cloud Storage subscription. @@ -352,11 +491,11 @@ func (x CloudStorageConfig_State) String() string { } func (CloudStorageConfig_State) Descriptor() protoreflect.EnumDescriptor { - return file_google_pubsub_v1_pubsub_proto_enumTypes[4].Descriptor() + return file_google_pubsub_v1_pubsub_proto_enumTypes[6].Descriptor() } func (CloudStorageConfig_State) Type() protoreflect.EnumType { - return &file_google_pubsub_v1_pubsub_proto_enumTypes[4] + return &file_google_pubsub_v1_pubsub_proto_enumTypes[6] } func (x CloudStorageConfig_State) Number() protoreflect.EnumNumber { @@ -365,7 +504,7 @@ func (x CloudStorageConfig_State) Number() protoreflect.EnumNumber { // Deprecated: Use CloudStorageConfig_State.Descriptor instead. func (CloudStorageConfig_State) EnumDescriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 0} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26, 0} } // A policy constraining the storage of messages published to the topic. @@ -529,7 +668,11 @@ type IngestionDataSourceSettings struct { // Types that are assignable to Source: // // *IngestionDataSourceSettings_AwsKinesis_ + // *IngestionDataSourceSettings_CloudStorage_ Source isIngestionDataSourceSettings_Source `protobuf_oneof:"source"` + // Optional. Platform Logs settings. If unset, no Platform Logs will be + // generated. 
+ PlatformLogsSettings *PlatformLogsSettings `protobuf:"bytes,4,opt,name=platform_logs_settings,json=platformLogsSettings,proto3" json:"platform_logs_settings,omitempty"` } func (x *IngestionDataSourceSettings) Reset() { @@ -578,6 +721,20 @@ func (x *IngestionDataSourceSettings) GetAwsKinesis() *IngestionDataSourceSettin return nil } +func (x *IngestionDataSourceSettings) GetCloudStorage() *IngestionDataSourceSettings_CloudStorage { + if x, ok := x.GetSource().(*IngestionDataSourceSettings_CloudStorage_); ok { + return x.CloudStorage + } + return nil +} + +func (x *IngestionDataSourceSettings) GetPlatformLogsSettings() *PlatformLogsSettings { + if x != nil { + return x.PlatformLogsSettings + } + return nil +} + type isIngestionDataSourceSettings_Source interface { isIngestionDataSourceSettings_Source() } @@ -587,8 +744,153 @@ type IngestionDataSourceSettings_AwsKinesis_ struct { AwsKinesis *IngestionDataSourceSettings_AwsKinesis `protobuf:"bytes,1,opt,name=aws_kinesis,json=awsKinesis,proto3,oneof"` } +type IngestionDataSourceSettings_CloudStorage_ struct { + // Optional. Cloud Storage. + CloudStorage *IngestionDataSourceSettings_CloudStorage `protobuf:"bytes,2,opt,name=cloud_storage,json=cloudStorage,proto3,oneof"` +} + func (*IngestionDataSourceSettings_AwsKinesis_) isIngestionDataSourceSettings_Source() {} +func (*IngestionDataSourceSettings_CloudStorage_) isIngestionDataSourceSettings_Source() {} + +// Settings for Platform Logs produced by Pub/Sub. +type PlatformLogsSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The minimum severity level of Platform Logs that will be written. + Severity PlatformLogsSettings_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=google.pubsub.v1.PlatformLogsSettings_Severity" json:"severity,omitempty"` +} + +func (x *PlatformLogsSettings) Reset() { + *x = PlatformLogsSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlatformLogsSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlatformLogsSettings) ProtoMessage() {} + +func (x *PlatformLogsSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlatformLogsSettings.ProtoReflect.Descriptor instead. +func (*PlatformLogsSettings) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{3} +} + +func (x *PlatformLogsSettings) GetSeverity() PlatformLogsSettings_Severity { + if x != nil { + return x.Severity + } + return PlatformLogsSettings_SEVERITY_UNSPECIFIED +} + +// Payload of the Platform Log entry sent when a failure is encountered while +// ingesting. +type IngestionFailureEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the import topic. Format is: + // projects/{project_name}/topics/{topic_name}. + Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` + // Required. Error details explaining why ingestion to Pub/Sub has failed. 
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + // Types that are assignable to Failure: + // + // *IngestionFailureEvent_CloudStorageFailure_ + Failure isIngestionFailureEvent_Failure `protobuf_oneof:"failure"` +} + +func (x *IngestionFailureEvent) Reset() { + *x = IngestionFailureEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IngestionFailureEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionFailureEvent) ProtoMessage() {} + +func (x *IngestionFailureEvent) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionFailureEvent.ProtoReflect.Descriptor instead. +func (*IngestionFailureEvent) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4} +} + +func (x *IngestionFailureEvent) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *IngestionFailureEvent) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (m *IngestionFailureEvent) GetFailure() isIngestionFailureEvent_Failure { + if m != nil { + return m.Failure + } + return nil +} + +func (x *IngestionFailureEvent) GetCloudStorageFailure() *IngestionFailureEvent_CloudStorageFailure { + if x, ok := x.GetFailure().(*IngestionFailureEvent_CloudStorageFailure_); ok { + return x.CloudStorageFailure + } + return nil +} + +type isIngestionFailureEvent_Failure interface { + isIngestionFailureEvent_Failure() +} + +type IngestionFailureEvent_CloudStorageFailure_ struct { + // Optional. Failure when ingesting from Cloud Storage. + CloudStorageFailure *IngestionFailureEvent_CloudStorageFailure `protobuf:"bytes,3,opt,name=cloud_storage_failure,json=cloudStorageFailure,proto3,oneof"` +} + +func (*IngestionFailureEvent_CloudStorageFailure_) isIngestionFailureEvent_Failure() {} + // A topic resource. type Topic struct { state protoimpl.MessageState @@ -638,7 +940,7 @@ type Topic struct { func (x *Topic) Reset() { *x = Topic{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[3] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -651,7 +953,7 @@ func (x *Topic) String() string { func (*Topic) ProtoMessage() {} func (x *Topic) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[3] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -664,7 +966,7 @@ func (x *Topic) ProtoReflect() protoreflect.Message { // Deprecated: Use Topic.ProtoReflect.Descriptor instead. 
func (*Topic) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{3} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{5} } func (x *Topic) GetName() string { @@ -773,7 +1075,7 @@ type PubsubMessage struct { func (x *PubsubMessage) Reset() { *x = PubsubMessage{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[4] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -786,7 +1088,7 @@ func (x *PubsubMessage) String() string { func (*PubsubMessage) ProtoMessage() {} func (x *PubsubMessage) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[4] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -799,7 +1101,7 @@ func (x *PubsubMessage) ProtoReflect() protoreflect.Message { // Deprecated: Use PubsubMessage.ProtoReflect.Descriptor instead. func (*PubsubMessage) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{6} } func (x *PubsubMessage) GetData() []byte { @@ -851,7 +1153,7 @@ type GetTopicRequest struct { func (x *GetTopicRequest) Reset() { *x = GetTopicRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[5] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -864,7 +1166,7 @@ func (x *GetTopicRequest) String() string { func (*GetTopicRequest) ProtoMessage() {} func (x *GetTopicRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[5] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -877,7 +1179,7 @@ func (x *GetTopicRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTopicRequest.ProtoReflect.Descriptor instead. func (*GetTopicRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{5} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{7} } func (x *GetTopicRequest) GetTopic() string { @@ -906,7 +1208,7 @@ type UpdateTopicRequest struct { func (x *UpdateTopicRequest) Reset() { *x = UpdateTopicRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[6] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -919,7 +1221,7 @@ func (x *UpdateTopicRequest) String() string { func (*UpdateTopicRequest) ProtoMessage() {} func (x *UpdateTopicRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[6] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -932,7 +1234,7 @@ func (x *UpdateTopicRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateTopicRequest.ProtoReflect.Descriptor instead. 
func (*UpdateTopicRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{6} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{8} } func (x *UpdateTopicRequest) GetTopic() *Topic { @@ -965,7 +1267,7 @@ type PublishRequest struct { func (x *PublishRequest) Reset() { *x = PublishRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[7] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -978,7 +1280,7 @@ func (x *PublishRequest) String() string { func (*PublishRequest) ProtoMessage() {} func (x *PublishRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[7] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -991,7 +1293,7 @@ func (x *PublishRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. func (*PublishRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{7} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{9} } func (x *PublishRequest) GetTopic() string { @@ -1023,7 +1325,7 @@ type PublishResponse struct { func (x *PublishResponse) Reset() { *x = PublishResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[8] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1036,7 +1338,7 @@ func (x *PublishResponse) String() string { func (*PublishResponse) ProtoMessage() {} func (x *PublishResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[8] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1049,7 +1351,7 @@ func (x *PublishResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PublishResponse.ProtoReflect.Descriptor instead. func (*PublishResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{8} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{10} } func (x *PublishResponse) GetMessageIds() []string { @@ -1079,7 +1381,7 @@ type ListTopicsRequest struct { func (x *ListTopicsRequest) Reset() { *x = ListTopicsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[9] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1092,7 +1394,7 @@ func (x *ListTopicsRequest) String() string { func (*ListTopicsRequest) ProtoMessage() {} func (x *ListTopicsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[9] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1105,7 +1407,7 @@ func (x *ListTopicsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListTopicsRequest.ProtoReflect.Descriptor instead. 
func (*ListTopicsRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{9} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{11} } func (x *ListTopicsRequest) GetProject() string { @@ -1145,7 +1447,7 @@ type ListTopicsResponse struct { func (x *ListTopicsResponse) Reset() { *x = ListTopicsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[10] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1158,7 +1460,7 @@ func (x *ListTopicsResponse) String() string { func (*ListTopicsResponse) ProtoMessage() {} func (x *ListTopicsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[10] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1171,7 +1473,7 @@ func (x *ListTopicsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListTopicsResponse.ProtoReflect.Descriptor instead. func (*ListTopicsResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{10} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{12} } func (x *ListTopicsResponse) GetTopics() []*Topic { @@ -1208,7 +1510,7 @@ type ListTopicSubscriptionsRequest struct { func (x *ListTopicSubscriptionsRequest) Reset() { *x = ListTopicSubscriptionsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[11] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1221,7 +1523,7 @@ func (x *ListTopicSubscriptionsRequest) String() string { func (*ListTopicSubscriptionsRequest) ProtoMessage() {} func (x *ListTopicSubscriptionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[11] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1234,7 +1536,7 @@ func (x *ListTopicSubscriptionsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListTopicSubscriptionsRequest.ProtoReflect.Descriptor instead. 
func (*ListTopicSubscriptionsRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{11} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{13} } func (x *ListTopicSubscriptionsRequest) GetTopic() string { @@ -1276,7 +1578,7 @@ type ListTopicSubscriptionsResponse struct { func (x *ListTopicSubscriptionsResponse) Reset() { *x = ListTopicSubscriptionsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[12] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1289,7 +1591,7 @@ func (x *ListTopicSubscriptionsResponse) String() string { func (*ListTopicSubscriptionsResponse) ProtoMessage() {} func (x *ListTopicSubscriptionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[12] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1302,7 +1604,7 @@ func (x *ListTopicSubscriptionsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListTopicSubscriptionsResponse.ProtoReflect.Descriptor instead. func (*ListTopicSubscriptionsResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{12} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{14} } func (x *ListTopicSubscriptionsResponse) GetSubscriptions() []string { @@ -1339,7 +1641,7 @@ type ListTopicSnapshotsRequest struct { func (x *ListTopicSnapshotsRequest) Reset() { *x = ListTopicSnapshotsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[13] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1352,7 +1654,7 @@ func (x *ListTopicSnapshotsRequest) String() string { func (*ListTopicSnapshotsRequest) ProtoMessage() {} func (x *ListTopicSnapshotsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[13] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1365,7 +1667,7 @@ func (x *ListTopicSnapshotsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListTopicSnapshotsRequest.ProtoReflect.Descriptor instead. 
func (*ListTopicSnapshotsRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{13} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{15} } func (x *ListTopicSnapshotsRequest) GetTopic() string { @@ -1406,7 +1708,7 @@ type ListTopicSnapshotsResponse struct { func (x *ListTopicSnapshotsResponse) Reset() { *x = ListTopicSnapshotsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[14] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1419,7 +1721,7 @@ func (x *ListTopicSnapshotsResponse) String() string { func (*ListTopicSnapshotsResponse) ProtoMessage() {} func (x *ListTopicSnapshotsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[14] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1432,7 +1734,7 @@ func (x *ListTopicSnapshotsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListTopicSnapshotsResponse.ProtoReflect.Descriptor instead. func (*ListTopicSnapshotsResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{14} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{16} } func (x *ListTopicSnapshotsResponse) GetSnapshots() []string { @@ -1463,7 +1765,7 @@ type DeleteTopicRequest struct { func (x *DeleteTopicRequest) Reset() { *x = DeleteTopicRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[15] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1476,7 +1778,7 @@ func (x *DeleteTopicRequest) String() string { func (*DeleteTopicRequest) ProtoMessage() {} func (x *DeleteTopicRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[15] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1489,7 +1791,7 @@ func (x *DeleteTopicRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteTopicRequest.ProtoReflect.Descriptor instead. 
func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{15} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{17} } func (x *DeleteTopicRequest) GetTopic() string { @@ -1513,7 +1815,7 @@ type DetachSubscriptionRequest struct { func (x *DetachSubscriptionRequest) Reset() { *x = DetachSubscriptionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[16] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1526,7 +1828,7 @@ func (x *DetachSubscriptionRequest) String() string { func (*DetachSubscriptionRequest) ProtoMessage() {} func (x *DetachSubscriptionRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[16] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1539,7 +1841,7 @@ func (x *DetachSubscriptionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DetachSubscriptionRequest.ProtoReflect.Descriptor instead. func (*DetachSubscriptionRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{16} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{18} } func (x *DetachSubscriptionRequest) GetSubscription() string { @@ -1560,7 +1862,7 @@ type DetachSubscriptionResponse struct { func (x *DetachSubscriptionResponse) Reset() { *x = DetachSubscriptionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[17] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1573,7 +1875,7 @@ func (x *DetachSubscriptionResponse) String() string { func (*DetachSubscriptionResponse) ProtoMessage() {} func (x *DetachSubscriptionResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[17] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1586,7 +1888,7 @@ func (x *DetachSubscriptionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DetachSubscriptionResponse.ProtoReflect.Descriptor instead. func (*DetachSubscriptionResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{17} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{19} } // A subscription resource. If none of `push_config`, `bigquery_config`, or @@ -1649,7 +1951,7 @@ type Subscription struct { // backlog, from the moment a message is published. If `retain_acked_messages` // is true, then this also configures the retention of acknowledged messages, // and thus configures how far back in time a `Seek` can be done. Defaults to - // 7 days. Cannot be more than 7 days or less than 10 minutes. + // 7 days. Cannot be more than 31 days or less than 10 minutes. MessageRetentionDuration *durationpb.Duration `protobuf:"bytes,8,opt,name=message_retention_duration,json=messageRetentionDuration,proto3" json:"message_retention_duration,omitempty"` // Optional. See [Creating and managing // labels](https://cloud.google.com/pubsub/docs/labels). @@ -1719,12 +2021,15 @@ type Subscription struct { // Output only. 
An output-only field indicating whether or not the // subscription can receive messages. State Subscription_State `protobuf:"varint,19,opt,name=state,proto3,enum=google.pubsub.v1.Subscription_State" json:"state,omitempty"` + // Output only. Information about the associated Analytics Hub subscription. + // Only set if the subscritpion is created by Analytics Hub. + AnalyticsHubSubscriptionInfo *Subscription_AnalyticsHubSubscriptionInfo `protobuf:"bytes,23,opt,name=analytics_hub_subscription_info,json=analyticsHubSubscriptionInfo,proto3" json:"analytics_hub_subscription_info,omitempty"` } func (x *Subscription) Reset() { *x = Subscription{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[18] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1737,7 +2042,7 @@ func (x *Subscription) String() string { func (*Subscription) ProtoMessage() {} func (x *Subscription) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[18] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1750,7 +2055,7 @@ func (x *Subscription) ProtoReflect() protoreflect.Message { // Deprecated: Use Subscription.ProtoReflect.Descriptor instead. func (*Subscription) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{18} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{20} } func (x *Subscription) GetName() string { @@ -1879,6 +2184,13 @@ func (x *Subscription) GetState() Subscription_State { return Subscription_STATE_UNSPECIFIED } +func (x *Subscription) GetAnalyticsHubSubscriptionInfo() *Subscription_AnalyticsHubSubscriptionInfo { + if x != nil { + return x.AnalyticsHubSubscriptionInfo + } + return nil +} + // A policy that specifies how Pub/Sub retries message delivery. // // Retry delay will be exponential based on provided minimum and maximum @@ -1907,7 +2219,7 @@ type RetryPolicy struct { func (x *RetryPolicy) Reset() { *x = RetryPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[19] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1920,7 +2232,7 @@ func (x *RetryPolicy) String() string { func (*RetryPolicy) ProtoMessage() {} func (x *RetryPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[19] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1933,7 +2245,7 @@ func (x *RetryPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryPolicy.ProtoReflect.Descriptor instead. 
func (*RetryPolicy) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{19} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{21} } func (x *RetryPolicy) GetMinimumBackoff() *durationpb.Duration { @@ -1989,7 +2301,7 @@ type DeadLetterPolicy struct { func (x *DeadLetterPolicy) Reset() { *x = DeadLetterPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[20] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2002,7 +2314,7 @@ func (x *DeadLetterPolicy) String() string { func (*DeadLetterPolicy) ProtoMessage() {} func (x *DeadLetterPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[20] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2015,7 +2327,7 @@ func (x *DeadLetterPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use DeadLetterPolicy.ProtoReflect.Descriptor instead. func (*DeadLetterPolicy) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{20} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22} } func (x *DeadLetterPolicy) GetDeadLetterTopic() string { @@ -2051,7 +2363,7 @@ type ExpirationPolicy struct { func (x *ExpirationPolicy) Reset() { *x = ExpirationPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[21] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2064,7 +2376,7 @@ func (x *ExpirationPolicy) String() string { func (*ExpirationPolicy) ProtoMessage() {} func (x *ExpirationPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[21] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2077,7 +2389,7 @@ func (x *ExpirationPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use ExpirationPolicy.ProtoReflect.Descriptor instead. func (*ExpirationPolicy) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{21} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{23} } func (x *ExpirationPolicy) GetTtl() *durationpb.Duration { @@ -2141,7 +2453,7 @@ type PushConfig struct { func (x *PushConfig) Reset() { *x = PushConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2154,7 +2466,7 @@ func (x *PushConfig) String() string { func (*PushConfig) ProtoMessage() {} func (x *PushConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2167,7 +2479,7 @@ func (x *PushConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use PushConfig.ProtoReflect.Descriptor instead. 
func (*PushConfig) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24} } func (x *PushConfig) GetPushEndpoint() string { @@ -2296,7 +2608,7 @@ type BigQueryConfig struct { func (x *BigQueryConfig) Reset() { *x = BigQueryConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2309,7 +2621,7 @@ func (x *BigQueryConfig) String() string { func (*BigQueryConfig) ProtoMessage() {} func (x *BigQueryConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2322,7 +2634,7 @@ func (x *BigQueryConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use BigQueryConfig.ProtoReflect.Descriptor instead. func (*BigQueryConfig) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{23} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{25} } func (x *BigQueryConfig) GetTable() string { @@ -2429,7 +2741,7 @@ type CloudStorageConfig struct { func (x *CloudStorageConfig) Reset() { *x = CloudStorageConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2442,7 +2754,7 @@ func (x *CloudStorageConfig) String() string { func (*CloudStorageConfig) ProtoMessage() {} func (x *CloudStorageConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2455,7 +2767,7 @@ func (x *CloudStorageConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use CloudStorageConfig.ProtoReflect.Descriptor instead. func (*CloudStorageConfig) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26} } func (x *CloudStorageConfig) GetBucket() string { @@ -2594,7 +2906,7 @@ type ReceivedMessage struct { func (x *ReceivedMessage) Reset() { *x = ReceivedMessage{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2607,7 +2919,7 @@ func (x *ReceivedMessage) String() string { func (*ReceivedMessage) ProtoMessage() {} func (x *ReceivedMessage) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2620,7 +2932,7 @@ func (x *ReceivedMessage) ProtoReflect() protoreflect.Message { // Deprecated: Use ReceivedMessage.ProtoReflect.Descriptor instead. 
func (*ReceivedMessage) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{25} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{27} } func (x *ReceivedMessage) GetAckId() string { @@ -2658,7 +2970,7 @@ type GetSubscriptionRequest struct { func (x *GetSubscriptionRequest) Reset() { *x = GetSubscriptionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2671,7 +2983,7 @@ func (x *GetSubscriptionRequest) String() string { func (*GetSubscriptionRequest) ProtoMessage() {} func (x *GetSubscriptionRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2684,7 +2996,7 @@ func (x *GetSubscriptionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSubscriptionRequest.ProtoReflect.Descriptor instead. func (*GetSubscriptionRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{28} } func (x *GetSubscriptionRequest) GetSubscription() string { @@ -2710,7 +3022,7 @@ type UpdateSubscriptionRequest struct { func (x *UpdateSubscriptionRequest) Reset() { *x = UpdateSubscriptionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2723,7 +3035,7 @@ func (x *UpdateSubscriptionRequest) String() string { func (*UpdateSubscriptionRequest) ProtoMessage() {} func (x *UpdateSubscriptionRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2736,7 +3048,7 @@ func (x *UpdateSubscriptionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateSubscriptionRequest.ProtoReflect.Descriptor instead. 
func (*UpdateSubscriptionRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{27} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{29} } func (x *UpdateSubscriptionRequest) GetSubscription() *Subscription { @@ -2773,7 +3085,7 @@ type ListSubscriptionsRequest struct { func (x *ListSubscriptionsRequest) Reset() { *x = ListSubscriptionsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2786,7 +3098,7 @@ func (x *ListSubscriptionsRequest) String() string { func (*ListSubscriptionsRequest) ProtoMessage() {} func (x *ListSubscriptionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2799,7 +3111,7 @@ func (x *ListSubscriptionsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSubscriptionsRequest.ProtoReflect.Descriptor instead. func (*ListSubscriptionsRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{28} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{30} } func (x *ListSubscriptionsRequest) GetProject() string { @@ -2840,7 +3152,7 @@ type ListSubscriptionsResponse struct { func (x *ListSubscriptionsResponse) Reset() { *x = ListSubscriptionsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2853,7 +3165,7 @@ func (x *ListSubscriptionsResponse) String() string { func (*ListSubscriptionsResponse) ProtoMessage() {} func (x *ListSubscriptionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2866,7 +3178,7 @@ func (x *ListSubscriptionsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSubscriptionsResponse.ProtoReflect.Descriptor instead. 
func (*ListSubscriptionsResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{29} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{31} } func (x *ListSubscriptionsResponse) GetSubscriptions() []*Subscription { @@ -2897,7 +3209,7 @@ type DeleteSubscriptionRequest struct { func (x *DeleteSubscriptionRequest) Reset() { *x = DeleteSubscriptionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2910,7 +3222,7 @@ func (x *DeleteSubscriptionRequest) String() string { func (*DeleteSubscriptionRequest) ProtoMessage() {} func (x *DeleteSubscriptionRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2923,7 +3235,7 @@ func (x *DeleteSubscriptionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteSubscriptionRequest.ProtoReflect.Descriptor instead. func (*DeleteSubscriptionRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{30} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{32} } func (x *DeleteSubscriptionRequest) GetSubscription() string { @@ -2954,7 +3266,7 @@ type ModifyPushConfigRequest struct { func (x *ModifyPushConfigRequest) Reset() { *x = ModifyPushConfigRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2967,7 +3279,7 @@ func (x *ModifyPushConfigRequest) String() string { func (*ModifyPushConfigRequest) ProtoMessage() {} func (x *ModifyPushConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2980,7 +3292,7 @@ func (x *ModifyPushConfigRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ModifyPushConfigRequest.ProtoReflect.Descriptor instead. 
func (*ModifyPushConfigRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{31} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{33} } func (x *ModifyPushConfigRequest) GetSubscription() string { @@ -3025,7 +3337,7 @@ type PullRequest struct { func (x *PullRequest) Reset() { *x = PullRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3038,7 +3350,7 @@ func (x *PullRequest) String() string { func (*PullRequest) ProtoMessage() {} func (x *PullRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3051,7 +3363,7 @@ func (x *PullRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PullRequest.ProtoReflect.Descriptor instead. func (*PullRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{32} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{34} } func (x *PullRequest) GetSubscription() string { @@ -3093,7 +3405,7 @@ type PullResponse struct { func (x *PullResponse) Reset() { *x = PullResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3106,7 +3418,7 @@ func (x *PullResponse) String() string { func (*PullResponse) ProtoMessage() {} func (x *PullResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3119,7 +3431,7 @@ func (x *PullResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PullResponse.ProtoReflect.Descriptor instead. func (*PullResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{33} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{35} } func (x *PullResponse) GetReceivedMessages() []*ReceivedMessage { @@ -3155,7 +3467,7 @@ type ModifyAckDeadlineRequest struct { func (x *ModifyAckDeadlineRequest) Reset() { *x = ModifyAckDeadlineRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3168,7 +3480,7 @@ func (x *ModifyAckDeadlineRequest) String() string { func (*ModifyAckDeadlineRequest) ProtoMessage() {} func (x *ModifyAckDeadlineRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3181,7 +3493,7 @@ func (x *ModifyAckDeadlineRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ModifyAckDeadlineRequest.ProtoReflect.Descriptor instead. 
func (*ModifyAckDeadlineRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{34} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{36} } func (x *ModifyAckDeadlineRequest) GetSubscription() string { @@ -3223,7 +3535,7 @@ type AcknowledgeRequest struct { func (x *AcknowledgeRequest) Reset() { *x = AcknowledgeRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3236,7 +3548,7 @@ func (x *AcknowledgeRequest) String() string { func (*AcknowledgeRequest) ProtoMessage() {} func (x *AcknowledgeRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3249,7 +3561,7 @@ func (x *AcknowledgeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AcknowledgeRequest.ProtoReflect.Descriptor instead. func (*AcknowledgeRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{35} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37} } func (x *AcknowledgeRequest) GetSubscription() string { @@ -3341,7 +3653,7 @@ type StreamingPullRequest struct { func (x *StreamingPullRequest) Reset() { *x = StreamingPullRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3354,7 +3666,7 @@ func (x *StreamingPullRequest) String() string { func (*StreamingPullRequest) ProtoMessage() {} func (x *StreamingPullRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3367,7 +3679,7 @@ func (x *StreamingPullRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamingPullRequest.ProtoReflect.Descriptor instead. 
func (*StreamingPullRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{36} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{38} } func (x *StreamingPullRequest) GetSubscription() string { @@ -3448,7 +3760,7 @@ type StreamingPullResponse struct { func (x *StreamingPullResponse) Reset() { *x = StreamingPullResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3461,7 +3773,7 @@ func (x *StreamingPullResponse) String() string { func (*StreamingPullResponse) ProtoMessage() {} func (x *StreamingPullResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3474,7 +3786,7 @@ func (x *StreamingPullResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamingPullResponse.ProtoReflect.Descriptor instead. func (*StreamingPullResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39} } func (x *StreamingPullResponse) GetReceivedMessages() []*ReceivedMessage { @@ -3538,7 +3850,7 @@ type CreateSnapshotRequest struct { func (x *CreateSnapshotRequest) Reset() { *x = CreateSnapshotRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3551,7 +3863,7 @@ func (x *CreateSnapshotRequest) String() string { func (*CreateSnapshotRequest) ProtoMessage() {} func (x *CreateSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3564,7 +3876,7 @@ func (x *CreateSnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateSnapshotRequest.ProtoReflect.Descriptor instead. 
func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{38} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{40} } func (x *CreateSnapshotRequest) GetName() string { @@ -3604,7 +3916,7 @@ type UpdateSnapshotRequest struct { func (x *UpdateSnapshotRequest) Reset() { *x = UpdateSnapshotRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3617,7 +3929,7 @@ func (x *UpdateSnapshotRequest) String() string { func (*UpdateSnapshotRequest) ProtoMessage() {} func (x *UpdateSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3630,7 +3942,7 @@ func (x *UpdateSnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateSnapshotRequest.ProtoReflect.Descriptor instead. func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{41} } func (x *UpdateSnapshotRequest) GetSnapshot() *Snapshot { @@ -3681,7 +3993,7 @@ type Snapshot struct { func (x *Snapshot) Reset() { *x = Snapshot{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3694,7 +4006,7 @@ func (x *Snapshot) String() string { func (*Snapshot) ProtoMessage() {} func (x *Snapshot) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3707,7 +4019,7 @@ func (x *Snapshot) ProtoReflect() protoreflect.Message { // Deprecated: Use Snapshot.ProtoReflect.Descriptor instead. func (*Snapshot) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{40} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{42} } func (x *Snapshot) GetName() string { @@ -3752,7 +4064,7 @@ type GetSnapshotRequest struct { func (x *GetSnapshotRequest) Reset() { *x = GetSnapshotRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3765,7 +4077,7 @@ func (x *GetSnapshotRequest) String() string { func (*GetSnapshotRequest) ProtoMessage() {} func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3778,7 +4090,7 @@ func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSnapshotRequest.ProtoReflect.Descriptor instead. 
func (*GetSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{41} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{43} } func (x *GetSnapshotRequest) GetSnapshot() string { @@ -3808,7 +4120,7 @@ type ListSnapshotsRequest struct { func (x *ListSnapshotsRequest) Reset() { *x = ListSnapshotsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3821,7 +4133,7 @@ func (x *ListSnapshotsRequest) String() string { func (*ListSnapshotsRequest) ProtoMessage() {} func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3834,7 +4146,7 @@ func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSnapshotsRequest.ProtoReflect.Descriptor instead. func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{42} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{44} } func (x *ListSnapshotsRequest) GetProject() string { @@ -3875,7 +4187,7 @@ type ListSnapshotsResponse struct { func (x *ListSnapshotsResponse) Reset() { *x = ListSnapshotsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3888,7 +4200,7 @@ func (x *ListSnapshotsResponse) String() string { func (*ListSnapshotsResponse) ProtoMessage() {} func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[43] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3901,7 +4213,7 @@ func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSnapshotsResponse.ProtoReflect.Descriptor instead. 
func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{43} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{45} } func (x *ListSnapshotsResponse) GetSnapshots() []*Snapshot { @@ -3932,7 +4244,7 @@ type DeleteSnapshotRequest struct { func (x *DeleteSnapshotRequest) Reset() { *x = DeleteSnapshotRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[44] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3945,7 +4257,7 @@ func (x *DeleteSnapshotRequest) String() string { func (*DeleteSnapshotRequest) ProtoMessage() {} func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[44] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3958,7 +4270,7 @@ func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteSnapshotRequest.ProtoReflect.Descriptor instead. func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{44} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{46} } func (x *DeleteSnapshotRequest) GetSnapshot() string { @@ -3986,7 +4298,7 @@ type SeekRequest struct { func (x *SeekRequest) Reset() { *x = SeekRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[45] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3999,7 +4311,7 @@ func (x *SeekRequest) String() string { func (*SeekRequest) ProtoMessage() {} func (x *SeekRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[45] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4012,7 +4324,7 @@ func (x *SeekRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SeekRequest.ProtoReflect.Descriptor instead. func (*SeekRequest) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{45} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{47} } func (x *SeekRequest) GetSubscription() string { @@ -4083,7 +4395,7 @@ type SeekResponse struct { func (x *SeekResponse) Reset() { *x = SeekResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[46] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4096,7 +4408,7 @@ func (x *SeekResponse) String() string { func (*SeekResponse) ProtoMessage() {} func (x *SeekResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[46] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4109,7 +4421,7 @@ func (x *SeekResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SeekResponse.ProtoReflect.Descriptor instead. 
func (*SeekResponse) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{46} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{48} } // Ingestion settings for Amazon Kinesis Data Streams. @@ -4140,7 +4452,7 @@ type IngestionDataSourceSettings_AwsKinesis struct { func (x *IngestionDataSourceSettings_AwsKinesis) Reset() { *x = IngestionDataSourceSettings_AwsKinesis{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[47] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4153,7 +4465,7 @@ func (x *IngestionDataSourceSettings_AwsKinesis) String() string { func (*IngestionDataSourceSettings_AwsKinesis) ProtoMessage() {} func (x *IngestionDataSourceSettings_AwsKinesis) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[47] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4204,47 +4516,53 @@ func (x *IngestionDataSourceSettings_AwsKinesis) GetGcpServiceAccount() string { return "" } -// Contains information needed for generating an -// [OpenID Connect -// token](https://developers.google.com/identity/protocols/OpenIDConnect). -type PushConfig_OidcToken struct { +// Ingestion settings for Cloud Storage. +type IngestionDataSourceSettings_CloudStorage struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Optional. [Service account - // email](https://cloud.google.com/iam/docs/service-accounts) - // used for generating the OIDC token. For more information - // on setting up authentication, see - // [Push subscriptions](https://cloud.google.com/pubsub/docs/push). - ServiceAccountEmail string `protobuf:"bytes,1,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` - // Optional. Audience to be used when generating OIDC token. The audience - // claim identifies the recipients that the JWT is intended for. The - // audience value is a single case-sensitive string. Having multiple values - // (array) for the audience field is not supported. More info about the OIDC - // JWT token audience here: - // https://tools.ietf.org/html/rfc7519#section-4.1.3 Note: if not specified, - // the Push endpoint URL will be used. - Audience string `protobuf:"bytes,2,opt,name=audience,proto3" json:"audience,omitempty"` -} - -func (x *PushConfig_OidcToken) Reset() { - *x = PushConfig_OidcToken{} + // Output only. An output-only field that indicates the state of the Cloud + // Storage ingestion source. + State IngestionDataSourceSettings_CloudStorage_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.pubsub.v1.IngestionDataSourceSettings_CloudStorage_State" json:"state,omitempty"` + // Optional. Cloud Storage bucket. The bucket name must be without any + // prefix like "gs://". See the [bucket naming requirements] + // (https://cloud.google.com/storage/docs/buckets#naming). + Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Defaults to text format. 
+ // + // Types that are assignable to InputFormat: + // + // *IngestionDataSourceSettings_CloudStorage_TextFormat_ + // *IngestionDataSourceSettings_CloudStorage_AvroFormat_ + // *IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat + InputFormat isIngestionDataSourceSettings_CloudStorage_InputFormat `protobuf_oneof:"input_format"` + // Optional. Only objects with a larger or equal creation timestamp will be + // ingested. + MinimumObjectCreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=minimum_object_create_time,json=minimumObjectCreateTime,proto3" json:"minimum_object_create_time,omitempty"` + // Optional. Glob pattern used to match objects that will be ingested. If + // unset, all objects will be ingested. See the [supported + // patterns](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob). + MatchGlob string `protobuf:"bytes,9,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"` +} + +func (x *IngestionDataSourceSettings_CloudStorage) Reset() { + *x = IngestionDataSourceSettings_CloudStorage{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *PushConfig_OidcToken) String() string { +func (x *IngestionDataSourceSettings_CloudStorage) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PushConfig_OidcToken) ProtoMessage() {} +func (*IngestionDataSourceSettings_CloudStorage) ProtoMessage() {} -func (x *PushConfig_OidcToken) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51] +func (x *IngestionDataSourceSettings_CloudStorage) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4255,51 +4573,594 @@ func (x *PushConfig_OidcToken) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PushConfig_OidcToken.ProtoReflect.Descriptor instead. -func (*PushConfig_OidcToken) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22, 0} +// Deprecated: Use IngestionDataSourceSettings_CloudStorage.ProtoReflect.Descriptor instead. +func (*IngestionDataSourceSettings_CloudStorage) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1} } -func (x *PushConfig_OidcToken) GetServiceAccountEmail() string { +func (x *IngestionDataSourceSettings_CloudStorage) GetState() IngestionDataSourceSettings_CloudStorage_State { if x != nil { - return x.ServiceAccountEmail + return x.State } - return "" + return IngestionDataSourceSettings_CloudStorage_STATE_UNSPECIFIED } -func (x *PushConfig_OidcToken) GetAudience() string { +func (x *IngestionDataSourceSettings_CloudStorage) GetBucket() string { if x != nil { - return x.Audience + return x.Bucket } return "" } -// The payload to the push endpoint is in the form of the JSON representation -// of a PubsubMessage -// (https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage). 
-type PushConfig_PubsubWrapper struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (m *IngestionDataSourceSettings_CloudStorage) GetInputFormat() isIngestionDataSourceSettings_CloudStorage_InputFormat { + if m != nil { + return m.InputFormat + } + return nil } -func (x *PushConfig_PubsubWrapper) Reset() { - *x = PushConfig_PubsubWrapper{} - if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *IngestionDataSourceSettings_CloudStorage) GetTextFormat() *IngestionDataSourceSettings_CloudStorage_TextFormat { + if x, ok := x.GetInputFormat().(*IngestionDataSourceSettings_CloudStorage_TextFormat_); ok { + return x.TextFormat } + return nil } -func (x *PushConfig_PubsubWrapper) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *IngestionDataSourceSettings_CloudStorage) GetAvroFormat() *IngestionDataSourceSettings_CloudStorage_AvroFormat { + if x, ok := x.GetInputFormat().(*IngestionDataSourceSettings_CloudStorage_AvroFormat_); ok { + return x.AvroFormat + } + return nil } -func (*PushConfig_PubsubWrapper) ProtoMessage() {} - +func (x *IngestionDataSourceSettings_CloudStorage) GetPubsubAvroFormat() *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat { + if x, ok := x.GetInputFormat().(*IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat); ok { + return x.PubsubAvroFormat + } + return nil +} + +func (x *IngestionDataSourceSettings_CloudStorage) GetMinimumObjectCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.MinimumObjectCreateTime + } + return nil +} + +func (x *IngestionDataSourceSettings_CloudStorage) GetMatchGlob() string { + if x != nil { + return x.MatchGlob + } + return "" +} + +type isIngestionDataSourceSettings_CloudStorage_InputFormat interface { + isIngestionDataSourceSettings_CloudStorage_InputFormat() +} + +type IngestionDataSourceSettings_CloudStorage_TextFormat_ struct { + // Optional. Data from Cloud Storage will be interpreted as text. + TextFormat *IngestionDataSourceSettings_CloudStorage_TextFormat `protobuf:"bytes,3,opt,name=text_format,json=textFormat,proto3,oneof"` +} + +type IngestionDataSourceSettings_CloudStorage_AvroFormat_ struct { + // Optional. Data from Cloud Storage will be interpreted in Avro format. + AvroFormat *IngestionDataSourceSettings_CloudStorage_AvroFormat `protobuf:"bytes,4,opt,name=avro_format,json=avroFormat,proto3,oneof"` +} + +type IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat struct { + // Optional. It will be assumed data from Cloud Storage was written via + // [Cloud Storage + // subscriptions](https://cloud.google.com/pubsub/docs/cloudstorage). + PubsubAvroFormat *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat `protobuf:"bytes,5,opt,name=pubsub_avro_format,json=pubsubAvroFormat,proto3,oneof"` +} + +func (*IngestionDataSourceSettings_CloudStorage_TextFormat_) isIngestionDataSourceSettings_CloudStorage_InputFormat() { +} + +func (*IngestionDataSourceSettings_CloudStorage_AvroFormat_) isIngestionDataSourceSettings_CloudStorage_InputFormat() { +} + +func (*IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat) isIngestionDataSourceSettings_CloudStorage_InputFormat() { +} + +// Configuration for reading Cloud Storage data in text format. Each line of +// text as specified by the delimiter will be set to the `data` field of a +// Pub/Sub message. 
+type IngestionDataSourceSettings_CloudStorage_TextFormat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. When unset, '\n' is used. + Delimiter *string `protobuf:"bytes,1,opt,name=delimiter,proto3,oneof" json:"delimiter,omitempty"` +} + +func (x *IngestionDataSourceSettings_CloudStorage_TextFormat) Reset() { + *x = IngestionDataSourceSettings_CloudStorage_TextFormat{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IngestionDataSourceSettings_CloudStorage_TextFormat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionDataSourceSettings_CloudStorage_TextFormat) ProtoMessage() {} + +func (x *IngestionDataSourceSettings_CloudStorage_TextFormat) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionDataSourceSettings_CloudStorage_TextFormat.ProtoReflect.Descriptor instead. +func (*IngestionDataSourceSettings_CloudStorage_TextFormat) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1, 0} +} + +func (x *IngestionDataSourceSettings_CloudStorage_TextFormat) GetDelimiter() string { + if x != nil && x.Delimiter != nil { + return *x.Delimiter + } + return "" +} + +// Configuration for reading Cloud Storage data in Avro binary format. The +// bytes of each object will be set to the `data` field of a Pub/Sub +// message. +type IngestionDataSourceSettings_CloudStorage_AvroFormat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *IngestionDataSourceSettings_CloudStorage_AvroFormat) Reset() { + *x = IngestionDataSourceSettings_CloudStorage_AvroFormat{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IngestionDataSourceSettings_CloudStorage_AvroFormat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionDataSourceSettings_CloudStorage_AvroFormat) ProtoMessage() {} + +func (x *IngestionDataSourceSettings_CloudStorage_AvroFormat) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionDataSourceSettings_CloudStorage_AvroFormat.ProtoReflect.Descriptor instead. +func (*IngestionDataSourceSettings_CloudStorage_AvroFormat) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1, 1} +} + +// Configuration for reading Cloud Storage data written via [Cloud Storage +// subscriptions](https://cloud.google.com/pubsub/docs/cloudstorage). The +// data and attributes fields of the originally exported Pub/Sub message +// will be restored when publishing. 
+type IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) Reset() { + *x = IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) ProtoMessage() {} + +func (x *IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat.ProtoReflect.Descriptor instead. +func (*IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2, 1, 2} +} + +// Specifies the reason why some data may have been left out of +// the desired Pub/Sub message due to the API message limits +// (https://cloud.google.com/pubsub/quotas#resource_limits). For example, +// when the number of attributes is larger than 100, the number of +// attributes is truncated to 100 to respect the limit on the attribute count. +// Other attribute limits are treated similarly. When the size of the desired +// message would've been larger than 10MB, the message won't be published at +// all, and ingestion of the subsequent messages will proceed as normal. +type IngestionFailureEvent_ApiViolationReason struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *IngestionFailureEvent_ApiViolationReason) Reset() { + *x = IngestionFailureEvent_ApiViolationReason{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IngestionFailureEvent_ApiViolationReason) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionFailureEvent_ApiViolationReason) ProtoMessage() {} + +func (x *IngestionFailureEvent_ApiViolationReason) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionFailureEvent_ApiViolationReason.ProtoReflect.Descriptor instead. +func (*IngestionFailureEvent_ApiViolationReason) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 0} +} + +// Set when an Avro file is unsupported or its format is not valid. When this +// occurs, one or more Avro objects won't be ingested. 
+type IngestionFailureEvent_AvroFailureReason struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *IngestionFailureEvent_AvroFailureReason) Reset() { + *x = IngestionFailureEvent_AvroFailureReason{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IngestionFailureEvent_AvroFailureReason) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionFailureEvent_AvroFailureReason) ProtoMessage() {} + +func (x *IngestionFailureEvent_AvroFailureReason) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionFailureEvent_AvroFailureReason.ProtoReflect.Descriptor instead. +func (*IngestionFailureEvent_AvroFailureReason) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 1} +} + +// Failure when ingesting from a Cloud Storage source. +type IngestionFailureEvent_CloudStorageFailure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. Name of the Cloud Storage bucket used for ingestion. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Optional. Name of the Cloud Storage object which contained the section + // that couldn't be ingested. + ObjectName string `protobuf:"bytes,2,opt,name=object_name,json=objectName,proto3" json:"object_name,omitempty"` + // Optional. Generation of the Cloud Storage object which contained the + // section that couldn't be ingested. + ObjectGeneration int64 `protobuf:"varint,3,opt,name=object_generation,json=objectGeneration,proto3" json:"object_generation,omitempty"` + // Reason why ingestion failed for the specified object. + // + // Types that are assignable to Reason: + // + // *IngestionFailureEvent_CloudStorageFailure_AvroFailureReason + // *IngestionFailureEvent_CloudStorageFailure_ApiViolationReason + Reason isIngestionFailureEvent_CloudStorageFailure_Reason `protobuf_oneof:"reason"` +} + +func (x *IngestionFailureEvent_CloudStorageFailure) Reset() { + *x = IngestionFailureEvent_CloudStorageFailure{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IngestionFailureEvent_CloudStorageFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IngestionFailureEvent_CloudStorageFailure) ProtoMessage() {} + +func (x *IngestionFailureEvent_CloudStorageFailure) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IngestionFailureEvent_CloudStorageFailure.ProtoReflect.Descriptor instead. 
+func (*IngestionFailureEvent_CloudStorageFailure) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4, 2} +} + +func (x *IngestionFailureEvent_CloudStorageFailure) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *IngestionFailureEvent_CloudStorageFailure) GetObjectName() string { + if x != nil { + return x.ObjectName + } + return "" +} + +func (x *IngestionFailureEvent_CloudStorageFailure) GetObjectGeneration() int64 { + if x != nil { + return x.ObjectGeneration + } + return 0 +} + +func (m *IngestionFailureEvent_CloudStorageFailure) GetReason() isIngestionFailureEvent_CloudStorageFailure_Reason { + if m != nil { + return m.Reason + } + return nil +} + +func (x *IngestionFailureEvent_CloudStorageFailure) GetAvroFailureReason() *IngestionFailureEvent_AvroFailureReason { + if x, ok := x.GetReason().(*IngestionFailureEvent_CloudStorageFailure_AvroFailureReason); ok { + return x.AvroFailureReason + } + return nil +} + +func (x *IngestionFailureEvent_CloudStorageFailure) GetApiViolationReason() *IngestionFailureEvent_ApiViolationReason { + if x, ok := x.GetReason().(*IngestionFailureEvent_CloudStorageFailure_ApiViolationReason); ok { + return x.ApiViolationReason + } + return nil +} + +type isIngestionFailureEvent_CloudStorageFailure_Reason interface { + isIngestionFailureEvent_CloudStorageFailure_Reason() +} + +type IngestionFailureEvent_CloudStorageFailure_AvroFailureReason struct { + // Optional. Failure encountered when parsing an Avro file. + AvroFailureReason *IngestionFailureEvent_AvroFailureReason `protobuf:"bytes,5,opt,name=avro_failure_reason,json=avroFailureReason,proto3,oneof"` +} + +type IngestionFailureEvent_CloudStorageFailure_ApiViolationReason struct { + // Optional. The Pub/Sub API limits prevented the desired message from + // being published. + ApiViolationReason *IngestionFailureEvent_ApiViolationReason `protobuf:"bytes,6,opt,name=api_violation_reason,json=apiViolationReason,proto3,oneof"` +} + +func (*IngestionFailureEvent_CloudStorageFailure_AvroFailureReason) isIngestionFailureEvent_CloudStorageFailure_Reason() { +} + +func (*IngestionFailureEvent_CloudStorageFailure_ApiViolationReason) isIngestionFailureEvent_CloudStorageFailure_Reason() { +} + +// Information about an associated Analytics Hub subscription +// (https://cloud.google.com/bigquery/docs/analytics-hub-manage-subscriptions). +type Subscription_AnalyticsHubSubscriptionInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. The name of the associated Analytics Hub listing resource. + // Pattern: + // "projects/{project}/locations/{location}/dataExchanges/{data_exchange}/listings/{listing}" + Listing string `protobuf:"bytes,1,opt,name=listing,proto3" json:"listing,omitempty"` + // Optional. The name of the associated Analytics Hub subscription resource. 
+ // Pattern: + // "projects/{project}/locations/{location}/subscriptions/{subscription}" + Subscription string `protobuf:"bytes,2,opt,name=subscription,proto3" json:"subscription,omitempty"` +} + +func (x *Subscription_AnalyticsHubSubscriptionInfo) Reset() { + *x = Subscription_AnalyticsHubSubscriptionInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Subscription_AnalyticsHubSubscriptionInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Subscription_AnalyticsHubSubscriptionInfo) ProtoMessage() {} + +func (x *Subscription_AnalyticsHubSubscriptionInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Subscription_AnalyticsHubSubscriptionInfo.ProtoReflect.Descriptor instead. +func (*Subscription_AnalyticsHubSubscriptionInfo) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *Subscription_AnalyticsHubSubscriptionInfo) GetListing() string { + if x != nil { + return x.Listing + } + return "" +} + +func (x *Subscription_AnalyticsHubSubscriptionInfo) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +// Contains information needed for generating an +// [OpenID Connect +// token](https://developers.google.com/identity/protocols/OpenIDConnect). +type PushConfig_OidcToken struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. [Service account + // email](https://cloud.google.com/iam/docs/service-accounts) + // used for generating the OIDC token. For more information + // on setting up authentication, see + // [Push subscriptions](https://cloud.google.com/pubsub/docs/push). + ServiceAccountEmail string `protobuf:"bytes,1,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` + // Optional. Audience to be used when generating OIDC token. The audience + // claim identifies the recipients that the JWT is intended for. The + // audience value is a single case-sensitive string. Having multiple values + // (array) for the audience field is not supported. More info about the OIDC + // JWT token audience here: + // https://tools.ietf.org/html/rfc7519#section-4.1.3 Note: if not specified, + // the Push endpoint URL will be used. 
+ Audience string `protobuf:"bytes,2,opt,name=audience,proto3" json:"audience,omitempty"` +} + +func (x *PushConfig_OidcToken) Reset() { + *x = PushConfig_OidcToken{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PushConfig_OidcToken) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushConfig_OidcToken) ProtoMessage() {} + +func (x *PushConfig_OidcToken) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PushConfig_OidcToken.ProtoReflect.Descriptor instead. +func (*PushConfig_OidcToken) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 0} +} + +func (x *PushConfig_OidcToken) GetServiceAccountEmail() string { + if x != nil { + return x.ServiceAccountEmail + } + return "" +} + +func (x *PushConfig_OidcToken) GetAudience() string { + if x != nil { + return x.Audience + } + return "" +} + +// The payload to the push endpoint is in the form of the JSON representation +// of a PubsubMessage +// (https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage). +type PushConfig_PubsubWrapper struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PushConfig_PubsubWrapper) Reset() { + *x = PushConfig_PubsubWrapper{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PushConfig_PubsubWrapper) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushConfig_PubsubWrapper) ProtoMessage() {} + func (x *PushConfig_PubsubWrapper) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[52] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4312,7 +5173,7 @@ func (x *PushConfig_PubsubWrapper) ProtoReflect() protoreflect.Message { // Deprecated: Use PushConfig_PubsubWrapper.ProtoReflect.Descriptor instead. func (*PushConfig_PubsubWrapper) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22, 1} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 1} } // Sets the `data` field as the HTTP body for delivery. 
@@ -4330,7 +5191,7 @@ type PushConfig_NoWrapper struct { func (x *PushConfig_NoWrapper) Reset() { *x = PushConfig_NoWrapper{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[53] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4343,7 +5204,7 @@ func (x *PushConfig_NoWrapper) String() string { func (*PushConfig_NoWrapper) ProtoMessage() {} func (x *PushConfig_NoWrapper) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[53] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4356,7 +5217,7 @@ func (x *PushConfig_NoWrapper) ProtoReflect() protoreflect.Message { // Deprecated: Use PushConfig_NoWrapper.ProtoReflect.Descriptor instead. func (*PushConfig_NoWrapper) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22, 2} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 2} } func (x *PushConfig_NoWrapper) GetWriteMetadata() bool { @@ -4378,7 +5239,7 @@ type CloudStorageConfig_TextConfig struct { func (x *CloudStorageConfig_TextConfig) Reset() { *x = CloudStorageConfig_TextConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[55] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4391,7 +5252,7 @@ func (x *CloudStorageConfig_TextConfig) String() string { func (*CloudStorageConfig_TextConfig) ProtoMessage() {} func (x *CloudStorageConfig_TextConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[55] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4404,7 +5265,7 @@ func (x *CloudStorageConfig_TextConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use CloudStorageConfig_TextConfig.ProtoReflect.Descriptor instead. func (*CloudStorageConfig_TextConfig) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 0} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26, 0} } // Configuration for writing message data in Avro format. @@ -4429,7 +5290,7 @@ type CloudStorageConfig_AvroConfig struct { func (x *CloudStorageConfig_AvroConfig) Reset() { *x = CloudStorageConfig_AvroConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[56] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4442,7 +5303,7 @@ func (x *CloudStorageConfig_AvroConfig) String() string { func (*CloudStorageConfig_AvroConfig) ProtoMessage() {} func (x *CloudStorageConfig_AvroConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[56] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4455,7 +5316,7 @@ func (x *CloudStorageConfig_AvroConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use CloudStorageConfig_AvroConfig.ProtoReflect.Descriptor instead. 
func (*CloudStorageConfig_AvroConfig) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24, 1} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26, 1} } func (x *CloudStorageConfig_AvroConfig) GetWriteMetadata() bool { @@ -4494,7 +5355,7 @@ type StreamingPullResponse_AcknowledgeConfirmation struct { func (x *StreamingPullResponse_AcknowledgeConfirmation) Reset() { *x = StreamingPullResponse_AcknowledgeConfirmation{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[57] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4507,7 +5368,7 @@ func (x *StreamingPullResponse_AcknowledgeConfirmation) String() string { func (*StreamingPullResponse_AcknowledgeConfirmation) ProtoMessage() {} func (x *StreamingPullResponse_AcknowledgeConfirmation) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[57] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4520,7 +5381,7 @@ func (x *StreamingPullResponse_AcknowledgeConfirmation) ProtoReflect() protorefl // Deprecated: Use StreamingPullResponse_AcknowledgeConfirmation.ProtoReflect.Descriptor instead. func (*StreamingPullResponse_AcknowledgeConfirmation) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37, 0} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39, 0} } func (x *StreamingPullResponse_AcknowledgeConfirmation) GetAckIds() []string { @@ -4571,7 +5432,7 @@ type StreamingPullResponse_ModifyAckDeadlineConfirmation struct { func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) Reset() { *x = StreamingPullResponse_ModifyAckDeadlineConfirmation{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[58] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4584,7 +5445,7 @@ func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) String() string { func (*StreamingPullResponse_ModifyAckDeadlineConfirmation) ProtoMessage() {} func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[58] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4597,7 +5458,7 @@ func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) ProtoReflect() pro // Deprecated: Use StreamingPullResponse_ModifyAckDeadlineConfirmation.ProtoReflect.Descriptor instead. 
func (*StreamingPullResponse_ModifyAckDeadlineConfirmation) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37, 1} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39, 1} } func (x *StreamingPullResponse_ModifyAckDeadlineConfirmation) GetAckIds() []string { @@ -4637,7 +5498,7 @@ type StreamingPullResponse_SubscriptionProperties struct { func (x *StreamingPullResponse_SubscriptionProperties) Reset() { *x = StreamingPullResponse_SubscriptionProperties{} if protoimpl.UnsafeEnabled { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[59] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4650,7 +5511,7 @@ func (x *StreamingPullResponse_SubscriptionProperties) String() string { func (*StreamingPullResponse_SubscriptionProperties) ProtoMessage() {} func (x *StreamingPullResponse_SubscriptionProperties) ProtoReflect() protoreflect.Message { - mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[59] + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4663,7 +5524,7 @@ func (x *StreamingPullResponse_SubscriptionProperties) ProtoReflect() protorefle // Deprecated: Use StreamingPullResponse_SubscriptionProperties.ProtoReflect.Descriptor instead. func (*StreamingPullResponse_SubscriptionProperties) Descriptor() ([]byte, []int) { - return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37, 2} + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39, 2} } func (x *StreamingPullResponse_SubscriptionProperties) GetExactlyOnceDeliveryEnabled() bool { @@ -4727,7 +5588,7 @@ var file_google_pubsub_v1_pubsub_proto_rawDesc = []byte{ 0x6e, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, - 0x49, 0x64, 0x22, 0xb4, 0x04, 0x0a, 0x1b, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x64, 0x22, 0x80, 0x0d, 0x0a, 0x1b, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x60, 0x0a, 0x0b, 0x61, 0x77, 0x73, 0x5f, 0x6b, 0x69, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, @@ -4735,1021 +5596,1158 @@ var file_google_pubsub_v1_pubsub_proto_rawDesc = []byte{ 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x77, 0x73, 0x4b, 0x69, 0x6e, - 0x65, 0x73, 0x69, 0x73, 0x1a, 0xa8, 0x03, 0x0a, 0x0a, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, - 0x73, 0x69, 0x73, 0x12, 0x59, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, - 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x2e, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69, 
0x73, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x22, - 0x0a, 0x0a, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, - 0x72, 0x6e, 0x12, 0x26, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x5f, 0x61, - 0x72, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x41, 0x72, 0x6e, 0x12, 0x25, 0x0a, 0x0c, 0x61, 0x77, - 0x73, 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x61, 0x77, 0x73, 0x52, 0x6f, 0x6c, 0x65, 0x41, 0x72, - 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x67, 0x63, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x11, 0x67, 0x63, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x96, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, - 0x45, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x4b, 0x49, 0x4e, 0x45, 0x53, 0x49, 0x53, 0x5f, 0x50, - 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, - 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x50, 0x45, - 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, - 0x03, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, - 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x4f, 0x4e, 0x53, 0x55, - 0x4d, 0x45, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x42, - 0x08, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xd2, 0x06, 0x0a, 0x05, 0x54, 0x6f, - 0x70, 0x69, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x06, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, + 0x65, 0x73, 0x69, 0x73, 0x12, 0x66, 0x0a, 0x0d, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0c, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x61, 0x0a, 0x16, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x73, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, - 0x54, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 
0x72, - 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x61, - 0x0a, 0x16, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, - 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x12, 0x25, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6b, 0x6d, - 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x0f, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, - 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, - 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, - 0x7a, 0x73, 0x12, 0x5c, 0x0a, 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x38, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, - 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x77, 0x0a, 0x1e, 0x69, 0x6e, - 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, - 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 
0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x48, - 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, - 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x49, 0x4e, - 0x47, 0x45, 0x53, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, - 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x3a, 0x54, 0xea, 0x41, 0x51, 0x0a, 0x1b, 0x70, - 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x21, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x74, - 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x7d, 0x12, 0x0f, 0x5f, - 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x2d, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x22, 0xc3, - 0x02, 0x0a, 0x0d, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, - 0xe0, 0x41, 0x01, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x54, 0x0a, 0x0a, 0x61, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14, 0x70, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x1a, + 0xa8, 0x03, 0x0a, 0x0a, 0x41, 0x77, 0x73, 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x12, 0x59, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, - 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, - 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, - 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x3d, - 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x0b, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x26, 0x0a, - 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, - 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, - 0x69, 0x63, 0x22, 0x8a, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70, - 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x05, 0x74, 0x6f, 0x70, - 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, - 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a, - 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, - 0x8d, 0x01, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x41, 0x77, 0x73, + 0x4b, 0x69, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x72, 0x6e, 0x12, 0x26, 0x0a, + 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x72, 0x41, 0x72, 0x6e, 0x12, 0x25, 0x0a, 0x0c, 0x61, 0x77, 0x73, 0x5f, 0x72, 0x6f, 0x6c, + 0x65, 0x5f, 0x61, 0x72, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0a, 0x61, 0x77, 0x73, 0x52, 0x6f, 0x6c, 0x65, 0x41, 0x72, 0x6e, 0x12, 0x33, 0x0a, 0x13, + 0x67, 0x63, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, + 0x67, 0x63, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x22, 0x96, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, + 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x1d, + 0x0a, 0x19, 0x4b, 0x49, 0x4e, 0x45, 0x53, 0x49, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, + 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 
0x03, 0x12, 0x14, 0x0a, 0x10, + 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, + 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, 0x52, 0x5f, 0x4e, + 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x1a, 0xfe, 0x06, 0x0a, 0x0c, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x5b, 0x0a, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x40, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, + 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x6d, 0x0a, 0x0b, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, + 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x78, 0x74, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x6d, 0x0a, 0x0b, 0x61, 0x76, 0x72, 0x6f, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, + 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x80, 0x01, 0x0a, 0x12, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x61, + 0x76, 0x72, 0x6f, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x50, 0x75, 0x62, + 0x53, 0x75, 0x62, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x48, 0x00, 0x52, 0x10, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x41, 0x76, 0x72, 0x6f, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x5c, 0x0a, 0x1a, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6d, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 
0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x17, 0x6d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, 0x6c, + 0x6f, 0x62, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x1a, 0x42, 0x0a, 0x0a, 0x54, 0x65, 0x78, 0x74, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x26, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, + 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x1a, 0x0c, 0x0a, 0x0a, + 0x41, 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x12, 0x0a, 0x10, 0x50, 0x75, + 0x62, 0x53, 0x75, 0x62, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x9a, + 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, + 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x43, + 0x4c, 0x4f, 0x55, 0x44, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x50, 0x45, 0x52, + 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, + 0x12, 0x1d, 0x0a, 0x19, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x50, 0x45, 0x52, 0x4d, + 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x14, 0x0a, 0x10, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, + 0x55, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x4f, 0x4f, 0x5f, 0x4d, 0x41, 0x4e, + 0x59, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x10, 0x05, 0x42, 0x0e, 0x0a, 0x0c, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x08, 0x0a, 0x06, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, + 0x72, 0x6d, 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x50, + 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x4c, 0x6f, 0x67, 0x73, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, + 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, + 0x22, 0x5f, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, + 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, + 0x45, 0x44, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x02, 0x12, + 0x08, 0x0a, 0x04, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, + 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, + 0x05, 0x22, 0x88, 
0x05, 0x0a, 0x15, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x05, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x76, 0x0a, 0x15, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x1a, 0x14, 0x0a, 0x12, 0x41, 0x70, 0x69, 0x56, + 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x1a, 0x13, + 0x0a, 0x11, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x1a, 0xfb, 0x02, 0x0a, 0x13, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30, + 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x70, 0x0a, 0x13, 0x61, 0x76, 0x72, 0x6f, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, + 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x76, 0x72, 0x6f, 0x46, 0x61, 0x69, 0x6c, 0x75, + 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, + 0x11, 0x61, 0x76, 0x72, 0x6f, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x12, 0x73, 0x0a, 0x14, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 
0x6e, 0x74, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x69, 0x6f, + 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x48, 0x00, 0x52, 0x12, 0x61, 0x70, 0x69, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x08, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x22, 0xd2, 0x06, 0x0a, + 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x40, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x12, 0x61, 0x0a, 0x16, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x25, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x0a, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x0f, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x73, + 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, + 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x5c, 0x0a, 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 
0x74, 0x65, 0x12, 0x77, 0x0a, + 0x1e, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, + 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x69, 0x6e, 0x67, 0x65, 0x73, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x48, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, + 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x1c, 0x0a, + 0x18, 0x49, 0x4e, 0x47, 0x45, 0x53, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, + 0x52, 0x43, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x3a, 0x54, 0xea, 0x41, 0x51, + 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x21, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x7d, + 0x12, 0x0f, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x2d, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x5f, 0x22, 0xc3, 0x02, 0x0a, 0x0d, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x54, 0x0a, 0x0a, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, + 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x26, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 
0x79, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, + 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x8a, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x05, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, + 0x73, 0x6b, 0x22, 0x8d, 0x01, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x12, 0x40, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x73, 0x22, 0x37, 0x0a, 0x0f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x73, 0x22, 0xa8, 0x01, 0x0a, 0x11, + 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x77, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, + 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x06, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x54, 0x6f, 0x70, 0x69, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0xa0, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x20, 0x0a, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, + 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x9f, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, + 0x01, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 
0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a, - 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, - 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, - 0x37, 0x0a, 0x0f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x73, 0x22, 0xa8, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, - 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, - 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, + 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x22, 0x77, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x74, 0x6f, 0x70, - 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, - 0x69, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, - 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, - 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa0, 0x01, 0x0a, - 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, - 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, - 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x20, 0x0a, 0x09, 
0x70, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, - 0x9f, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x01, 0xfa, 0x41, - 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, + 0x6b, 0x65, 0x6e, 0x22, 0x6c, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x21, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, - 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, - 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, - 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x22, 0x6c, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, - 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, - 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x4f, - 0x0a, 
0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, - 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, - 0x6b, 0x0a, 0x19, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, - 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, - 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, - 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, 0x1a, - 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9e, 0x0b, 0x0a, 0x0c, 0x53, - 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, + 0x6e, 0x22, 0x4f, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x22, 0x6b, 0x0a, 0x19, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x1c, 0x0a, 0x1a, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x90, 0x0d, + 0x0a, 0x0c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, + 0x70, 0x75, 0x62, 0x73, 0x75, 
0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x12, 0x42, 0x0a, 0x0b, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x70, 0x75, 0x73, 0x68, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0f, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5b, 0x0a, 0x14, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x16, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x12, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x14, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x37, 0x0a, 0x15, 0x72, 0x65, + 0x74, 0x61, 0x69, 0x6e, 0x5f, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, + 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x41, 0x63, 0x6b, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x47, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x17, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x0a, 0x20, 0x01, 
0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, + 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1b, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x12, 0x64, 0x65, + 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, + 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x10, 0x64, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x45, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1f, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, + 0x63, 0x68, 0x65, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x08, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x12, 0x44, 0x0a, 0x1c, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, 0x5f, 0x6f, 0x6e, 0x63, 0x65, + 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x78, 0x61, 0x63, + 0x74, 0x6c, 0x79, 0x4f, 0x6e, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x12, + 0x67, 0x0a, 0x20, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1d, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x87, 0x01, 0x0a, 
0x1f, 0x61, 0x6e, + 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73, 0x5f, 0x68, 0x75, 0x62, 0x5f, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x17, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73, 0x48, 0x75, 0x62, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1c, 0x61, 0x6e, 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73, + 0x48, 0x75, 0x62, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x6e, 0x66, 0x6f, 0x1a, 0x66, 0x0a, 0x1c, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x74, 0x69, 0x63, 0x73, + 0x48, 0x75, 0x62, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x07, 0x6c, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6c, 0x69, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x12, 0x27, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x73, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3e, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, + 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x3a, 0x58, 0xea, 0x41, 0x55, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, - 0x42, 0x0a, 0x0b, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, - 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x70, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0f, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, - 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, - 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x62, 0x69, 0x67, 0x71, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x5b, 0x0a, 0x14, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x16, 0x20, 0x01, 0x28, - 0x0b, 
0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, - 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x35, 0x0a, 0x14, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, - 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, - 0xe0, 0x41, 0x01, 0x52, 0x12, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, - 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x37, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x61, 0x69, - 0x6e, 0x5f, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x72, 0x65, 0x74, - 0x61, 0x69, 0x6e, 0x41, 0x63, 0x6b, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, - 0x12, 0x5c, 0x0a, 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x47, - 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, - 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x72, 0x64, 0x65, - 0x72, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, - 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, - 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x12, 0x64, 0x65, 0x61, 0x64, 0x5f, - 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0d, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, - 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, - 0x72, 0x50, 0x6f, 0x6c, 0x69, 
0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x64, 0x65, - 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x45, - 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0e, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, - 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1f, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, - 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x64, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x12, 0x44, 0x0a, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, 0x5f, 0x6f, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x65, - 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, - 0x4f, 0x6e, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x12, 0x67, 0x0a, 0x20, - 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x1d, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x13, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, - 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0x3e, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, - 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x12, 0x0a, - 0x0e, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, - 0x02, 0x3a, 0x58, 0xea, 0x41, 0x55, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, - 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x73, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x7d, 0x22, 0x9f, 0x01, 0x0a, 0x0b, - 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x47, 0x0a, 0x0f, 0x6d, - 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x42, 0x61, 0x63, - 0x6b, 0x6f, 0x66, 0x66, 0x12, 0x47, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, - 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x6d, - 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x22, 0x7c, 0x0a, - 0x10, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x12, 0x2f, 0x0a, 0x11, 0x64, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x74, 0x74, 0x65, 0x72, - 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x0f, 0x64, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x54, 0x6f, 0x70, - 0x69, 0x63, 0x12, 0x37, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, - 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x44, 0x65, 0x6c, 0x69, 0x76, - 0x65, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x22, 0x44, 0x0a, 0x10, 0x45, - 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x30, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x03, 0x74, 0x74, - 0x6c, 0x22, 0x93, 0x05, 0x0a, 0x0a, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x28, 0x0a, 0x0d, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x70, 0x75, - 0x73, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x0a, 0x61, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, - 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x41, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x4c, 0x0a, - 0x0a, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, - 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x4f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, - 0x52, 0x09, 0x6f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x58, 
0x0a, 0x0e, 0x70, - 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x7d, + 0x22, 0x9f, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x47, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x62, 0x61, 0x63, 0x6b, + 0x6f, 0x66, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, + 0x75, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x12, 0x47, 0x0a, 0x0f, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x6f, + 0x66, 0x66, 0x22, 0x7c, 0x0a, 0x10, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2f, 0x0a, 0x11, 0x64, 0x65, 0x61, 0x64, 0x5f, 0x6c, + 0x65, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, + 0x65, 0x72, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x37, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, 0x64, + 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x6d, 0x61, 0x78, + 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, + 0x22, 0x44, 0x0a, 0x10, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x22, 0x93, 0x05, 0x0a, 0x0a, 0x50, 0x75, 0x73, 0x68, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x0a, 0x0d, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x65, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x52, 0x0c, 0x70, 0x75, 0x73, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x51, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x42, - 0x03, 
0xe0, 0x41, 0x01, 0x48, 0x01, 0x52, 0x0d, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x57, 0x72, - 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x4c, 0x0a, 0x0a, 0x6e, 0x6f, 0x5f, 0x77, 0x72, 0x61, 0x70, - 0x70, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, - 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4e, 0x6f, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, - 0x72, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x01, 0x52, 0x09, 0x6e, 0x6f, 0x57, 0x72, 0x61, 0x70, - 0x70, 0x65, 0x72, 0x1a, 0x65, 0x0a, 0x09, 0x4f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1f, 0x0a, 0x08, 0x61, 0x75, 0x64, - 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, - 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x1a, 0x0f, 0x0a, 0x0d, 0x50, 0x75, - 0x62, 0x73, 0x75, 0x62, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x09, 0x4e, - 0x6f, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x0e, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x42, 0x17, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x09, 0x0a, 0x07, - 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x22, 0xf3, 0x03, 0x0a, 0x0e, 0x42, 0x69, 0x67, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x05, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2d, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x6f, 0x70, - 0x69, 0x63, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2a, 0x0a, 0x0e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x33, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, - 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, - 0x41, 0x01, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0e, 0x32, 
0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, - 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x67, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x0a, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x58, 0x0a, 0x0e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, + 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x57, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x01, 0x52, 0x0d, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x4c, 0x0a, 0x0a, 0x6e, 0x6f, + 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4e, 0x6f, 0x57, + 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x01, 0x52, 0x09, 0x6e, + 0x6f, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x1a, 0x65, 0x0a, 0x09, 0x4f, 0x69, 0x64, 0x63, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1f, + 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x1a, + 0x0f, 0x0a, 0x0d, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, + 0x1a, 0x37, 0x0a, 0x09, 0x4e, 0x6f, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x2a, 0x0a, + 0x0e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 
0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x17, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x68, + 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x42, 0x09, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x22, 0xf3, 0x03, 0x0a, + 0x0e, 0x42, 0x69, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x19, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2d, 0x0a, 0x10, 0x75, 0x73, + 0x65, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x54, 0x6f, + 0x70, 0x69, 0x63, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2a, 0x0a, 0x0e, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x75, 0x6e, + 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x55, 0x6e, 0x6b, + 0x6e, 0x6f, 0x77, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x67, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, + 0x10, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x75, 0x73, + 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x37, 0x0a, 0x15, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x8a, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, + 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, + 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, + 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x43, 0x48, 0x45, + 0x4d, 0x41, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x04, 0x12, 0x23, 0x0a, + 0x1f, 0x49, 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x49, 0x54, 0x5f, 0x4c, 0x4f, 0x43, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 
0x49, 0x4f, 0x4e, + 0x10, 0x05, 0x22, 0xa0, 0x07, 0x0a, 0x12, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2c, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x12, 0x2c, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x53, 0x75, 0x66, 0x66, + 0x69, 0x78, 0x12, 0x3d, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x64, + 0x61, 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x6e, + 0x61, 0x6d, 0x65, 0x44, 0x61, 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x12, 0x57, 0x0a, 0x0b, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x65, 0x78, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, + 0x74, 0x65, 0x78, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, 0x0b, 0x61, 0x76, + 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x41, 0x76, 0x72, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x76, 0x72, 0x6f, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, + 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, + 0x12, 0x45, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, + 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, - 0x22, 0x8a, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, - 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, - 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, - 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, - 0x44, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x4d, 0x49, - 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x04, 0x12, 0x23, 0x0a, 0x1f, 0x49, 0x4e, 0x5f, 0x54, - 0x52, 0x41, 0x4e, 0x53, 0x49, 0x54, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x52, 0x45, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x22, 0xa0, 0x07, - 0x0a, 0x12, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x2c, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, - 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x2c, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, - 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x66, - 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x3d, 0x0a, - 0x18, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x74, 0x69, - 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x44, 0x61, - 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x57, 0x0a, 0x0b, - 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, - 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x78, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, 0x0b, 0x61, 0x76, 0x72, 0x6f, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 
0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x41, 0x76, 0x72, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, - 0x48, 0x00, 0x52, 0x0a, 0x61, 0x76, 0x72, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, - 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x20, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, - 0x6d, 0x61, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x05, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x1a, 0x0c, 0x0a, 0x0a, 0x54, - 0x65, 0x78, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x67, 0x0a, 0x0a, 0x41, 0x76, 0x72, - 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x0e, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, - 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, - 0x41, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x22, 0x8a, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, - 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, - 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, - 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, - 0x55, 0x4e, 0x44, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x49, 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x4e, - 0x53, 0x49, 0x54, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x53, - 0x54, 0x52, 0x49, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x43, - 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x05, 0x42, - 0x0f, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 
0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x22, 0x9d, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x06, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x05, 0x61, 0x63, 0x6b, 0x49, 0x64, - 0x12, 0x3e, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, - 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x2e, 0x0a, 0x10, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, - 0x65, 0x6d, 0x70, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, - 0x0f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, - 0x22, 0x68, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x19, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, - 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, - 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 
0x69, 0x7a, - 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, - 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, - 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x6b, 0x0a, 0x19, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, - 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xad, 0x01, 0x0a, 0x17, 0x4d, 0x6f, 0x64, - 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, - 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x0b, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, - 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x70, 0x75, - 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbb, 0x01, 0x0a, 0x0b, 0x50, 0x75, 0x6c, - 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, - 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x75, - 0x72, 0x6e, 
0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x6c, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x18, 0x01, 0x52, 0x11, 0x72, 0x65, 0x74, - 0x75, 0x72, 0x6e, 0x49, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x6c, 0x79, 0x12, 0x26, - 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0x63, 0x0a, 0x0c, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, - 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, - 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x72, 0x65, 0x63, 0x65, 0x69, - 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0xbf, 0x01, 0x0a, 0x18, - 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, - 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, - 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, - 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x35, 0x0a, 0x14, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x61, 0x63, 0x6b, 0x44, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x82, 0x01, - 0x0a, 0x12, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, - 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, - 0x64, 0x73, 0x22, 0xdb, 0x03, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, - 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, - 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, - 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, - 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, - 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x3b, 0x0a, 0x17, 0x6d, 0x6f, 0x64, - 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, - 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, - 0x15, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, - 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x3a, 0x0a, 0x17, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, - 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14, 0x6d, 0x6f, - 0x64, 0x69, 0x66, 0x79, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x41, 0x63, 0x6b, 0x49, - 0x64, 0x73, 0x12, 0x42, 0x0a, 0x1b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x61, 0x63, 0x6b, - 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x18, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, - 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x20, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, - 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, - 0x16, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, 0x6f, - 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x13, 0x6d, 0x61, 0x78, - 0x4f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x22, 0xa4, 0x08, 0x0a, 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, - 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x72, 0x65, - 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x72, - 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, - 0x7f, 0x0a, 0x18, 0x61, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, - 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 
0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, - 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x63, 0x6b, 0x6e, 0x6f, - 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x17, 0x61, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, - 0x65, 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x93, 0x01, 0x0a, 0x20, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x5f, 0x61, 0x63, 0x6b, 0x5f, - 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, - 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, - 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7c, 0x0a, 0x17, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, - 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x16, 0x73, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, - 0x74, 0x69, 0x65, 0x73, 0x1a, 0xd3, 0x01, 0x0a, 0x17, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, - 0x65, 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x2b, - 0x0a, 0x0f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x69, 0x6e, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x11, 0x75, - 0x6e, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x75, 0x6e, 0x6f, - 0x72, 0x64, 0x65, 0x72, 0x65, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x18, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, - 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x01, 0x52, 0x15, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x46, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x1a, 0xa8, 0x01, 0x0a, 0x1d, 0x4d, - 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 
0x6e, 0x65, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, - 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x01, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x69, 0x6e, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x18, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, - 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x41, - 0x63, 0x6b, 0x49, 0x64, 0x73, 0x1a, 0x9f, 0x01, 0x0a, 0x16, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, - 0x12, 0x46, 0x0a, 0x1d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, 0x5f, 0x6f, 0x6e, 0x63, 0x65, - 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1a, 0x65, 0x78, - 0x61, 0x63, 0x74, 0x6c, 0x79, 0x4f, 0x6e, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, - 0x79, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x18, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, - 0x16, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, - 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xb0, 0x02, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x1a, 0x0c, 0x0a, 0x0a, 0x54, 0x65, 0x78, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x67, + 0x0a, 0x0a, 0x41, 0x76, 0x72, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x0e, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x54, 0x6f, 0x70, 0x69, + 0x63, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x8a, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, + 0x56, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, + 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x4e, + 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x49, 0x4e, + 0x5f, 0x54, 
0x52, 0x41, 0x4e, 0x53, 0x49, 0x54, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x45, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x04, 0x12, + 0x13, 0x0a, 0x0f, 0x53, 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, + 0x43, 0x48, 0x10, 0x05, 0x42, 0x0f, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x06, 0x61, 0x63, 0x6b, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x05, + 0x61, 0x63, 0x6b, 0x49, 0x64, 0x12, 0x3e, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x10, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, + 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x41, 0x74, + 0x74, 0x65, 0x6d, 0x70, 0x74, 0x22, 0x68, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0xa6, 0x01, 0x0a, 0x19, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, + 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, + 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x93, 0x01, 0x0a, 0x19, 0x4c, + 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x6b, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, + 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xad, 0x01, + 0x0a, 0x17, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, - 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x0b, 0x70, 0x75, 0x73, + 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x0a, 0x70, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbb, 0x01, + 0x0a, 0x0b, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, + 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, - 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, - 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x15, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, - 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, - 0x61, 0x73, 0x6b, 0x22, 0xee, 0x02, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, - 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x1d, - 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, - 0x6f, 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, - 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 
0x68, - 0x6f, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, - 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x4c, 0xea, 0x41, 0x49, 0x0a, 0x1e, 0x70, 0x75, 0x62, + 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, + 0x12, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, + 0x65, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x18, 0x01, + 0x52, 0x11, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x49, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, + 0x65, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, + 0x6d, 0x61, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0x63, 0x0a, 0x0c, 0x50, + 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x72, + 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, + 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, + 0x22, 0xbf, 0x01, 0x0a, 0x18, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, + 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, + 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, - 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x7d, 0x22, 0x58, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x73, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, - 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0xab, - 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, - 0x0a, 0x2b, 0x63, 
0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, - 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x83, 0x01, 0x0a, - 0x15, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x73, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, - 0xe4, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, - 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x35, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, - 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x20, + 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, + 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x06, 0x61, 0x63, 
0x6b, 0x49, 0x64, 0x73, 0x12, 0x35, 0x0a, 0x14, 0x61, + 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, + 0x61, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, + 0x64, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x12, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, + 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x22, 0xdb, 0x03, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x3b, + 0x0a, 0x17, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, + 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x05, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x44, 0x65, 0x61, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x3a, 0x0a, 0x17, 0x6d, + 0x6f, 0x64, 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x61, + 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x52, 0x14, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, + 0x65, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x42, 0x0a, 0x1b, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x18, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x20, 0x0a, 0x09, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x3d, 0x0a, + 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x07, 
0x20, 0x01, 0x28, 0x03, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x15, + 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x13, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xa4, 0x08, 0x0a, 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x53, 0x0a, 0x11, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x10, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x73, 0x12, 0x7f, 0x0a, 0x18, 0x61, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, + 0x64, 0x67, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x17, 0x61, 0x63, + 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x93, 0x01, 0x0a, 0x20, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, + 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, + 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, + 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1d, 0x6d, 0x6f, + 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7c, 0x0a, 0x17, 0x73, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x42, 0x03, 0xe0, 
0x41, + 0x01, 0x52, 0x16, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0xd3, 0x01, 0x0a, 0x17, 0x41, 0x63, + 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x61, 0x63, 0x6b, + 0x49, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x61, + 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x52, 0x0d, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, + 0x12, 0x2f, 0x0a, 0x11, 0x75, 0x6e, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x63, + 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, + 0x52, 0x0f, 0x75, 0x6e, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x65, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, + 0x73, 0x12, 0x3c, 0x0a, 0x18, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x66, + 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, + 0x61, 0x72, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x1a, + 0xa8, 0x01, 0x0a, 0x1d, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, + 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, + 0x2b, 0x0a, 0x0f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, + 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x69, + 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x18, + 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, + 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x15, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x46, 0x61, + 0x69, 0x6c, 0x65, 0x64, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x1a, 0x9f, 0x01, 0x0a, 0x16, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x46, 0x0a, 0x1d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, + 0x5f, 0x6f, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x52, 0x1a, 0x65, 0x78, 0x61, 0x63, 0x74, 0x6c, 0x79, 0x4f, 0x6e, 0x63, 0x65, 0x44, 0x65, + 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x3d, 0x0a, + 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x16, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x72, 0x64, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xb0, 0x02, 0x0a, + 0x15, 0x43, 0x72, 
0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, + 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x96, 0x01, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xee, 0x02, 0x0a, 0x08, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, + 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, + 0x41, 0x01, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x4c, 0xea, 0x41, 0x49, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x48, 0x00, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x08, 0x0a, 0x06, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x0e, 0x0a, 0x0c, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xb8, 0x0b, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, - 0x73, 0x68, 0x65, 0x72, 0x12, 0x71, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x6f, - 0x70, 0x69, 0x63, 0x12, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, - 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x1a, 0x17, 0x2e, 0x67, + 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x7b, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x7d, 0x22, 0x58, 0x0a, 0x12, 0x47, 0x65, 0x74, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x42, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x22, 0xab, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, + 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 
0x03, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0x83, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x09, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6e, 0x65, + 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, + 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x42, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x22, 0xe4, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x08, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, + 0x41, 0x01, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x48, 0x00, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x42, 0x08, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0x0e, 0x0a, 0x0c, 0x53, + 0x65, 0x65, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xb8, 0x0b, 0x0a, 0x09, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x72, 0x12, 0x71, 0x0a, 0x0b, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, + 0x63, 0x1a, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x30, 0xda, 0x41, 0x04, 0x6e, + 
0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0x2f, 0x76, + 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x91, 0x01, 0x0a, + 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x24, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x43, 0xda, 0x41, 0x11, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, 0x01, 0x2a, 0x32, 0x24, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, + 0x12, 0x93, 0x01, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x20, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x43, 0xda, 0x41, 0x0e, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2c, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x3a, 0x01, 0x2a, 0x22, 0x27, 0x2f, + 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x77, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x2f, + 0xda, 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, + 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12, + 0x8a, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x23, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x07, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x70, 0x72, 
0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0xba, 0x01, 0x0a, + 0x16, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0xda, 0x41, 0x05, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xaa, 0x01, 0x0a, 0x12, 0x4c, 0x69, + 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, + 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, 0xda, 0x41, 0x05, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x12, 0x29, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x7c, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0x2f, 0xda, 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x21, 0x2a, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xad, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x74, 0x61, + 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x22, 0x34, + 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x1a, 0x70, 0xca, 0x41, 0x15, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, + 0x55, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, + 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, + 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x32, 0xd2, 0x15, 0x0a, 0x0a, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x62, 0x65, 0x72, 0x12, 0xb4, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, - 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x30, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, - 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x91, 0x01, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5e, 0xda, 0x41, + 0x2b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2c, 0x70, 0x75, 0x73, 0x68, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x2a, 0x3a, 0x01, 0x2a, 0x1a, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa1, 0x01, 0x0a, + 0x0f, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 
0x31, 0x2e, 0x53, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x44, 0xda, 0x41, 0x0c, 0x73, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x2f, 0x12, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, + 0x12, 0xbb, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, - 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x43, 0xda, 0x41, 0x11, 0x74, 0x6f, 0x70, 0x69, 0x63, - 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x29, 0x3a, 0x01, 0x2a, 0x32, 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, - 0x63, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x07, - 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, - 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, - 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, - 0x0e, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x3a, 0x01, 0x2a, 0x22, 0x27, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, - 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, - 0x68, 0x12, 0x77, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x21, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, - 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x2f, 0xda, 0x41, 0x05, 0x74, 0x6f, - 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, - 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x8a, 0x01, 0x0a, 0x0a, 0x4c, - 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, + 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 
0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x58, 0xda, 0x41, 0x18, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x32, 0x32, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa6, + 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0xda, + 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, + 0x26, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x9f, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, - 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, - 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0xba, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, - 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, - 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, - 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, - 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, - 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0xda, 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, - 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, - 0x69, 0x63, 0x73, 0x2f, 
0x2a, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xaa, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, - 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, 0xda, 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x12, 0x29, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, - 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, - 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x73, 0x12, 0x7c, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, - 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, - 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x2f, - 0xda, 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x2a, 0x1f, - 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x12, - 0xad, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, - 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, - 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x3c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x22, 0x34, 0x2f, 0x76, 0x31, 0x2f, 0x7b, - 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x1a, - 0x70, 0xca, 0x41, 0x15, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x55, 0x68, 0x74, 0x74, 0x70, - 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, - 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, - 0x62, 0x32, 0xd2, 0x15, 0x0a, 0x0a, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, - 0x12, 0xb4, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5e, 0xda, 0x41, 0x2b, 0x6e, 0x61, 0x6d, 0x65, - 0x2c, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2c, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x3a, 0x01, 0x2a, - 0x1a, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa1, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, - 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x44, 0xda, 0x41, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x2f, 0x76, - 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xbb, 0x01, 0x0a, 0x12, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, - 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x58, 0xda, 0x41, 0x18, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x32, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 
0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0x44, 0xda, 0x41, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x2a, 0x2d, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa6, 0x01, 0x0a, 0x11, 0x4c, 0x69, - 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xcf, 0x01, 0x0a, 0x11, 0x4d, 0x6f, + 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, - 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, 0x2f, 0x76, 0x31, 0x2f, - 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x9f, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x75, 0x62, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x44, - 0xda, 0x41, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x2a, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xcf, 0x01, 0x0a, 0x11, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, - 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, - 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x76, - 0xda, 0x41, 0x29, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, - 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, - 
0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x44, 0x3a, 0x01, 0x2a, 0x22, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0xa8, 0x01, 0x0a, 0x0b, 0x41, 0x63, 0x6b, 0x6e, 0x6f, - 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, - 0x6c, 0x65, 0x64, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x22, 0x5b, 0xda, 0x41, 0x14, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x3e, 0x3a, 0x01, 0x2a, 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, - 0x65, 0x12, 0xd0, 0x01, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, - 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x6c, - 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, 0xda, 0x41, 0x2c, 0x73, - 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x72, 0x65, 0x74, 0x75, - 0x72, 0x6e, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x6c, 0x79, 0x2c, 0x6d, - 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0xda, 0x41, 0x19, 0x73, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x6d, 0x61, 0x78, 0x5f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a, - 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, - 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, - 0x70, 0x75, 0x6c, 0x6c, 0x12, 0x66, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, - 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0xbb, 0x01, 0x0a, - 0x10, 0x4d, 0x6f, 0x64, 
0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, - 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x22, 0x64, 0xda, 0x41, 0x18, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x43, 0x3a, 0x01, 0x2a, 0x22, 0x3e, 0x2f, 0x76, 0x31, 0x2f, + 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0x76, 0xda, 0x41, 0x29, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x2c, 0x61, 0x63, 0x6b, + 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x3a, 0x01, 0x2a, 0x22, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, - 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x0b, 0x47, - 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, - 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x38, 0xda, 0x41, - 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x12, - 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x96, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, - 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34, 0xda, 0x41, 0x07, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x12, 0x22, 0x2f, 0x76, 0x31, - 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 
0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, - 0x97, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, - 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x40, 0xda, 0x41, 0x11, 0x6e, 0x61, 0x6d, 0x65, - 0x2c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x26, 0x3a, 0x01, 0x2a, 0x1a, 0x21, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa3, 0x01, 0x0a, 0x0e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, + 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0xa8, 0x01, 0x0a, 0x0b, + 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x12, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x41, + 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x5b, 0xda, 0x41, 0x14, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x69, + 0x64, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x3a, 0x01, 0x2a, 0x22, 0x39, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x63, 0x6b, 0x6e, 0x6f, + 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x12, 0xd0, 0x01, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, 0x12, + 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, + 0x01, 0xda, 0x41, 0x2c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x2c, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, + 0x65, 0x6c, 0x79, 0x2c, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, + 0xda, 0x41, 0x19, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, + 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 
0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x70, 0x75, 0x6c, 0x6c, 0x12, 0x66, 0x0a, 0x0d, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, + 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, + 0x01, 0x12, 0xbb, 0x01, 0x0a, 0x10, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, + 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x64, 0xda, 0x41, 0x18, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x70, 0x75, 0x73, 0x68, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x43, 0x3a, 0x01, 0x2a, 0x22, + 0x3e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, + 0x6f, 0x64, 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x89, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x22, 0x4c, 0xda, 0x41, 0x14, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2c, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, - 0x3a, 0x01, 0x2a, 0x32, 0x2a, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, - 0x8b, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, - 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x22, 0x38, 0xda, 0x41, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x82, 
0xd3, 0xe4, 0x93, 0x02, 0x27, 0x2a, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x84, 0x01, - 0x0a, 0x04, 0x53, 0x65, 0x65, 0x6b, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a, - 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, - 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, - 0x73, 0x65, 0x65, 0x6b, 0x1a, 0x70, 0xca, 0x41, 0x15, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, - 0x55, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, - 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, - 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, - 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x42, 0xaa, 0x01, 0x0a, 0x14, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x42, - 0x0b, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, - 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x70, 0x62, 0x3b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, - 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x16, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, 0x2e, 0x56, 0x31, 0xca, 0x02, - 0x16, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x50, 0x75, - 0x62, 0x53, 0x75, 0x62, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x19, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, 0x3a, - 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x22, 0x38, 0xda, 0x41, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x27, 0x12, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x96, 0x01, 0x0a, 0x0d, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 
0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34, + 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, + 0x12, 0x22, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x73, 0x12, 0x97, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x40, 0xda, 0x41, + 0x11, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x3a, 0x01, 0x2a, 0x1a, 0x21, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa3, + 0x01, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x4c, 0xda, 0x41, 0x14, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x3a, 0x01, 0x2a, 0x32, 0x2a, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x38, 0xda, 0x41, 0x08, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x2a, 0x25, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, + 0x2a, 0x7d, 0x12, 0x84, 0x01, 0x0a, 0x04, 0x53, 0x65, 0x65, 0x6b, 0x12, 0x1d, 0x2e, 
0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, + 0x65, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x65, 0x6b, 0x1a, 0x70, 0xca, 0x41, 0x15, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x55, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, + 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, + 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x42, 0xaa, 0x01, 0x0a, 0x14, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2f, + 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x70, 0x62, 0x3b, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x16, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x16, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x19, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x50, 0x75, + 0x62, 0x53, 0x75, 0x62, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -5764,192 +6762,216 @@ func file_google_pubsub_v1_pubsub_proto_rawDescGZIP() []byte { return file_google_pubsub_v1_pubsub_proto_rawDescData } -var file_google_pubsub_v1_pubsub_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_google_pubsub_v1_pubsub_proto_msgTypes = make([]protoimpl.MessageInfo, 62) +var file_google_pubsub_v1_pubsub_proto_enumTypes = make([]protoimpl.EnumInfo, 7) +var file_google_pubsub_v1_pubsub_proto_msgTypes = make([]protoimpl.MessageInfo, 72) var file_google_pubsub_v1_pubsub_proto_goTypes = []any{ - (IngestionDataSourceSettings_AwsKinesis_State)(0), // 0: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.State - (Topic_State)(0), // 1: google.pubsub.v1.Topic.State - (Subscription_State)(0), // 2: google.pubsub.v1.Subscription.State - (BigQueryConfig_State)(0), // 3: google.pubsub.v1.BigQueryConfig.State - (CloudStorageConfig_State)(0), // 4: google.pubsub.v1.CloudStorageConfig.State - (*MessageStoragePolicy)(nil), // 5: 
google.pubsub.v1.MessageStoragePolicy - (*SchemaSettings)(nil), // 6: google.pubsub.v1.SchemaSettings - (*IngestionDataSourceSettings)(nil), // 7: google.pubsub.v1.IngestionDataSourceSettings - (*Topic)(nil), // 8: google.pubsub.v1.Topic - (*PubsubMessage)(nil), // 9: google.pubsub.v1.PubsubMessage - (*GetTopicRequest)(nil), // 10: google.pubsub.v1.GetTopicRequest - (*UpdateTopicRequest)(nil), // 11: google.pubsub.v1.UpdateTopicRequest - (*PublishRequest)(nil), // 12: google.pubsub.v1.PublishRequest - (*PublishResponse)(nil), // 13: google.pubsub.v1.PublishResponse - (*ListTopicsRequest)(nil), // 14: google.pubsub.v1.ListTopicsRequest - (*ListTopicsResponse)(nil), // 15: google.pubsub.v1.ListTopicsResponse - (*ListTopicSubscriptionsRequest)(nil), // 16: google.pubsub.v1.ListTopicSubscriptionsRequest - (*ListTopicSubscriptionsResponse)(nil), // 17: google.pubsub.v1.ListTopicSubscriptionsResponse - (*ListTopicSnapshotsRequest)(nil), // 18: google.pubsub.v1.ListTopicSnapshotsRequest - (*ListTopicSnapshotsResponse)(nil), // 19: google.pubsub.v1.ListTopicSnapshotsResponse - (*DeleteTopicRequest)(nil), // 20: google.pubsub.v1.DeleteTopicRequest - (*DetachSubscriptionRequest)(nil), // 21: google.pubsub.v1.DetachSubscriptionRequest - (*DetachSubscriptionResponse)(nil), // 22: google.pubsub.v1.DetachSubscriptionResponse - (*Subscription)(nil), // 23: google.pubsub.v1.Subscription - (*RetryPolicy)(nil), // 24: google.pubsub.v1.RetryPolicy - (*DeadLetterPolicy)(nil), // 25: google.pubsub.v1.DeadLetterPolicy - (*ExpirationPolicy)(nil), // 26: google.pubsub.v1.ExpirationPolicy - (*PushConfig)(nil), // 27: google.pubsub.v1.PushConfig - (*BigQueryConfig)(nil), // 28: google.pubsub.v1.BigQueryConfig - (*CloudStorageConfig)(nil), // 29: google.pubsub.v1.CloudStorageConfig - (*ReceivedMessage)(nil), // 30: google.pubsub.v1.ReceivedMessage - (*GetSubscriptionRequest)(nil), // 31: google.pubsub.v1.GetSubscriptionRequest - (*UpdateSubscriptionRequest)(nil), // 32: google.pubsub.v1.UpdateSubscriptionRequest - (*ListSubscriptionsRequest)(nil), // 33: google.pubsub.v1.ListSubscriptionsRequest - (*ListSubscriptionsResponse)(nil), // 34: google.pubsub.v1.ListSubscriptionsResponse - (*DeleteSubscriptionRequest)(nil), // 35: google.pubsub.v1.DeleteSubscriptionRequest - (*ModifyPushConfigRequest)(nil), // 36: google.pubsub.v1.ModifyPushConfigRequest - (*PullRequest)(nil), // 37: google.pubsub.v1.PullRequest - (*PullResponse)(nil), // 38: google.pubsub.v1.PullResponse - (*ModifyAckDeadlineRequest)(nil), // 39: google.pubsub.v1.ModifyAckDeadlineRequest - (*AcknowledgeRequest)(nil), // 40: google.pubsub.v1.AcknowledgeRequest - (*StreamingPullRequest)(nil), // 41: google.pubsub.v1.StreamingPullRequest - (*StreamingPullResponse)(nil), // 42: google.pubsub.v1.StreamingPullResponse - (*CreateSnapshotRequest)(nil), // 43: google.pubsub.v1.CreateSnapshotRequest - (*UpdateSnapshotRequest)(nil), // 44: google.pubsub.v1.UpdateSnapshotRequest - (*Snapshot)(nil), // 45: google.pubsub.v1.Snapshot - (*GetSnapshotRequest)(nil), // 46: google.pubsub.v1.GetSnapshotRequest - (*ListSnapshotsRequest)(nil), // 47: google.pubsub.v1.ListSnapshotsRequest - (*ListSnapshotsResponse)(nil), // 48: google.pubsub.v1.ListSnapshotsResponse - (*DeleteSnapshotRequest)(nil), // 49: google.pubsub.v1.DeleteSnapshotRequest - (*SeekRequest)(nil), // 50: google.pubsub.v1.SeekRequest - (*SeekResponse)(nil), // 51: google.pubsub.v1.SeekResponse - (*IngestionDataSourceSettings_AwsKinesis)(nil), // 52: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis - 
nil, // 53: google.pubsub.v1.Topic.LabelsEntry - nil, // 54: google.pubsub.v1.PubsubMessage.AttributesEntry - nil, // 55: google.pubsub.v1.Subscription.LabelsEntry - (*PushConfig_OidcToken)(nil), // 56: google.pubsub.v1.PushConfig.OidcToken - (*PushConfig_PubsubWrapper)(nil), // 57: google.pubsub.v1.PushConfig.PubsubWrapper - (*PushConfig_NoWrapper)(nil), // 58: google.pubsub.v1.PushConfig.NoWrapper - nil, // 59: google.pubsub.v1.PushConfig.AttributesEntry - (*CloudStorageConfig_TextConfig)(nil), // 60: google.pubsub.v1.CloudStorageConfig.TextConfig - (*CloudStorageConfig_AvroConfig)(nil), // 61: google.pubsub.v1.CloudStorageConfig.AvroConfig - (*StreamingPullResponse_AcknowledgeConfirmation)(nil), // 62: google.pubsub.v1.StreamingPullResponse.AcknowledgeConfirmation - (*StreamingPullResponse_ModifyAckDeadlineConfirmation)(nil), // 63: google.pubsub.v1.StreamingPullResponse.ModifyAckDeadlineConfirmation - (*StreamingPullResponse_SubscriptionProperties)(nil), // 64: google.pubsub.v1.StreamingPullResponse.SubscriptionProperties - nil, // 65: google.pubsub.v1.CreateSnapshotRequest.LabelsEntry - nil, // 66: google.pubsub.v1.Snapshot.LabelsEntry - (Encoding)(0), // 67: google.pubsub.v1.Encoding - (*durationpb.Duration)(nil), // 68: google.protobuf.Duration - (*timestamppb.Timestamp)(nil), // 69: google.protobuf.Timestamp - (*fieldmaskpb.FieldMask)(nil), // 70: google.protobuf.FieldMask - (*emptypb.Empty)(nil), // 71: google.protobuf.Empty + (IngestionDataSourceSettings_AwsKinesis_State)(0), // 0: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.State + (IngestionDataSourceSettings_CloudStorage_State)(0), // 1: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.State + (PlatformLogsSettings_Severity)(0), // 2: google.pubsub.v1.PlatformLogsSettings.Severity + (Topic_State)(0), // 3: google.pubsub.v1.Topic.State + (Subscription_State)(0), // 4: google.pubsub.v1.Subscription.State + (BigQueryConfig_State)(0), // 5: google.pubsub.v1.BigQueryConfig.State + (CloudStorageConfig_State)(0), // 6: google.pubsub.v1.CloudStorageConfig.State + (*MessageStoragePolicy)(nil), // 7: google.pubsub.v1.MessageStoragePolicy + (*SchemaSettings)(nil), // 8: google.pubsub.v1.SchemaSettings + (*IngestionDataSourceSettings)(nil), // 9: google.pubsub.v1.IngestionDataSourceSettings + (*PlatformLogsSettings)(nil), // 10: google.pubsub.v1.PlatformLogsSettings + (*IngestionFailureEvent)(nil), // 11: google.pubsub.v1.IngestionFailureEvent + (*Topic)(nil), // 12: google.pubsub.v1.Topic + (*PubsubMessage)(nil), // 13: google.pubsub.v1.PubsubMessage + (*GetTopicRequest)(nil), // 14: google.pubsub.v1.GetTopicRequest + (*UpdateTopicRequest)(nil), // 15: google.pubsub.v1.UpdateTopicRequest + (*PublishRequest)(nil), // 16: google.pubsub.v1.PublishRequest + (*PublishResponse)(nil), // 17: google.pubsub.v1.PublishResponse + (*ListTopicsRequest)(nil), // 18: google.pubsub.v1.ListTopicsRequest + (*ListTopicsResponse)(nil), // 19: google.pubsub.v1.ListTopicsResponse + (*ListTopicSubscriptionsRequest)(nil), // 20: google.pubsub.v1.ListTopicSubscriptionsRequest + (*ListTopicSubscriptionsResponse)(nil), // 21: google.pubsub.v1.ListTopicSubscriptionsResponse + (*ListTopicSnapshotsRequest)(nil), // 22: google.pubsub.v1.ListTopicSnapshotsRequest + (*ListTopicSnapshotsResponse)(nil), // 23: google.pubsub.v1.ListTopicSnapshotsResponse + (*DeleteTopicRequest)(nil), // 24: google.pubsub.v1.DeleteTopicRequest + (*DetachSubscriptionRequest)(nil), // 25: google.pubsub.v1.DetachSubscriptionRequest + (*DetachSubscriptionResponse)(nil), // 
26: google.pubsub.v1.DetachSubscriptionResponse + (*Subscription)(nil), // 27: google.pubsub.v1.Subscription + (*RetryPolicy)(nil), // 28: google.pubsub.v1.RetryPolicy + (*DeadLetterPolicy)(nil), // 29: google.pubsub.v1.DeadLetterPolicy + (*ExpirationPolicy)(nil), // 30: google.pubsub.v1.ExpirationPolicy + (*PushConfig)(nil), // 31: google.pubsub.v1.PushConfig + (*BigQueryConfig)(nil), // 32: google.pubsub.v1.BigQueryConfig + (*CloudStorageConfig)(nil), // 33: google.pubsub.v1.CloudStorageConfig + (*ReceivedMessage)(nil), // 34: google.pubsub.v1.ReceivedMessage + (*GetSubscriptionRequest)(nil), // 35: google.pubsub.v1.GetSubscriptionRequest + (*UpdateSubscriptionRequest)(nil), // 36: google.pubsub.v1.UpdateSubscriptionRequest + (*ListSubscriptionsRequest)(nil), // 37: google.pubsub.v1.ListSubscriptionsRequest + (*ListSubscriptionsResponse)(nil), // 38: google.pubsub.v1.ListSubscriptionsResponse + (*DeleteSubscriptionRequest)(nil), // 39: google.pubsub.v1.DeleteSubscriptionRequest + (*ModifyPushConfigRequest)(nil), // 40: google.pubsub.v1.ModifyPushConfigRequest + (*PullRequest)(nil), // 41: google.pubsub.v1.PullRequest + (*PullResponse)(nil), // 42: google.pubsub.v1.PullResponse + (*ModifyAckDeadlineRequest)(nil), // 43: google.pubsub.v1.ModifyAckDeadlineRequest + (*AcknowledgeRequest)(nil), // 44: google.pubsub.v1.AcknowledgeRequest + (*StreamingPullRequest)(nil), // 45: google.pubsub.v1.StreamingPullRequest + (*StreamingPullResponse)(nil), // 46: google.pubsub.v1.StreamingPullResponse + (*CreateSnapshotRequest)(nil), // 47: google.pubsub.v1.CreateSnapshotRequest + (*UpdateSnapshotRequest)(nil), // 48: google.pubsub.v1.UpdateSnapshotRequest + (*Snapshot)(nil), // 49: google.pubsub.v1.Snapshot + (*GetSnapshotRequest)(nil), // 50: google.pubsub.v1.GetSnapshotRequest + (*ListSnapshotsRequest)(nil), // 51: google.pubsub.v1.ListSnapshotsRequest + (*ListSnapshotsResponse)(nil), // 52: google.pubsub.v1.ListSnapshotsResponse + (*DeleteSnapshotRequest)(nil), // 53: google.pubsub.v1.DeleteSnapshotRequest + (*SeekRequest)(nil), // 54: google.pubsub.v1.SeekRequest + (*SeekResponse)(nil), // 55: google.pubsub.v1.SeekResponse + (*IngestionDataSourceSettings_AwsKinesis)(nil), // 56: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis + (*IngestionDataSourceSettings_CloudStorage)(nil), // 57: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage + (*IngestionDataSourceSettings_CloudStorage_TextFormat)(nil), // 58: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.TextFormat + (*IngestionDataSourceSettings_CloudStorage_AvroFormat)(nil), // 59: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.AvroFormat + (*IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat)(nil), // 60: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.PubSubAvroFormat + (*IngestionFailureEvent_ApiViolationReason)(nil), // 61: google.pubsub.v1.IngestionFailureEvent.ApiViolationReason + (*IngestionFailureEvent_AvroFailureReason)(nil), // 62: google.pubsub.v1.IngestionFailureEvent.AvroFailureReason + (*IngestionFailureEvent_CloudStorageFailure)(nil), // 63: google.pubsub.v1.IngestionFailureEvent.CloudStorageFailure + nil, // 64: google.pubsub.v1.Topic.LabelsEntry + nil, // 65: google.pubsub.v1.PubsubMessage.AttributesEntry + (*Subscription_AnalyticsHubSubscriptionInfo)(nil), // 66: google.pubsub.v1.Subscription.AnalyticsHubSubscriptionInfo + nil, // 67: google.pubsub.v1.Subscription.LabelsEntry + (*PushConfig_OidcToken)(nil), // 68: google.pubsub.v1.PushConfig.OidcToken + 
(*PushConfig_PubsubWrapper)(nil), // 69: google.pubsub.v1.PushConfig.PubsubWrapper + (*PushConfig_NoWrapper)(nil), // 70: google.pubsub.v1.PushConfig.NoWrapper + nil, // 71: google.pubsub.v1.PushConfig.AttributesEntry + (*CloudStorageConfig_TextConfig)(nil), // 72: google.pubsub.v1.CloudStorageConfig.TextConfig + (*CloudStorageConfig_AvroConfig)(nil), // 73: google.pubsub.v1.CloudStorageConfig.AvroConfig + (*StreamingPullResponse_AcknowledgeConfirmation)(nil), // 74: google.pubsub.v1.StreamingPullResponse.AcknowledgeConfirmation + (*StreamingPullResponse_ModifyAckDeadlineConfirmation)(nil), // 75: google.pubsub.v1.StreamingPullResponse.ModifyAckDeadlineConfirmation + (*StreamingPullResponse_SubscriptionProperties)(nil), // 76: google.pubsub.v1.StreamingPullResponse.SubscriptionProperties + nil, // 77: google.pubsub.v1.CreateSnapshotRequest.LabelsEntry + nil, // 78: google.pubsub.v1.Snapshot.LabelsEntry + (Encoding)(0), // 79: google.pubsub.v1.Encoding + (*durationpb.Duration)(nil), // 80: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 81: google.protobuf.Timestamp + (*fieldmaskpb.FieldMask)(nil), // 82: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 83: google.protobuf.Empty } var file_google_pubsub_v1_pubsub_proto_depIdxs = []int32{ - 67, // 0: google.pubsub.v1.SchemaSettings.encoding:type_name -> google.pubsub.v1.Encoding - 52, // 1: google.pubsub.v1.IngestionDataSourceSettings.aws_kinesis:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis - 53, // 2: google.pubsub.v1.Topic.labels:type_name -> google.pubsub.v1.Topic.LabelsEntry - 5, // 3: google.pubsub.v1.Topic.message_storage_policy:type_name -> google.pubsub.v1.MessageStoragePolicy - 6, // 4: google.pubsub.v1.Topic.schema_settings:type_name -> google.pubsub.v1.SchemaSettings - 68, // 5: google.pubsub.v1.Topic.message_retention_duration:type_name -> google.protobuf.Duration - 1, // 6: google.pubsub.v1.Topic.state:type_name -> google.pubsub.v1.Topic.State - 7, // 7: google.pubsub.v1.Topic.ingestion_data_source_settings:type_name -> google.pubsub.v1.IngestionDataSourceSettings - 54, // 8: google.pubsub.v1.PubsubMessage.attributes:type_name -> google.pubsub.v1.PubsubMessage.AttributesEntry - 69, // 9: google.pubsub.v1.PubsubMessage.publish_time:type_name -> google.protobuf.Timestamp - 8, // 10: google.pubsub.v1.UpdateTopicRequest.topic:type_name -> google.pubsub.v1.Topic - 70, // 11: google.pubsub.v1.UpdateTopicRequest.update_mask:type_name -> google.protobuf.FieldMask - 9, // 12: google.pubsub.v1.PublishRequest.messages:type_name -> google.pubsub.v1.PubsubMessage - 8, // 13: google.pubsub.v1.ListTopicsResponse.topics:type_name -> google.pubsub.v1.Topic - 27, // 14: google.pubsub.v1.Subscription.push_config:type_name -> google.pubsub.v1.PushConfig - 28, // 15: google.pubsub.v1.Subscription.bigquery_config:type_name -> google.pubsub.v1.BigQueryConfig - 29, // 16: google.pubsub.v1.Subscription.cloud_storage_config:type_name -> google.pubsub.v1.CloudStorageConfig - 68, // 17: google.pubsub.v1.Subscription.message_retention_duration:type_name -> google.protobuf.Duration - 55, // 18: google.pubsub.v1.Subscription.labels:type_name -> google.pubsub.v1.Subscription.LabelsEntry - 26, // 19: google.pubsub.v1.Subscription.expiration_policy:type_name -> google.pubsub.v1.ExpirationPolicy - 25, // 20: google.pubsub.v1.Subscription.dead_letter_policy:type_name -> google.pubsub.v1.DeadLetterPolicy - 24, // 21: google.pubsub.v1.Subscription.retry_policy:type_name -> google.pubsub.v1.RetryPolicy - 68, // 22: 
google.pubsub.v1.Subscription.topic_message_retention_duration:type_name -> google.protobuf.Duration - 2, // 23: google.pubsub.v1.Subscription.state:type_name -> google.pubsub.v1.Subscription.State - 68, // 24: google.pubsub.v1.RetryPolicy.minimum_backoff:type_name -> google.protobuf.Duration - 68, // 25: google.pubsub.v1.RetryPolicy.maximum_backoff:type_name -> google.protobuf.Duration - 68, // 26: google.pubsub.v1.ExpirationPolicy.ttl:type_name -> google.protobuf.Duration - 59, // 27: google.pubsub.v1.PushConfig.attributes:type_name -> google.pubsub.v1.PushConfig.AttributesEntry - 56, // 28: google.pubsub.v1.PushConfig.oidc_token:type_name -> google.pubsub.v1.PushConfig.OidcToken - 57, // 29: google.pubsub.v1.PushConfig.pubsub_wrapper:type_name -> google.pubsub.v1.PushConfig.PubsubWrapper - 58, // 30: google.pubsub.v1.PushConfig.no_wrapper:type_name -> google.pubsub.v1.PushConfig.NoWrapper - 3, // 31: google.pubsub.v1.BigQueryConfig.state:type_name -> google.pubsub.v1.BigQueryConfig.State - 60, // 32: google.pubsub.v1.CloudStorageConfig.text_config:type_name -> google.pubsub.v1.CloudStorageConfig.TextConfig - 61, // 33: google.pubsub.v1.CloudStorageConfig.avro_config:type_name -> google.pubsub.v1.CloudStorageConfig.AvroConfig - 68, // 34: google.pubsub.v1.CloudStorageConfig.max_duration:type_name -> google.protobuf.Duration - 4, // 35: google.pubsub.v1.CloudStorageConfig.state:type_name -> google.pubsub.v1.CloudStorageConfig.State - 9, // 36: google.pubsub.v1.ReceivedMessage.message:type_name -> google.pubsub.v1.PubsubMessage - 23, // 37: google.pubsub.v1.UpdateSubscriptionRequest.subscription:type_name -> google.pubsub.v1.Subscription - 70, // 38: google.pubsub.v1.UpdateSubscriptionRequest.update_mask:type_name -> google.protobuf.FieldMask - 23, // 39: google.pubsub.v1.ListSubscriptionsResponse.subscriptions:type_name -> google.pubsub.v1.Subscription - 27, // 40: google.pubsub.v1.ModifyPushConfigRequest.push_config:type_name -> google.pubsub.v1.PushConfig - 30, // 41: google.pubsub.v1.PullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage - 30, // 42: google.pubsub.v1.StreamingPullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage - 62, // 43: google.pubsub.v1.StreamingPullResponse.acknowledge_confirmation:type_name -> google.pubsub.v1.StreamingPullResponse.AcknowledgeConfirmation - 63, // 44: google.pubsub.v1.StreamingPullResponse.modify_ack_deadline_confirmation:type_name -> google.pubsub.v1.StreamingPullResponse.ModifyAckDeadlineConfirmation - 64, // 45: google.pubsub.v1.StreamingPullResponse.subscription_properties:type_name -> google.pubsub.v1.StreamingPullResponse.SubscriptionProperties - 65, // 46: google.pubsub.v1.CreateSnapshotRequest.labels:type_name -> google.pubsub.v1.CreateSnapshotRequest.LabelsEntry - 45, // 47: google.pubsub.v1.UpdateSnapshotRequest.snapshot:type_name -> google.pubsub.v1.Snapshot - 70, // 48: google.pubsub.v1.UpdateSnapshotRequest.update_mask:type_name -> google.protobuf.FieldMask - 69, // 49: google.pubsub.v1.Snapshot.expire_time:type_name -> google.protobuf.Timestamp - 66, // 50: google.pubsub.v1.Snapshot.labels:type_name -> google.pubsub.v1.Snapshot.LabelsEntry - 45, // 51: google.pubsub.v1.ListSnapshotsResponse.snapshots:type_name -> google.pubsub.v1.Snapshot - 69, // 52: google.pubsub.v1.SeekRequest.time:type_name -> google.protobuf.Timestamp - 0, // 53: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.state:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.State - 8, // 54: 
google.pubsub.v1.Publisher.CreateTopic:input_type -> google.pubsub.v1.Topic - 11, // 55: google.pubsub.v1.Publisher.UpdateTopic:input_type -> google.pubsub.v1.UpdateTopicRequest - 12, // 56: google.pubsub.v1.Publisher.Publish:input_type -> google.pubsub.v1.PublishRequest - 10, // 57: google.pubsub.v1.Publisher.GetTopic:input_type -> google.pubsub.v1.GetTopicRequest - 14, // 58: google.pubsub.v1.Publisher.ListTopics:input_type -> google.pubsub.v1.ListTopicsRequest - 16, // 59: google.pubsub.v1.Publisher.ListTopicSubscriptions:input_type -> google.pubsub.v1.ListTopicSubscriptionsRequest - 18, // 60: google.pubsub.v1.Publisher.ListTopicSnapshots:input_type -> google.pubsub.v1.ListTopicSnapshotsRequest - 20, // 61: google.pubsub.v1.Publisher.DeleteTopic:input_type -> google.pubsub.v1.DeleteTopicRequest - 21, // 62: google.pubsub.v1.Publisher.DetachSubscription:input_type -> google.pubsub.v1.DetachSubscriptionRequest - 23, // 63: google.pubsub.v1.Subscriber.CreateSubscription:input_type -> google.pubsub.v1.Subscription - 31, // 64: google.pubsub.v1.Subscriber.GetSubscription:input_type -> google.pubsub.v1.GetSubscriptionRequest - 32, // 65: google.pubsub.v1.Subscriber.UpdateSubscription:input_type -> google.pubsub.v1.UpdateSubscriptionRequest - 33, // 66: google.pubsub.v1.Subscriber.ListSubscriptions:input_type -> google.pubsub.v1.ListSubscriptionsRequest - 35, // 67: google.pubsub.v1.Subscriber.DeleteSubscription:input_type -> google.pubsub.v1.DeleteSubscriptionRequest - 39, // 68: google.pubsub.v1.Subscriber.ModifyAckDeadline:input_type -> google.pubsub.v1.ModifyAckDeadlineRequest - 40, // 69: google.pubsub.v1.Subscriber.Acknowledge:input_type -> google.pubsub.v1.AcknowledgeRequest - 37, // 70: google.pubsub.v1.Subscriber.Pull:input_type -> google.pubsub.v1.PullRequest - 41, // 71: google.pubsub.v1.Subscriber.StreamingPull:input_type -> google.pubsub.v1.StreamingPullRequest - 36, // 72: google.pubsub.v1.Subscriber.ModifyPushConfig:input_type -> google.pubsub.v1.ModifyPushConfigRequest - 46, // 73: google.pubsub.v1.Subscriber.GetSnapshot:input_type -> google.pubsub.v1.GetSnapshotRequest - 47, // 74: google.pubsub.v1.Subscriber.ListSnapshots:input_type -> google.pubsub.v1.ListSnapshotsRequest - 43, // 75: google.pubsub.v1.Subscriber.CreateSnapshot:input_type -> google.pubsub.v1.CreateSnapshotRequest - 44, // 76: google.pubsub.v1.Subscriber.UpdateSnapshot:input_type -> google.pubsub.v1.UpdateSnapshotRequest - 49, // 77: google.pubsub.v1.Subscriber.DeleteSnapshot:input_type -> google.pubsub.v1.DeleteSnapshotRequest - 50, // 78: google.pubsub.v1.Subscriber.Seek:input_type -> google.pubsub.v1.SeekRequest - 8, // 79: google.pubsub.v1.Publisher.CreateTopic:output_type -> google.pubsub.v1.Topic - 8, // 80: google.pubsub.v1.Publisher.UpdateTopic:output_type -> google.pubsub.v1.Topic - 13, // 81: google.pubsub.v1.Publisher.Publish:output_type -> google.pubsub.v1.PublishResponse - 8, // 82: google.pubsub.v1.Publisher.GetTopic:output_type -> google.pubsub.v1.Topic - 15, // 83: google.pubsub.v1.Publisher.ListTopics:output_type -> google.pubsub.v1.ListTopicsResponse - 17, // 84: google.pubsub.v1.Publisher.ListTopicSubscriptions:output_type -> google.pubsub.v1.ListTopicSubscriptionsResponse - 19, // 85: google.pubsub.v1.Publisher.ListTopicSnapshots:output_type -> google.pubsub.v1.ListTopicSnapshotsResponse - 71, // 86: google.pubsub.v1.Publisher.DeleteTopic:output_type -> google.protobuf.Empty - 22, // 87: google.pubsub.v1.Publisher.DetachSubscription:output_type -> 
google.pubsub.v1.DetachSubscriptionResponse - 23, // 88: google.pubsub.v1.Subscriber.CreateSubscription:output_type -> google.pubsub.v1.Subscription - 23, // 89: google.pubsub.v1.Subscriber.GetSubscription:output_type -> google.pubsub.v1.Subscription - 23, // 90: google.pubsub.v1.Subscriber.UpdateSubscription:output_type -> google.pubsub.v1.Subscription - 34, // 91: google.pubsub.v1.Subscriber.ListSubscriptions:output_type -> google.pubsub.v1.ListSubscriptionsResponse - 71, // 92: google.pubsub.v1.Subscriber.DeleteSubscription:output_type -> google.protobuf.Empty - 71, // 93: google.pubsub.v1.Subscriber.ModifyAckDeadline:output_type -> google.protobuf.Empty - 71, // 94: google.pubsub.v1.Subscriber.Acknowledge:output_type -> google.protobuf.Empty - 38, // 95: google.pubsub.v1.Subscriber.Pull:output_type -> google.pubsub.v1.PullResponse - 42, // 96: google.pubsub.v1.Subscriber.StreamingPull:output_type -> google.pubsub.v1.StreamingPullResponse - 71, // 97: google.pubsub.v1.Subscriber.ModifyPushConfig:output_type -> google.protobuf.Empty - 45, // 98: google.pubsub.v1.Subscriber.GetSnapshot:output_type -> google.pubsub.v1.Snapshot - 48, // 99: google.pubsub.v1.Subscriber.ListSnapshots:output_type -> google.pubsub.v1.ListSnapshotsResponse - 45, // 100: google.pubsub.v1.Subscriber.CreateSnapshot:output_type -> google.pubsub.v1.Snapshot - 45, // 101: google.pubsub.v1.Subscriber.UpdateSnapshot:output_type -> google.pubsub.v1.Snapshot - 71, // 102: google.pubsub.v1.Subscriber.DeleteSnapshot:output_type -> google.protobuf.Empty - 51, // 103: google.pubsub.v1.Subscriber.Seek:output_type -> google.pubsub.v1.SeekResponse - 79, // [79:104] is the sub-list for method output_type - 54, // [54:79] is the sub-list for method input_type - 54, // [54:54] is the sub-list for extension type_name - 54, // [54:54] is the sub-list for extension extendee - 0, // [0:54] is the sub-list for field type_name + 79, // 0: google.pubsub.v1.SchemaSettings.encoding:type_name -> google.pubsub.v1.Encoding + 56, // 1: google.pubsub.v1.IngestionDataSourceSettings.aws_kinesis:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis + 57, // 2: google.pubsub.v1.IngestionDataSourceSettings.cloud_storage:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage + 10, // 3: google.pubsub.v1.IngestionDataSourceSettings.platform_logs_settings:type_name -> google.pubsub.v1.PlatformLogsSettings + 2, // 4: google.pubsub.v1.PlatformLogsSettings.severity:type_name -> google.pubsub.v1.PlatformLogsSettings.Severity + 63, // 5: google.pubsub.v1.IngestionFailureEvent.cloud_storage_failure:type_name -> google.pubsub.v1.IngestionFailureEvent.CloudStorageFailure + 64, // 6: google.pubsub.v1.Topic.labels:type_name -> google.pubsub.v1.Topic.LabelsEntry + 7, // 7: google.pubsub.v1.Topic.message_storage_policy:type_name -> google.pubsub.v1.MessageStoragePolicy + 8, // 8: google.pubsub.v1.Topic.schema_settings:type_name -> google.pubsub.v1.SchemaSettings + 80, // 9: google.pubsub.v1.Topic.message_retention_duration:type_name -> google.protobuf.Duration + 3, // 10: google.pubsub.v1.Topic.state:type_name -> google.pubsub.v1.Topic.State + 9, // 11: google.pubsub.v1.Topic.ingestion_data_source_settings:type_name -> google.pubsub.v1.IngestionDataSourceSettings + 65, // 12: google.pubsub.v1.PubsubMessage.attributes:type_name -> google.pubsub.v1.PubsubMessage.AttributesEntry + 81, // 13: google.pubsub.v1.PubsubMessage.publish_time:type_name -> google.protobuf.Timestamp + 12, // 14: google.pubsub.v1.UpdateTopicRequest.topic:type_name 
-> google.pubsub.v1.Topic + 82, // 15: google.pubsub.v1.UpdateTopicRequest.update_mask:type_name -> google.protobuf.FieldMask + 13, // 16: google.pubsub.v1.PublishRequest.messages:type_name -> google.pubsub.v1.PubsubMessage + 12, // 17: google.pubsub.v1.ListTopicsResponse.topics:type_name -> google.pubsub.v1.Topic + 31, // 18: google.pubsub.v1.Subscription.push_config:type_name -> google.pubsub.v1.PushConfig + 32, // 19: google.pubsub.v1.Subscription.bigquery_config:type_name -> google.pubsub.v1.BigQueryConfig + 33, // 20: google.pubsub.v1.Subscription.cloud_storage_config:type_name -> google.pubsub.v1.CloudStorageConfig + 80, // 21: google.pubsub.v1.Subscription.message_retention_duration:type_name -> google.protobuf.Duration + 67, // 22: google.pubsub.v1.Subscription.labels:type_name -> google.pubsub.v1.Subscription.LabelsEntry + 30, // 23: google.pubsub.v1.Subscription.expiration_policy:type_name -> google.pubsub.v1.ExpirationPolicy + 29, // 24: google.pubsub.v1.Subscription.dead_letter_policy:type_name -> google.pubsub.v1.DeadLetterPolicy + 28, // 25: google.pubsub.v1.Subscription.retry_policy:type_name -> google.pubsub.v1.RetryPolicy + 80, // 26: google.pubsub.v1.Subscription.topic_message_retention_duration:type_name -> google.protobuf.Duration + 4, // 27: google.pubsub.v1.Subscription.state:type_name -> google.pubsub.v1.Subscription.State + 66, // 28: google.pubsub.v1.Subscription.analytics_hub_subscription_info:type_name -> google.pubsub.v1.Subscription.AnalyticsHubSubscriptionInfo + 80, // 29: google.pubsub.v1.RetryPolicy.minimum_backoff:type_name -> google.protobuf.Duration + 80, // 30: google.pubsub.v1.RetryPolicy.maximum_backoff:type_name -> google.protobuf.Duration + 80, // 31: google.pubsub.v1.ExpirationPolicy.ttl:type_name -> google.protobuf.Duration + 71, // 32: google.pubsub.v1.PushConfig.attributes:type_name -> google.pubsub.v1.PushConfig.AttributesEntry + 68, // 33: google.pubsub.v1.PushConfig.oidc_token:type_name -> google.pubsub.v1.PushConfig.OidcToken + 69, // 34: google.pubsub.v1.PushConfig.pubsub_wrapper:type_name -> google.pubsub.v1.PushConfig.PubsubWrapper + 70, // 35: google.pubsub.v1.PushConfig.no_wrapper:type_name -> google.pubsub.v1.PushConfig.NoWrapper + 5, // 36: google.pubsub.v1.BigQueryConfig.state:type_name -> google.pubsub.v1.BigQueryConfig.State + 72, // 37: google.pubsub.v1.CloudStorageConfig.text_config:type_name -> google.pubsub.v1.CloudStorageConfig.TextConfig + 73, // 38: google.pubsub.v1.CloudStorageConfig.avro_config:type_name -> google.pubsub.v1.CloudStorageConfig.AvroConfig + 80, // 39: google.pubsub.v1.CloudStorageConfig.max_duration:type_name -> google.protobuf.Duration + 6, // 40: google.pubsub.v1.CloudStorageConfig.state:type_name -> google.pubsub.v1.CloudStorageConfig.State + 13, // 41: google.pubsub.v1.ReceivedMessage.message:type_name -> google.pubsub.v1.PubsubMessage + 27, // 42: google.pubsub.v1.UpdateSubscriptionRequest.subscription:type_name -> google.pubsub.v1.Subscription + 82, // 43: google.pubsub.v1.UpdateSubscriptionRequest.update_mask:type_name -> google.protobuf.FieldMask + 27, // 44: google.pubsub.v1.ListSubscriptionsResponse.subscriptions:type_name -> google.pubsub.v1.Subscription + 31, // 45: google.pubsub.v1.ModifyPushConfigRequest.push_config:type_name -> google.pubsub.v1.PushConfig + 34, // 46: google.pubsub.v1.PullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage + 34, // 47: google.pubsub.v1.StreamingPullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage + 74, // 48: 
google.pubsub.v1.StreamingPullResponse.acknowledge_confirmation:type_name -> google.pubsub.v1.StreamingPullResponse.AcknowledgeConfirmation + 75, // 49: google.pubsub.v1.StreamingPullResponse.modify_ack_deadline_confirmation:type_name -> google.pubsub.v1.StreamingPullResponse.ModifyAckDeadlineConfirmation + 76, // 50: google.pubsub.v1.StreamingPullResponse.subscription_properties:type_name -> google.pubsub.v1.StreamingPullResponse.SubscriptionProperties + 77, // 51: google.pubsub.v1.CreateSnapshotRequest.labels:type_name -> google.pubsub.v1.CreateSnapshotRequest.LabelsEntry + 49, // 52: google.pubsub.v1.UpdateSnapshotRequest.snapshot:type_name -> google.pubsub.v1.Snapshot + 82, // 53: google.pubsub.v1.UpdateSnapshotRequest.update_mask:type_name -> google.protobuf.FieldMask + 81, // 54: google.pubsub.v1.Snapshot.expire_time:type_name -> google.protobuf.Timestamp + 78, // 55: google.pubsub.v1.Snapshot.labels:type_name -> google.pubsub.v1.Snapshot.LabelsEntry + 49, // 56: google.pubsub.v1.ListSnapshotsResponse.snapshots:type_name -> google.pubsub.v1.Snapshot + 81, // 57: google.pubsub.v1.SeekRequest.time:type_name -> google.protobuf.Timestamp + 0, // 58: google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.state:type_name -> google.pubsub.v1.IngestionDataSourceSettings.AwsKinesis.State + 1, // 59: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.state:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.State + 58, // 60: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.text_format:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.TextFormat + 59, // 61: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.avro_format:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.AvroFormat + 60, // 62: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.pubsub_avro_format:type_name -> google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.PubSubAvroFormat + 81, // 63: google.pubsub.v1.IngestionDataSourceSettings.CloudStorage.minimum_object_create_time:type_name -> google.protobuf.Timestamp + 62, // 64: google.pubsub.v1.IngestionFailureEvent.CloudStorageFailure.avro_failure_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.AvroFailureReason + 61, // 65: google.pubsub.v1.IngestionFailureEvent.CloudStorageFailure.api_violation_reason:type_name -> google.pubsub.v1.IngestionFailureEvent.ApiViolationReason + 12, // 66: google.pubsub.v1.Publisher.CreateTopic:input_type -> google.pubsub.v1.Topic + 15, // 67: google.pubsub.v1.Publisher.UpdateTopic:input_type -> google.pubsub.v1.UpdateTopicRequest + 16, // 68: google.pubsub.v1.Publisher.Publish:input_type -> google.pubsub.v1.PublishRequest + 14, // 69: google.pubsub.v1.Publisher.GetTopic:input_type -> google.pubsub.v1.GetTopicRequest + 18, // 70: google.pubsub.v1.Publisher.ListTopics:input_type -> google.pubsub.v1.ListTopicsRequest + 20, // 71: google.pubsub.v1.Publisher.ListTopicSubscriptions:input_type -> google.pubsub.v1.ListTopicSubscriptionsRequest + 22, // 72: google.pubsub.v1.Publisher.ListTopicSnapshots:input_type -> google.pubsub.v1.ListTopicSnapshotsRequest + 24, // 73: google.pubsub.v1.Publisher.DeleteTopic:input_type -> google.pubsub.v1.DeleteTopicRequest + 25, // 74: google.pubsub.v1.Publisher.DetachSubscription:input_type -> google.pubsub.v1.DetachSubscriptionRequest + 27, // 75: google.pubsub.v1.Subscriber.CreateSubscription:input_type -> google.pubsub.v1.Subscription + 35, // 76: google.pubsub.v1.Subscriber.GetSubscription:input_type -> 
google.pubsub.v1.GetSubscriptionRequest + 36, // 77: google.pubsub.v1.Subscriber.UpdateSubscription:input_type -> google.pubsub.v1.UpdateSubscriptionRequest + 37, // 78: google.pubsub.v1.Subscriber.ListSubscriptions:input_type -> google.pubsub.v1.ListSubscriptionsRequest + 39, // 79: google.pubsub.v1.Subscriber.DeleteSubscription:input_type -> google.pubsub.v1.DeleteSubscriptionRequest + 43, // 80: google.pubsub.v1.Subscriber.ModifyAckDeadline:input_type -> google.pubsub.v1.ModifyAckDeadlineRequest + 44, // 81: google.pubsub.v1.Subscriber.Acknowledge:input_type -> google.pubsub.v1.AcknowledgeRequest + 41, // 82: google.pubsub.v1.Subscriber.Pull:input_type -> google.pubsub.v1.PullRequest + 45, // 83: google.pubsub.v1.Subscriber.StreamingPull:input_type -> google.pubsub.v1.StreamingPullRequest + 40, // 84: google.pubsub.v1.Subscriber.ModifyPushConfig:input_type -> google.pubsub.v1.ModifyPushConfigRequest + 50, // 85: google.pubsub.v1.Subscriber.GetSnapshot:input_type -> google.pubsub.v1.GetSnapshotRequest + 51, // 86: google.pubsub.v1.Subscriber.ListSnapshots:input_type -> google.pubsub.v1.ListSnapshotsRequest + 47, // 87: google.pubsub.v1.Subscriber.CreateSnapshot:input_type -> google.pubsub.v1.CreateSnapshotRequest + 48, // 88: google.pubsub.v1.Subscriber.UpdateSnapshot:input_type -> google.pubsub.v1.UpdateSnapshotRequest + 53, // 89: google.pubsub.v1.Subscriber.DeleteSnapshot:input_type -> google.pubsub.v1.DeleteSnapshotRequest + 54, // 90: google.pubsub.v1.Subscriber.Seek:input_type -> google.pubsub.v1.SeekRequest + 12, // 91: google.pubsub.v1.Publisher.CreateTopic:output_type -> google.pubsub.v1.Topic + 12, // 92: google.pubsub.v1.Publisher.UpdateTopic:output_type -> google.pubsub.v1.Topic + 17, // 93: google.pubsub.v1.Publisher.Publish:output_type -> google.pubsub.v1.PublishResponse + 12, // 94: google.pubsub.v1.Publisher.GetTopic:output_type -> google.pubsub.v1.Topic + 19, // 95: google.pubsub.v1.Publisher.ListTopics:output_type -> google.pubsub.v1.ListTopicsResponse + 21, // 96: google.pubsub.v1.Publisher.ListTopicSubscriptions:output_type -> google.pubsub.v1.ListTopicSubscriptionsResponse + 23, // 97: google.pubsub.v1.Publisher.ListTopicSnapshots:output_type -> google.pubsub.v1.ListTopicSnapshotsResponse + 83, // 98: google.pubsub.v1.Publisher.DeleteTopic:output_type -> google.protobuf.Empty + 26, // 99: google.pubsub.v1.Publisher.DetachSubscription:output_type -> google.pubsub.v1.DetachSubscriptionResponse + 27, // 100: google.pubsub.v1.Subscriber.CreateSubscription:output_type -> google.pubsub.v1.Subscription + 27, // 101: google.pubsub.v1.Subscriber.GetSubscription:output_type -> google.pubsub.v1.Subscription + 27, // 102: google.pubsub.v1.Subscriber.UpdateSubscription:output_type -> google.pubsub.v1.Subscription + 38, // 103: google.pubsub.v1.Subscriber.ListSubscriptions:output_type -> google.pubsub.v1.ListSubscriptionsResponse + 83, // 104: google.pubsub.v1.Subscriber.DeleteSubscription:output_type -> google.protobuf.Empty + 83, // 105: google.pubsub.v1.Subscriber.ModifyAckDeadline:output_type -> google.protobuf.Empty + 83, // 106: google.pubsub.v1.Subscriber.Acknowledge:output_type -> google.protobuf.Empty + 42, // 107: google.pubsub.v1.Subscriber.Pull:output_type -> google.pubsub.v1.PullResponse + 46, // 108: google.pubsub.v1.Subscriber.StreamingPull:output_type -> google.pubsub.v1.StreamingPullResponse + 83, // 109: google.pubsub.v1.Subscriber.ModifyPushConfig:output_type -> google.protobuf.Empty + 49, // 110: google.pubsub.v1.Subscriber.GetSnapshot:output_type -> 
google.pubsub.v1.Snapshot + 52, // 111: google.pubsub.v1.Subscriber.ListSnapshots:output_type -> google.pubsub.v1.ListSnapshotsResponse + 49, // 112: google.pubsub.v1.Subscriber.CreateSnapshot:output_type -> google.pubsub.v1.Snapshot + 49, // 113: google.pubsub.v1.Subscriber.UpdateSnapshot:output_type -> google.pubsub.v1.Snapshot + 83, // 114: google.pubsub.v1.Subscriber.DeleteSnapshot:output_type -> google.protobuf.Empty + 55, // 115: google.pubsub.v1.Subscriber.Seek:output_type -> google.pubsub.v1.SeekResponse + 91, // [91:116] is the sub-list for method output_type + 66, // [66:91] is the sub-list for method input_type + 66, // [66:66] is the sub-list for extension type_name + 66, // [66:66] is the sub-list for extension extendee + 0, // [0:66] is the sub-list for field type_name } func init() { file_google_pubsub_v1_pubsub_proto_init() } @@ -5996,7 +7018,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Topic); i { + switch v := v.(*PlatformLogsSettings); i { case 0: return &v.state case 1: @@ -6008,7 +7030,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*PubsubMessage); i { + switch v := v.(*IngestionFailureEvent); i { case 0: return &v.state case 1: @@ -6020,7 +7042,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*GetTopicRequest); i { + switch v := v.(*Topic); i { case 0: return &v.state case 1: @@ -6032,7 +7054,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*UpdateTopicRequest); i { + switch v := v.(*PubsubMessage); i { case 0: return &v.state case 1: @@ -6044,7 +7066,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*PublishRequest); i { + switch v := v.(*GetTopicRequest); i { case 0: return &v.state case 1: @@ -6056,7 +7078,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*PublishResponse); i { + switch v := v.(*UpdateTopicRequest); i { case 0: return &v.state case 1: @@ -6068,7 +7090,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*ListTopicsRequest); i { + switch v := v.(*PublishRequest); i { case 0: return &v.state case 1: @@ -6080,7 +7102,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*ListTopicsResponse); i { + switch v := v.(*PublishResponse); i { case 0: return &v.state case 1: @@ -6092,7 +7114,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*ListTopicSubscriptionsRequest); i { + switch v := v.(*ListTopicsRequest); i { case 0: return &v.state case 1: @@ -6104,7 +7126,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*ListTopicSubscriptionsResponse); i { + switch v := v.(*ListTopicsResponse); i { 
case 0: return &v.state case 1: @@ -6116,7 +7138,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*ListTopicSnapshotsRequest); i { + switch v := v.(*ListTopicSubscriptionsRequest); i { case 0: return &v.state case 1: @@ -6128,7 +7150,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*ListTopicSnapshotsResponse); i { + switch v := v.(*ListTopicSubscriptionsResponse); i { case 0: return &v.state case 1: @@ -6140,7 +7162,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*DeleteTopicRequest); i { + switch v := v.(*ListTopicSnapshotsRequest); i { case 0: return &v.state case 1: @@ -6152,7 +7174,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*DetachSubscriptionRequest); i { + switch v := v.(*ListTopicSnapshotsResponse); i { case 0: return &v.state case 1: @@ -6164,7 +7186,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*DetachSubscriptionResponse); i { + switch v := v.(*DeleteTopicRequest); i { case 0: return &v.state case 1: @@ -6176,7 +7198,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*Subscription); i { + switch v := v.(*DetachSubscriptionRequest); i { case 0: return &v.state case 1: @@ -6188,7 +7210,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*RetryPolicy); i { + switch v := v.(*DetachSubscriptionResponse); i { case 0: return &v.state case 1: @@ -6200,7 +7222,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*DeadLetterPolicy); i { + switch v := v.(*Subscription); i { case 0: return &v.state case 1: @@ -6212,7 +7234,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*ExpirationPolicy); i { + switch v := v.(*RetryPolicy); i { case 0: return &v.state case 1: @@ -6224,7 +7246,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*PushConfig); i { + switch v := v.(*DeadLetterPolicy); i { case 0: return &v.state case 1: @@ -6236,7 +7258,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*BigQueryConfig); i { + switch v := v.(*ExpirationPolicy); i { case 0: return &v.state case 1: @@ -6248,7 +7270,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*CloudStorageConfig); i { + switch v := v.(*PushConfig); i { case 0: return &v.state case 1: @@ -6260,7 +7282,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v 
:= v.(*ReceivedMessage); i { + switch v := v.(*BigQueryConfig); i { case 0: return &v.state case 1: @@ -6272,7 +7294,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*GetSubscriptionRequest); i { + switch v := v.(*CloudStorageConfig); i { case 0: return &v.state case 1: @@ -6284,7 +7306,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*UpdateSubscriptionRequest); i { + switch v := v.(*ReceivedMessage); i { case 0: return &v.state case 1: @@ -6296,7 +7318,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*ListSubscriptionsRequest); i { + switch v := v.(*GetSubscriptionRequest); i { case 0: return &v.state case 1: @@ -6308,7 +7330,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*ListSubscriptionsResponse); i { + switch v := v.(*UpdateSubscriptionRequest); i { case 0: return &v.state case 1: @@ -6320,7 +7342,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*DeleteSubscriptionRequest); i { + switch v := v.(*ListSubscriptionsRequest); i { case 0: return &v.state case 1: @@ -6332,7 +7354,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*ModifyPushConfigRequest); i { + switch v := v.(*ListSubscriptionsResponse); i { case 0: return &v.state case 1: @@ -6344,7 +7366,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*PullRequest); i { + switch v := v.(*DeleteSubscriptionRequest); i { case 0: return &v.state case 1: @@ -6356,7 +7378,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[33].Exporter = func(v any, i int) any { - switch v := v.(*PullResponse); i { + switch v := v.(*ModifyPushConfigRequest); i { case 0: return &v.state case 1: @@ -6368,7 +7390,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[34].Exporter = func(v any, i int) any { - switch v := v.(*ModifyAckDeadlineRequest); i { + switch v := v.(*PullRequest); i { case 0: return &v.state case 1: @@ -6380,7 +7402,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[35].Exporter = func(v any, i int) any { - switch v := v.(*AcknowledgeRequest); i { + switch v := v.(*PullResponse); i { case 0: return &v.state case 1: @@ -6392,7 +7414,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[36].Exporter = func(v any, i int) any { - switch v := v.(*StreamingPullRequest); i { + switch v := v.(*ModifyAckDeadlineRequest); i { case 0: return &v.state case 1: @@ -6404,7 +7426,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[37].Exporter = func(v any, i int) any { - switch v := v.(*StreamingPullResponse); i { + switch v := v.(*AcknowledgeRequest); i { case 0: return &v.state case 1: @@ -6416,7 +7438,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { 
} } file_google_pubsub_v1_pubsub_proto_msgTypes[38].Exporter = func(v any, i int) any { - switch v := v.(*CreateSnapshotRequest); i { + switch v := v.(*StreamingPullRequest); i { case 0: return &v.state case 1: @@ -6428,7 +7450,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[39].Exporter = func(v any, i int) any { - switch v := v.(*UpdateSnapshotRequest); i { + switch v := v.(*StreamingPullResponse); i { case 0: return &v.state case 1: @@ -6440,7 +7462,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[40].Exporter = func(v any, i int) any { - switch v := v.(*Snapshot); i { + switch v := v.(*CreateSnapshotRequest); i { case 0: return &v.state case 1: @@ -6452,7 +7474,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[41].Exporter = func(v any, i int) any { - switch v := v.(*GetSnapshotRequest); i { + switch v := v.(*UpdateSnapshotRequest); i { case 0: return &v.state case 1: @@ -6464,7 +7486,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[42].Exporter = func(v any, i int) any { - switch v := v.(*ListSnapshotsRequest); i { + switch v := v.(*Snapshot); i { case 0: return &v.state case 1: @@ -6476,7 +7498,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[43].Exporter = func(v any, i int) any { - switch v := v.(*ListSnapshotsResponse); i { + switch v := v.(*GetSnapshotRequest); i { case 0: return &v.state case 1: @@ -6488,7 +7510,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[44].Exporter = func(v any, i int) any { - switch v := v.(*DeleteSnapshotRequest); i { + switch v := v.(*ListSnapshotsRequest); i { case 0: return &v.state case 1: @@ -6500,7 +7522,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[45].Exporter = func(v any, i int) any { - switch v := v.(*SeekRequest); i { + switch v := v.(*ListSnapshotsResponse); i { case 0: return &v.state case 1: @@ -6512,7 +7534,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[46].Exporter = func(v any, i int) any { - switch v := v.(*SeekResponse); i { + switch v := v.(*DeleteSnapshotRequest); i { case 0: return &v.state case 1: @@ -6524,6 +7546,30 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[47].Exporter = func(v any, i int) any { + switch v := v.(*SeekRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[48].Exporter = func(v any, i int) any { + switch v := v.(*SeekResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[49].Exporter = func(v any, i int) any { switch v := v.(*IngestionDataSourceSettings_AwsKinesis); i { case 0: return &v.state @@ -6535,8 +7581,20 @@ func file_google_pubsub_v1_pubsub_proto_init() { return nil } } + file_google_pubsub_v1_pubsub_proto_msgTypes[50].Exporter = func(v any, i int) any { + switch v := v.(*IngestionDataSourceSettings_CloudStorage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } 
file_google_pubsub_v1_pubsub_proto_msgTypes[51].Exporter = func(v any, i int) any { - switch v := v.(*PushConfig_OidcToken); i { + switch v := v.(*IngestionDataSourceSettings_CloudStorage_TextFormat); i { case 0: return &v.state case 1: @@ -6548,7 +7606,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[52].Exporter = func(v any, i int) any { - switch v := v.(*PushConfig_PubsubWrapper); i { + switch v := v.(*IngestionDataSourceSettings_CloudStorage_AvroFormat); i { case 0: return &v.state case 1: @@ -6560,7 +7618,19 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[53].Exporter = func(v any, i int) any { - switch v := v.(*PushConfig_NoWrapper); i { + switch v := v.(*IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[54].Exporter = func(v any, i int) any { + switch v := v.(*IngestionFailureEvent_ApiViolationReason); i { case 0: return &v.state case 1: @@ -6572,7 +7642,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[55].Exporter = func(v any, i int) any { - switch v := v.(*CloudStorageConfig_TextConfig); i { + switch v := v.(*IngestionFailureEvent_AvroFailureReason); i { case 0: return &v.state case 1: @@ -6584,6 +7654,78 @@ func file_google_pubsub_v1_pubsub_proto_init() { } } file_google_pubsub_v1_pubsub_proto_msgTypes[56].Exporter = func(v any, i int) any { + switch v := v.(*IngestionFailureEvent_CloudStorageFailure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[59].Exporter = func(v any, i int) any { + switch v := v.(*Subscription_AnalyticsHubSubscriptionInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[61].Exporter = func(v any, i int) any { + switch v := v.(*PushConfig_OidcToken); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[62].Exporter = func(v any, i int) any { + switch v := v.(*PushConfig_PubsubWrapper); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[63].Exporter = func(v any, i int) any { + switch v := v.(*PushConfig_NoWrapper); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[65].Exporter = func(v any, i int) any { + switch v := v.(*CloudStorageConfig_TextConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[66].Exporter = func(v any, i int) any { switch v := v.(*CloudStorageConfig_AvroConfig); i { case 0: return &v.state @@ -6595,7 +7737,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { return nil } } - file_google_pubsub_v1_pubsub_proto_msgTypes[57].Exporter = func(v any, i int) any { + file_google_pubsub_v1_pubsub_proto_msgTypes[67].Exporter = func(v any, i 
int) any { switch v := v.(*StreamingPullResponse_AcknowledgeConfirmation); i { case 0: return &v.state @@ -6607,7 +7749,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { return nil } } - file_google_pubsub_v1_pubsub_proto_msgTypes[58].Exporter = func(v any, i int) any { + file_google_pubsub_v1_pubsub_proto_msgTypes[68].Exporter = func(v any, i int) any { switch v := v.(*StreamingPullResponse_ModifyAckDeadlineConfirmation); i { case 0: return &v.state @@ -6619,7 +7761,7 @@ func file_google_pubsub_v1_pubsub_proto_init() { return nil } } - file_google_pubsub_v1_pubsub_proto_msgTypes[59].Exporter = func(v any, i int) any { + file_google_pubsub_v1_pubsub_proto_msgTypes[69].Exporter = func(v any, i int) any { switch v := v.(*StreamingPullResponse_SubscriptionProperties); i { case 0: return &v.state @@ -6634,27 +7776,41 @@ func file_google_pubsub_v1_pubsub_proto_init() { } file_google_pubsub_v1_pubsub_proto_msgTypes[2].OneofWrappers = []any{ (*IngestionDataSourceSettings_AwsKinesis_)(nil), + (*IngestionDataSourceSettings_CloudStorage_)(nil), + } + file_google_pubsub_v1_pubsub_proto_msgTypes[4].OneofWrappers = []any{ + (*IngestionFailureEvent_CloudStorageFailure_)(nil), } - file_google_pubsub_v1_pubsub_proto_msgTypes[22].OneofWrappers = []any{ + file_google_pubsub_v1_pubsub_proto_msgTypes[24].OneofWrappers = []any{ (*PushConfig_OidcToken_)(nil), (*PushConfig_PubsubWrapper_)(nil), (*PushConfig_NoWrapper_)(nil), } - file_google_pubsub_v1_pubsub_proto_msgTypes[24].OneofWrappers = []any{ + file_google_pubsub_v1_pubsub_proto_msgTypes[26].OneofWrappers = []any{ (*CloudStorageConfig_TextConfig_)(nil), (*CloudStorageConfig_AvroConfig_)(nil), } - file_google_pubsub_v1_pubsub_proto_msgTypes[45].OneofWrappers = []any{ + file_google_pubsub_v1_pubsub_proto_msgTypes[47].OneofWrappers = []any{ (*SeekRequest_Time)(nil), (*SeekRequest_Snapshot)(nil), } + file_google_pubsub_v1_pubsub_proto_msgTypes[50].OneofWrappers = []any{ + (*IngestionDataSourceSettings_CloudStorage_TextFormat_)(nil), + (*IngestionDataSourceSettings_CloudStorage_AvroFormat_)(nil), + (*IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat)(nil), + } + file_google_pubsub_v1_pubsub_proto_msgTypes[51].OneofWrappers = []any{} + file_google_pubsub_v1_pubsub_proto_msgTypes[56].OneofWrappers = []any{ + (*IngestionFailureEvent_CloudStorageFailure_AvroFailureReason)(nil), + (*IngestionFailureEvent_CloudStorageFailure_ApiViolationReason)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_pubsub_v1_pubsub_proto_rawDesc, - NumEnums: 5, - NumMessages: 62, + NumEnums: 7, + NumMessages: 72, NumExtensions: 0, NumServices: 2, }, diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go index 4013a77e..18ea325b 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/schema_client.go @@ -69,6 +69,7 @@ func defaultSchemaGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -572,6 +573,7 @@ func defaultSchemaRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), 
internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go index 65d3ce16..6b673129 100644 --- a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go @@ -76,6 +76,7 @@ func defaultSubscriberGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), + internaloption.EnableNewAuthLibrary(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -876,6 +877,7 @@ func defaultSubscriberRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://pubsub.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } diff --git a/vendor/cloud.google.com/go/pubsub/doc.go b/vendor/cloud.google.com/go/pubsub/doc.go index 6107d7a6..9d9a2818 100644 --- a/vendor/cloud.google.com/go/pubsub/doc.go +++ b/vendor/cloud.google.com/go/pubsub/doc.go @@ -14,11 +14,11 @@ /* Package pubsub provides an easy way to publish and receive Google Cloud Pub/Sub -messages, hiding the details of the underlying server RPCs. Google Cloud +messages, hiding the details of the underlying server RPCs. Pub/Sub is a many-to-many, asynchronous messaging system that decouples senders and receivers. -More information about Google Cloud Pub/Sub is available at +More information about Pub/Sub is available at https://cloud.google.com/pubsub/docs See https://godoc.org/cloud.google.com/go for authentication, timeouts, @@ -26,39 +26,39 @@ connection pooling and similar aspects of this package. # Publishing -Google Cloud Pub/Sub messages are published to topics. Topics may be created -using the pubsub package like so: +Pub/Sub messages are published to topics. A [Topic] may be created +using [Client.CreateTopic] like so: topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name") -Messages may then be published to a topic: +Messages may then be published to a [Topic]: res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")}) -Publish queues the message for publishing and returns immediately. When enough +[Topic.Publish] queues the message for publishing and returns immediately. When enough messages have accumulated, or enough time has elapsed, the batch of messages is sent to the Pub/Sub service. -Publish returns a PublishResult, which behaves like a future: its Get method +[Topic.Publish] returns a [PublishResult], which behaves like a future: its Get method blocks until the message has been sent to the service. -The first time you call Publish on a topic, goroutines are started in the -background. To clean up these goroutines, call Stop: +The first time you call [Topic.Publish] on a [Topic], goroutines are started in the +background. To clean up these goroutines, call [Topic.Stop]: topic.Stop() # Receiving -To receive messages published to a topic, clients create subscriptions -to the topic. There may be more than one subscription per topic; each message -that is published to the topic will be delivered to all of its subscriptions. 
+To receive messages published to a [Topic], clients create a [Subscription] +for the topic. There may be more than one subscription per topic ; each message +that is published to the topic will be delivered to all associated subscriptions. -Subscriptions may be created like so: +A [Subscription] may be created like so: sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name", pubsub.SubscriptionConfig{Topic: topic}) -Messages are then consumed from a subscription via callback. +Messages are then consumed from a [Subscription] via callback. err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) { log.Printf("Got message: %s", m.Data) @@ -69,19 +69,19 @@ Messages are then consumed from a subscription via callback. } The callback is invoked concurrently by multiple goroutines, maximizing -throughput. To terminate a call to Receive, cancel its context. +throughput. To terminate a call to [Subscription.Receive], cancel its context. -Once client code has processed the message, it must call Message.Ack or -Message.Nack; otherwise the message will eventually be redelivered. Ack/Nack -MUST be called within the Receive handler function, and not from a goroutine. +Once client code has processed the [Message], it must call Message.Ack or +Message.Nack; otherwise the Message will eventually be redelivered. Ack/Nack +MUST be called within the [Subscription.Receive] handler function, and not from a goroutine. Otherwise, flow control (e.g. ReceiveSettings.MaxOutstandingMessages) will not be respected, and messages can get orphaned when cancelling Receive. If the client cannot or doesn't want to process the message, it can call Message.Nack to speed redelivery. For more information and configuration options, see -"Ack Deadlines" below. +Ack Deadlines below. -Note: It is possible for Messages to be redelivered even if Message.Ack has +Note: It is possible for a [Message] to be redelivered even if Message.Ack has been called. Client code must be robust to multiple deliveries of messages. Note: This uses pubsub's streaming pull feature. This feature has properties that @@ -91,7 +91,7 @@ pull method. # Streams Management -The number of StreamingPull connections can be configured by setting sub.ReceiveSettings.NumGoroutines. +The number of StreamingPull connections can be configured by setting NumGoroutines in [ReceiveSettings]. The default value of 10 means the client library will maintain 10 StreamingPull connections. This is more than sufficient for most use cases, as StreamingPull connections can handle up to 10 MB/s https://cloud.google.com/pubsub/quotas#resource_limits. In some cases, using too many streams @@ -125,10 +125,8 @@ either: - The "MaxExtension" duration elapses from the time the message is fetched from the server. This defaults to 60m. -Ack deadlines are extended periodically by the client. The initial ack -deadline given to messages is based on the subscription's AckDeadline property, -which defaults to 10s. The period between extensions, as well as the -length of the extension, automatically adjusts based on the time it takes the +Ack deadlines are extended periodically by the client. The period between extensions, +as well as the length of the extension, automatically adjusts based on the time it takes the subscriber application to ack messages (based on the 99th percentile of ack latency). By default, this extension period is capped at 10m, but this limit can be configured by the "MaxExtensionPeriod" setting. 
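The doc.go changes in this hunk are documentation-only, but they restate the library's core contract: Publish is asynchronous and returns a PublishResult, Stop cleans up the publisher goroutines, and Ack/Nack must be called inside the Receive callback rather than from a separate goroutine. A minimal end-to-end sketch of that contract, assuming a hypothetical project ID, topic name, and subscription name (error handling trimmed):

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	topic := client.Topic("topic-name") // hypothetical existing topic
	defer topic.Stop()                  // stop the background publishing goroutines

	// Publish queues the message and returns a PublishResult; Get blocks until
	// the message has been sent to the service.
	res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")})
	if _, err := res.Get(ctx); err != nil {
		log.Fatal(err)
	}

	// Ack (or Nack) is called inside the Receive handler so flow control
	// (e.g. MaxOutstandingMessages) is respected.
	sub := client.Subscription("sub-name") // hypothetical existing subscription
	err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
		log.Printf("Got message: %s", m.Data)
		m.Ack()
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

Receive blocks until its context is cancelled or an unrecoverable error occurs, so real callers typically pass a cancellable context rather than context.Background().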
This has the effect that subscribers that process @@ -144,15 +142,7 @@ library sends such an extension: the Pub/Sub server would wait the remaining 2m55s before re-sending the messages out to other subscribers. Please note that by default, the client library does not use the subscription's -AckDeadline for the MaxExtension value. To enforce the subscription's AckDeadline, -set MaxExtension to the subscription's AckDeadline: - - cfg, err := sub.Config(ctx) - if err != nil { - // TODO: handle err - } - - sub.ReceiveSettings.MaxExtension = cfg.AckDeadline +AckDeadline for the MaxExtension value. # Slow Message Processing @@ -164,7 +154,7 @@ by firewalls. See the example at https://godoc.org/cloud.google.com/go/pubsub/ap To use an emulator with this library, you can set the PUBSUB_EMULATOR_HOST environment variable to the address at which your emulator is running. This will -send requests to that address instead of to Cloud Pub/Sub. You can then create +send requests to that address instead of to Pub/Sub. You can then create and use a client as usual: // Set PUBSUB_EMULATOR_HOST environment variable. diff --git a/vendor/cloud.google.com/go/pubsub/internal/version.go b/vendor/cloud.google.com/go/pubsub/internal/version.go index f37b8600..e7729577 100644 --- a/vendor/cloud.google.com/go/pubsub/internal/version.go +++ b/vendor/cloud.google.com/go/pubsub/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.42.0" +const Version = "1.45.1" diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go index 9f603590..4f791fa6 100644 --- a/vendor/cloud.google.com/go/pubsub/iterator.go +++ b/vendor/cloud.google.com/go/pubsub/iterator.go @@ -335,14 +335,17 @@ func (it *messageIterator) receive(maxToPull int32) ([]*Message, error) { if m.Attributes != nil { ctx = propagation.TraceContext{}.Extract(ctx, newMessageCarrier(m)) } - attr := getSubscriberOpts(it.projectID, it.subID, m) - _, span := startSpan(ctx, subscribeSpanName, it.subID, attr...) - span.SetAttributes( - attribute.Bool(eosAttribute, it.enableExactlyOnceDelivery), - attribute.String(ackIDAttribute, ackID), - semconv.MessagingBatchMessageCount(len(msgs)), - semconv.CodeFunction("receive"), + opts := getSubscriberOpts(it.projectID, it.subID, m) + opts = append( + opts, + trace.WithAttributes( + attribute.Bool(eosAttribute, it.enableExactlyOnceDelivery), + attribute.String(ackIDAttribute, ackID), + semconv.MessagingBatchMessageCount(len(msgs)), + semconv.CodeFunction("receive"), + ), ) + _, span := startSpan(ctx, subscribeSpanName, it.subID, opts...) // Always store the subscribe span, even if sampling isn't enabled. // This is useful since we need to propagate the sampling flag // to the callback in Receive, so traces have an unbroken sampling decision. @@ -658,11 +661,16 @@ func (it *messageIterator) sendAck(m map[string]*AckResult) { // Create the single ack span for this request, and for each // message, add Subscribe<->Ack links. opts := getCommonOptions(it.projectID, it.subID) - opts = append(opts, trace.WithLinks(links...)) + opts = append( + opts, + trace.WithLinks(links...), + trace.WithAttributes( + semconv.MessagingBatchMessageCount(len(ackIDs)), + semconv.CodeFunction("sendAck"), + ), + ) _, ackSpan := startSpan(context.Background(), ackSpanName, it.subID, opts...) 
defer ackSpan.End() - ackSpan.SetAttributes(semconv.MessagingBatchMessageCount(len(ackIDs)), - semconv.CodeFunction("sendAck")) if ackSpan.SpanContext().IsSampled() { for _, s := range subscribeSpans { s.AddLink(trace.Link{ @@ -740,16 +748,25 @@ func (it *messageIterator) sendModAck(m map[string]*AckResult, deadline time.Dur // Create the single modack/nack span for this request, and for each // message, add Subscribe<->Modack links. opts := getCommonOptions(it.projectID, it.subID) - opts = append(opts, trace.WithLinks(links...)) - _, mSpan := startSpan(context.Background(), spanName, it.subID, opts...) - defer mSpan.End() + opts = append( + opts, + trace.WithLinks(links...), + trace.WithAttributes( + semconv.MessagingBatchMessageCount(len(ackIDs)), + semconv.CodeFunction("sendModAck"), + ), + ) if !isNack { - mSpan.SetAttributes( - semconv.MessagingGCPPubsubMessageAckDeadline(int(deadlineSec)), - attribute.Bool(receiptModackAttribute, isReceipt)) + opts = append( + opts, + trace.WithAttributes( + semconv.MessagingGCPPubsubMessageAckDeadline(int(deadlineSec)), + attribute.Bool(receiptModackAttribute, isReceipt), + ), + ) } - mSpan.SetAttributes(semconv.MessagingBatchMessageCount(len(ackIDs)), - semconv.CodeFunction("sendModAck")) + _, mSpan := startSpan(context.Background(), spanName, it.subID, opts...) + defer mSpan.End() if mSpan.SpanContext().IsSampled() { for _, s := range subscribeSpans { s.AddLink(trace.Link{ diff --git a/vendor/cloud.google.com/go/pubsub/pullstream.go b/vendor/cloud.google.com/go/pubsub/pullstream.go index c5ea8f51..231e5a64 100644 --- a/vendor/cloud.google.com/go/pubsub/pullstream.go +++ b/vendor/cloud.google.com/go/pubsub/pullstream.go @@ -31,8 +31,9 @@ import ( // the stream on a retryable error. type pullStream struct { ctx context.Context - open func() (pb.Subscriber_StreamingPullClient, error) - cancel context.CancelFunc + cancel context.CancelFunc // cancel function of the context above + open func() (pb.Subscriber_StreamingPullClient, context.CancelFunc, error) + close context.CancelFunc // cancel function to close down the currently open stream mu sync.Mutex spc *pb.Subscriber_StreamingPullClient @@ -50,8 +51,9 @@ func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName return &pullStream{ ctx: ctx, cancel: cancel, - open: func() (pb.Subscriber_StreamingPullClient, error) { - spc, err := streamingPull(ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) + open: func() (pb.Subscriber_StreamingPullClient, context.CancelFunc, error) { + sctx, close := context.WithCancel(ctx) + spc, err := streamingPull(sctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) if err == nil { recordStat(ctx, StreamRequestCount, 1) streamAckDeadline := int32(maxDurationPerLeaseExtension / time.Second) @@ -69,9 +71,10 @@ func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName }) } if err != nil { - return nil, err + close() + return nil, nil, err } - return spc, nil + return spc, close, nil }, } } @@ -100,29 +103,33 @@ func (s *pullStream) get(spc *pb.Subscriber_StreamingPullClient) (*pb.Subscriber if spc != s.spc { return s.spc, nil } + // we are about to open a new stream: if necessary, make sure the previous one is closed + if s.close != nil { + s.close() + } // Either this is the very first call on this stream (s.spc == nil), or we have a valid // retry request. Either way, open a new stream. 
// The lock is held here for a long time, but it doesn't matter because no callers could get // anything done anyway. s.spc = new(pb.Subscriber_StreamingPullClient) - *s.spc, s.err = s.openWithRetry() // Any error from openWithRetry is permanent. + *s.spc, s.close, s.err = s.openWithRetry() // Any error from openWithRetry is permanent. return s.spc, s.err } -func (s *pullStream) openWithRetry() (pb.Subscriber_StreamingPullClient, error) { +func (s *pullStream) openWithRetry() (pb.Subscriber_StreamingPullClient, context.CancelFunc, error) { r := defaultRetryer{} for { recordStat(s.ctx, StreamOpenCount, 1) - spc, err := s.open() + spc, close, err := s.open() bo, shouldRetry := r.Retry(err) if err != nil && shouldRetry { recordStat(s.ctx, StreamRetryCount, 1) if err := gax.Sleep(s.ctx, bo); err != nil { - return nil, err + return nil, nil, err } continue } - return spc, err + return spc, close, err } } diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go index 1991fa7f..bb916f52 100644 --- a/vendor/cloud.google.com/go/pubsub/topic.go +++ b/vendor/cloud.google.com/go/pubsub/topic.go @@ -44,6 +44,7 @@ import ( "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/durationpb" fmpb "google.golang.org/protobuf/types/known/fieldmaskpb" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -350,6 +351,9 @@ type TopicConfigToUpdate struct { // IngestionDataSourceSettings are settings for ingestion from a // data source into this topic. // + // When changing this value, the entire data source settings object must be applied, + // rather than just the differences. This includes the source and logging settings. + // // Use the zero value &IngestionDataSourceSettings{} to remove the ingestion settings from the topic. IngestionDataSourceSettings *IngestionDataSourceSettings } @@ -425,6 +429,8 @@ func messageStoragePolicyToProto(msp *MessageStoragePolicy) *pb.MessageStoragePo // IngestionDataSourceSettings enables ingestion from a data source into this topic. type IngestionDataSourceSettings struct { Source IngestionDataSource + + PlatformLogsSettings *PlatformLogsSettings } // IngestionDataSource is the kind of ingestion source to be used. @@ -495,6 +501,97 @@ func (i *IngestionDataSourceAWSKinesis) isIngestionDataSource() bool { return true } +// CloudStorageIngestionState denotes the possible states for ingestion from Cloud Storage. +type CloudStorageIngestionState int + +const ( + // CloudStorageIngestionStateUnspecified is the default value. This value is unused. + CloudStorageIngestionStateUnspecified = iota + + // CloudStorageIngestionStateActive means ingestion is active. + CloudStorageIngestionStateActive + + // CloudStorageIngestionPermissionDenied means encountering an error while calling the Cloud Storage API. + // This can happen if the Pub/Sub SA has not been granted the + // [appropriate permissions](https://cloud.google.com/storage/docs/access-control/iam-permissions): + // - storage.objects.list: to list the objects in a bucket. + // - storage.objects.get: to read the objects in a bucket. + // - storage.buckets.get: to verify the bucket exists. + CloudStorageIngestionPermissionDenied + + // CloudStorageIngestionPublishPermissionDenied means encountering an error when publishing to the topic. 
+ // This can happen if the Pub/Sub SA has not been granted the [appropriate publish + // permissions](https://cloud.google.com/pubsub/docs/access-control#pubsub.publisher) + CloudStorageIngestionPublishPermissionDenied + + // CloudStorageIngestionBucketNotFound means the provided bucket doesn't exist. + CloudStorageIngestionBucketNotFound + + // CloudStorageIngestionTooManyObjects means the bucket has too many objects, ingestion will be paused. + CloudStorageIngestionTooManyObjects +) + +// IngestionDataSourceCloudStorage are ingestion settings for Cloud Storage. +type IngestionDataSourceCloudStorage struct { + // State is an output-only field indicating the state of the Cloud storage ingestion source. + State CloudStorageIngestionState + + // Bucket is the Cloud Storage bucket. The bucket name must be without any + // prefix like "gs://". See the bucket naming requirements (https://cloud.google.com/storage/docs/buckets#naming). + Bucket string + + // InputFormat is the format of objects in Cloud Storage. + // Defaults to TextFormat. + InputFormat ingestionDataSourceCloudStorageInputFormat + + // MinimumObjectCreateTime means objects with a larger or equal creation timestamp will be + // ingested. + MinimumObjectCreateTime time.Time + + // MatchGlob is the pattern used to match objects that will be ingested. If + // empty, all objects will be ingested. See the [supported + // patterns](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob). + MatchGlob string +} + +var _ IngestionDataSource = (*IngestionDataSourceCloudStorage)(nil) + +func (i *IngestionDataSourceCloudStorage) isIngestionDataSource() bool { + return true +} + +type ingestionDataSourceCloudStorageInputFormat interface { + isCloudStorageIngestionInputFormat() bool +} + +var _ ingestionDataSourceCloudStorageInputFormat = (*IngestionDataSourceCloudStorageTextFormat)(nil) +var _ ingestionDataSourceCloudStorageInputFormat = (*IngestionDataSourceCloudStorageAvroFormat)(nil) +var _ ingestionDataSourceCloudStorageInputFormat = (*IngestionDataSourceCloudStoragePubSubAvroFormat)(nil) + +// IngestionDataSourceCloudStorageTextFormat means Cloud Storage data will be interpreted as text. +type IngestionDataSourceCloudStorageTextFormat struct { + Delimiter string +} + +func (i *IngestionDataSourceCloudStorageTextFormat) isCloudStorageIngestionInputFormat() bool { + return true +} + +// IngestionDataSourceCloudStorageAvroFormat means Cloud Storage data will be interpreted in Avro format. +type IngestionDataSourceCloudStorageAvroFormat struct{} + +func (i *IngestionDataSourceCloudStorageAvroFormat) isCloudStorageIngestionInputFormat() bool { + return true +} + +// IngestionDataSourceCloudStoragePubSubAvroFormat is used assuming the data was written using Cloud +// Storage subscriptions https://cloud.google.com/pubsub/docs/cloudstorage. 
+type IngestionDataSourceCloudStoragePubSubAvroFormat struct{} + +func (i *IngestionDataSourceCloudStoragePubSubAvroFormat) isCloudStorageIngestionInputFormat() bool { + return true +} + func protoToIngestionDataSourceSettings(pbs *pb.IngestionDataSourceSettings) *IngestionDataSourceSettings { if pbs == nil { return nil @@ -509,7 +606,33 @@ func protoToIngestionDataSourceSettings(pbs *pb.IngestionDataSourceSettings) *In AWSRoleARN: k.GetAwsRoleArn(), GCPServiceAccount: k.GetGcpServiceAccount(), } + } else if cs := pbs.GetCloudStorage(); cs != nil { + var format ingestionDataSourceCloudStorageInputFormat + switch t := cs.InputFormat.(type) { + case *pb.IngestionDataSourceSettings_CloudStorage_TextFormat_: + format = &IngestionDataSourceCloudStorageTextFormat{ + Delimiter: *t.TextFormat.Delimiter, + } + case *pb.IngestionDataSourceSettings_CloudStorage_AvroFormat_: + format = &IngestionDataSourceCloudStorageAvroFormat{} + case *pb.IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat: + format = &IngestionDataSourceCloudStoragePubSubAvroFormat{} + } + s.Source = &IngestionDataSourceCloudStorage{ + State: CloudStorageIngestionState(cs.GetState()), + Bucket: cs.GetBucket(), + InputFormat: format, + MinimumObjectCreateTime: cs.GetMinimumObjectCreateTime().AsTime(), + MatchGlob: cs.GetMatchGlob(), + } + } + + if pbs.PlatformLogsSettings != nil { + s.PlatformLogsSettings = &PlatformLogsSettings{ + Severity: PlatformLogsSeverity(pbs.PlatformLogsSettings.Severity), + } } + return s } @@ -522,6 +645,11 @@ func (i *IngestionDataSourceSettings) toProto() *pb.IngestionDataSourceSettings return nil } pbs := &pb.IngestionDataSourceSettings{} + if i.PlatformLogsSettings != nil { + pbs.PlatformLogsSettings = &pb.PlatformLogsSettings{ + Severity: pb.PlatformLogsSettings_Severity(i.PlatformLogsSettings.Severity), + } + } if out := i.Source; out != nil { if k, ok := out.(*IngestionDataSourceAWSKinesis); ok { pbs.Source = &pb.IngestionDataSourceSettings_AwsKinesis_{ @@ -534,10 +662,76 @@ func (i *IngestionDataSourceSettings) toProto() *pb.IngestionDataSourceSettings }, } } + if cs, ok := out.(*IngestionDataSourceCloudStorage); ok { + switch format := cs.InputFormat.(type) { + case *IngestionDataSourceCloudStorageTextFormat: + pbs.Source = &pb.IngestionDataSourceSettings_CloudStorage_{ + CloudStorage: &pb.IngestionDataSourceSettings_CloudStorage{ + State: pb.IngestionDataSourceSettings_CloudStorage_State(cs.State), + Bucket: cs.Bucket, + InputFormat: &pb.IngestionDataSourceSettings_CloudStorage_TextFormat_{ + TextFormat: &pb.IngestionDataSourceSettings_CloudStorage_TextFormat{ + Delimiter: &format.Delimiter, + }, + }, + MinimumObjectCreateTime: timestamppb.New(cs.MinimumObjectCreateTime), + MatchGlob: cs.MatchGlob, + }, + } + case *IngestionDataSourceCloudStorageAvroFormat: + pbs.Source = &pb.IngestionDataSourceSettings_CloudStorage_{ + CloudStorage: &pb.IngestionDataSourceSettings_CloudStorage{ + State: pb.IngestionDataSourceSettings_CloudStorage_State(cs.State), + Bucket: cs.Bucket, + InputFormat: &pb.IngestionDataSourceSettings_CloudStorage_AvroFormat_{ + AvroFormat: &pb.IngestionDataSourceSettings_CloudStorage_AvroFormat{}, + }, + MinimumObjectCreateTime: timestamppb.New(cs.MinimumObjectCreateTime), + MatchGlob: cs.MatchGlob, + }, + } + case *IngestionDataSourceCloudStoragePubSubAvroFormat: + pbs.Source = &pb.IngestionDataSourceSettings_CloudStorage_{ + CloudStorage: &pb.IngestionDataSourceSettings_CloudStorage{ + State: pb.IngestionDataSourceSettings_CloudStorage_State(cs.State), + Bucket: cs.Bucket, + 
InputFormat: &pb.IngestionDataSourceSettings_CloudStorage_PubsubAvroFormat{ + PubsubAvroFormat: &pb.IngestionDataSourceSettings_CloudStorage_PubSubAvroFormat{}, + }, + MinimumObjectCreateTime: timestamppb.New(cs.MinimumObjectCreateTime), + MatchGlob: cs.MatchGlob, + }, + } + } + } } return pbs } +// PlatformLogsSettings configures logging produced by Pub/Sub. +// Currently only valid on Cloud Storage ingestion topics. +type PlatformLogsSettings struct { + Severity PlatformLogsSeverity +} + +// PlatformLogsSeverity are the severity levels of Platform Logs. +type PlatformLogsSeverity int32 + +const ( + // PlatformLogsSeverityUnspecified is the default value. Logs level is unspecified. Logs will be disabled. + PlatformLogsSeverityUnspecified PlatformLogsSeverity = iota + // PlatformLogsSeverityDisabled means logs will be disabled. + PlatformLogsSeverityDisabled + // PlatformLogsSeverityDebug means debug logs and higher-severity logs will be written. + PlatformLogsSeverityDebug + // PlatformLogsSeverityInfo means info logs and higher-severity logs will be written. + PlatformLogsSeverityInfo + // PlatformLogsSeverityWarning means warning logs and higher-severity logs will be written. + PlatformLogsSeverityWarning + // PlatformLogsSeverityError means only error logs will be written. + PlatformLogsSeverityError +) + // Config returns the TopicConfig for the topic. func (t *Topic) Config(ctx context.Context) (TopicConfig, error) { pbt, err := t.c.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: t.name}) @@ -748,8 +942,8 @@ func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult { var createSpan trace.Span if t.enableTracing { opts := getPublishSpanAttributes(t.c.projectID, t.ID(), msg) + opts = append(opts, trace.WithAttributes(semconv.CodeFunction("Publish"))) ctx, createSpan = startSpan(ctx, createSpanName, t.ID(), opts...) - createSpan.SetAttributes(semconv.CodeFunction("Publish")) } ctx, err := tag.New(ctx, tag.Insert(keyStatus, "OK"), tag.Upsert(keyTopic, t.name)) if err != nil { @@ -973,8 +1167,14 @@ func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) opts := getCommonOptions(projectID, topicID) // Add link to publish RPC span of createSpan(s). opts = append(opts, trace.WithLinks(links...)) + opts = append( + opts, + trace.WithAttributes( + semconv.MessagingBatchMessageCount(numMsgs), + semconv.CodeFunction("publishMessageBundle"), + ), + ) ctx, pSpan = startSpan(ctx, publishRPCSpanName, topicID, opts...) - pSpan.SetAttributes(semconv.MessagingBatchMessageCount(numMsgs), semconv.CodeFunction("publishMessageBundle")) defer pSpan.End() // Add the reverse link to createSpan(s) of publish RPC span. diff --git a/vendor/cloud.google.com/go/pubsub/trace.go b/vendor/cloud.google.com/go/pubsub/trace.go index 1d41e9d8..51112bb5 100644 --- a/vendor/cloud.google.com/go/pubsub/trace.go +++ b/vendor/cloud.google.com/go/pubsub/trace.go @@ -20,6 +20,7 @@ import ( "log" "sync" + pb "cloud.google.com/go/pubsub/apiv1/pubsubpb" "cloud.google.com/go/pubsub/internal" "go.opencensus.io/stats" "go.opencensus.io/stats/view" @@ -273,33 +274,42 @@ func tracer() trace.Tracer { var _ propagation.TextMapCarrier = (*messageCarrier)(nil) -// messageCarrier injects and extracts traces from a pubsub.Message. +// messageCarrier injects and extracts traces from pubsub.Message attributes. type messageCarrier struct { - msg *Message + attributes map[string]string } const googclientPrefix string = "googclient_" // newMessageCarrier creates a new PubsubMessageCarrier. 
func newMessageCarrier(msg *Message) messageCarrier { - return messageCarrier{msg: msg} + return messageCarrier{attributes: msg.Attributes} +} + +// NewMessageCarrierFromPB creates a propagation.TextMapCarrier that can be used to extract the trace +// context from a protobuf PubsubMessage. +// +// Example: +// ctx = propagation.TraceContext{}.Extract(ctx, pubsub.NewMessageCarrierFromPB(msg)) +func NewMessageCarrierFromPB(msg *pb.PubsubMessage) propagation.TextMapCarrier { + return messageCarrier{attributes: msg.Attributes} } // Get retrieves a single value for a given key. func (c messageCarrier) Get(key string) string { - return c.msg.Attributes[googclientPrefix+key] + return c.attributes[googclientPrefix+key] } // Set sets an attribute. func (c messageCarrier) Set(key, val string) { - c.msg.Attributes[googclientPrefix+key] = val + c.attributes[googclientPrefix+key] = val } // Keys returns a slice of all keys in the carrier. func (c messageCarrier) Keys() []string { i := 0 - out := make([]string, len(c.msg.Attributes)) - for k := range c.msg.Attributes { + out := make([]string, len(c.attributes)) + for k := range c.attributes { out[i] = k i++ } diff --git a/vendor/github.com/DataDog/appsec-internal-go/appsec/embed.go b/vendor/github.com/DataDog/appsec-internal-go/appsec/embed.go index cfa0a5d8..9c288849 100644 --- a/vendor/github.com/DataDog/appsec-internal-go/appsec/embed.go +++ b/vendor/github.com/DataDog/appsec-internal-go/appsec/embed.go @@ -7,8 +7,8 @@ package appsec import _ "embed" // Blank import comment for golint compliance -// StaticRecommendedRules holds the recommended AppSec security rules (v1.12.0) -// Source: https://github.com/DataDog/appsec-event-rules/blob/1.12.0/build/recommended.json +// StaticRecommendedRules holds the recommended AppSec security rules (v1.13.0) +// Source: https://github.com/DataDog/appsec-event-rules/blob/1.13.0/build/recommended.json // //go:embed rules.json var StaticRecommendedRules string diff --git a/vendor/github.com/DataDog/appsec-internal-go/appsec/rules.json b/vendor/github.com/DataDog/appsec-internal-go/appsec/rules.json index 0b25be93..d05b1e62 100644 --- a/vendor/github.com/DataDog/appsec-internal-go/appsec/rules.json +++ b/vendor/github.com/DataDog/appsec-internal-go/appsec/rules.json @@ -1,7 +1,7 @@ { "version": "2.2", "metadata": { - "rules_version": "1.12.0" + "rules_version": "1.13.1" }, "rules": [ { @@ -6239,7 +6239,6 @@ { "id": "rasp-930-100", "name": "Local file inclusion exploit", - "enabled": false, "tags": { "type": "lfi", "category": "vulnerability_trigger", @@ -6285,10 +6284,57 @@ "stack_trace" ] }, + { + "id": "rasp-932-100", + "name": "Shell injection exploit", + "tags": { + "type": "command_injection", + "category": "vulnerability_trigger", + "cwe": "77", + "capec": "1000/152/248/88", + "confidence": "0", + "module": "rasp" + }, + "conditions": [ + { + "parameters": { + "resource": [ + { + "address": "server.sys.shell.cmd" + } + ], + "params": [ + { + "address": "server.request.query" + }, + { + "address": "server.request.body" + }, + { + "address": "server.request.path_params" + }, + { + "address": "grpc.server.request.message" + }, + { + "address": "graphql.server.all_resolvers" + }, + { + "address": "graphql.server.resolver" + } + ] + }, + "operator": "shi_detector" + } + ], + "transformers": [], + "on_match": [ + "stack_trace" + ] + }, { "id": "rasp-934-100", "name": "Server-side request forgery exploit", - "enabled": false, "tags": { "type": "ssrf", "category": "vulnerability_trigger", @@ -6337,7 +6383,6 @@ { "id": 
"rasp-942-100", "name": "SQL injection exploit", - "enabled": false, "tags": { "type": "sql_injection", "category": "vulnerability_trigger", @@ -8388,6 +8433,57 @@ } ], "processors": [ + { + "id": "http-endpoint-fingerprint", + "generator": "http_endpoint_fingerprint", + "conditions": [ + { + "operator": "exists", + "parameters": { + "inputs": [ + { + "address": "waf.context.event" + }, + { + "address": "server.business_logic.users.login.failure" + }, + { + "address": "server.business_logic.users.login.success" + } + ] + } + } + ], + "parameters": { + "mappings": [ + { + "method": [ + { + "address": "server.request.method" + } + ], + "uri_raw": [ + { + "address": "server.request.uri.raw" + } + ], + "body": [ + { + "address": "server.request.body" + } + ], + "query": [ + { + "address": "server.request.query" + } + ], + "output": "_dd.appsec.fp.http.endpoint" + } + ] + }, + "evaluate": false, + "output": true + }, { "id": "extract-content", "generator": "extract_schema", @@ -8537,6 +8633,124 @@ }, "evaluate": false, "output": true + }, + { + "id": "http-header-fingerprint", + "generator": "http_header_fingerprint", + "conditions": [ + { + "operator": "exists", + "parameters": { + "inputs": [ + { + "address": "waf.context.event" + }, + { + "address": "server.business_logic.users.login.failure" + }, + { + "address": "server.business_logic.users.login.success" + } + ] + } + } + ], + "parameters": { + "mappings": [ + { + "headers": [ + { + "address": "server.request.headers.no_cookies" + } + ], + "output": "_dd.appsec.fp.http.header" + } + ] + }, + "evaluate": false, + "output": true + }, + { + "id": "http-network-fingerprint", + "generator": "http_network_fingerprint", + "conditions": [ + { + "operator": "exists", + "parameters": { + "inputs": [ + { + "address": "waf.context.event" + }, + { + "address": "server.business_logic.users.login.failure" + }, + { + "address": "server.business_logic.users.login.success" + } + ] + } + } + ], + "parameters": { + "mappings": [ + { + "headers": [ + { + "address": "server.request.headers.no_cookies" + } + ], + "output": "_dd.appsec.fp.http.network" + } + ] + }, + "evaluate": false, + "output": true + }, + { + "id": "session-fingerprint", + "generator": "session_fingerprint", + "conditions": [ + { + "operator": "exists", + "parameters": { + "inputs": [ + { + "address": "waf.context.event" + }, + { + "address": "server.business_logic.users.login.failure" + }, + { + "address": "server.business_logic.users.login.success" + } + ] + } + } + ], + "parameters": { + "mappings": [ + { + "cookies": [ + { + "address": "server.request.cookies" + } + ], + "session_id": [ + { + "address": "usr.session_id" + } + ], + "user_id": [ + { + "address": "usr.id" + } + ], + "output": "_dd.appsec.fp.session" + } + ] + }, + "evaluate": false, + "output": true } ], "scanners": [ @@ -9562,4 +9776,4 @@ } } ] -} \ No newline at end of file +} diff --git a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go index f6e29072..35974c5e 100644 --- a/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go +++ b/vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/products.go @@ -6,25 +6,30 @@ package state var validProducts = map[string]struct{}{ - ProductUpdaterCatalogDD: {}, - ProductUpdaterAgent: {}, - ProductUpdaterTask: {}, - ProductAgentConfig: {}, - ProductAgentFailover: {}, - ProductAgentTask: {}, - ProductAgentIntegrations: {}, - ProductAPMSampling: {}, - 
ProductCWSDD: {}, - ProductCWSCustom: {}, - ProductCWSProfiles: {}, - ProductASM: {}, - ProductASMFeatures: {}, - ProductASMDD: {}, - ProductASMData: {}, - ProductAPMTracing: {}, - ProductLiveDebugging: {}, - ProductTesting1: {}, - ProductTesting2: {}, + ProductUpdaterCatalogDD: {}, + ProductUpdaterAgent: {}, + ProductUpdaterTask: {}, + ProductAgentConfig: {}, + ProductAgentFailover: {}, + ProductAgentTask: {}, + ProductAgentIntegrations: {}, + ProductAPMSampling: {}, + ProductCWSDD: {}, + ProductCWSCustom: {}, + ProductCWSProfiles: {}, + ProductCSMSideScanning: {}, + ProductASM: {}, + ProductASMFeatures: {}, + ProductASMDD: {}, + ProductASMData: {}, + ProductAPMTracing: {}, + ProductSDSRules: {}, + ProductSDSAgentConfig: {}, + ProductLiveDebugging: {}, + ProductContainerAutoscalingSettings: {}, + ProductContainerAutoscalingValues: {}, + ProductTesting1: {}, + ProductTesting2: {}, } const ( @@ -50,6 +55,8 @@ const ( ProductCWSCustom = "CWS_CUSTOM" // ProductCWSProfiles is the cloud workload security profile product ProductCWSProfiles = "CWS_SECURITY_PROFILES" + // ProductCSMSideScanning is the side scanning product + ProductCSMSideScanning = "CSM_SIDE_SCANNING" // ProductASM is the ASM product used by customers to issue rules configurations ProductASM = "ASM" // ProductASMFeatures is the ASM product used form ASM activation through remote config @@ -60,8 +67,16 @@ const ( ProductASMData = "ASM_DATA" // ProductAPMTracing is the apm tracing product ProductAPMTracing = "APM_TRACING" + // ProductSDSRules is the SDS definitions product + ProductSDSRules = "SDS_RULES_DD" + // ProductSDSAgentConfig is the user SDS configurations product. + ProductSDSAgentConfig = "SDS_AGENT_CONFIG" // ProductLiveDebugging is the dynamic instrumentation product ProductLiveDebugging = "LIVE_DEBUGGING" + // ProductContainerAutoscalingSettings receives definition of container autoscaling + ProductContainerAutoscalingSettings = "CONTAINER_AUTOSCALING_SETTINGS" + // ProductContainerAutoscalingValues receives values for container autoscaling + ProductContainerAutoscalingValues = "CONTAINER_AUTOSCALING_VALUES" // ProductTesting1 is a product used for testing remote config ProductTesting1 = "TESTING1" // ProductTesting2 is a product used for testing remote config diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/README.md b/vendor/github.com/DataDog/go-libddwaf/v3/README.md index 56607916..013b663b 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/README.md +++ b/vendor/github.com/DataDog/go-libddwaf/v3/README.md @@ -129,13 +129,14 @@ Here is an example of the flow of operations on a simple call to Run(): This library uses [purego](https://github.com/ebitengine/purego) to implement C bindings without requiring use of CGO at compilation time. The high-level workflow is to embed the C shared library using `go:embed`, dump it into a file, open the library using `dlopen`, load the -symbols using `dlsym`, and finally call them. +symbols using `dlsym`, and finally call them. On Linux systems, using `memfd_create(2)` enables the library to be loaded without +writing to the filesystem. -> :warning: Keep in mind that **purego only works on linux/darwin for amd64/arm64 and so does go-libddwaf.** - -Another requirement of `libddwaf` is to have a FHS filesystem on your machine and, for linux, to provide `libc.so.6`, +Another requirement of `libddwaf` is to have a FHS filesystem on your machine and, for Linux, to provide `libc.so.6`, `libpthread.so.0`, and `libdl.so.2` as dynamic libraries. 
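For illustration only, the embed → memfd_create(2) → dlopen → dlsym workflow the README describes could look roughly like the sketch below. The embedded library name (libexample.so) and the exported symbol (example_add) are hypothetical, and this is not go-libddwaf's actual loading code:

```go
// Minimal sketch of the workflow described above, under the stated assumptions.
package main

import (
	_ "embed"
	"fmt"
	"os"

	"github.com/ebitengine/purego"
	"golang.org/x/sys/unix"
)

//go:embed libexample.so
var embeddedLib []byte // hypothetical embedded shared object

func loadEmbeddedLib() (uintptr, error) {
	// memfd_create(2): an anonymous, memory-backed file, so nothing is written to disk.
	fd, err := unix.MemfdCreate("libexample", 0)
	if err != nil {
		return 0, err
	}
	f := os.NewFile(uintptr(fd), fmt.Sprintf("/proc/self/fd/%d", fd))
	if _, err := f.Write(embeddedLib); err != nil {
		return 0, err
	}
	// dlopen the /proc/self/fd/<n> path backed by the memfd.
	return purego.Dlopen(f.Name(), purego.RTLD_NOW|purego.RTLD_GLOBAL)
}

func main() {
	handle, err := loadEmbeddedLib()
	if err != nil {
		panic(err)
	}
	// dlsym + call: bind the (hypothetical) C symbol to a Go function value.
	var exampleAdd func(int32, int32) int32
	purego.RegisterLibFunc(&exampleAdd, handle, "example_add")
	fmt.Println(exampleAdd(2, 3))
}
```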
+> :warning: Keep in mind that **purego only works on linux/darwin for amd64/arm64 and so does go-libddwaf.** + ## Contributing pitfalls - Cannot dlopen twice in the app lifetime on OSX. It messes with Thread Local Storage and usually finishes with a `std::bad_alloc()` diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/context.go b/vendor/github.com/DataDog/go-libddwaf/v3/context.go index a6dc7677..509f7f0c 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/context.go +++ b/vendor/github.com/DataDog/go-libddwaf/v3/context.go @@ -7,14 +7,13 @@ package waf import ( "sync" + "sync/atomic" "time" "github.com/DataDog/go-libddwaf/v3/errors" "github.com/DataDog/go-libddwaf/v3/internal/bindings" "github.com/DataDog/go-libddwaf/v3/internal/unsafe" "github.com/DataDog/go-libddwaf/v3/timer" - - "sync/atomic" ) // Context is a WAF execution context. It allows running the WAF incrementally @@ -26,9 +25,10 @@ type Context struct { cgoRefs cgoRefPool // Used to retain go data referenced by WAF Objects the context holds cContext bindings.WafContext // The C ddwaf_context pointer - timeoutCount atomic.Uint64 // Cumulative timeout count for this context. + // timeoutCount count all calls which have timeout'ed by scope. Keys are fixed at creation time. + timeoutCount map[Scope]*atomic.Uint64 - // Mutex protecting the use of cContext which is not thread-safe and cgoRefs. + // mutex protecting the use of cContext which is not thread-safe and cgoRefs. mutex sync.Mutex // timer registers the time spent in the WAF and go-libddwaf @@ -39,7 +39,7 @@ type Context struct { // truncations provides details about truncations that occurred while // encoding address data for WAF execution. - truncations map[TruncationReason][]int + truncations map[Scope]map[TruncationReason][]int } // RunAddressData provides address data to the Context.Run method. If a given key is present in both @@ -51,6 +51,8 @@ type RunAddressData struct { // Ephemeral address data is scoped to a given Context.Run call and is not persisted across calls. This is used for // protocols such as gRPC client/server streaming or GraphQL, where a single request can incur multiple subrequests. 
Ephemeral map[string]any + // Scope is the way to classify the different runs in the same context in order to have different metrics + Scope Scope } func (d RunAddressData) isEmpty() bool { @@ -70,9 +72,13 @@ func (context *Context) Run(addressData RunAddressData) (res Result, err error) return } + if addressData.Scope == "" { + addressData.Scope = DefaultScope + } + defer func() { if err == errors.ErrTimeout { - context.timeoutCount.Add(1) + context.timeoutCount[addressData.Scope].Add(1) } }() @@ -94,13 +100,13 @@ func (context *Context) Run(addressData RunAddressData) (res Result, err error) runTimer.Start() defer func() { - context.metrics.add(wafRunTag, runTimer.Stop()) - context.metrics.merge(runTimer.Stats()) + context.metrics.add(addressData.Scope, wafRunTag, runTimer.Stop()) + context.metrics.merge(addressData.Scope, runTimer.Stats()) }() wafEncodeTimer := runTimer.MustLeaf(wafEncodeTag) wafEncodeTimer.Start() - persistentData, persistentEncoder, err := context.encodeOneAddressType(addressData.Persistent, wafEncodeTimer) + persistentData, persistentEncoder, err := context.encodeOneAddressType(addressData.Scope, addressData.Persistent, wafEncodeTimer) if err != nil { wafEncodeTimer.Stop() return res, err @@ -108,7 +114,7 @@ func (context *Context) Run(addressData RunAddressData) (res Result, err error) // The WAF releases ephemeral address data at the max of each run call, so we need not keep the Go values live beyond // that in the same way we need for persistent data. We hence use a separate encoder. - ephemeralData, ephemeralEncoder, err := context.encodeOneAddressType(addressData.Ephemeral, wafEncodeTimer) + ephemeralData, ephemeralEncoder, err := context.encodeOneAddressType(addressData.Scope, addressData.Ephemeral, wafEncodeTimer) if err != nil { wafEncodeTimer.Stop() return res, err @@ -180,7 +186,7 @@ func merge[K comparable, V any](a, b map[K][]V) (merged map[K][]V) { // is a nil map, but this behaviour is expected since either persistent or ephemeral addresses are allowed to be null // one at a time. In this case, Encode will return nil contrary to Encode which will return a nil wafObject, // which is what we need to send to ddwaf_run to signal that the address data is empty. -func (context *Context) encodeOneAddressType(addressData map[string]any, timer timer.Timer) (*bindings.WafObject, encoder, error) { +func (context *Context) encodeOneAddressType(scope Scope, addressData map[string]any, timer timer.Timer) (*bindings.WafObject, encoder, error) { encoder := newLimitedEncoder(timer) if addressData == nil { return nil, encoder, nil @@ -191,7 +197,7 @@ func (context *Context) encodeOneAddressType(addressData map[string]any, timer t context.mutex.Lock() defer context.mutex.Unlock() - context.truncations = merge(context.truncations, encoder.truncations) + context.truncations[scope] = merge(context.truncations[scope], encoder.truncations) } if timer.Exhausted() { @@ -269,14 +275,15 @@ func (context *Context) Close() { // TotalRuntime returns the cumulated WAF runtime across various run calls within the same WAF context. // Returned time is in nanoseconds. 
-// Deprecated: use Timings instead +// Deprecated: use Stats instead func (context *Context) TotalRuntime() (uint64, uint64) { - return uint64(context.metrics.get(wafRunTag)), uint64(context.metrics.get(wafDurationTag)) + return uint64(context.metrics.get(DefaultScope, wafRunTag)), uint64(context.metrics.get(DefaultScope, wafDurationTag)) } // TotalTimeouts returns the cumulated amount of WAF timeouts across various run calls within the same WAF context. +// Deprecated: use Stats instead func (context *Context) TotalTimeouts() uint64 { - return context.timeoutCount.Load() + return context.timeoutCount[DefaultScope].Load() } // Stats returns the cumulative time spent in various parts of the WAF, all in nanoseconds @@ -285,15 +292,36 @@ func (context *Context) Stats() Stats { context.mutex.Lock() defer context.mutex.Unlock() - truncations := make(map[TruncationReason][]int, len(context.truncations)) - for reason, counts := range context.truncations { + truncations := make(map[TruncationReason][]int, len(context.truncations[DefaultScope])) + for reason, counts := range context.truncations[DefaultScope] { truncations[reason] = make([]int, len(counts)) copy(truncations[reason], counts) } + raspTruncations := make(map[TruncationReason][]int, len(context.truncations[RASPScope])) + for reason, counts := range context.truncations[RASPScope] { + raspTruncations[reason] = make([]int, len(counts)) + copy(raspTruncations[reason], counts) + } + + var ( + timeoutDefault uint64 + timeoutRASP uint64 + ) + + if atomic, ok := context.timeoutCount[DefaultScope]; ok { + timeoutDefault = atomic.Load() + } + + if atomic, ok := context.timeoutCount[RASPScope]; ok { + timeoutRASP = atomic.Load() + } + return Stats{ - Timers: context.metrics.copy(), - TimeoutCount: context.timeoutCount.Load(), - Truncations: truncations, + Timers: context.metrics.timers(), + TimeoutCount: timeoutDefault, + TimeoutRASPCount: timeoutRASP, + Truncations: truncations, + TruncationsRASP: raspTruncations, } } diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/handle.go b/vendor/github.com/DataDog/go-libddwaf/v3/handle.go index daa6cb61..8c37f90e 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/handle.go +++ b/vendor/github.com/DataDog/go-libddwaf/v3/handle.go @@ -8,14 +8,13 @@ package waf import ( "errors" "fmt" + "sync/atomic" "time" wafErrors "github.com/DataDog/go-libddwaf/v3/errors" "github.com/DataDog/go-libddwaf/v3/internal/bindings" "github.com/DataDog/go-libddwaf/v3/internal/unsafe" "github.com/DataDog/go-libddwaf/v3/timer" - - "sync/atomic" ) // Handle represents an instance of the WAF for a given ruleset. @@ -70,7 +69,7 @@ func NewHandle(rules any, keyObfuscatorRegex string, valueObfuscatorRegex string cHandle := wafLib.WafInit(obj, config, diagnosticsWafObj) // Upon failure, the WAF may have produced some diagnostics to help signal what went wrong... 
var ( - diags *Diagnostics + diags = new(Diagnostics) diagsErr error ) if !diagnosticsWafObj.IsInvalid() { @@ -133,7 +132,17 @@ func (handle *Handle) NewContextWithBudget(budget time.Duration) (*Context, erro return nil, err } - return &Context{handle: handle, cContext: cContext, timer: timer, metrics: metricsStore{data: make(map[string]time.Duration, 5)}}, nil + return &Context{ + handle: handle, + cContext: cContext, + timer: timer, + metrics: metricsStore{data: make(map[metricKey]time.Duration, 5)}, + truncations: make(map[Scope]map[TruncationReason][]int, 2), + timeoutCount: map[Scope]*atomic.Uint64{ + DefaultScope: new(atomic.Uint64), + RASPScope: new(atomic.Uint64), + }, + }, nil } // Diagnostics returns the rules initialization metrics for the current WAF handle @@ -141,11 +150,16 @@ func (handle *Handle) Diagnostics() Diagnostics { return handle.diagnostics } -// Addresses returns the list of addresses the WAF rule is expecting. +// Addresses returns the list of addresses the WAF has been configured to monitor based on the input ruleset func (handle *Handle) Addresses() []string { return wafLib.WafKnownAddresses(handle.cHandle) } +// Actions returns the list of actions the WAF has been configured to monitor based on the input ruleset +func (handle *Handle) Actions() []string { + return wafLib.WafKnownActions(handle.cHandle) +} + // Update the ruleset of a WAF instance into a new handle on its own // the previous handle still needs to be closed manually func (handle *Handle) Update(newRules any) (*Handle, error) { diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/waf_dl.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/waf_dl.go index 802cd345..c50d3c6f 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/waf_dl.go +++ b/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/waf_dl.go @@ -32,6 +32,7 @@ type wafSymbols struct { update uintptr destroy uintptr knownAddresses uintptr + knownActions uintptr getVersion uintptr contextInit uintptr contextDestroy uintptr @@ -40,23 +41,23 @@ type wafSymbols struct { run uintptr } -// newWafDl loads the libddwaf shared library and resolves all tge relevant symbols. +// NewWafDl loads the libddwaf shared library and resolves all tge relevant symbols. // The caller is responsible for calling wafDl.Close on the returned object once they // are done with it so that associated resources can be released. 
func NewWafDl() (dl *WafDl, err error) { - file, err := lib.DumpEmbeddedWAF() + path, closer, err := lib.DumpEmbeddedWAF() if err != nil { - return + return nil, fmt.Errorf("dump embedded WAF: %w", err) } defer func() { - if rmErr := os.Remove(file); rmErr != nil { - err = errors.Join(err, fmt.Errorf("error removing %s: %w", file, rmErr)) + if rmErr := closer(); rmErr != nil { + err = errors.Join(err, fmt.Errorf("error removing %s: %w", path, rmErr)) } }() var handle uintptr - if handle, err = purego.Dlopen(file, purego.RTLD_GLOBAL|purego.RTLD_NOW); err != nil { - return + if handle, err = purego.Dlopen(path, purego.RTLD_GLOBAL|purego.RTLD_NOW); err != nil { + return nil, fmt.Errorf("load a dynamic library file: %w", err) } var symbols wafSymbols @@ -84,7 +85,7 @@ func NewWafDl() (dl *WafDl, err error) { if val := os.Getenv(log.EnvVarLogLevel); val != "" { setLogSym, symErr := purego.Dlsym(handle, "ddwaf_set_log_cb") if symErr != nil { - return + return nil, fmt.Errorf("get symbol: %w", symErr) } logLevel := log.LevelNamed(val) dl.syscall(setLogSym, log.CallbackFunctionPointer(), uintptr(logLevel)) @@ -97,12 +98,12 @@ func (waf *WafDl) Close() error { return purego.Dlclose(waf.handle) } -// wafGetVersion returned string is a static string so we do not need to free it +// WafGetVersion returned string is a static string so we do not need to free it func (waf *WafDl) WafGetVersion() string { return unsafe.Gostring(unsafe.Cast[byte](waf.syscall(waf.getVersion))) } -// wafInit initializes a new WAF with the provided ruleset, configuration and info objects. A +// WafInit initializes a new WAF with the provided ruleset, configuration and info objects. A // cgoRefPool ensures that the provided input values are not moved or garbage collected by the Go // runtime during the WAF call. 
func (waf *WafDl) WafInit(ruleset *WafObject, config *WafConfig, info *WafObject) WafHandle { @@ -125,15 +126,15 @@ func (waf *WafDl) WafDestroy(handle WafHandle) { unsafe.KeepAlive(handle) } -// wafKnownAddresses returns static strings so we do not need to free them -func (waf *WafDl) WafKnownAddresses(handle WafHandle) []string { +func (waf *WafDl) wafKnownX(handle WafHandle, symbol uintptr) []string { var nbAddresses uint32 - arrayVoidC := waf.syscall(waf.knownAddresses, uintptr(handle), unsafe.PtrToUintptr(&nbAddresses)) + arrayVoidC := waf.syscall(symbol, uintptr(handle), unsafe.PtrToUintptr(&nbAddresses)) if arrayVoidC == 0 { return nil } + // These C strings are static strings so we do not need to free them addresses := make([]string, int(nbAddresses)) for i := 0; i < int(nbAddresses); i++ { addresses[i] = unsafe.Gostring(*unsafe.CastWithOffset[*byte](arrayVoidC, uint64(i))) @@ -145,6 +146,14 @@ func (waf *WafDl) WafKnownAddresses(handle WafHandle) []string { return addresses } +func (waf *WafDl) WafKnownAddresses(handle WafHandle) []string { + return waf.wafKnownX(handle, waf.knownAddresses) +} + +func (waf *WafDl) WafKnownActions(handle WafHandle) []string { + return waf.wafKnownX(handle, waf.knownActions) +} + func (waf *WafDl) WafContextInit(handle WafHandle) WafContext { ctx := WafContext(waf.syscall(waf.contextInit, uintptr(handle))) unsafe.KeepAlive(handle) @@ -207,6 +216,9 @@ func resolveWafSymbols(handle uintptr) (symbols wafSymbols, err error) { if symbols.knownAddresses, err = purego.Dlsym(handle, "ddwaf_known_addresses"); err != nil { return } + if symbols.knownActions, err = purego.Dlsym(handle, "ddwaf_known_actions"); err != nil { + return + } if symbols.getVersion, err = purego.Dlsym(handle, "ddwaf_get_version"); err != nil { return } diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/waf_dl_unsupported.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/waf_dl_unsupported.go index 9e370827..9745bef1 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/waf_dl_unsupported.go +++ b/vendor/github.com/DataDog/go-libddwaf/v3/internal/bindings/waf_dl_unsupported.go @@ -33,6 +33,10 @@ func (waf *WafDl) WafKnownAddresses(handle WafHandle) []string { return nil } +func (waf *WafDl) WafKnownActions(handle WafHandle) []string { + return nil +} + func (waf *WafDl) WafContextInit(handle WafHandle) WafContext { return 0 } diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/.version b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/.version index 74406836..7df3a13a 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/.version +++ b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/.version @@ -1 +1 @@ -1.18.0 \ No newline at end of file +1.19.1 \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/dump_waf_darwin.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/dump_waf_darwin.go new file mode 100644 index 00000000..b0ec8e10 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/dump_waf_darwin.go @@ -0,0 +1,57 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build darwin && (amd64 || arm64) && !go1.24 && !datadog.no_waf && (cgo || appsec) + +package lib + +import ( + "bytes" + "compress/gzip" + _ "embed" + "errors" + "fmt" + "io" + "os" +) + +// DumpEmbeddedWAF for darwin platform. +// DumpEmbeddedWAF creates a temporary file with the embedded WAF library content and returns the path to the file, +// a closer function and an error. This is the only way to make all implementations of DumpEmbeddedWAF consistent +// across all platforms. +func DumpEmbeddedWAF() (path string, closer func() error, err error) { + file, err := os.CreateTemp("", "libddwaf-*.dylib") + if err != nil { + return "", nil, fmt.Errorf("error creating temp file: %w", err) + } + + defer func() { + if err != nil { + if closeErr := file.Close(); closeErr != nil { + err = errors.Join(err, fmt.Errorf("error closing file: %w", closeErr)) + } + if rmErr := os.Remove(file.Name()); rmErr != nil { + err = errors.Join(err, fmt.Errorf("error removing file: %w", rmErr)) + } + } + }() + + gr, err := gzip.NewReader(bytes.NewReader(libddwaf)) + if err != nil { + return "", nil, fmt.Errorf("error creating gzip reader: %w", err) + } + + if _, err := io.Copy(file, gr); err != nil { + return "", nil, fmt.Errorf("error copying gzip content to file: %w", err) + } + + if err := gr.Close(); err != nil { + return "", nil, fmt.Errorf("error closing gzip reader: %w", err) + } + + return file.Name(), func() error { + return errors.Join(file.Close(), os.Remove(file.Name())) + }, nil +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/dump_waf_linux.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/dump_waf_linux.go new file mode 100644 index 00000000..c2767fba --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/dump_waf_linux.go @@ -0,0 +1,58 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux && (amd64 || arm64) && !go1.24 && !datadog.no_waf && (cgo || appsec) + +package lib + +import ( + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "os" + + "golang.org/x/sys/unix" +) + +// DumpEmbeddedWAF for linux systems. +// It creates a memfd and writes the embedded WAF library to it. Then it returns the path the /proc/self/fd/ path +// to the file. This trick makes us able to load the library without having to write it to disk. +// Hence, making go-libddwaf work on full read-only filesystems. 
+func DumpEmbeddedWAF() (path string, closer func() error, err error) { + fd, err := unix.MemfdCreate("libddwaf", 0) + if err != nil { + return "", nil, fmt.Errorf("error creating memfd: %w", err) + } + + file := os.NewFile(uintptr(fd), fmt.Sprintf("/proc/self/fd/%d", fd)) + if file == nil { + return "", nil, errors.New("error creating file from fd") + } + + defer func() { + if file != nil && err != nil { + if closeErr := file.Close(); closeErr != nil { + err = errors.Join(err, fmt.Errorf("error closing file: %w", closeErr)) + } + } + }() + + gr, err := gzip.NewReader(bytes.NewReader(libddwaf)) + if err != nil { + return "", nil, fmt.Errorf("error creating gzip reader: %w", err) + } + + if _, err := io.Copy(file, gr); err != nil { + return "", nil, fmt.Errorf("error copying gzip content to memfd: %w", err) + } + + if err := gr.Close(); err != nil { + return "", nil, fmt.Errorf("error closing gzip reader: %w", err) + } + + return file.Name(), file.Close, nil +} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib.go deleted file mode 100644 index f656122a..00000000 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib.go +++ /dev/null @@ -1,61 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build ((darwin && (amd64 || arm64)) || (linux && (amd64 || arm64))) && !go1.24 && !datadog.no_waf && (cgo || appsec) - -package lib - -import ( - "bytes" - "compress/gzip" - "errors" - "fmt" - "io" - "os" - - _ "embed" -) - -//go:embed .version -var EmbeddedWAFVersion string - -func DumpEmbeddedWAF() (path string, err error) { - file, err := os.CreateTemp("", embedNamePattern) - if err != nil { - return path, fmt.Errorf("error creating temp file: %w", err) - } - path = file.Name() - - defer func() { - if closeErr := file.Close(); closeErr != nil { - err = errors.Join(err, fmt.Errorf("error closing file: %w", closeErr)) - } - if path != "" && err != nil { - if rmErr := os.Remove(path); rmErr != nil { - err = errors.Join(err, fmt.Errorf("error removing file: %w", rmErr)) - } - } - }() - - gr, err := gzip.NewReader(bytes.NewReader(libddwaf)) - if err != nil { - return path, fmt.Errorf("error creating gzip reader: %w", err) - } - - uncompressedLibddwaf, err := io.ReadAll(gr) - if err != nil { - return path, fmt.Errorf("error reading gzip content: %w", err) - } - - if err := gr.Close(); err != nil { - return path, fmt.Errorf("error closing gzip reader: %w", err) - } - - if err := os.WriteFile(file.Name(), uncompressedLibddwaf, 0400); err != nil { - return path, fmt.Errorf("error writing file: %w", err) - } - - return path, nil -} diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_darwin_amd64.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_darwin_amd64.go index 27c7acf0..16868569 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_darwin_amd64.go +++ b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_darwin_amd64.go @@ -13,5 +13,3 @@ import _ "embed" // Needed for go:embed //go:embed libddwaf-darwin-amd64.dylib.gz var libddwaf []byte - -const embedNamePattern = "libddwaf-*.dylib" diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_darwin_arm64.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_darwin_arm64.go 
index 3133ac40..eca7b655 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_darwin_arm64.go +++ b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_darwin_arm64.go @@ -13,5 +13,3 @@ import _ "embed" // Needed for go:embed //go:embed libddwaf-darwin-arm64.dylib.gz var libddwaf []byte - -const embedNamePattern = "libddwaf-*.dylib" diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_linux_amd64.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_linux_amd64.go index 9e72cdca..97d24c62 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_linux_amd64.go +++ b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_linux_amd64.go @@ -13,5 +13,3 @@ import _ "embed" // Needed for go:embed //go:embed libddwaf-linux-amd64.so.gz var libddwaf []byte - -const embedNamePattern = "libddwaf-*.so" diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_linux_arm64.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_linux_arm64.go index e8be318d..1f2e50af 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_linux_arm64.go +++ b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/lib_linux_arm64.go @@ -13,5 +13,3 @@ import _ "embed" // Needed for go:embed //go:embed libddwaf-linux-arm64.so.gz var libddwaf []byte - -const embedNamePattern = "libddwaf-*.so" diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-darwin-amd64.dylib.gz b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-darwin-amd64.dylib.gz index 75ed8874..c2f1a351 100644 Binary files a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-darwin-amd64.dylib.gz and b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-darwin-amd64.dylib.gz differ diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-darwin-arm64.dylib.gz b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-darwin-arm64.dylib.gz index 99e0af6a..bcbe9c54 100644 Binary files a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-darwin-arm64.dylib.gz and b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-darwin-arm64.dylib.gz differ diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-linux-amd64.so.gz b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-linux-amd64.so.gz index ddde4e44..d858a354 100644 Binary files a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-linux-amd64.so.gz and b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-linux-amd64.so.gz differ diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-linux-arm64.so.gz b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-linux-arm64.so.gz index b1e50616..c92a3447 100644 Binary files a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-linux-arm64.so.gz and b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/libddwaf-linux-arm64.so.gz differ diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/version.go b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/version.go new file mode 100644 index 00000000..8c0828e9 --- /dev/null +++ b/vendor/github.com/DataDog/go-libddwaf/v3/internal/lib/version.go @@ -0,0 +1,13 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
+ +package lib + +import ( + _ "embed" // For go:embed +) + +//go:embed .version +var EmbeddedWAFVersion string diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/ddwaf.h b/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/ddwaf.h index cfad7f93..b23be0e9 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/ddwaf.h +++ b/vendor/github.com/DataDog/go-libddwaf/v3/internal/log/ddwaf.h @@ -228,7 +228,7 @@ ddwaf_handle ddwaf_update(ddwaf_handle handle, const ddwaf_object *ruleset, * * Destroy a WAF instance. * - * @param Handle to the WAF instance. + * @param handle Handle to the WAF instance. */ void ddwaf_destroy(ddwaf_handle handle); @@ -242,16 +242,34 @@ void ddwaf_destroy(ddwaf_handle handle); * * The memory is owned by the WAF and should not be freed. * - * @param Handle to the WAF instance. + * @param handle Handle to the WAF instance. * @param size Output parameter in which the size will be returned. The value of * size will be 0 if the return value is NULL. * @return NULL if empty, otherwise a pointer to an array with size elements. * - * @Note The returned array should be considered invalid after calling ddwaf_destroy + * @note This function is not thread-safe + * @note The returned array should be considered invalid after calling ddwaf_destroy * on the handle used to obtain it. **/ const char* const* ddwaf_known_addresses(const ddwaf_handle handle, uint32_t *size); - +/** + * ddwaf_known_actions + * + * Get an array of all the action types which could be triggered as a result of + * the current set of rules and exclusion filters. + * + * The memory is owned by the WAF and should not be freed. + * + * @param handle Handle to the WAF instance. + * @param size Output parameter in which the size will be returned. The value of + * size will be 0 if the return value is NULL. + * @return NULL if empty, otherwise a pointer to an array with size elements. + * + * @note This function is not thread-safe + * @note The returned array should be considered invalid after calling ddwaf_destroy + * on the handle used to obtain it. + **/ +const char *const *ddwaf_known_actions(const ddwaf_handle handle, uint32_t *size); /** * ddwaf_context_init * diff --git a/vendor/github.com/DataDog/go-libddwaf/v3/metrics.go b/vendor/github.com/DataDog/go-libddwaf/v3/metrics.go index 86130995..50973653 100644 --- a/vendor/github.com/DataDog/go-libddwaf/v3/metrics.go +++ b/vendor/github.com/DataDog/go-libddwaf/v3/metrics.go @@ -6,92 +6,131 @@ package waf import ( - "fmt" + "strings" "sync" "time" ) // Stats stores the metrics collected by the WAF. -type Stats struct { - // Timers returns a map of metrics and their durations. - Timers map[string]time.Duration +type ( + Stats struct { + // Timers returns a map of metrics and their durations. + Timers map[string]time.Duration - // Timeout - TimeoutCount uint64 + // TimeoutCount for the Default Scope i.e. "waf" + TimeoutCount uint64 - // Truncations provides details about truncations that occurred while - // encoding address data for WAF execution. - Truncations map[TruncationReason][]int -} + // TimeoutRASPCount for the RASP Scope i.e. "rasp" + TimeoutRASPCount uint64 + + // Truncations provides details about truncations that occurred while + // encoding address data for WAF execution. + Truncations map[TruncationReason][]int + + // TruncationsRASP provides details about truncations that occurred while + // encoding address data for RASP execution. 
+ TruncationsRASP map[TruncationReason][]int + } + + // Scope is the way to classify the different runs in the same context in order to have different metrics + Scope string + + metricKey struct { + scope Scope + component string + } + + metricsStore struct { + data map[metricKey]time.Duration + mutex sync.RWMutex + } +) const ( - wafEncodeTag = "_dd.appsec.waf.encode" - wafRunTag = "_dd.appsec.waf.duration_ext" - wafDurationTag = "_dd.appsec.waf.duration" - wafDecodeTag = "_dd.appsec.waf.decode" - wafTimeoutTag = "_dd.appsec.waf.timeouts" - wafTruncationTag = "_dd.appsec.waf.truncations" + DefaultScope Scope = "waf" + RASPScope Scope = "rasp" ) -// Metrics transform the stats returned by the WAF into a map of key value metrics for datadog backend +const ( + wafEncodeTag = "encode" + wafRunTag = "duration_ext" + wafDurationTag = "duration" + wafDecodeTag = "decode" + wafTimeoutTag = "timeouts" + wafTruncationTag = "truncations" +) + +func dot(parts ...string) string { + return strings.Join(parts, ".") +} + +// Metrics transform the stats returned by the WAF into a map of key value metrics with values in microseconds. +// ex. {"waf.encode": 100, "waf.duration_ext": 300, "waf.duration": 200, "rasp.encode": 100, "rasp.duration_ext": 300, "rasp.duration": 200} func (stats Stats) Metrics() map[string]any { tags := make(map[string]any, len(stats.Timers)+len(stats.Truncations)+1) for k, v := range stats.Timers { tags[k] = float64(v.Nanoseconds()) / float64(time.Microsecond) // The metrics should be in microseconds } - tags[wafTimeoutTag] = stats.TimeoutCount + if stats.TimeoutCount > 0 { + tags[dot(string(DefaultScope), wafTimeoutTag)] = stats.TimeoutCount + } + + if stats.TimeoutRASPCount > 0 { + tags[dot(string(RASPScope), wafTimeoutTag)] = stats.TimeoutRASPCount + } + for reason, list := range stats.Truncations { - tags[fmt.Sprintf("%s.%s", wafTruncationTag, reason.String())] = list + tags[dot(string(DefaultScope), wafTruncationTag, reason.String())] = list } - return tags -} + for reason, list := range stats.TruncationsRASP { + tags[dot(string(RASPScope), wafTruncationTag, reason.String())] = list + } -type metricsStore struct { - data map[string]time.Duration - mutex sync.RWMutex + return tags } -func (metrics *metricsStore) add(key string, duration time.Duration) { +func (metrics *metricsStore) add(scope Scope, component string, duration time.Duration) { metrics.mutex.Lock() defer metrics.mutex.Unlock() if metrics.data == nil { - metrics.data = make(map[string]time.Duration, 5) + metrics.data = make(map[metricKey]time.Duration, 5) } - metrics.data[key] += duration + metrics.data[metricKey{scope, component}] += duration } -func (metrics *metricsStore) get(key string) time.Duration { +func (metrics *metricsStore) get(scope Scope, component string) time.Duration { metrics.mutex.RLock() defer metrics.mutex.RUnlock() - return metrics.data[key] + return metrics.data[metricKey{scope, component}] } -func (metrics *metricsStore) copy() map[string]time.Duration { +func (metrics *metricsStore) timers() map[string]time.Duration { metrics.mutex.Lock() defer metrics.mutex.Unlock() if metrics.data == nil { return nil } - copy := make(map[string]time.Duration, len(metrics.data)) + timers := make(map[string]time.Duration, len(metrics.data)) for k, v := range metrics.data { - copy[k] = v + timers[dot(string(k.scope), k.component)] = v } - return copy + return timers } // merge merges the current metrics with new ones -func (metrics *metricsStore) merge(other map[string]time.Duration) { +func (metrics 
*metricsStore) merge(scope Scope, other map[string]time.Duration) { metrics.mutex.Lock() defer metrics.mutex.Unlock() if metrics.data == nil { - metrics.data = make(map[string]time.Duration, 5) + metrics.data = make(map[metricKey]time.Duration, 5) } - for key, val := range other { + for component, val := range other { + key := metricKey{scope, component} prev, ok := metrics.data[key] if !ok { prev = 0 diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/README.md b/vendor/github.com/DataDog/go-sqllexer/testdata/README.md deleted file mode 100644 index 1237e812..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# Test Suite - -The test suite is a collection of test SQL statements that are organized per DBMS. The test suite is used to test the SQL obfuscator and normalizer for correctness and completeness. It is also intended to cover DBMS specific edge cases, that are not covered by the generic unit tests. - -## Test Suite Structure - -The test suite is organized in the following way: - -```text -testdata -├── README.md -├── dbms1 -│   ├── query_type1 -│   │   ├── test1.json -│   └── query_type2 -│   ├── test1.json -dbms_test.go -``` - -The test suite is organized per DBMS. Each DBMS has a number of query types. Each query type has a number of test cases. Each test case consists of a SQL statement and the expected output of the obfuscator/normalizer. - -## Test File Format - -The test files are simple json files where each test case comes with one input SQL statements and an array of expected outputs. -Each expected output can optionally come with a configuration for the obfuscator and normalizer. The configuration is optional, because the default configuration is used if no configuration is provided. - -testcase.json: - -```json -{ - "input": "SELECT * FROM table1", - "outputs": [ - { - // Test case 1 - "expected": "SELECT * FROM table1", - "obfuscator_config": {...}, // optional - "normalizer_config": {...} // optional - }, - { - // Test case 2 - "expected": "SELECT * FROM table1", - "obfuscator_config": {...}, // optional - "normalizer_config": {...} // optional - } - ] -} -``` - -## How to write a new test case - -1. Create a new directory for the DBMS, if it does not exist yet. (this step is often not necessary) -2. Create a new directory for the query type, if it does not exist yet. -3. Create a new test case `.json` file with the SQL statement and expected output. Refer to the [test file format](#test-file-format) or `testcase struct` in [dbms_test.go](../dbms_test.go) for more details. -4. Run the test suite to verify that the test case is working as expected. 
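The deleted README above documents the test-case JSON layout (one "input" statement plus an array of expected "outputs"). Purely to illustrate that format, a decoder could look like the following sketch; the struct names are illustrative and are not the actual testcase struct from dbms_test.go:

```go
// Sketch: decoding one test-case file in the format described by the deleted README.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type testOutput struct {
	Expected         string          `json:"expected"`
	ObfuscatorConfig json.RawMessage `json:"obfuscator_config,omitempty"` // optional
	NormalizerConfig json.RawMessage `json:"normalizer_config,omitempty"` // optional
}

type testCase struct {
	Input   string       `json:"input"`
	Outputs []testOutput `json:"outputs"`
}

func main() {
	raw, err := os.ReadFile("testdata/mssql/complex/indexed-views.json") // path as in the files below
	if err != nil {
		panic(err)
	}
	var tc testCase
	if err := json.Unmarshal(raw, &tc); err != nil {
		panic(err)
	}
	fmt.Printf("input: %q, %d expected output(s)\n", tc.Input, len(tc.Outputs))
}
```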
diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/extremely-complex-poorly-written-sql.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/extremely-complex-poorly-written-sql.json deleted file mode 100644 index a8442bdb..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/extremely-complex-poorly-written-sql.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "WITH ComplexCTE AS (SELECT t1.id, t2.amount, ROW_NUMBER() OVER(PARTITION BY t1.customer_id ORDER BY t2.amount DESC) AS rn FROM (SELECT id, customer_id, status FROM orders WHERE YEAR(order_date) = YEAR(GETDATE()) AND status NOT IN ('Cancelled', 'Returned')) t1 INNER JOIN (SELECT order_id, SUM(amount) AS amount FROM order_details GROUP BY order_id) t2 ON t1.id = t2.order_id WHERE t2.amount > 500), SecondCTE AS (SELECT c1.*, c2.name, c2.region FROM ComplexCTE c1 INNER JOIN customers c2 ON c1.customer_id = c2.id WHERE c2.region IN ('East', 'West') AND c1.rn < 5) SELECT s.id, s.name, s.amount, p.product_name, CASE WHEN s.amount > 1000 THEN 'High' ELSE 'Low' END AS ValueCategory FROM SecondCTE s LEFT JOIN (SELECT DISTINCT p1.order_id, p2.product_name FROM order_products p1 INNER JOIN products p2 ON p1.product_id = p2.id) p ON s.id = p.order_id WHERE s.region = 'East' AND s.status LIKE '%Active%' ORDER BY s.amount DESC, s.name;", - "outputs": [ - { - "expected": "WITH ComplexCTE AS ( SELECT t?.id, t?.amount, ROW_NUMBER ( ) OVER ( PARTITION BY t?.customer_id ORDER BY t?.amount DESC ) FROM ( SELECT id, customer_id, status FROM orders WHERE YEAR ( order_date ) = YEAR ( GETDATE ( ) ) AND status NOT IN ( ? ) ) t? INNER JOIN ( SELECT order_id, SUM ( amount ) FROM order_details GROUP BY order_id ) t? ON t?.id = t?.order_id WHERE t?.amount > ? ), SecondCTE AS ( SELECT c?. *, c?.name, c?.region FROM ComplexCTE c? INNER JOIN customers c? ON c?.customer_id = c?.id WHERE c?.region IN ( ? ) AND c?.rn < ? ) SELECT s.id, s.name, s.amount, p.product_name, CASE WHEN s.amount > ? THEN ? ELSE ? END FROM SecondCTE s LEFT JOIN ( SELECT DISTINCT p?.order_id, p?.product_name FROM order_products p? INNER JOIN products p? ON p?.product_id = p?.id ) p ON s.id = p.order_id WHERE s.region = ? AND s.status LIKE ? ORDER BY s.amount DESC, s.name", - "statement_metadata": { - "size": 79, - "tables": ["orders", "order_details", "ComplexCTE", "customers", "SecondCTE", "order_products", "products"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/indexed-views.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/indexed-views.json deleted file mode 100644 index 5d78886d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/indexed-views.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "input": "CREATE VIEW dbo.OrderSummary WITH SCHEMABINDING AS SELECT customer_id, COUNT_BIG(*) AS TotalOrders, SUM(amount) AS TotalAmount FROM dbo.orders GROUP BY customer_id; CREATE UNIQUE CLUSTERED INDEX IDX_V1 ON dbo.OrderSummary(customer_id);", - "outputs": [ - { - "expected": "CREATE VIEW dbo.OrderSummary WITH SCHEMABINDING AS SELECT customer_id, COUNT_BIG ( * ), SUM ( amount ) FROM dbo.orders GROUP BY customer_id; CREATE UNIQUE CLUSTERED INDEX IDX_V? 
ON dbo.OrderSummary ( customer_id )", - "statement_metadata": { - "size": 22, - "tables": ["dbo.orders"], - "commands": ["CREATE", "SELECT"], - "comments": [], - "procedures": [], - "views": ["dbo.OrderSummary"] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/partitioned-tables-indexes.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/partitioned-tables-indexes.json deleted file mode 100644 index c06612ec..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/partitioned-tables-indexes.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE PARTITION FUNCTION myRangePF1 (INT) AS RANGE LEFT FOR VALUES (1, 100, 1000); CREATE PARTITION SCHEME myScheme AS PARTITION myRangePF1 TO ([PRIMARY], [SECONDARY], [TERTIARY]); CREATE TABLE partitionedTable (id INT) ON myScheme(id);", - "outputs": [ - { - "expected": "CREATE PARTITION FUNCTION myRangePF? ( INT ) LEFT FOR VALUES ( ? ); CREATE PARTITION SCHEME myScheme myRangePF? TO ( PRIMARY, SECONDARY, TERTIARY ); CREATE TABLE partitionedTable ( id INT ) ON myScheme ( id )", - "statement_metadata": { - "size": 22, - "tables": ["partitionedTable"], - "commands": ["CREATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/super-complex-poorly-written-sql.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/super-complex-poorly-written-sql.json deleted file mode 100644 index 41bd0e0e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/complex/super-complex-poorly-written-sql.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT a.*, b.totalAmount, CASE WHEN c.id IS NOT NULL THEN d.description ELSE 'N/A' END AS description FROM (SELECT id, name, status, customer_id FROM orders WHERE order_date > DATEADD(month, -6, GETDATE()) AND status IN ('Pending', 'Completed') AND customer_id IN (SELECT customer_id FROM customers WHERE region IN ('East', 'West') AND last_order_date > DATEADD(year, -1, GETDATE())) ORDER BY name DESC) a INNER JOIN (SELECT order_id, SUM(amount) AS totalAmount FROM order_details GROUP BY order_id) b ON a.id = b.order_id LEFT JOIN audit_log c ON a.id = c.order_id LEFT JOIN (SELECT DISTINCT status, description FROM status_descriptions) d ON a.status = d.status WHERE a.name LIKE '%test%' AND (b.totalAmount > 1000 OR b.totalAmount IS NULL) ORDER BY a.order_date DESC, a.name;", - "outputs": [ - { - "expected": "SELECT a. *, b.totalAmount, CASE WHEN c.id IS NOT ? THEN d.description ELSE ? END FROM ( SELECT id, name, status, customer_id FROM orders WHERE order_date > DATEADD ( month, ?, GETDATE ( ) ) AND status IN ( ? ) AND customer_id IN ( SELECT customer_id FROM customers WHERE region IN ( ? ) AND last_order_date > DATEADD ( year, ?, GETDATE ( ) ) ) ORDER BY name DESC ) a INNER JOIN ( SELECT order_id, SUM ( amount ) FROM order_details GROUP BY order_id ) b ON a.id = b.order_id LEFT JOIN audit_log c ON a.id = c.order_id LEFT JOIN ( SELECT DISTINCT status, description FROM status_descriptions ) d ON a.status = d.status WHERE a.name LIKE ? AND ( b.totalAmount > ? OR b.totalAmount IS ? 
) ORDER BY a.order_date DESC, a.name", - "statement_metadata": { - "size": 66, - "tables": ["orders", "customers", "order_details", "audit_log", "status_descriptions"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/conditional-delete-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/conditional-delete-case.json deleted file mode 100644 index 66a0da8c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/conditional-delete-case.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status = CASE WHEN order_date < GETDATE() - 90 THEN 'Expired' ELSE 'Active' END;", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status = CASE WHEN order_date < GETDATE ( ) - ? THEN ? ELSE ? END", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-basic.json deleted file mode 100644 index eb9025d8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status = 'Cancelled';", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-cascade.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-cascade.json deleted file mode 100644 index 2e4a3b54..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-cascade.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM customers WHERE region = 'North'; -- Assuming CASCADE DELETE is set up on the foreign key in the orders table", - "outputs": [ - { - "expected": "DELETE FROM customers WHERE region = ?", - "statement_metadata": { - "size": 90, - "tables": ["customers"], - "commands": ["DELETE"], - "comments": ["-- Assuming CASCADE DELETE is set up on the foreign key in the orders table"], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-rowlock-hint.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-rowlock-hint.json deleted file mode 100644 index b5649652..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-rowlock-hint.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WITH (ROWLOCK) WHERE status = 'Pending';", - "outputs": [ - { - "expected": "DELETE FROM orders WITH ( ROWLOCK ) WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-using-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-using-subquery.json deleted file mode 100644 index 2d739ca4..00000000 --- 
a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-using-subquery.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE customer_id IN (SELECT id FROM customers WHERE region = 'West');", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE customer_id IN ( SELECT id FROM customers WHERE region = ? )", - "statement_metadata": { - "size": 27, - "tables": ["orders", "customers"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-using-table-variable.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-using-table-variable.json deleted file mode 100644 index 91809fe3..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-using-table-variable.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DECLARE @ExpiredOrders TABLE (id INT); INSERT INTO @ExpiredOrders (id) SELECT id FROM orders WHERE order_date < GETDATE() - 365; DELETE FROM orders WHERE id IN (SELECT id FROM @ExpiredOrders);", - "outputs": [ - { - "expected": "DECLARE @ExpiredOrders TABLE ( id INT ); INSERT INTO @ExpiredOrders ( id ) SELECT id FROM orders WHERE order_date < GETDATE ( ) - ?; DELETE FROM orders WHERE id IN ( SELECT id FROM @ExpiredOrders )", - "statement_metadata": { - "size": 24, - "tables": ["orders"], - "commands": ["INSERT", "SELECT", "DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-cte.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-cte.json deleted file mode 100644 index 69121952..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-cte.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "WITH OldOrders AS (SELECT id FROM orders WHERE order_date < '2022-01-01') DELETE FROM orders WHERE id IN (SELECT id FROM OldOrders);", - "outputs": [ - { - "expected": "WITH OldOrders AS ( SELECT id FROM orders WHERE order_date < ? ) DELETE FROM orders WHERE id IN ( SELECT id FROM OldOrders )", - "statement_metadata": { - "size": 27, - "tables": ["orders", "OldOrders"], - "commands": ["SELECT", "DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-join.json deleted file mode 100644 index 7fa63f36..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE o FROM orders o INNER JOIN customers c ON o.customer_id = c.id WHERE c.region = 'East' AND o.status = 'Pending';", - "outputs": [ - { - "expected": "DELETE o FROM orders o INNER JOIN customers c ON o.customer_id = c.id WHERE c.region = ? 
AND o.status = ?", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["DELETE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-output.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-output.json deleted file mode 100644 index b3373cc5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-output.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders OUTPUT DELETED.* WHERE status = 'Shipped';", - "outputs": [ - { - "expected": "DELETE FROM orders OUTPUT DELETED. * WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-top.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-top.json deleted file mode 100644 index 12116c3b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/delete/delete-with-top.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE TOP (10) FROM orders WHERE status = 'Pending';", - "outputs": [ - { - "expected": "DELETE TOP ( ? ) FROM orders WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-basic.json deleted file mode 100644 index fe1d97fd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, order_date, status) VALUES (1, GETDATE(), 'Pending');", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, order_date, status ) VALUES ( ?, GETDATE ( ), ? 
)", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-default-values.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-default-values.json deleted file mode 100644 index 9c8a08ad..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-default-values.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders DEFAULT VALUES;", - "outputs": [ - { - "expected": "INSERT INTO orders DEFAULT VALUES", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-identity-insert.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-identity-insert.json deleted file mode 100644 index 5fdc3e03..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-identity-insert.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SET IDENTITY_INSERT orders ON; INSERT INTO orders (id, customer_id, order_date, status) VALUES (100, 3, GETDATE(), 'Pending'); SET IDENTITY_INSERT orders OFF;", - "outputs": [ - { - "expected": "SET IDENTITY_INSERT orders ON; INSERT INTO orders ( id, customer_id, order_date, status ) VALUES ( ?, GETDATE ( ), ? ); SET IDENTITY_INSERT orders OFF", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-merge.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-merge.json deleted file mode 100644 index 81d75e6e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-merge.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "MERGE INTO orders AS target USING (SELECT customer_id, order_date, status FROM incoming_orders) AS source ON target.id = source.id WHEN NOT MATCHED THEN INSERT (customer_id, order_date, status) VALUES (source.customer_id, source.order_date, source.status);", - "outputs": [ - { - "expected": "MERGE INTO orders USING ( SELECT customer_id, order_date, status FROM incoming_orders ) ON target.id = source.id WHEN NOT MATCHED THEN INSERT ( customer_id, order_date, status ) VALUES ( source.customer_id, source.order_date, source.status )", - "statement_metadata": { - "size": 38, - "tables": ["orders", "incoming_orders"], - "commands": ["MERGE", "SELECT", "INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-output.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-output.json deleted file mode 100644 index d16031fd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-output.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, order_date, status) OUTPUT INSERTED.id VALUES (3, GETDATE(), 'Processing');", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, order_date, status ) OUTPUT INSERTED.id VALUES ( ?, GETDATE ( ), ? 
)", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-select-into.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-select-into.json deleted file mode 100644 index 74700113..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-select-into.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * INTO new_orders FROM orders WHERE status = 'Pending';", - "outputs": [ - { - "expected": "SELECT * INTO new_orders FROM orders WHERE status = ?", - "statement_metadata": { - "size": 22, - "tables": ["new_orders", "orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-subquery-values.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-subquery-values.json deleted file mode 100644 index af7a2a0f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-subquery-values.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO order_totals (order_id, total_amount) VALUES ((SELECT MAX(id) FROM orders), 500);", - "outputs": [ - { - "expected": "INSERT INTO order_totals ( order_id, total_amount ) VALUES ( ( SELECT MAX ( id ) FROM orders ), ? )", - "statement_metadata": { - "size": 30, - "tables": ["order_totals", "orders"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-top-orderby.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-top-orderby.json deleted file mode 100644 index 2469fd38..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-top-orderby.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO top_orders (id, amount) SELECT TOP 5 id, amount FROM orders ORDER BY amount DESC;", - "outputs": [ - { - "expected": "INSERT INTO top_orders ( id, amount ) SELECT TOP ? id, amount FROM orders ORDER BY amount DESC", - "statement_metadata": { - "size": 28, - "tables": ["top_orders", "orders"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-values-multiple-rows.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-values-multiple-rows.json deleted file mode 100644 index ec79720b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-values-multiple-rows.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO customers (name, region) VALUES ('John Doe', 'North'), ('Jane Smith', 'South');", - "outputs": [ - { - "expected": "INSERT INTO customers ( name, region ) VALUES ( ? ), ( ? 
)", - "statement_metadata": { - "size": 15, - "tables": ["customers"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-with-select.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-with-select.json deleted file mode 100644 index 30392800..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/insert-with-select.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders_archive (id, customer_id, order_date, status) SELECT id, customer_id, order_date, status FROM orders WHERE status = 'Completed';", - "outputs": [ - { - "expected": "INSERT INTO orders_archive ( id, customer_id, order_date, status ) SELECT id, customer_id, order_date, status FROM orders WHERE status = ?", - "statement_metadata": { - "size": 32, - "tables": ["orders_archive", "orders"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/using-throw-error-handling.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/using-throw-error-handling.json deleted file mode 100644 index 8458f773..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/insert/using-throw-error-handling.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "BEGIN TRY INSERT INTO orders (customer_id, amount) VALUES (1, -100); END TRY BEGIN CATCH THROW; END CATCH;", - "outputs": [ - { - "expected": "BEGIN TRY INSERT INTO orders ( customer_id, amount ) VALUES ( ? ); END TRY BEGIN CATCH THROW; END CATCH", - "statement_metadata": { - "size": 17, - "tables": ["orders"], - "commands": ["BEGIN", "INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/complex-stored-procedure-multiple-operations.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/complex-stored-procedure-multiple-operations.json deleted file mode 100644 index 485e68de..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/complex-stored-procedure-multiple-operations.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE ManageCustomerOrders @customerId INT AS BEGIN SET NOCOUNT ON; IF NOT EXISTS (SELECT 1 FROM customers WHERE id = @customerId) BEGIN THROW 50001, 'Customer not found.', 1; END; UPDATE orders SET status = 'Reviewed' WHERE customer_id = @customerId AND status = 'Pending'; INSERT INTO audit_log (description) VALUES ('Orders reviewed for customer ' + CAST(@customerId AS NVARCHAR(10))); END;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE ManageCustomerOrders @customerId INT AS BEGIN SET NOCOUNT ON; IF NOT EXISTS (SELECT ? FROM customers WHERE id = @customerId) BEGIN THROW ?, ?, ?; END; UPDATE orders SET status = ? WHERE customer_id = @customerId AND status = ?; INSERT INTO audit_log (description) VALUES (? 
+ CAST(@customerId AS NVARCHAR(?))); END;", - "statement_metadata": { - "size": 78, - "tables": ["customers", "orders", "audit_log"], - "commands": ["CREATE", "ALTER", "BEGIN", "SELECT", "UPDATE", "INSERT"], - "comments": [], - "procedures": ["ManageCustomerOrders"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/complex-stored-procedure-multiple-statements.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/complex-stored-procedure-multiple-statements.json deleted file mode 100644 index 886e455a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/complex-stored-procedure-multiple-statements.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE FullOrderManagement AS\nBEGIN\n-- Comprehensive procedure to manage order lifecycle\n-- It checks, processes, and logs orders.\nSET NOCOUNT ON;\n-- Check for new orders\nUPDATE orders SET status = 'Processing' WHERE status = 'New';\n-- Log the update\nINSERT INTO audit_log (description) VALUES ('Processed new orders.');\n-- Finalize processed orders\nUPDATE orders SET status = 'Finalized' WHERE status = 'Processing';\nEND;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE FullOrderManagement AS BEGIN SET NOCOUNT ON; UPDATE orders SET status = ? WHERE status = ?; INSERT INTO audit_log (description) VALUES (?); UPDATE orders SET status = ? WHERE status = ?; END;", - "statement_metadata": { - "size": 223, - "tables": ["orders", "audit_log"], - "commands": ["CREATE", "ALTER", "BEGIN", "UPDATE", "INSERT"], - "comments": ["-- Comprehensive procedure to manage order lifecycle", "-- It checks, processes, and logs orders.", "-- Check for new orders", "-- Log the update", "-- Finalize processed orders"], - "procedures": ["FullOrderManagement"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-comprehensive-logic-explanation.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-comprehensive-logic-explanation.json deleted file mode 100644 index 3036098a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-comprehensive-logic-explanation.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE ValidateOrderDetails AS\nBEGIN\n/*\n Procedure Name: ValidateOrderDetails\n Purpose: To validate the details of orders before processing.\n Detailed Description:\n This procedure runs through each order in the 'orders' table\n and checks if all required details are present.\n It updates the 'order_status' table with 'Valid' or 'Invalid'.\n It's a critical part of the order processing pipeline to ensure data integrity.\n*/\n-- Validation logic\nUPDATE orders SET status = CASE WHEN customer_id IS NOT NULL AND total_amount IS NOT NULL THEN 'Valid' ELSE 'Invalid' END;\nEND;", - "outputs": [ - { - 
"expected": "CREATE OR ALTER PROCEDURE ValidateOrderDetails AS BEGIN UPDATE orders SET status = CASE WHEN customer_id IS NOT NULL AND total_amount IS NOT NULL THEN ? ELSE ? END; END;", - "statement_metadata": { - "size": 466, - "tables": ["orders"], - "commands": ["CREATE", "ALTER", "BEGIN", "UPDATE"], - "comments": ["/*\n Procedure Name: ValidateOrderDetails\n Purpose: To validate the details of orders before processing.\n Detailed Description:\n This procedure runs through each order in the 'orders' table\n and checks if all required details are present.\n It updates the 'order_status' table with 'Valid' or 'Invalid'.\n It's a critical part of the order processing pipeline to ensure data integrity.\n*/", "-- Validation logic"], - "procedures": ["ValidateOrderDetails"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-conditional-logic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-conditional-logic.json deleted file mode 100644 index 81008418..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-conditional-logic.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE CheckOrderStatus @orderId INT AS\nBEGIN\n-- Checks the status of an order and logs if it's delayed.\n-- This is part of our order monitoring system.\nSET NOCOUNT ON;\nDECLARE @status NVARCHAR(50);\nSELECT @status = status FROM orders WHERE id = @orderId;\nIF @status = 'Delayed'\nBEGIN\n INSERT INTO audit_log (description) VALUES ('Order ' + CAST(@orderId AS NVARCHAR(10)) + ' is delayed.');\nEND\nEND;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE CheckOrderStatus @orderId INT AS BEGIN SET NOCOUNT ON; DECLARE @status NVARCHAR(?); SELECT @status = status FROM orders WHERE id = @orderId; IF @status = ? BEGIN INSERT INTO audit_log (description) VALUES (? 
+ CAST(@orderId AS NVARCHAR(?)) + ?); END END;", - "statement_metadata": { - "size": 164, - "tables": ["orders", "audit_log"], - "commands": ["CREATE", "ALTER", "BEGIN", "SELECT", "INSERT"], - "comments": ["-- Checks the status of an order and logs if it's delayed.", "-- This is part of our order monitoring system."], - "procedures": ["CheckOrderStatus"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-cursor-temp-table.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-cursor-temp-table.json deleted file mode 100644 index 7f6ad14e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-cursor-temp-table.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE ArchiveOldOrders AS BEGIN SET NOCOUNT ON; DECLARE @orderId INT; DECLARE orderCursor CURSOR FOR SELECT id FROM orders WHERE order_date < GETDATE() - 365; OPEN orderCursor; FETCH NEXT FROM orderCursor INTO @orderId; WHILE @@FETCH_STATUS = 0 BEGIN INSERT INTO orders_archive (id, status) SELECT id, status FROM orders WHERE id = @orderId; FETCH NEXT FROM orderCursor INTO @orderId; END; CLOSE orderCursor; DEALLOCATE orderCursor; END;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE ArchiveOldOrders AS BEGIN SET NOCOUNT ON; DECLARE @orderId INT; DECLARE orderCursor CURSOR FOR SELECT id FROM orders WHERE order_date < GETDATE() - ?; OPEN orderCursor; FETCH NEXT FROM orderCursor INTO @orderId; WHILE @@FETCH_STATUS = ? 
BEGIN INSERT INTO orders_archive (id, status) SELECT id, status FROM orders WHERE id = @orderId; FETCH NEXT FROM orderCursor INTO @orderId; END; CLOSE orderCursor; DEALLOCATE orderCursor; END;", - "statement_metadata": { - "size": 75, - "tables": ["orders", "orderCursor", "orders_archive"], - "commands": ["CREATE", "ALTER", "BEGIN", "SELECT", "INSERT"], - "comments": [], - "procedures": ["ArchiveOldOrders"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-detailed-documentation.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-detailed-documentation.json deleted file mode 100644 index 68166fd5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-detailed-documentation.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE AuditOrderProcessing AS\nBEGIN\n/*\n Procedure: AuditOrderProcessing\n Author: Jane Doe\n Created: 2023-04-15\n Description: This procedure is designed to audit order processing steps.\n It checks each step of the order processing workflow and logs it into the audit_log table.\n Modifications:\n - 2023-04-20: Added additional logging for failed orders.\n - 2023-05-01: Updated logic to include new order status.\n*/\nSET NOCOUNT ON;\n-- Insert audit records\nINSERT INTO audit_log (description) SELECT 'Order processed: ' + CAST(id AS NVARCHAR(10)) FROM orders WHERE status = 'Processed';\nEND;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE AuditOrderProcessing AS BEGIN SET NOCOUNT ON; INSERT INTO audit_log (description) SELECT ? 
+ CAST(id AS NVARCHAR(?)) FROM orders WHERE status = ?; END;", - "statement_metadata": { - "size": 478, - "tables": ["audit_log", "orders"], - "commands": ["CREATE", "ALTER", "BEGIN", "INSERT", "SELECT"], - "comments": ["/*\n Procedure: AuditOrderProcessing\n Author: Jane Doe\n Created: 2023-04-15\n Description: This procedure is designed to audit order processing steps.\n It checks each step of the order processing workflow and logs it into the audit_log table.\n Modifications:\n - 2023-04-20: Added additional logging for failed orders.\n - 2023-05-01: Updated logic to include new order status.\n*/", "-- Insert audit records"], - "procedures": ["AuditOrderProcessing"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-dynamic-sql-error-handling.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-dynamic-sql-error-handling.json deleted file mode 100644 index 8600e30f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-dynamic-sql-error-handling.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE UpdateOrderStatus @orderId INT, @newStatus NVARCHAR(50) AS BEGIN SET NOCOUNT ON; BEGIN TRY BEGIN TRANSACTION; DECLARE @sql NVARCHAR(MAX) = N'UPDATE orders SET status = ''' + @newStatus + ''' WHERE id = ' + CAST(@orderId AS NVARCHAR(10)) + ';'; EXEC sp_executesql @sql; COMMIT TRANSACTION; END TRY BEGIN CATCH ROLLBACK TRANSACTION; THROW; END CATCH; END;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE UpdateOrderStatus @orderId INT, @newStatus NVARCHAR(?) AS BEGIN SET NOCOUNT ON; BEGIN TRY BEGIN TRANSACTION; DECLARE @sql NVARCHAR(MAX) = N ? ? + @newStatus + ? ? 
+ CAST(@orderId AS NVARCHAR(?)) + ?; EXEC sp_executesql @sql; COMMIT TRANSACTION; END TRY BEGIN CATCH ROLLBACK TRANSACTION; THROW; END CATCH; END;", - "statement_metadata": { - "size": 43, - "tables": [], - "commands": ["CREATE", "ALTER", "BEGIN", "EXEC", "COMMIT"], - "comments": [], - "procedures": ["UpdateOrderStatus"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-dynamic-sql-execution.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-dynamic-sql-execution.json deleted file mode 100644 index 9d3e043d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-dynamic-sql-execution.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE DynamicCustomerQuery @query NVARCHAR(MAX) AS\nBEGIN\n-- Executes a dynamic SQL query based on the input.\n-- Used for flexible customer data retrieval.\nSET NOCOUNT ON;\nEXEC sp_executesql @query;\nEND;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE DynamicCustomerQuery @query NVARCHAR(MAX) AS BEGIN SET NOCOUNT ON; EXEC sp_executesql @query; END;", - "statement_metadata": { - "size": 136, - "tables": [], - "commands": ["CREATE", "ALTER", "BEGIN", "EXEC"], - "comments": ["-- Executes a dynamic SQL query based on the input.", "-- Used for flexible customer data retrieval."], - "procedures": ["DynamicCustomerQuery"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-executing-another.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-executing-another.json deleted file mode 100644 index abf61515..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-executing-another.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE FullOrderProcessing AS BEGIN SET NOCOUNT ON; EXEC ProcessOrders; EXEC UpdateOrderStatus 1, 'Dispatched'; END;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE FullOrderProcessing AS BEGIN SET NOCOUNT ON; EXEC ProcessOrders; EXEC UpdateOrderStatus ?, ?; END;", - "statement_metadata": { - "size": 39, - "tables": [], - "commands": ["CREATE", "ALTER", "BEGIN", "EXEC"], - "comments": [], - "procedures": ["FullOrderProcessing"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-temp-tables-transaction.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-temp-tables-transaction.json deleted file mode 100644 index 
d83db6ba..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-temp-tables-transaction.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE ProcessOrders AS BEGIN SET NOCOUNT ON; BEGIN TRANSACTION; CREATE TABLE #TempOrders (id INT, status NVARCHAR(50)); INSERT INTO #TempOrders (id, status) SELECT id, status FROM orders WHERE status = 'Pending'; UPDATE orders SET status = 'Processing' WHERE status = 'Pending'; COMMIT TRANSACTION; SELECT * FROM #TempOrders; DROP TABLE #TempOrders; END;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE ProcessOrders AS BEGIN SET NOCOUNT ON; BEGIN TRANSACTION; CREATE TABLE #TempOrders (id INT, status NVARCHAR(?)); INSERT INTO #TempOrders (id, status) SELECT id, status FROM orders WHERE status = ?; UPDATE orders SET status = ? WHERE status = ?; COMMIT TRANSACTION; SELECT * FROM #TempOrders; DROP TABLE #TempOrders; END;", - "statement_metadata": { - "size": 74, - "tables": ["#TempOrders", "orders"], - "commands": ["CREATE", "ALTER", "BEGIN", "INSERT", "SELECT", "UPDATE", "COMMIT", "DROP"], - "comments": [], - "procedures": ["ProcessOrders"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-try-catch-error.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-try-catch-error.json deleted file mode 100644 index 909c2889..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-try-catch-error.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE ProcessPayment @orderId INT, @amount MONEY AS\nBEGIN\n-- This procedure processes payments for orders.\n-- It includes error handling using TRY-CATCH.\nSET NOCOUNT ON;\nBEGIN TRY\n -- Attempt to process the payment\n UPDATE orders SET payment_received = 1, payment_amount = @amount WHERE id = @orderId;\nEND TRY\nBEGIN CATCH\n -- Handle the error\n INSERT INTO error_log (error_message) VALUES (ERROR_MESSAGE());\nEND CATCH\nEND;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE ProcessPayment @orderId INT, @amount MONEY AS BEGIN SET NOCOUNT ON; BEGIN TRY UPDATE orders SET payment_received = ?, payment_amount = @amount WHERE id = @orderId; END TRY BEGIN CATCH INSERT INTO error_log (error_message) VALUES (ERROR_MESSAGE()); END CATCH END;", - "statement_metadata": { - "size": 203, - "tables": ["orders", "error_log"], - "commands": ["CREATE", "ALTER", "BEGIN", "UPDATE", "INSERT"], - "comments": ["-- This procedure processes payments for orders.", "-- It includes error handling using TRY-CATCH.", "-- Attempt to process the payment", "-- Handle the error"], - "procedures": ["ProcessPayment"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-version-control.json 
b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-version-control.json deleted file mode 100644 index 68166fd5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-version-control.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE AuditOrderProcessing AS\nBEGIN\n/*\n Procedure: AuditOrderProcessing\n Author: Jane Doe\n Created: 2023-04-15\n Description: This procedure is designed to audit order processing steps.\n It checks each step of the order processing workflow and logs it into the audit_log table.\n Modifications:\n - 2023-04-20: Added additional logging for failed orders.\n - 2023-05-01: Updated logic to include new order status.\n*/\nSET NOCOUNT ON;\n-- Insert audit records\nINSERT INTO audit_log (description) SELECT 'Order processed: ' + CAST(id AS NVARCHAR(10)) FROM orders WHERE status = 'Processed';\nEND;", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE AuditOrderProcessing AS BEGIN SET NOCOUNT ON; INSERT INTO audit_log (description) SELECT ? + CAST(id AS NVARCHAR(?)) FROM orders WHERE status = ?; END;", - "statement_metadata": { - "size": 478, - "tables": ["audit_log", "orders"], - "commands": ["CREATE", "ALTER", "BEGIN", "INSERT", "SELECT"], - "comments": ["/*\n Procedure: AuditOrderProcessing\n Author: Jane Doe\n Created: 2023-04-15\n Description: This procedure is designed to audit order processing steps.\n It checks each step of the order processing workflow and logs it into the audit_log table.\n Modifications:\n - 2023-04-20: Added additional logging for failed orders.\n - 2023-05-01: Updated logic to include new order status.\n*/", "-- Insert audit records"], - "procedures": ["AuditOrderProcessing"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-with-params-and-execution.json.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-with-params-and-execution.json.json deleted file mode 100644 index 9c3d20a8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/procedure/stored-procedure-with-params-and-execution.json.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "input": "CREATE OR ALTER PROCEDURE UpdateCustomerStatus @customerId INT, @newStatus NVARCHAR(50) AS\nBEGIN\n-- This procedure updates the status of a customer.\n-- It takes the customer ID and the new status as parameters.\nSET NOCOUNT ON;\nUPDATE customers SET status = @newStatus WHERE id = @customerId;\nEND;\nEXEC UpdateCustomerStatus 123, 'Active';", - "outputs": [ - { - "expected": "CREATE OR ALTER PROCEDURE UpdateCustomerStatus @customerId INT, @newStatus NVARCHAR(?) 
AS BEGIN SET NOCOUNT ON; UPDATE customers SET status = @newStatus WHERE id = @customerId; END; EXEC UpdateCustomerStatus ?, ?;", - "statement_metadata": { - "size": 167, - "tables": ["customers"], - "commands": ["CREATE", "ALTER", "BEGIN", "UPDATE", "EXEC"], - "comments": ["-- This procedure updates the status of a customer.", "-- It takes the customer ID and the new status as parameters."], - "procedures": ["UpdateCustomerStatus"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] -} diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/basic-select.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/basic-select.json deleted file mode 100644 index 1c103a26..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/basic-select.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, name, email FROM customers WHERE active = 1;", - "outputs": [ - { - "expected": "SELECT id, name, email FROM customers WHERE active = ?", - "statement_metadata": { - "size": 15, - "tables": ["customers"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/data-compression-features.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/data-compression-features.json deleted file mode 100644 index 96ca517a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/data-compression-features.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE TABLE orders_compressed WITH (DATA_COMPRESSION = PAGE) AS SELECT * FROM orders;", - "outputs": [ - { - "expected": "CREATE TABLE orders_compressed WITH ( DATA_COMPRESSION = PAGE ) AS SELECT * FROM orders", - "statement_metadata": { - "size": 35, - "tables": ["orders_compressed", "orders"], - "commands": ["CREATE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/filetable-storage.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/filetable-storage.json deleted file mode 100644 index 15c83fd7..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/filetable-storage.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE TABLE DocumentStore AS FileTable;", - "outputs": [ - { - "expected": "CREATE TABLE DocumentStore", - "statement_metadata": { - "size": 19, - "tables": ["DocumentStore"], - "commands": ["CREATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/pivot-unpivot-operations.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/pivot-unpivot-operations.json deleted file mode 100644 index a2e5b22c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/pivot-unpivot-operations.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * FROM (SELECT customer_id, product_id, amount FROM order_details) AS SourceTable PIVOT (SUM(amount) FOR product_id IN ([1], [2], [3])) AS PivotTable;", - "outputs": [ - { - "expected": "SELECT * FROM ( SELECT customer_id, product_id, amount FROM order_details ) PIVOT ( SUM ( amount ) FOR product_id 
IN ( ? ) )", - "statement_metadata": { - "size": 19, - "tables": ["order_details"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-choose.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-choose.json deleted file mode 100644 index 821d7276..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-choose.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, name, CHOOSE(department_id, 'Sales', 'Engineering', 'HR') AS DepartmentName FROM employees;", - "outputs": [ - { - "expected": "SELECT id, name, CHOOSE ( department_id, ?, ?, ? ) FROM employees", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-format.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-format.json deleted file mode 100644 index e1129aae..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-format.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT name, FORMAT(joining_date, 'dd-MM-yyyy') AS FormattedJoiningDate FROM employees;", - "outputs": [ - { - "expected": "SELECT name, FORMAT ( joining_date, ? ) FROM employees", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-full-outer-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-full-outer-join.json deleted file mode 100644 index d2096762..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-full-outer-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT c.name, o.order_date FROM customers c FULL OUTER JOIN orders o ON c.id = o.customer_id WHERE c.region = 'West' OR o.amount > 500;", - "outputs": [ - { - "expected": "SELECT c.name, o.order_date FROM customers c FULL OUTER JOIN orders o ON c.id = o.customer_id WHERE c.region = ? OR o.amount > ?", - "statement_metadata": { - "size": 25, - "tables": ["customers", "orders"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-identity.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-identity.json deleted file mode 100644 index 18bb4e11..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-identity.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO employees (name, department) VALUES ('John Doe', 'Sales'); SELECT @@IDENTITY AS LastInsertedIdentity;", - "outputs": [ - { - "expected": "INSERT INTO employees ( name, department ) VALUES ( ? 
); SELECT @@IDENTITY", - "statement_metadata": { - "size": 21, - "tables": ["employees"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-iif.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-iif.json deleted file mode 100644 index 9ecc3b4a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-iif.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT name, IIF(gender = 'M', 'Male', 'Female') AS GenderDescription FROM employees;", - "outputs": [ - { - "expected": "SELECT name, IIF ( gender = ?, ?, ? ) FROM employees", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-join-aggregation.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-join-aggregation.json deleted file mode 100644 index 1de78462..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-join-aggregation.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT c.name, SUM(o.amount) AS total_sales FROM customers c INNER JOIN orders o ON c.id = o.customer_id GROUP BY c.name;", - "outputs": [ - { - "expected": "SELECT c.name, SUM ( o.amount ) FROM customers c INNER JOIN orders o ON c.id = o.customer_id GROUP BY c.name", - "statement_metadata": { - "size": 25, - "tables": ["customers", "orders"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-system-user.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-system-user.json deleted file mode 100644 index f841baab..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-system-user.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT SYSTEM_USER AS CurrentSystemUser, USER_NAME() AS CurrentDatabaseUser, NEWID() AS UniqueIdentifier;", - "outputs": [ - { - "expected": "SELECT SYSTEM_USER, USER_NAME ( ), NEWID ( )", - "statement_metadata": { - "size": 6, - "tables": [], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-using-pivot.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-using-pivot.json deleted file mode 100644 index 3c6c8ce2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-using-pivot.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * FROM (SELECT customer_id, product_id, amount FROM orders) AS SourceTable PIVOT (SUM(amount) FOR product_id IN ([1], [2], [3])) AS PivotTable;", - "outputs": [ - { - "expected": "SELECT * FROM ( SELECT customer_id, product_id, amount FROM orders ) PIVOT ( SUM ( amount ) FOR product_id IN ( ? 
) )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-using-try-convert.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-using-try-convert.json deleted file mode 100644 index aed4952d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-using-try-convert.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, TRY_CONVERT(float, total_amount) AS TotalFloat FROM orders WHERE TRY_CONVERT(float, total_amount) IS NOT NULL;", - "outputs": [ - { - "expected": "SELECT id, TRY_CONVERT ( float, total_amount ) FROM orders WHERE TRY_CONVERT ( float, total_amount ) IS NOT ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-cte.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-cte.json deleted file mode 100644 index 48add7e9..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-cte.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "WITH RankedOrders AS (SELECT o.id, o.customer_id, RANK() OVER (PARTITION BY o.customer_id ORDER BY o.amount DESC) AS rnk FROM orders o) SELECT id FROM RankedOrders WHERE rnk = 1;", - "outputs": [ - { - "expected": "WITH RankedOrders AS ( SELECT o.id, o.customer_id, RANK ( ) OVER ( PARTITION BY o.customer_id ORDER BY o.amount DESC ) FROM orders o ) SELECT id FROM RankedOrders WHERE rnk = ?", - "statement_metadata": { - "size": 24, - "tables": ["orders", "RankedOrders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "WITH RankedOrders AS (SELECT o.id, o.customer_id, RANK() OVER (PARTITION BY o.customer_id ORDER BY o.amount DESC) AS rnk FROM orders o) SELECT id FROM RankedOrders WHERE rnk = ?;", - "normalizer_config": { - "keep_sql_alias": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-offset-fetch.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-offset-fetch.json deleted file mode 100644 index c00765a8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-offset-fetch.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, name FROM customers ORDER BY name OFFSET 10 ROWS FETCH NEXT 5 ROWS ONLY;", - "outputs": [ - { - "expected": "SELECT id, name FROM customers ORDER BY name OFFSET ? ROWS FETCH NEXT ? 
ROWS ONLY", - "statement_metadata": { - "size": 15, - "tables": ["customers"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-string-agg.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-string-agg.json deleted file mode 100644 index 838f137e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-string-agg.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT STRING_AGG(name, ', ') AS names FROM customers WHERE region = 'East';", - "outputs": [ - { - "expected": "SELECT STRING_AGG ( name, ? ) FROM customers WHERE region = ?", - "statement_metadata": { - "size": 15, - "tables": ["customers"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-table-sample.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-table-sample.json deleted file mode 100644 index 4953ab6e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-table-sample.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * FROM customers TABLESAMPLE (10 PERCENT);", - "outputs": [ - { - "expected": "SELECT * FROM customers TABLESAMPLE ( ? PERCENT )", - "statement_metadata": { - "size": 15, - "tables": ["customers"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-window-function.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-window-function.json deleted file mode 100644 index cf918833..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/select-with-window-function.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "SELECT id, amount, ROW_NUMBER() OVER (ORDER BY amount DESC) AS rownum FROM orders;", - "outputs": [ - { - "expected": "SELECT id, amount, ROW_NUMBER ( ) OVER ( ORDER BY amount DESC ) AS rownum FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT id, amount, ROW_NUMBER() OVER (ORDER BY amount DESC) AS rownum FROM orders;", - "normalizer_config": { - "keep_sql_alias": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/service-broker.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/service-broker.json deleted file mode 100644 index 4f6cc45c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/service-broker.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE MESSAGE TYPE RequestMessage VALIDATION = WELL_FORMED_XML; CREATE CONTRACT RequestContract (RequestMessage SENT BY INITIATOR);", - "outputs": [ - { - "expected": "CREATE MESSAGE TYPE RequestMessage VALIDATION = WELL_FORMED_XML; CREATE CONTRACT RequestContract ( RequestMessage SENT BY INITIATOR )", - "statement_metadata": { - "size": 6, - "tables": [], - "commands": ["CREATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git 
a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/spatial-data-types-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/spatial-data-types-functions.json deleted file mode 100644 index 92519e16..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/spatial-data-types-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT geography::Point(latitude, longitude, 4326).ToString() FROM locations;", - "outputs": [ - { - "expected": "SELECT geography :: Point ( latitude, longitude, ? ) . ToString ( ) FROM locations", - "statement_metadata": { - "size": 15, - "tables": ["locations"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/xml-data-types-queries.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/xml-data-types-queries.json deleted file mode 100644 index ad9cae93..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/select/xml-data-types-queries.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT xmlData.value('(/Customer/Name)[1]', 'nvarchar(100)') AS CustomerName FROM customerRecords;", - "outputs": [ - { - "expected": "SELECT xmlData.value ( ? ) FROM customerRecords", - "statement_metadata": { - "size": 21, - "tables": ["customerRecords"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/conditional-update-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/conditional-update-case.json deleted file mode 100644 index bc0c0f6e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/conditional-update-case.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = CASE WHEN amount >= 1000 THEN 'High Value' ELSE 'Regular' END;", - "outputs": [ - { - "expected": "UPDATE orders SET status = CASE WHEN amount >= ? THEN ? ELSE ? END", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-basic.json deleted file mode 100644 index 5fe74fe1..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Processed' WHERE order_date < GETDATE() - 30;", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? 
WHERE order_date < GETDATE ( ) - ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-complex-where.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-complex-where.json deleted file mode 100644 index 68e5c7fd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-complex-where.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Review Needed' WHERE customer_id IN (SELECT id FROM customers WHERE last_order_date < GETDATE() - 365) AND status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? WHERE customer_id IN ( SELECT id FROM customers WHERE last_order_date < GETDATE ( ) - ? ) AND status = ?", - "statement_metadata": { - "size": 27, - "tables": ["orders", "customers"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-from-aliases.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-from-aliases.json deleted file mode 100644 index b16fadd9..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-from-aliases.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE o SET o.status = 'Completed' FROM orders o WHERE o.order_date > '2023-01-01' AND o.amount > 500;", - "outputs": [ - { - "expected": "UPDATE o SET o.status = ? FROM orders o WHERE o.order_date > ? AND o.amount > ?", - "statement_metadata": { - "size": 13, - "tables": ["o", "orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-join-top.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-join-top.json deleted file mode 100644 index 7d268717..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-join-top.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE TOP (5) o SET o.status = 'Pending Review' FROM orders o INNER JOIN customers c ON o.customer_id = c.id WHERE c.region = 'North';", - "outputs": [ - { - "expected": "UPDATE TOP ( ? ) o SET o.status = ? FROM orders o INNER JOIN customers c ON o.customer_id = c.id WHERE c.region = ?", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["UPDATE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-rowlock-hint.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-rowlock-hint.json deleted file mode 100644 index f5a6d59f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-rowlock-hint.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders WITH (ROWLOCK) SET status = 'Processing' WHERE status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE orders WITH ( ROWLOCK ) SET status = ? 
WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-quoted-identifiers.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-quoted-identifiers.json deleted file mode 100644 index f2e62659..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-quoted-identifiers.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE [orders] SET [status] = 'Confirmed' WHERE [order_date] >= '2023-01-01';", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? WHERE order_date >= ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-top.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-top.json deleted file mode 100644 index e2c36d09..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-top.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE TOP (10) orders SET status = 'Reviewed' WHERE status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE TOP ( ? ) orders SET status = ? WHERE status = ?", - "statement_metadata": { - "size": 6, - "tables": [], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-variable-store-value.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-variable-store-value.json deleted file mode 100644 index c7d21a31..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-using-variable-store-value.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DECLARE @maxDate DATETIME; SET @maxDate = (SELECT MAX(order_date) FROM orders); UPDATE orders SET status = 'Old Order' WHERE order_date < @maxDate;", - "outputs": [ - { - "expected": "DECLARE @maxDate DATETIME; SET @maxDate = ( SELECT MAX ( order_date ) FROM orders ); UPDATE orders SET status = ? WHERE order_date < @maxDate", - "statement_metadata": { - "size": 18, - "tables": ["orders"], - "commands": ["SELECT", "UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-boolean-logic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-boolean-logic.json deleted file mode 100644 index 470f3955..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-boolean-logic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET is_priority = CASE WHEN total_amount > 1000 THEN 1 ELSE 0 END WHERE order_date > '2023-01-01';", - "outputs": [ - { - "expected": "UPDATE orders SET is_priority = CASE WHEN total_amount > ? THEN ? ELSE ? 
END WHERE order_date > ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-case.json deleted file mode 100644 index b9a20bbc..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-case.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = CASE WHEN amount > 1000 THEN 'High Value' ELSE 'Standard' END WHERE order_date >= '2023-01-01';", - "outputs": [ - { - "expected": "UPDATE orders SET status = CASE WHEN amount > ? THEN ? ELSE ? END WHERE order_date >= ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-cte.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-cte.json deleted file mode 100644 index c167ed39..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-cte.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "WITH UpdatedOrders AS (SELECT id FROM orders WHERE order_date < GETDATE() - 30) UPDATE o SET o.status = 'Archived' FROM orders o JOIN UpdatedOrders uo ON o.id = uo.id;", - "outputs": [ - { - "expected": "WITH UpdatedOrders AS ( SELECT id FROM orders WHERE order_date < GETDATE ( ) - ? ) UPDATE o SET o.status = ? FROM orders o JOIN UpdatedOrders uo ON o.id = uo.id", - "statement_metadata": { - "size": 36, - "tables": ["orders", "o", "UpdatedOrders"], - "commands": ["SELECT", "UPDATE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-date-manipulation.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-date-manipulation.json deleted file mode 100644 index 6a4fd85d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-date-manipulation.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET due_date = DATEADD(day, 10, order_date) WHERE status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE orders SET due_date = DATEADD ( day, ?, order_date ) WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-join.json deleted file mode 100644 index e17a7422..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE o SET o.status = 'Dispatched' FROM orders o INNER JOIN customers c ON o.customer_id = c.id WHERE c.region = 'West' AND o.status = 'Processed';", - "outputs": [ - { - "expected": "UPDATE o SET o.status = ? FROM orders o INNER JOIN customers c ON o.customer_id = c.id WHERE c.region = ? 
AND o.status = ?", - "statement_metadata": { - "size": 26, - "tables": ["o", "orders", "customers"], - "commands": ["UPDATE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-named-variables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-named-variables.json deleted file mode 100644 index 67b84476..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-named-variables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DECLARE @status NVARCHAR(50); SET @status = 'Delayed'; UPDATE orders SET status = @status WHERE order_date < GETDATE() - 60;", - "outputs": [ - { - "expected": "DECLARE @status NVARCHAR ( ? ); SET @status = ?; UPDATE orders SET status = @status WHERE order_date < GETDATE ( ) - ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-null-handling.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-null-handling.json deleted file mode 100644 index bcfce2b0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-null-handling.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET delivery_date = NULLIF(order_date, due_date) WHERE status = 'Cancelled';", - "outputs": [ - { - "expected": "UPDATE orders SET delivery_date = NULLIF ( order_date, due_date ) WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-numeric-calculation.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-numeric-calculation.json deleted file mode 100644 index 6b8ec4b7..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-numeric-calculation.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET total_amount = quantity * unit_price WHERE status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE orders SET total_amount = quantity * unit_price WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-output.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-output.json deleted file mode 100644 index a5edfb60..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-output.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Cancelled' OUTPUT deleted.id, deleted.status WHERE status = 'Pending' AND order_date < GETDATE() - 90;", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? OUTPUT deleted.id, deleted.status WHERE status = ? 
AND order_date < GETDATE ( ) - ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-string-concatenation.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-string-concatenation.json deleted file mode 100644 index b81b8138..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-string-concatenation.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET notes = CONCAT(notes, ' Updated on ', CONVERT(VARCHAR, GETDATE(), 101)) WHERE status = 'Shipped';", - "outputs": [ - { - "expected": "UPDATE orders SET notes = CONCAT ( notes, ?, CONVERT ( VARCHAR, GETDATE ( ), ? ) ) WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-subquery.json deleted file mode 100644 index 00d3be23..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mssql/update/update-with-subquery.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'High Priority' WHERE id IN (SELECT order_id FROM order_details WHERE quantity > 10);", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? WHERE id IN ( SELECT order_id FROM order_details WHERE quantity > ? )", - "statement_metadata": { - "size": 31, - "tables": ["orders", "order_details"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-poorly-written-sql.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-poorly-written-sql.json deleted file mode 100644 index 39a0d7fd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-poorly-written-sql.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT a.*, b.totalAmount, CASE WHEN c.id IS NOT NULL THEN d.description ELSE 'N/A' END AS description\n-- Joining table a with b to get total amounts. If c.id is not null, get description from d\nFROM (SELECT id, name, status, customer_id\n FROM orders\n WHERE order_date > DATE_ADD(CURDATE(), INTERVAL -6 MONTH)\n AND status IN ('Pending', 'Completed')\n AND customer_id IN (SELECT customer_id FROM customers WHERE region IN ('East', 'West') AND last_order_date > DATE_ADD(CURDATE(), INTERVAL -1 YEAR))\n ORDER BY name DESC) a\nINNER JOIN (SELECT order_id, SUM(amount) AS totalAmount FROM order_details GROUP BY order_id) b ON a.id = b.order_id\nLEFT JOIN audit_log c ON a.id = c.order_id\nLEFT JOIN (SELECT DISTINCT status, description FROM status_descriptions) d ON a.status = d.status\nWHERE a.name LIKE '%test%'\n-- Filtering on name containing 'test'\nAND (b.totalAmount > 1000 OR b.totalAmount IS NULL)\nORDER BY a.order_date DESC, a.name;", - "outputs": [ - { - "expected": "SELECT a. *, b.totalAmount, CASE WHEN c.id IS NOT ? THEN d.description ELSE ? END FROM ( SELECT id, name, status, customer_id FROM orders WHERE order_date > DATE_ADD ( CURDATE ( ), INTERVAL ? MONTH ) AND status IN ( ? 
) AND customer_id IN ( SELECT customer_id FROM customers WHERE region IN ( ? ) AND last_order_date > DATE_ADD ( CURDATE ( ), INTERVAL ? YEAR ) ) ORDER BY name DESC ) a INNER JOIN ( SELECT order_id, SUM ( amount ) FROM order_details GROUP BY order_id ) b ON a.id = b.order_id LEFT JOIN audit_log c ON a.id = c.order_id LEFT JOIN ( SELECT DISTINCT status, description FROM status_descriptions ) d ON a.status = d.status WHERE a.name LIKE ? AND ( b.totalAmount > ? OR b.totalAmount IS ? ) ORDER BY a.order_date DESC, a.name", - "statement_metadata": { - "size": 195, - "tables": ["orders", "customers", "order_details", "audit_log", "status_descriptions"], - "commands": ["SELECT", "JOIN"], - "comments": ["-- Joining table a with b to get total amounts. If c.id is not null, get description from d", "-- Filtering on name containing 'test'"], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-sql-multiple-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-sql-multiple-joins.json deleted file mode 100644 index e0bde0ba..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-sql-multiple-joins.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT a.id, a.name, IFNULL(b.totalAmount, 0) AS totalAmount, c.comment, d.productCount, e.latestOrderDate\n-- Extremely complex query combining multiple joins, subqueries, and inline views\nFROM (SELECT id, name FROM customers WHERE status = 'Active') a\nJOIN (SELECT customer_id, SUM(amount) AS totalAmount FROM orders GROUP BY customer_id) b ON a.id = b.customer_id\nLEFT JOIN (SELECT customer_id, comment FROM customer_feedback WHERE rating = 5 ORDER BY feedback_date DESC LIMIT 1) c ON a.id = c.customer_id\nLEFT JOIN (SELECT customer_id, COUNT(*) AS productCount FROM order_details GROUP BY customer_id) d ON a.id = d.customer_id\nLEFT JOIN (SELECT customer_id, MAX(order_date) AS latestOrderDate FROM orders WHERE status IN ('Completed', 'Shipped') GROUP BY customer_id) e ON a.id = e.customer_id\nWHERE a.name LIKE '%Corp%' AND (b.totalAmount > 1000 OR d.productCount > 5)\nORDER BY a.name, totalAmount DESC;", - "outputs": [ - { - "expected": "SELECT a.id, a.name, IFNULL ( b.totalAmount, ? ), c.comment, d.productCount, e.latestOrderDate FROM ( SELECT id, name FROM customers WHERE status = ? ) a JOIN ( SELECT customer_id, SUM ( amount ) FROM orders GROUP BY customer_id ) b ON a.id = b.customer_id LEFT JOIN ( SELECT customer_id, comment FROM customer_feedback WHERE rating = ? ORDER BY feedback_date DESC LIMIT ? ) c ON a.id = c.customer_id LEFT JOIN ( SELECT customer_id, COUNT ( * ) FROM order_details GROUP BY customer_id ) d ON a.id = d.customer_id LEFT JOIN ( SELECT customer_id, MAX ( order_date ) FROM orders WHERE status IN ( ? ) GROUP BY customer_id ) e ON a.id = e.customer_id WHERE a.name LIKE ? AND ( b.totalAmount > ? OR d.productCount > ? 
) ORDER BY a.name, totalAmount DESC", - "statement_metadata": { - "size": 136, - "tables": ["customers", "orders", "customer_feedback", "order_details"], - "commands": ["SELECT", "JOIN"], - "comments": ["-- Extremely complex query combining multiple joins, subqueries, and inline views"], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-sql-nested-subqueries.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-sql-nested-subqueries.json deleted file mode 100644 index a086430d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/complex/super-complex-sql-nested-subqueries.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "SELECT t1.id, t1.status, t3.totalAmount, t4.commentsCount, CASE WHEN t5.latestCommentDate IS NOT NULL THEN t5.latestCommentDate ELSE 'No Comments' END AS latestComment\n-- Complex query joining multiple subqueries and using conditional logic\nFROM (SELECT id, status FROM orders WHERE customer_id IN (SELECT id FROM customers WHERE region = 'North') AND order_date > (SELECT MAX(order_date) FROM orders WHERE status = 'Pending')) t1\nJOIN (SELECT order_id, SUM(amount) AS totalAmount FROM order_details WHERE product_id IN (SELECT id FROM products WHERE name LIKE '%Premium%') GROUP BY order_id) t3 ON t1.id = t3.order_id\nLEFT JOIN (SELECT order_id, COUNT(*) AS commentsCount FROM order_comments GROUP BY order_id) t4 ON t1.id = t4.order_id\nLEFT JOIN (SELECT order_id, MAX(comment_date) AS latestCommentDate FROM order_comments WHERE comment LIKE '%urgent%' GROUP BY order_id) t5 ON t1.id = t5.order_id\nWHERE t1.status NOT IN ('Cancelled', 'Returned') AND (t3.totalAmount > 500 OR t4.commentsCount > 10)\nORDER BY t1.id, latestComment DESC;", - "outputs": [ - { - "expected": "SELECT t?.id, t?.status, t?.totalAmount, t?.commentsCount, CASE WHEN t?.latestCommentDate IS NOT ? THEN t?.latestCommentDate ELSE ? END FROM ( SELECT id, status FROM orders WHERE customer_id IN ( SELECT id FROM customers WHERE region = ? ) AND order_date > ( SELECT MAX ( order_date ) FROM orders WHERE status = ? ) ) t? JOIN ( SELECT order_id, SUM ( amount ) FROM order_details WHERE product_id IN ( SELECT id FROM products WHERE name LIKE ? ) GROUP BY order_id ) t? ON t?.id = t?.order_id LEFT JOIN ( SELECT order_id, COUNT ( * ) FROM order_comments GROUP BY order_id ) t? ON t?.id = t?.order_id LEFT JOIN ( SELECT order_id, MAX ( comment_date ) FROM order_comments WHERE comment LIKE ? GROUP BY order_id ) t? ON t?.id = t?.order_id WHERE t?.status NOT IN ( ? ) AND ( t?.totalAmount > ? OR t?.commentsCount > ? 
) ORDER BY t?.id, latestComment DESC", - "statement_metadata": { - "size": 132, - "tables": ["orders", "customers", "order_details", "products", "order_comments"], - "commands": ["SELECT", "JOIN"], - "comments": ["-- Complex query joining multiple subqueries and using conditional logic"], - "procedures": [] - } - } - ] -} diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-basic.json deleted file mode 100644 index eb9025d8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status = 'Cancelled';", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-cascade.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-cascade.json deleted file mode 100644 index 2e4a3b54..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-cascade.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM customers WHERE region = 'North'; -- Assuming CASCADE DELETE is set up on the foreign key in the orders table", - "outputs": [ - { - "expected": "DELETE FROM customers WHERE region = ?", - "statement_metadata": { - "size": 90, - "tables": ["customers"], - "commands": ["DELETE"], - "comments": ["-- Assuming CASCADE DELETE is set up on the foreign key in the orders table"], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-cascading-triggers.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-cascading-triggers.json deleted file mode 100644 index 6ebeb031..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-cascading-triggers.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM customers WHERE id = 1; -- Assumes a trigger exists for cascading delete to orders", - "outputs": [ - { - "expected": "DELETE FROM customers WHERE id = ?", - "statement_metadata": { - "size": 73, - "tables": ["customers"], - "commands": ["DELETE"], - "comments": ["-- Assumes a trigger exists for cascading delete to orders"], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-conditional-logic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-conditional-logic.json deleted file mode 100644 index 27eb2283..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-conditional-logic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status = IF(DAYOFWEEK(CURDATE()) = 1, 'Pending', 'Completed');", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status = IF ( DAYOFWEEK ( CURDATE ( ) ) = ?, ?, ? 
)", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-foreign-key-constraints.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-foreign-key-constraints.json deleted file mode 100644 index c5ab710f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-foreign-key-constraints.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE id IN (SELECT order_id FROM order_details WHERE quantity = 0);", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE id IN ( SELECT order_id FROM order_details WHERE quantity = ? )", - "statement_metadata": { - "size": 31, - "tables": ["orders", "order_details"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-free-disk-space.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-free-disk-space.json deleted file mode 100644 index b2507bb4..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-free-disk-space.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE order_date < '2020-01-01'; OPTIMIZE TABLE orders;", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE order_date < ?; OPTIMIZE TABLE orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-join-multiple-conditions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-join-multiple-conditions.json deleted file mode 100644 index b6f629cd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-join-multiple-conditions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE o FROM orders o JOIN customers c ON o.customer_id = c.id WHERE o.status = 'Completed' AND c.region = 'South';", - "outputs": [ - { - "expected": "DELETE o FROM orders o JOIN customers c ON o.customer_id = c.id WHERE o.status = ? 
AND c.region = ?", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["DELETE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-lock-tables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-lock-tables.json deleted file mode 100644 index 84e1c59c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-lock-tables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "LOCK TABLES orders WRITE; DELETE FROM orders WHERE status = 'Failed'; UNLOCK TABLES;", - "outputs": [ - { - "expected": "LOCK TABLES orders WRITE; DELETE FROM orders WHERE status = ?; UNLOCK TABLES", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-multiple-tables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-multiple-tables.json deleted file mode 100644 index d90a73b9..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-multiple-tables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE orders, order_details FROM orders INNER JOIN order_details ON orders.id = order_details.order_id WHERE orders.status = 'Obsolete';", - "outputs": [ - { - "expected": "DELETE orders, order_details FROM orders INNER JOIN order_details ON orders.id = order_details.order_id WHERE orders.status = ?", - "statement_metadata": { - "size": 29, - "tables": ["orders", "order_details"], - "commands": ["DELETE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-optimized-conditions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-optimized-conditions.json deleted file mode 100644 index e521eb50..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-optimized-conditions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status = 'Completed' AND order_date < DATE_SUB(NOW(), INTERVAL 1 YEAR);", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status = ? AND order_date < DATE_SUB ( NOW ( ), INTERVAL ? YEAR )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-order-by-limit.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-order-by-limit.json deleted file mode 100644 index 3f054e3c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-order-by-limit.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status = 'Completed' ORDER BY order_date DESC LIMIT 5;", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status = ? 
ORDER BY order_date DESC LIMIT ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-range-conditions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-range-conditions.json deleted file mode 100644 index 0c41e2d2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-range-conditions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE amount BETWEEN 100 AND 500;", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE amount BETWEEN ? AND ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-regular-expressions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-regular-expressions.json deleted file mode 100644 index 7fa55b7e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-regular-expressions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status REGEXP '^C.*';", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status REGEXP ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-safe-update-mode.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-safe-update-mode.json deleted file mode 100644 index 9abb2814..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-safe-update-mode.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SET SQL_SAFE_UPDATES = 0; DELETE FROM orders WHERE customer_id = 1; SET SQL_SAFE_UPDATES = 1;", - "outputs": [ - { - "expected": "SET SQL_SAFE_UPDATES = ?; DELETE FROM orders WHERE customer_id = ?; SET SQL_SAFE_UPDATES = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-subquery-optimization.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-subquery-optimization.json deleted file mode 100644 index 14944cec..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-subquery-optimization.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE id IN (SELECT id FROM orders WHERE status = 'Failed' LIMIT 10);", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE id IN ( SELECT id FROM orders WHERE status = ? LIMIT ? 
)", - "statement_metadata": { - "size": 18, - "tables": ["orders"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-truncate.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-truncate.json deleted file mode 100644 index 6a7053a8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-truncate.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "TRUNCATE TABLE order_details;", - "outputs": [ - { - "expected": "TRUNCATE TABLE order_details", - "statement_metadata": { - "size": 21, - "tables": ["order_details"], - "commands": ["TRUNCATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-using-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-using-subquery.json deleted file mode 100644 index 2d739ca4..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-using-subquery.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE customer_id IN (SELECT id FROM customers WHERE region = 'West');", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE customer_id IN ( SELECT id FROM customers WHERE region = ? )", - "statement_metadata": { - "size": 27, - "tables": ["orders", "customers"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-join.json deleted file mode 100644 index c6def56a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE o FROM orders o JOIN customers c ON o.customer_id = c.id WHERE c.region = 'East' AND o.status = 'Pending';", - "outputs": [ - { - "expected": "DELETE o FROM orders o JOIN customers c ON o.customer_id = c.id WHERE c.region = ? AND o.status = ?", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["DELETE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-limit.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-limit.json deleted file mode 100644 index 60b99764..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-limit.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE status = 'Pending' LIMIT 10;", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE status = ? 
LIMIT ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-user-variables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-user-variables.json deleted file mode 100644 index a0c41cc6..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/delete/delete-with-user-variables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SET @max_id = (SELECT MAX(id) FROM orders); DELETE FROM orders WHERE id = @max_id;", - "outputs": [ - { - "expected": "SET @max_id = ( SELECT MAX ( id ) FROM orders ); DELETE FROM orders WHERE id = @max_id", - "statement_metadata": { - "size": 18, - "tables": ["orders"], - "commands": ["SELECT", "DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/batch-insert-multiple-rows.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/batch-insert-multiple-rows.json deleted file mode 100644 index 6ff42c67..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/batch-insert-multiple-rows.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status) VALUES (1, 'Pending'), (2, 'Completed'), (3, 'Processing');", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status ) VALUES ( ? ), ( ? ), ( ? )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-auto-increment.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-auto-increment.json deleted file mode 100644 index 0c866869..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-auto-increment.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status) VALUES (3, 'Processing');", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status ) VALUES ( ? )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-basic.json deleted file mode 100644 index 7fce1641..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, order_date, status) VALUES (1, NOW(), 'Pending');", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, order_date, status ) VALUES ( ?, NOW ( ), ? 
)", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-blob-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-blob-data.json deleted file mode 100644 index 10f1fe22..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-blob-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, document) VALUES (5, 'Pending', LOAD_FILE('/path/to/file'));", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, document ) VALUES ( ?, LOAD_FILE ( ? ) )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-enum-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-enum-data.json deleted file mode 100644 index 2297c9b8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-enum-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, order_type) VALUES (7, 'Pending', 'Express');", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, order_type ) VALUES ( ? )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-ignore.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-ignore.json deleted file mode 100644 index 706788ae..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-ignore.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT IGNORE INTO orders (id, customer_id, status) VALUES (1, 10, 'Cancelled');", - "outputs": [ - { - "expected": "INSERT IGNORE INTO orders ( id, customer_id, status ) VALUES ( ? )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-json-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-json-data.json deleted file mode 100644 index 43566686..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-json-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, details) VALUES (1, 'Pending', '{\"items\": [\"item1\", \"item2\"]}');", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, details ) VALUES ( ? 
)", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-on-duplicate-key.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-on-duplicate-key.json deleted file mode 100644 index 4dc3f78b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-on-duplicate-key.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (id, customer_id, status) VALUES (100, 2, 'Pending') ON DUPLICATE KEY UPDATE status = 'Pending';", - "outputs": [ - { - "expected": "INSERT INTO orders ( id, customer_id, status ) VALUES ( ? ) ON DUPLICATE KEY UPDATE status = ?", - "statement_metadata": { - "size": 24, - "tables": ["orders", "status"], - "commands": ["INSERT", "UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-select-union.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-select-union.json deleted file mode 100644 index f9a24172..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-select-union.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status) SELECT customer_id, status FROM archived_orders UNION ALL SELECT customer_id, status FROM special_orders;", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status ) SELECT customer_id, status FROM archived_orders UNION ALL SELECT customer_id, status FROM special_orders", - "statement_metadata": { - "size": 47, - "tables": ["orders", "archived_orders", "special_orders"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-spatial-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-spatial-data.json deleted file mode 100644 index e7a245ed..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-spatial-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, location) VALUES (6, 'Delivered', ST_GeomFromText('POINT(1 1)'));", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, location ) VALUES ( ?, ST_GeomFromText ( ? ) )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-using-last-insert-id.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-using-last-insert-id.json deleted file mode 100644 index 63eaa529..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-using-last-insert-id.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO customers (name) VALUES ('John Doe'); INSERT INTO orders (customer_id, status) VALUES (LAST_INSERT_ID(), 'Pending');", - "outputs": [ - { - "expected": "INSERT INTO customers ( name ) VALUES ( ? ); INSERT INTO orders ( customer_id, status ) VALUES ( LAST_INSERT_ID ( ), ? 
)", - "statement_metadata": { - "size": 21, - "tables": ["customers", "orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-using-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-using-subquery.json deleted file mode 100644 index 03fbae9d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-using-subquery.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO order_audit (order_id, status) SELECT id, status FROM orders WHERE customer_id = 1;", - "outputs": [ - { - "expected": "INSERT INTO order_audit ( order_id, status ) SELECT id, status FROM orders WHERE customer_id = ?", - "statement_metadata": { - "size": 29, - "tables": ["order_audit", "orders"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-conditional-logic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-conditional-logic.json deleted file mode 100644 index 61d60872..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-conditional-logic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, amount) SELECT id, 'New', IF(region = 'West', 100, 50) FROM customers;", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, amount ) SELECT id, ?, IF ( region = ?, ?, ? ) FROM customers", - "statement_metadata": { - "size": 27, - "tables": ["orders", "customers"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-curdate-curtime.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-curdate-curtime.json deleted file mode 100644 index 1eb52dac..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-curdate-curtime.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, order_date, order_time) VALUES (15, 'Pending', CURDATE(), CURTIME());", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, order_date, order_time ) VALUES ( ?, CURDATE ( ), CURTIME ( ) )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-encryption-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-encryption-functions.json deleted file mode 100644 index 40427ead..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-encryption-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, encrypted_note) VALUES (13, 'Pending', AES_ENCRYPT('Confidential note', 'encryption_key'));", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, encrypted_note ) VALUES ( ?, AES_ENCRYPT ( ? 
) )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-generated-columns.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-generated-columns.json deleted file mode 100644 index 7e6b9f55..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-generated-columns.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, total_incl_tax) VALUES (12, 'Pending', 150); -- total_incl_tax is a generated column", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, total_incl_tax ) VALUES ( ? )", - "statement_metadata": { - "size": 51, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": ["-- total_incl_tax is a generated column"], - "procedures": [] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-replace.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-replace.json deleted file mode 100644 index 651ee691..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-replace.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "REPLACE INTO orders (id, customer_id, status) VALUES (1, 9, 'Completed');", - "outputs": [ - { - "expected": "REPLACE INTO orders ( id, customer_id, status ) VALUES ( ? )", - "statement_metadata": { - "size": 6, - "tables": ["orders"], - "commands": [], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-set-syntax.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-set-syntax.json deleted file mode 100644 index 21afae77..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-set-syntax.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders SET customer_id = 8, status = 'Processing', order_date = CURDATE();", - "outputs": [ - { - "expected": "INSERT INTO orders SET customer_id = ?, status = ?, order_date = CURDATE ( )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-spatial-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-spatial-data.json deleted file mode 100644 index 4059132b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-spatial-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, location) VALUES (14, 'Pending', ST_GeomFromText('POINT(1 1)'));", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, location ) VALUES ( ?, ST_GeomFromText ( ? 
) )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-timestamp.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-timestamp.json deleted file mode 100644 index e93ee978..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/insert/insert-with-timestamp.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO orders (customer_id, status, created_at) VALUES (4, 'Shipped', CURRENT_TIMESTAMP);", - "outputs": [ - { - "expected": "INSERT INTO orders ( customer_id, status, created_at ) VALUES ( ?, CURRENT_TIMESTAMP )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/complex-procedure-error-handling.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/complex-procedure-error-handling.json deleted file mode 100644 index 7137fbbf..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/complex-procedure-error-handling.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE PROCEDURE UpdateOrderStatus(IN orderId INT, IN newStatus VARCHAR(20)) BEGIN\n DECLARE EXIT HANDLER FOR SQLEXCEPTION\n BEGIN\n -- Handle error\n ROLLBACK;\n END;\n START TRANSACTION;\n UPDATE orders SET status = newStatus WHERE id = orderId;\n IF ROW_COUNT() = 0 THEN\n SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'No rows updated';\n END IF;\n COMMIT;\n END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE UpdateOrderStatus ( IN orderId INT, IN newStatus VARCHAR ( ? ) ) BEGIN DECLARE EXIT HANDLER FOR SQLEXCEPTION BEGIN ROLLBACK; END; START TRANSACTION; UPDATE orders SET status = newStatus WHERE id = orderId; IF ROW_COUNT ( ) = ? THEN SIGNAL SQLSTATE ? 
SET MESSAGE_TEXT = ?; END IF; COMMIT; END", - "statement_metadata": { - "size": 61, - "tables": ["orders"], - "commands": ["CREATE", "BEGIN", "UPDATE", "COMMIT"], - "comments": ["-- Handle error"], - "procedures": ["UpdateOrderStatus"] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-basic.json deleted file mode 100644 index e8487b6b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-basic.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "CREATE PROCEDURE GetAllOrders() BEGIN SELECT * FROM orders; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE GetAllOrders ( ) BEGIN SELECT * FROM orders; END", - "statement_metadata": { - "size": 35, - "tables": ["orders"], - "commands": ["CREATE", "BEGIN", "SELECT"], - "comments": [], - "procedures": ["GetAllOrders"] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-conditional-logic-loop.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-conditional-logic-loop.json deleted file mode 100644 index cb2b88ef..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-conditional-logic-loop.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "CREATE PROCEDURE ProcessOrders() BEGIN\n DECLARE done INT DEFAULT 0;\n DECLARE a INT;\n DECLARE cur1 CURSOR FOR SELECT id FROM orders WHERE status = 'Pending';\n DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = 1;\n OPEN cur1;\n read_loop: LOOP\n FETCH cur1 INTO a;\n IF done THEN\n LEAVE read_loop;\n END IF;\n UPDATE orders SET status = 'Processing' WHERE id = a;\n END LOOP;\n CLOSE cur1;\n END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE ProcessOrders ( ) BEGIN DECLARE done INT DEFAULT ?; DECLARE a INT; DECLARE cur? CURSOR FOR SELECT id FROM orders WHERE status = ?; DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = ?; OPEN cur?; read_loop : LOOP FETCH cur? INTO a; IF done THEN LEAVE read_loop; END IF; UPDATE orders SET status = ? 
WHERE id = a; END LOOP; CLOSE cur?; END", - "statement_metadata": { - "size": 43, - "tables": ["orders", "a"], - "commands": ["CREATE", "BEGIN", "SELECT", "UPDATE"], - "comments": [], - "procedures": ["ProcessOrders"] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-cursor.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-cursor.json deleted file mode 100644 index d0e3462b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-cursor.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE PROCEDURE FetchOrders() BEGIN DECLARE done INT DEFAULT FALSE; DECLARE cur CURSOR FOR SELECT id FROM orders; DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = TRUE; OPEN cur; read_loop: LOOP FETCH cur INTO order_id; IF done THEN LEAVE read_loop; END IF; /* process each order */ END LOOP; CLOSE cur; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE FetchOrders ( ) BEGIN DECLARE done INT DEFAULT ?; DECLARE cur CURSOR FOR SELECT id FROM orders; DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = ?; OPEN cur; read_loop : LOOP FETCH cur INTO order_id; IF done THEN LEAVE read_loop; END IF; END LOOP; CLOSE cur; END", - "statement_metadata": { - "size": 66, - "tables": ["orders", "order_id"], - "commands": ["CREATE", "BEGIN", "SELECT"], - "comments": ["/* process each order */"], - "procedures": ["FetchOrders"] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-dynamic-sql.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-dynamic-sql.json deleted file mode 100644 index 42128dc7..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-dynamic-sql.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE PROCEDURE DynamicQuery(IN tbl_name VARCHAR(50)) BEGIN SET @s = CONCAT('SELECT * FROM ', tbl_name); PREPARE stmt FROM @s; EXECUTE stmt; DEALLOCATE PREPARE stmt; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE DynamicQuery ( IN tbl_name VARCHAR ( ? ) ) BEGIN SET @s = CONCAT ( ?, tbl_name ); PREPARE stmt FROM @s; EXECUTE stmt; DEALLOCATE PREPARE stmt; END", - "statement_metadata": { - "size": 30, - "tables": [], - "commands": ["CREATE", "BEGIN", "EXECUTE"], - "comments": [], - "procedures": ["DynamicQuery"] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-error-handling.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-error-handling.json deleted file mode 100644 index 82fdd16b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-error-handling.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE PROCEDURE SafeUpdate(IN order_id INT, IN new_status VARCHAR(50)) BEGIN DECLARE EXIT HANDLER FOR SQLEXCEPTION BEGIN -- handle error\n SET @error = 'An error occurred'; END; UPDATE orders SET status = new_status WHERE id = order_id; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE SafeUpdate ( IN order_id INT, IN new_status VARCHAR ( ? 
) ) BEGIN DECLARE EXIT HANDLER FOR SQLEXCEPTION BEGIN SET @error = ?; END; UPDATE orders SET status = new_status WHERE id = order_id; END", - "statement_metadata": { - "size": 48, - "tables": ["orders"], - "commands": ["CREATE", "BEGIN", "UPDATE"], - "comments": ["-- handle error"], - "procedures": ["SafeUpdate"] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-input-output-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-input-output-parameters.json deleted file mode 100644 index 26711350..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-input-output-parameters.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE PROCEDURE GetTotalOrders(OUT total INT) BEGIN SELECT COUNT(*) INTO total FROM orders; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE GetTotalOrders ( OUT total INT ) BEGIN SELECT COUNT ( * ) INTO total FROM orders; END", - "statement_metadata": { - "size": 42, - "tables": ["total", "orders"], - "commands": ["CREATE", "BEGIN", "SELECT"], - "comments": [], - "procedures": ["GetTotalOrders"] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-loop-control.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-loop-control.json deleted file mode 100644 index fc8f688d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-loop-control.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "CREATE PROCEDURE ProcessOrders() BEGIN DECLARE done INT DEFAULT FALSE; DECLARE cur CURSOR FOR SELECT id FROM orders; DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = TRUE; OPEN cur; read_loop: LOOP FETCH cur INTO order_id; IF done THEN LEAVE read_loop; END IF; UPDATE orders SET status = 'Processed' WHERE id = order_id; END LOOP; CLOSE cur; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE ProcessOrders ( ) BEGIN DECLARE done INT DEFAULT ?; DECLARE cur CURSOR FOR SELECT id FROM orders; DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = ?; OPEN cur; read_loop : LOOP FETCH cur INTO order_id; IF done THEN LEAVE read_loop; END IF; UPDATE orders SET status = ? WHERE id = order_id; END LOOP; CLOSE cur; END", - "statement_metadata": { - "size": 50, - "tables": ["orders", "order_id"], - "commands": ["CREATE", "BEGIN", "SELECT", "UPDATE"], - "comments": [], - "procedures": ["ProcessOrders"] - } - } - ] -} diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-parameters.json deleted file mode 100644 index fc98aa4f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-parameters.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "CREATE PROCEDURE GetOrdersByStatus(IN status VARCHAR(20)) BEGIN SELECT * FROM orders WHERE orders.status = status; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE GetOrdersByStatus ( IN status VARCHAR ( ? 
) ) BEGIN SELECT * FROM orders WHERE orders.status = status; END", - "statement_metadata": { - "size": 40, - "tables": ["orders"], - "commands": ["CREATE", "BEGIN", "SELECT"], - "comments": [], - "procedures": ["GetOrdersByStatus"] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-transaction-management.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-transaction-management.json deleted file mode 100644 index 75aace98..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/procedure/stored-procedure-transaction-management.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "CREATE PROCEDURE UpdateOrderTransaction(IN order_id INT, IN new_status VARCHAR(50)) BEGIN DECLARE EXIT HANDLER FOR SQLEXCEPTION BEGIN ROLLBACK; END; START TRANSACTION; UPDATE orders SET status = new_status WHERE id = order_id; COMMIT; END;", - "outputs": [ - { - "expected": "CREATE PROCEDURE UpdateOrderTransaction ( IN order_id INT, IN new_status VARCHAR ( ? ) ) BEGIN DECLARE EXIT HANDLER FOR SQLEXCEPTION BEGIN ROLLBACK; END; START TRANSACTION; UPDATE orders SET status = new_status WHERE id = order_id; COMMIT; END", - "statement_metadata": { - "size": 51, - "tables": ["orders"], - "commands": ["CREATE", "BEGIN", "UPDATE", "COMMIT"], - "comments": [], - "procedures": ["UpdateOrderTransaction"] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/bit-data-type.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/bit-data-type.json deleted file mode 100644 index c39da7ed..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/bit-data-type.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, status, (is_paid & 1) AS isPaidFlag FROM orders;", - "outputs": [ - { - "expected": "SELECT id, status, ( is_paid & ? ) FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/blob-text-data-types.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/blob-text-data-types.json deleted file mode 100644 index d80018d4..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/blob-text-data-types.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, SUBSTRING(order_notes, 1, 100) AS short_notes FROM orders WHERE LENGTH(document_blob) > 1024;", - "outputs": [ - { - "expected": "SELECT id, SUBSTRING ( order_notes, ?, ? ) FROM orders WHERE LENGTH ( document_blob ) > ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/decimal-data-type.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/decimal-data-type.json deleted file mode 100644 index e3b65579..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/decimal-data-type.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, ROUND(total_amount, 2) AS rounded_total FROM orders;", - "outputs": [ - { - "expected": "SELECT id, ROUND ( total_amount, ? 
) FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/enum-set-data-types.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/enum-set-data-types.json deleted file mode 100644 index cfccc93b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/enum-set-data-types.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, order_type, status_flags FROM order_details WHERE order_type = 'Standard' AND FIND_IN_SET('urgent', status_flags);", - "outputs": [ - { - "expected": "SELECT id, order_type, status_flags FROM order_details WHERE order_type = ? AND FIND_IN_SET ( ?, status_flags )", - "statement_metadata": { - "size": 19, - "tables": ["order_details"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/full-text-search-innodb.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/full-text-search-innodb.json deleted file mode 100644 index 1374d833..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/full-text-search-innodb.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE FULLTEXT INDEX ft_index ON orders (description); SELECT * FROM orders WHERE MATCH(description) AGAINST ('+delivery -return' IN BOOLEAN MODE);", - "outputs": [ - { - "expected": "CREATE FULLTEXT INDEX ft_index ON orders ( description ); SELECT * FROM orders WHERE MATCH ( description ) AGAINST ( ? IN BOOLEAN MODE )", - "statement_metadata": { - "size": 18, - "tables": ["orders"], - "commands": ["CREATE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-aggregate-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-aggregate-functions.json deleted file mode 100644 index a8880b7d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-aggregate-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT customer_id, COUNT(*) AS total_orders FROM orders GROUP BY customer_id HAVING COUNT(*) > 5;", - "outputs": [ - { - "expected": "SELECT customer_id, COUNT ( * ) FROM orders GROUP BY customer_id HAVING COUNT ( * ) > ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-basic.json deleted file mode 100644 index be6c6062..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, customer_id, order_date, status FROM orders WHERE status = 'Pending';", - "outputs": [ - { - "expected": "SELECT id, customer_id, order_date, status FROM orders WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-case-statement.json 
b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-case-statement.json deleted file mode 100644 index 68202df4..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-case-statement.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, CASE WHEN status = 'Pending' THEN 'P' WHEN status = 'Completed' THEN 'C' ELSE 'Other' END AS status_code FROM orders;", - "outputs": [ - { - "expected": "SELECT id, CASE WHEN status = ? THEN ? WHEN status = ? THEN ? ELSE ? END FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-coalesce-function.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-coalesce-function.json deleted file mode 100644 index d6dc561d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-coalesce-function.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, COALESCE(comments, 'No comments') AS order_comments FROM orders;", - "outputs": [ - { - "expected": "SELECT id, COALESCE ( comments, ? ) FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-conditional-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-conditional-case.json deleted file mode 100644 index 68202df4..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-conditional-case.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, CASE WHEN status = 'Pending' THEN 'P' WHEN status = 'Completed' THEN 'C' ELSE 'Other' END AS status_code FROM orders;", - "outputs": [ - { - "expected": "SELECT id, CASE WHEN status = ? THEN ? WHEN status = ? THEN ? ELSE ? 
END FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-date-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-date-functions.json deleted file mode 100644 index 482f9a26..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-date-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, YEAR(order_date) AS order_year FROM orders WHERE MONTH(order_date) = 1;", - "outputs": [ - { - "expected": "SELECT id, YEAR ( order_date ) FROM orders WHERE MONTH ( order_date ) = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-distinct.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-distinct.json deleted file mode 100644 index 6552154c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-distinct.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT DISTINCT status FROM orders;", - "outputs": [ - { - "expected": "SELECT DISTINCT status FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-full-text-search.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-full-text-search.json deleted file mode 100644 index c5dfd177..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-full-text-search.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, MATCH (description) AGAINST ('+shipping -delayed' IN BOOLEAN MODE) AS score FROM orders WHERE MATCH (description) AGAINST ('+shipping -delayed' IN BOOLEAN MODE) > 0;", - "outputs": [ - { - "expected": "SELECT id, MATCH ( description ) AGAINST ( ? IN BOOLEAN MODE ) FROM orders WHERE MATCH ( description ) AGAINST ( ? IN BOOLEAN MODE ) > ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-geospatial-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-geospatial-data.json deleted file mode 100644 index cb807f36..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-geospatial-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, ST_AsText(location) AS location FROM orders WHERE ST_Distance_Sphere(location, ST_GeomFromText('POINT(10 20)')) < 10000;", - "outputs": [ - { - "expected": "SELECT id, ST_AsText ( location ) FROM orders WHERE ST_Distance_Sphere ( location, ST_GeomFromText ( ? 
) ) < ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-group-concat.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-group-concat.json deleted file mode 100644 index 50c4d103..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-group-concat.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT customer_id, GROUP_CONCAT(status ORDER BY order_date DESC SEPARATOR ', ') AS order_statuses FROM orders GROUP BY customer_id;", - "outputs": [ - { - "expected": "SELECT customer_id, GROUP_CONCAT ( status ORDER BY order_date DESC SEPARATOR ? ) FROM orders GROUP BY customer_id", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-join-aliases.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-join-aliases.json deleted file mode 100644 index 54f27bf1..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-join-aliases.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT o.id, c.name AS customer_name, o.status FROM orders o LEFT JOIN customers c ON o.customer_id = c.id;", - "outputs": [ - { - "expected": "SELECT o.id, c.name, o.status FROM orders o LEFT JOIN customers c ON o.customer_id = c.id", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-join.json deleted file mode 100644 index 5aa1860f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT o.id, c.name, o.status FROM orders o JOIN customers c ON o.customer_id = c.id WHERE o.status = 'Completed';", - "outputs": [ - { - "expected": "SELECT o.id, c.name, o.status FROM orders o JOIN customers c ON o.customer_id = c.id WHERE o.status = ?", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-json-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-json-functions.json deleted file mode 100644 index 1e550315..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-json-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, JSON_EXTRACT(order_details, '$.items[0].name') AS first_item_name FROM orders WHERE JSON_CONTAINS(order_details, '\"Active\"', '$.status');", - "outputs": [ - { - "expected": "SELECT id, JSON_EXTRACT ( order_details, ? ) FROM orders WHERE JSON_CONTAINS ( order_details, ?, ? 
)", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-limit-offset.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-limit-offset.json deleted file mode 100644 index d7af084b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-limit-offset.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * FROM orders ORDER BY order_date DESC LIMIT 10 OFFSET 5;", - "outputs": [ - { - "expected": "SELECT * FROM orders ORDER BY order_date DESC LIMIT ? OFFSET ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-lock-in-share-mode.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-lock-in-share-mode.json deleted file mode 100644 index 703bf310..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-lock-in-share-mode.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * FROM orders WHERE status = 'Pending' LOCK IN SHARE MODE;", - "outputs": [ - { - "expected": "SELECT * FROM orders WHERE status = ? LOCK IN SHARE MODE", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-natural-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-natural-join.json deleted file mode 100644 index 9e526391..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-natural-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * FROM orders NATURAL JOIN customers;", - "outputs": [ - { - "expected": "SELECT * FROM orders NATURAL JOIN customers", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-parameter-binding.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-parameter-binding.json deleted file mode 100644 index 4db2716b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-parameter-binding.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, status FROM orders WHERE customer_id = ?;", - "outputs": [ - { - "expected": "SELECT id, status FROM orders WHERE customer_id = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-regex.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-regex.json deleted file mode 100644 index 969c50c6..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-regex.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, customer_id FROM orders WHERE status REGEXP '^Comp.*';", - "outputs": [ - { - "expected": "SELECT id, customer_id FROM orders WHERE status REGEXP ?", - "statement_metadata": { - 
"size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-straight-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-straight-join.json deleted file mode 100644 index eba10c9f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-straight-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT * FROM orders STRAIGHT_JOIN customers ON orders.customer_id = customers.id;", - "outputs": [ - { - "expected": "SELECT * FROM orders STRAIGHT_JOIN customers ON orders.customer_id = customers.id", - "statement_metadata": { - "size": 34, - "tables": ["orders", "customers"], - "commands": ["SELECT", "STRAIGHT_JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-string-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-string-functions.json deleted file mode 100644 index 6e3f96c1..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-string-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, UPPER(status) AS status_upper FROM orders;", - "outputs": [ - { - "expected": "SELECT id, UPPER ( status ) FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-subquery.json deleted file mode 100644 index 29290726..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-subquery.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, (SELECT name FROM customers WHERE id = orders.customer_id) AS customer_name FROM orders;", - "outputs": [ - { - "expected": "SELECT id, ( SELECT name FROM customers WHERE id = orders.customer_id ) FROM orders", - "statement_metadata": { - "size": 21, - "tables": ["customers", "orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-user-defined-variables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-user-defined-variables.json deleted file mode 100644 index 2593abf9..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-user-defined-variables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SET @orderRank := 0; SELECT @orderRank := @orderRank + 1 AS rank, id, status FROM orders ORDER BY id;", - "outputs": [ - { - "expected": "SET @orderRank := ?; SELECT @orderRank := @orderRank + ?, id, status FROM orders ORDER BY id", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-variable-assignment.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-variable-assignment.json deleted file mode 100644 index 44f417c3..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-variable-assignment.json +++ /dev/null @@ 
-1,16 +0,0 @@ -{ - "input": "SELECT @orderCount := COUNT(*) FROM orders WHERE status = 'Completed';", - "outputs": [ - { - "expected": "SELECT @orderCount := COUNT ( * ) FROM orders WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-window-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-window-functions.json deleted file mode 100644 index 981fa153..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/select-window-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, status, RANK() OVER (PARTITION BY customer_id ORDER BY order_date DESC) AS rank FROM orders;", - "outputs": [ - { - "expected": "SELECT id, status, RANK ( ) OVER ( PARTITION BY customer_id ORDER BY order_date DESC ) FROM orders", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/spatial-data-types-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/spatial-data-types-functions.json deleted file mode 100644 index cb807f36..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/spatial-data-types-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, ST_AsText(location) AS location FROM orders WHERE ST_Distance_Sphere(location, ST_GeomFromText('POINT(10 20)')) < 10000;", - "outputs": [ - { - "expected": "SELECT id, ST_AsText ( location ) FROM orders WHERE ST_Distance_Sphere ( location, ST_GeomFromText ( ? ) ) < ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/spatial-geometry-data-types.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/spatial-geometry-data-types.json deleted file mode 100644 index 75160756..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/spatial-geometry-data-types.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, ST_AsText(location) FROM orders WHERE ST_Distance(location, ST_GeomFromText('POINT(1 1)')) < 100;", - "outputs": [ - { - "expected": "SELECT id, ST_AsText ( location ) FROM orders WHERE ST_Distance ( location, ST_GeomFromText ( ? ) ) < ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/system-versioned-tables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/system-versioned-tables.json deleted file mode 100644 index ee383187..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/system-versioned-tables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE TABLE orders_with_history (id INT, status VARCHAR(20)) WITH SYSTEM VERSIONING;", - "outputs": [ - { - "expected": "CREATE TABLE orders_with_history ( id INT, status VARCHAR ( ? 
) ) WITH SYSTEM VERSIONING", - "statement_metadata": { - "size": 25, - "tables": ["orders_with_history"], - "commands": ["CREATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/using-temporary-tables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/using-temporary-tables.json deleted file mode 100644 index dd436566..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/using-temporary-tables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE TEMPORARY TABLE temp_orders SELECT * FROM orders; SELECT * FROM temp_orders WHERE status = 'Pending'; DROP TEMPORARY TABLE temp_orders;", - "outputs": [ - { - "expected": "CREATE TEMPORARY TABLE temp_orders SELECT * FROM orders; SELECT * FROM temp_orders WHERE status = ?; DROP TEMPORARY TABLE temp_orders", - "statement_metadata": { - "size": 33, - "tables": ["temp_orders", "orders"], - "commands": ["CREATE", "SELECT", "DROP"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/virtual-generated-columns.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/virtual-generated-columns.json deleted file mode 100644 index a03d91de..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/select/virtual-generated-columns.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE TABLE orders_with_virtual (id INT, amount DECIMAL(10, 2), total_incl_tax DECIMAL(10, 2) GENERATED ALWAYS AS (amount * 1.1) STORED);", - "outputs": [ - { - "expected": "CREATE TABLE orders_with_virtual ( id INT, amount DECIMAL ( ? ), total_incl_tax DECIMAL ( ? ) GENERATED ALWAYS AS ( amount * ? ) STORED )", - "statement_metadata": { - "size": 25, - "tables": ["orders_with_virtual"], - "commands": ["CREATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/bulk-update-multiple-conditions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/bulk-update-multiple-conditions.json deleted file mode 100644 index 2e236f7a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/bulk-update-multiple-conditions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = IF(amount > 1000, 'High Value', 'Regular'), order_date = IF(status = 'Pending', CURDATE(), order_date);", - "outputs": [ - { - "expected": "UPDATE orders SET status = IF ( amount > ?, ?, ? ), order_date = IF ( status = ?, CURDATE ( ), order_date )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/conditional-update-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/conditional-update-case.json deleted file mode 100644 index 38bfb7a0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/conditional-update-case.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = CASE WHEN amount > 100 THEN 'High Value' ELSE 'Regular' END;", - "outputs": [ - { - "expected": "UPDATE orders SET status = CASE WHEN amount > ? THEN ? ELSE ? 
END", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-basic.json deleted file mode 100644 index 480f7a79..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-basic.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Completed' WHERE status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-case-aggregate-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-case-aggregate-functions.json deleted file mode 100644 index adf83e96..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-case-aggregate-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders o SET o.status = CASE WHEN avg_amount > 500 THEN 'High' ELSE 'Low' END FROM (SELECT customer_id, AVG(amount) as avg_amount FROM orders GROUP BY customer_id) a WHERE o.customer_id = a.customer_id;", - "outputs": [ - { - "expected": "UPDATE orders o SET o.status = CASE WHEN avg_amount > ? THEN ? ELSE ? END FROM ( SELECT customer_id, AVG ( amount ) FROM orders GROUP BY customer_id ) a WHERE o.customer_id = a.customer_id", - "statement_metadata": { - "size": 18, - "tables": ["orders"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-date-time-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-date-time-functions.json deleted file mode 100644 index 3155a4aa..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-date-time-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET order_date = CURDATE(), order_time = CURTIME() WHERE status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE orders SET order_date = CURDATE ( ), order_time = CURTIME ( ) WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-encryption-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-encryption-functions.json deleted file mode 100644 index 06ebd690..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-encryption-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET encrypted_note = AES_ENCRYPT('Confidential', 'key') WHERE id = 1;", - "outputs": [ - { - "expected": "UPDATE orders SET encrypted_note = AES_ENCRYPT ( ? 
) WHERE id = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-enum-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-enum-data.json deleted file mode 100644 index bd645304..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-enum-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET order_type = 'Standard' WHERE order_type = 'Express';", - "outputs": [ - { - "expected": "UPDATE orders SET order_type = ? WHERE order_type = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-json-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-json-functions.json deleted file mode 100644 index d28de61b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-json-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET details = JSON_SET(details, '$.shippingMethod', 'Express') WHERE id = 1;", - "outputs": [ - { - "expected": "UPDATE orders SET details = JSON_SET ( details, ?, ? ) WHERE id = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-json-modify.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-json-modify.json deleted file mode 100644 index 56cb60e0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-json-modify.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET details = JSON_SET(details, '$.status', 'Updated') WHERE JSON_EXTRACT(details, '$.priority') = 'High';", - "outputs": [ - { - "expected": "UPDATE orders SET details = JSON_SET ( details, ?, ? ) WHERE JSON_EXTRACT ( details, ? ) = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-lock-tables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-lock-tables.json deleted file mode 100644 index 1aa721bc..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-lock-tables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "LOCK TABLES orders WRITE; UPDATE orders SET status = 'Cancelled' WHERE status = 'Pending'; UNLOCK TABLES;", - "outputs": [ - { - "expected": "LOCK TABLES orders WRITE; UPDATE orders SET status = ? 
WHERE status = ?; UNLOCK TABLES", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-math-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-math-functions.json deleted file mode 100644 index c5fe1b53..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-math-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET amount = amount * 1.1 WHERE status = 'Completed';", - "outputs": [ - { - "expected": "UPDATE orders SET amount = amount * ? WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-optimizing-conditions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-optimizing-conditions.json deleted file mode 100644 index 56f3924b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-optimizing-conditions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Archived' WHERE status = 'Completed' AND order_date < DATE_SUB(NOW(), INTERVAL 1 YEAR);", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? WHERE status = ? AND order_date < DATE_SUB ( NOW ( ), INTERVAL ? YEAR )", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-order-by-limit.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-order-by-limit.json deleted file mode 100644 index 28f2fa3a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-order-by-limit.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Cancelled' WHERE status = 'Pending' ORDER BY order_date ASC LIMIT 10;", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? WHERE status = ? ORDER BY order_date ASC LIMIT ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-regular-expressions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-regular-expressions.json deleted file mode 100644 index 2863fb28..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-regular-expressions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Query' WHERE status REGEXP '^Q.*';", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? 
WHERE status REGEXP ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-spatial-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-spatial-data.json deleted file mode 100644 index e93dd7b3..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-spatial-data.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET location = ST_GeomFromText('POINT(1 1)') WHERE id = 1;", - "outputs": [ - { - "expected": "UPDATE orders SET location = ST_GeomFromText ( ? ) WHERE id = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-string-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-string-functions.json deleted file mode 100644 index 6a219694..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-string-functions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = CONCAT(status, ' - Updated') WHERE id = 1;", - "outputs": [ - { - "expected": "UPDATE orders SET status = CONCAT ( status, ? ) WHERE id = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-user-defined-variables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-user-defined-variables.json deleted file mode 100644 index b204f0ca..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-user-defined-variables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SET @new_status = 'Delayed'; UPDATE orders SET status = @new_status WHERE status = 'Pending';", - "outputs": [ - { - "expected": "SET @new_status = ?; UPDATE orders SET status = @new_status WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-using-variables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-using-variables.json deleted file mode 100644 index e79e587f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-using-variables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SET @new_status = 'Shipped'; UPDATE orders SET status = @new_status WHERE status = 'Processing';", - "outputs": [ - { - "expected": "SET @new_status = ?; UPDATE orders SET status = @new_status WHERE status = ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-with-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-with-join.json deleted file mode 100644 index 81da8f15..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-with-join.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": 
"UPDATE orders o JOIN customers c ON o.customer_id = c.id SET o.status = 'Processing' WHERE c.region = 'East';", - "outputs": [ - { - "expected": "UPDATE orders o JOIN customers c ON o.customer_id = c.id SET o.status = ? WHERE c.region = ?", - "statement_metadata": { - "size": 25, - "tables": ["orders", "customers"], - "commands": ["UPDATE", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-with-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-with-subquery.json deleted file mode 100644 index 9cdd8707..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/mysql/update/update-with-subquery.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "UPDATE orders SET status = 'Archived' WHERE id IN (SELECT id FROM orders WHERE order_date < '2020-01-01');", - "outputs": [ - { - "expected": "UPDATE orders SET status = ? WHERE id IN ( SELECT id FROM orders WHERE order_date < ? )", - "statement_metadata": { - "size": 18, - "tables": ["orders"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/bulk-operations.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/bulk-operations.json deleted file mode 100644 index ea1e6291..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/bulk-operations.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "DECLARE TYPE EmpTabTyp IS TABLE OF employees%ROWTYPE INDEX BY PLS_INTEGER; emp_tab EmpTabTyp; BEGIN SELECT * BULK COLLECT INTO emp_tab FROM employees; FORALL i IN emp_tab.FIRST .. emp_tab.LAST SAVE EXCEPTIONS UPDATE employees SET test = test * 1.05 WHERE employee_id = emp_tab(i).employee_id; END;", - "outputs": [ - { - "expected": "DECLARE TYPE EmpTabTyp IS TABLE OF employees % ROWTYPE INDEX BY PLS_INTEGER; emp_tab EmpTabTyp; BEGIN SELECT * BULK COLLECT INTO emp_tab FROM employees; FORALL i IN emp_tab.FIRST . . emp_tab.LAST SAVE EXCEPTIONS UPDATE employees SET test = test * ? WHERE employee_id = emp_tab(i) . 
employee_id; END;", - "statement_metadata": { - "size": 33, - "tables": ["emp_tab", "employees"], - "commands": ["BEGIN", "SELECT", "UPDATE"], - "comments": [], - "procedures": [] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-multi-table-delete.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-multi-table-delete.json deleted file mode 100644 index 234ce92e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-multi-table-delete.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders o WHERE o.customer_id IN (SELECT c.id FROM customers c WHERE NOT EXISTS (SELECT 1 FROM customer_orders co WHERE co.customer_id = c.id AND co.order_date > SYSDATE - 365)) AND EXISTS (SELECT 1 FROM order_items oi WHERE oi.order_id = o.id AND oi.product_id IN (SELECT p.id FROM products p WHERE p.category = 'Obsolete'));", - "outputs": [ - { - "expected": "DELETE FROM orders o WHERE o.customer_id IN ( SELECT c.id FROM customers c WHERE NOT EXISTS ( SELECT ? FROM customer_orders co WHERE co.customer_id = c.id AND co.order_date > SYSDATE - ? ) ) AND EXISTS ( SELECT ? FROM order_items oi WHERE oi.order_id = o.id AND oi.product_id IN ( SELECT p.id FROM products p WHERE p.category = ? ) )", - "statement_metadata": { - "size": 61, - "tables": ["orders", "customers", "customer_orders", "order_items", "products"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-nested-subqueries.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-nested-subqueries.json deleted file mode 100644 index 8ef8b456..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-nested-subqueries.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT e.employee_id, (SELECT MAX(s.yoe) FROM employees s WHERE s.department_id = e.department_id) AS max_dept_yoe FROM employees e WHERE EXISTS (SELECT 1 FROM departments d WHERE d.id = e.department_id AND d.budget > (SELECT AVG(budget) FROM departments)) ORDER BY e.department_id, e.employee_id;", - "outputs": [ - { - "expected": "SELECT e.employee_id, ( SELECT MAX ( s.yoe ) FROM employees s WHERE s.department_id = e.department_id ) FROM employees e WHERE EXISTS ( SELECT ? 
FROM departments d WHERE d.id = e.department_id AND d.budget > ( SELECT AVG ( budget ) FROM departments ) ) ORDER BY e.department_id, e.employee_id", - "statement_metadata": { - "size": 26, - "tables": ["employees", "departments"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-select-aggregates-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-select-aggregates-joins.json deleted file mode 100644 index c3375c00..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/complex-select-aggregates-joins.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT u.id, u.name, COUNT(o.id) AS order_count, AVG(o.total) AS average_order FROM users u JOIN orders o ON u.id = o.user_id WHERE u.status = 'active' GROUP BY u.id, u.name HAVING COUNT(o.id) > 5;", - "outputs": [ - { - "expected": "SELECT u.id, u.name, COUNT ( o.id ), AVG ( o.total ) FROM users u JOIN orders o ON u.id = o.user_id WHERE u.status = ? GROUP BY u.id, u.name HAVING COUNT ( o.id ) > ?", - "statement_metadata": { - "size": 21, - "tables": ["users", "orders"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT u.id, u.name, COUNT(o.id), AVG(o.total) FROM users u JOIN orders o ON u.id = o.user_id WHERE u.status = ? GROUP BY u.id, u.name HAVING COUNT(o.id) > ?;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/extremely-complex-oracle-query.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/extremely-complex-oracle-query.json deleted file mode 100644 index 3c178aa4..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/extremely-complex-oracle-query.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "WITH RECURSIVE sales_cte (product_id, total_sales, sales_rank) AS (SELECT product_id, SUM(amount), RANK() OVER (ORDER BY SUM(amount) DESC) FROM sales GROUP BY product_id UNION ALL SELECT s.product_id, s.total_sales, s.sales_rank FROM sales s JOIN sales_cte sc ON s.product_id = sc.product_id WHERE s.amount > 1000), complex_view AS (SELECT e.employee_id, e.department_id, e.test_amt, AVG(e.test_amt) OVER (PARTITION BY e.department_id) AS avg_dept_test_amt, d.department_name, d.manager_id, (SELECT MAX(p.price) FROM products p WHERE p.department_id = e.department_id) AS max_product_price FROM employees e JOIN departments d ON e.department_id = d.id WHERE e.hire_date > SYSDATE - INTERVAL '10' YEAR) SELECT cv.*, sc.total_sales, sc.sales_rank FROM complex_view cv LEFT JOIN sales_cte sc ON cv.department_id = sc.product_id WHERE cv.avg_dept_test_amt > (SELECT AVG(total_sal) FROM (SELECT department_id, SUM(test_amt) AS total_sal FROM employees GROUP BY department_id)) AND EXISTS (SELECT 1 FROM customer_orders co WHERE co.employee_id = cv.employee_id AND co.order_status = 'Completed') ORDER BY cv.department_id, cv.test_amt DESC;", - "outputs": [ - { - "expected": "WITH RECURSIVE sales_cte ( product_id, total_sales, sales_rank ) AS ( SELECT product_id, SUM ( amount ), RANK ( ) OVER ( ORDER BY SUM ( amount ) DESC ) FROM sales GROUP BY product_id UNION ALL SELECT s.product_id, s.total_sales, s.sales_rank FROM sales s JOIN sales_cte sc ON s.product_id = sc.product_id WHERE s.amount > ? 
), complex_view AS ( SELECT e.employee_id, e.department_id, e.test_amt, AVG ( e.test_amt ) OVER ( PARTITION BY e.department_id ), d.department_name, d.manager_id, ( SELECT MAX ( p.price ) FROM products p WHERE p.department_id = e.department_id ) FROM employees e JOIN departments d ON e.department_id = d.id WHERE e.hire_date > SYSDATE - INTERVAL ? YEAR ) SELECT cv. *, sc.total_sales, sc.sales_rank FROM complex_view cv LEFT JOIN sales_cte sc ON cv.department_id = sc.product_id WHERE cv.avg_dept_test_amt > ( SELECT AVG ( total_sal ) FROM ( SELECT department_id, SUM ( test_amt ) FROM employees GROUP BY department_id ) ) AND EXISTS ( SELECT ? FROM customer_orders co WHERE co.employee_id = cv.employee_id AND co.order_status = ? ) ORDER BY cv.department_id, cv.test_amt DESC", - "statement_metadata": { - "size": 79, - "tables": ["sales", "sales_cte", "products", "employees", "departments", "complex_view", "customer_orders"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/extremely-complex-stored-procedure.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/extremely-complex-stored-procedure.json deleted file mode 100644 index 7a4f29f4..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/extremely-complex-stored-procedure.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "CREATE OR REPLACE PROCEDURE /* test comments \n\tsecond line \n*/ complex_data_audit AS CURSOR emp_cursor IS SELECT employee_id FROM employees; /* another comment */ v_employee_id employees.employee_id%TYPE; BEGIN FOR emp_record IN emp_cursor LOOP v_employee_id := emp_record.employee_id; INSERT INTO audit_log (message) VALUES ('Auditing employee with ID: ' || v_employee_id); FOR c IN (SELECT * FROM customer_orders WHERE employee_id = v_employee_id) LOOP IF c.order_status = 'Pending' THEN UPDATE customer_orders SET order_status = 'Under Review' WHERE order_id = c.order_id; ELSE INSERT INTO audit_log (message) VALUES ('Order ' || c.order_id || ' already processed'); END IF; END LOOP; END LOOP; EXCEPTION WHEN OTHERS THEN RAISE_APPLICATION_ERROR(-20002, 'Error in complex_data_audit'); END complex_data_audit;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PROCEDURE complex_data_audit emp_cursor IS SELECT employee_id FROM employees; v_employee_id employees.employee_id % TYPE; BEGIN FOR emp_record IN emp_cursor LOOP v_employee_id := emp_record.employee_id; INSERT INTO audit_log ( message ) VALUES ( ? || v_employee_id ); FOR c IN ( SELECT * FROM customer_orders WHERE employee_id = v_employee_id ) LOOP IF c.order_status = ? THEN UPDATE customer_orders SET order_status = ? WHERE order_id = c.order_id; ELSE INSERT INTO audit_log ( message ) VALUES ( ? || c.order_id || ? ); END IF; END LOOP; END LOOP; EXCEPTION WHEN OTHERS THEN RAISE_APPLICATION_ERROR ( ? 
); END complex_data_audit", - "statement_metadata": { - "size": 135, - "tables": ["employees", "audit_log", "customer_orders"], - "commands": ["CREATE", "SELECT", "BEGIN", "INSERT", "UPDATE"], - "comments": ["/* test comments \n\tsecond line \n*/", "/* another comment */"], - "procedures": ["complex_data_audit"] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/plsql-blocks.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/plsql-blocks.json deleted file mode 100644 index 9e098025..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/plsql-blocks.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DECLARE x NUMBER; BEGIN SELECT COUNT(*) INTO x FROM employees; DBMS_OUTPUT.PUT_LINE('Count: ' || x); END;", - "outputs": [ - { - "expected": "DECLARE x NUMBER; BEGIN SELECT COUNT ( * ) INTO x FROM employees; DBMS_OUTPUT.PUT_LINE ( ? || x ); END", - "statement_metadata": { - "size": 21, - "tables": ["x", "employees"], - "commands": ["BEGIN", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/super-complex-oracle-query.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/super-complex-oracle-query.json deleted file mode 100644 index c1564cce..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/complex/super-complex-oracle-query.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "WITH ranked_sales AS (SELECT product_id, SUM(amount) AS total_sales, RANK() OVER (ORDER BY SUM(amount) DESC) sales_rank FROM sales GROUP BY product_id), dept_costs AS (SELECT department_id, SUM(test_amt) AS total_sal FROM employees GROUP BY department_id), latest_transactions AS (SELECT t.account_id, t.amount, ROW_NUMBER() OVER (PARTITION BY t.account_id ORDER BY t.transaction_date DESC) rn FROM transactions t WHERE t.transaction_date >= ADD_MONTHS(SYSDATE, -6)) SELECT e.employee_id, e.last_name, e.test_amt, d.department_name, d.location_id, rs.total_sales, rs.sales_rank, lt.amount AS latest_transaction_amount FROM employees e INNER JOIN departments d ON e.department_id = d.id LEFT JOIN ranked_sales rs ON e.product_id = rs.product_id LEFT JOIN latest_transactions lt ON e.account_id = lt.account_id AND lt.rn = 1 WHERE e.hire_date > '2010-01-01' AND (d.budget > (SELECT AVG(total_sal) FROM dept_costs) OR e.test_amt > (SELECT AVG(test_amt) FROM employees WHERE department_id = e.department_id)) AND EXISTS (SELECT 1 FROM customer_orders co WHERE co.employee_id = e.employee_id AND co.order_status = 'Completed') ORDER BY e.department_id, e.test_amt DESC;", - "outputs": [ - { - "expected": "WITH ranked_sales AS ( SELECT product_id, SUM ( amount ), RANK ( ) OVER ( ORDER BY SUM ( amount ) DESC ) sales_rank FROM sales GROUP BY product_id ), dept_costs AS ( SELECT department_id, SUM ( test_amt ) FROM employees GROUP BY department_id ), latest_transactions AS ( SELECT t.account_id, t.amount, ROW_NUMBER ( ) OVER ( PARTITION BY t.account_id ORDER BY t.transaction_date DESC ) rn FROM transactions t WHERE t.transaction_date >= ADD_MONTHS ( SYSDATE, ? ) ) SELECT e.employee_id, e.last_name, e.test_amt, d.department_name, d.location_id, rs.total_sales, rs.sales_rank, lt.amount FROM employees e INNER JOIN departments d ON e.department_id = d.id LEFT JOIN ranked_sales rs ON e.product_id = rs.product_id LEFT JOIN latest_transactions lt ON e.account_id = lt.account_id AND lt.rn = ? 
WHERE e.hire_date > ? AND ( d.budget > ( SELECT AVG ( total_sal ) FROM dept_costs ) OR e.test_amt > ( SELECT AVG ( test_amt ) FROM employees WHERE department_id = e.department_id ) ) AND EXISTS ( SELECT ? FROM customer_orders co WHERE co.employee_id = e.employee_id AND co.order_status = ? ) ORDER BY e.department_id, e.test_amt DESC", - "statement_metadata": { - "size": 103, - "tables": ["sales", "employees", "transactions", "departments", "ranked_sales", "latest_transactions", "dept_costs", "customer_orders"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/conditional-delete-with-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/conditional-delete-with-case.json deleted file mode 100644 index a9f54046..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/conditional-delete-with-case.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM user_notifications WHERE id IN (SELECT id FROM notifications WHERE recipient_id = 123 AND status = CASE WHEN urgency = 'High' THEN 'Unread' ELSE 'Read' END);", - "outputs": [ - { - "expected": "DELETE FROM user_notifications WHERE id IN ( SELECT id FROM notifications WHERE recipient_id = ? AND status = CASE WHEN urgency = ? THEN ? ELSE ? END )", - "statement_metadata": { - "size": 43, - "tables": ["user_notifications", "notifications"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-basic.json deleted file mode 100644 index ab3115cd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM customers WHERE last_purchase_date < ADD_MONTHS(SYSDATE, -12);", - "outputs": [ - { - "expected": "DELETE FROM customers WHERE last_purchase_date < ADD_MONTHS ( SYSDATE, ? )", - "statement_metadata": { - "size": 15, - "tables": ["customers"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-cascade.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-cascade.json deleted file mode 100644 index a0b7a7bd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-cascade.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE customer_id = 456 CASCADE;", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE customer_id = ? 
CASCADE", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-using-rowid.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-using-rowid.json deleted file mode 100644 index 90ab26d6..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-using-rowid.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM employees WHERE rowid = (SELECT max(rowid) FROM employees WHERE department_id = 20);", - "outputs": [ - { - "expected": "DELETE FROM employees WHERE rowid = ( SELECT max ( rowid ) FROM employees WHERE department_id = ? )", - "statement_metadata": { - "size": 21, - "tables": ["employees"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-where-current-of.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-where-current-of.json deleted file mode 100644 index 4752fe7b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-where-current-of.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM employees WHERE CURRENT OF emp_cursor;", - "outputs": [ - { - "expected": "DELETE FROM employees WHERE CURRENT OF emp_cursor", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-complex-subqueries.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-complex-subqueries.json deleted file mode 100644 index c6fc3ee6..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-complex-subqueries.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM products WHERE id IN (SELECT p.id FROM products p JOIN inventory i ON p.id = i.product_id WHERE i.quantity = 0);", - "outputs": [ - { - "expected": "DELETE FROM products WHERE id IN ( SELECT p.id FROM products p JOIN inventory i ON p.id = i.product_id WHERE i.quantity = ? )", - "statement_metadata": { - "size": 33, - "tables": ["products", "inventory"], - "commands": ["DELETE", "SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-flashback-query.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-flashback-query.json deleted file mode 100644 index 7083c47c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-flashback-query.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders AS OF TIMESTAMP TO_TIMESTAMP('2023-03-15 08:30:00', 'YYYY-MM-DD HH24:MI:SS') WHERE order_date < '2023-01-01';", - "outputs": [ - { - "expected": "DELETE FROM orders AS OF TIMESTAMP TO_TIMESTAMP ( ? 
) WHERE order_date < ?", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-join-syntax.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-join-syntax.json deleted file mode 100644 index d3ec876d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-join-syntax.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM orders o WHERE EXISTS (SELECT 1 FROM customers c WHERE o.customer_id = c.id AND c.status = 'Inactive');", - "outputs": [ - { - "expected": "DELETE FROM orders o WHERE EXISTS ( SELECT ? FROM customers c WHERE o.customer_id = c.id AND c.status = ? )", - "statement_metadata": { - "size": 27, - "tables": ["orders", "customers"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-pseudocolumns.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-pseudocolumns.json deleted file mode 100644 index b1ab1b26..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-pseudocolumns.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM session_logs WHERE ROWNUM <= 10;", - "outputs": [ - { - "expected": "DELETE FROM session_logs WHERE ROWNUM <= ?", - "statement_metadata": { - "size": 18, - "tables": ["session_logs"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-returning-clause.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-returning-clause.json deleted file mode 100644 index 95be2ac0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-returning-clause.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "DELETE FROM logs WHERE entry_date < SYSDATE RETURNING id INTO :deleted_ids;", - "outputs": [ - { - "expected": "DELETE FROM logs WHERE entry_date < SYSDATE RETURNING id INTO :deleted_ids", - "statement_metadata": { - "size": 10, - "tables": ["logs"], - "commands": ["DELETE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-subquery.json deleted file mode 100644 index 7018cda6..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/delete/delete-with-subquery.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "DELETE FROM logs WHERE entry_date < (SELECT MIN(order_date) FROM orders);", - "outputs": [ - { - "expected": "DELETE FROM logs WHERE entry_date < ( SELECT MIN ( order_date ) FROM orders )", - "statement_metadata": { - "size": 22, - "tables": ["logs", "orders"], - "commands": ["DELETE", "SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "DELETE FROM logs WHERE entry_date < (SELECT MIN(order_date) FROM orders);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git 
a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-all-into-multiple-tables.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-all-into-multiple-tables.json deleted file mode 100644 index b7c00447..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-all-into-multiple-tables.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT ALL INTO sales (product_id, amount) VALUES (product_id, amount) INTO audit_log (action_type, message) VALUES ('INSERT', 'Inserted into sales') SELECT product_id, amount FROM temp_sales WHERE amount > 1000;", - "outputs": [ - { - "expected": "INSERT ALL INTO sales ( product_id, amount ) VALUES ( product_id, amount ) INTO audit_log ( action_type, message ) VALUES ( ? ) SELECT product_id, amount FROM temp_sales WHERE amount > ?", - "statement_metadata": { - "size": 36, - "tables": ["sales", "audit_log", "temp_sales"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-all-multiple-conditions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-all-multiple-conditions.json deleted file mode 100644 index e16049fd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-all-multiple-conditions.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT ALL WHEN amount <= 100 THEN INTO small_orders (order_id, amount) VALUES (order_id, amount) WHEN amount BETWEEN 101 AND 500 THEN INTO medium_orders (order_id, amount) VALUES (order_id, amount) ELSE INTO large_orders (order_id, amount) VALUES (order_id, amount) SELECT order_id, amount FROM orders;", - "outputs": [ - { - "expected": "INSERT ALL WHEN amount <= ? THEN INTO small_orders ( order_id, amount ) VALUES ( order_id, amount ) WHEN amount BETWEEN ? AND ? THEN INTO medium_orders ( order_id, amount ) VALUES ( order_id, amount ) ELSE INTO large_orders ( order_id, amount ) VALUES ( order_id, amount ) SELECT order_id, amount FROM orders", - "statement_metadata": { - "size": 55, - "tables": ["small_orders", "medium_orders", "large_orders", "orders"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-basic.json deleted file mode 100644 index 96a9f576..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-basic.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO customers (id, name, address) VALUES (101, 'John Doe', '123 Oracle Ln');", - "outputs": [ - { - "expected": "INSERT INTO customers ( id, name, address ) VALUES ( ? 
)", - "statement_metadata": { - "size": 15, - "tables": ["customers"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-using-decode.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-using-decode.json deleted file mode 100644 index 21c14044..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-using-decode.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO user_log (user_id, action, log_date) SELECT user_id, DECODE(activity_type, 'LOGIN', 'Logged In', 'LOGOUT', 'Logged Out', 'Unknown'), SYSDATE FROM user_activity;", - "outputs": [ - { - "expected": "INSERT INTO user_log ( user_id, action, log_date ) SELECT user_id, DECODE ( activity_type, ?, ?, ?, ?, ? ), SYSDATE FROM user_activity", - "statement_metadata": { - "size": 33, - "tables": ["user_log", "user_activity"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-column-ordering.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-column-ordering.json deleted file mode 100644 index 6e5baa5d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-column-ordering.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO customer_addresses (address, city, customer_id) VALUES ('123 Main St', 'Anytown', 456);", - "outputs": [ - { - "expected": "INSERT INTO customer_addresses ( address, city, customer_id ) VALUES ( ? )", - "statement_metadata": { - "size": 24, - "tables": ["customer_addresses"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-returning-clause.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-returning-clause.json deleted file mode 100644 index 10f8de21..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-returning-clause.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO transactions (account_id, amount) VALUES (123, 500) RETURNING transaction_id INTO :new_id;", - "outputs": [ - { - "expected": "INSERT INTO transactions ( account_id, amount ) VALUES ( ? ) RETURNING transaction_id INTO :new_id", - "statement_metadata": { - "size": 18, - "tables": ["transactions"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-select-union.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-select-union.json deleted file mode 100644 index 19405ecb..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-select-union.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO log (message) SELECT 'User logged in' FROM dual UNION ALL SELECT 'User performed an action' FROM dual;", - "outputs": [ - { - "expected": "INSERT INTO log ( message ) SELECT ? FROM dual UNION ALL SELECT ? 
FROM dual", - "statement_metadata": { - "size": 19, - "tables": ["log", "dual"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-sequence.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-sequence.json deleted file mode 100644 index 155a7e93..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-sequence.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT INTO products (id, name, price) VALUES (product_seq.NEXTVAL, 'New Product', 99.99);", - "outputs": [ - { - "expected": "INSERT INTO products ( id, name, price ) VALUES ( product_seq.NEXTVAL, ?, ? )", - "statement_metadata": { - "size": 14, - "tables": ["products"], - "commands": ["INSERT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-subquery.json deleted file mode 100644 index 56068700..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/insert-with-subquery.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "INSERT INTO orders (id, user_id, amount) SELECT order_seq.NEXTVAL, user_id, 100 FROM users WHERE status = 'active';", - "outputs": [ - { - "expected": "INSERT INTO orders ( id, user_id, amount ) SELECT order_seq.NEXTVAL, user_id, ? FROM users WHERE status = ?", - "statement_metadata": { - "size": 23, - "tables": ["orders", "users"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "INSERT INTO orders (id, user_id, amount) SELECT order_seq.NEXTVAL, user_id, ? FROM users WHERE status = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/multitable-insert-conditional.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/multitable-insert-conditional.json deleted file mode 100644 index e901ad84..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/insert/multitable-insert-conditional.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "INSERT FIRST INTO sales_audit (action) VALUES ('Sale occurred') WHEN amount > 1000 THEN INTO high_value_sales (sale_id, amount) VALUES (sale_id, amount) SELECT sale_id, amount FROM sales;", - "outputs": [ - { - "expected": "INSERT FIRST INTO sales_audit ( action ) VALUES ( ? ) WHEN amount > ? 
THEN INTO high_value_sales ( sale_id, amount ) VALUES ( sale_id, amount ) SELECT sale_id, amount FROM sales", - "statement_metadata": { - "size": 44, - "tables": ["sales_audit", "high_value_sales", "sales"], - "commands": ["INSERT", "SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-in-out-params.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-in-out-params.json deleted file mode 100644 index 989dbefa..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-in-out-params.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE PROCEDURE CalculateDiscount(p_order_id IN NUMBER, p_discount OUT NUMBER) AS total_amount NUMBER; BEGIN SELECT SUM(price * quantity) INTO total_amount FROM order_items WHERE order_id = p_order_id; p_discount := total_amount * 0.1; END CalculateDiscount;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PROCEDURE CalculateDiscount(p_order_id IN NUMBER, p_discount OUT NUMBER) NUMBER; BEGIN SELECT SUM(price * quantity) INTO total_amount FROM order_items WHERE order_id = p_order_id; p_discount := total_amount * ?; END CalculateDiscount;", - "statement_metadata": { - "size": 57, - "tables": ["total_amount", "order_items"], - "commands": ["CREATE", "BEGIN", "SELECT"], - "comments": [], - "procedures": ["CalculateDiscount"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-with-cursors.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-with-cursors.json deleted file mode 100644 index 7571ee8c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-with-cursors.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE PROCEDURE FetchCustomerOrders(p_customer_id IN NUMBER) IS CURSOR order_cursor IS SELECT * FROM orders WHERE customer_id = p_customer_id; order_rec order_cursor%ROWTYPE; BEGIN OPEN order_cursor; LOOP FETCH order_cursor INTO order_rec; EXIT WHEN order_cursor%NOTFOUND; END LOOP; CLOSE order_cursor; END FetchCustomerOrders;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PROCEDURE FetchCustomerOrders(p_customer_id IN NUMBER) IS CURSOR order_cursor IS SELECT * FROM orders WHERE customer_id = p_customer_id; order_rec order_cursor % ROWTYPE; BEGIN OPEN order_cursor; LOOP FETCH order_cursor INTO order_rec; EXIT WHEN order_cursor % NOTFOUND; END LOOP; CLOSE order_cursor; END FetchCustomerOrders;", - "statement_metadata": { - "size": 51, - "tables": ["orders", "order_rec"], - "commands": ["CREATE", "SELECT", "BEGIN"], - "comments": [], - "procedures": ["FetchCustomerOrders"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git 
a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-with-exception-handling.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-with-exception-handling.json deleted file mode 100644 index d7518955..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-procedure-with-exception-handling.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE PROCEDURE DeleteCustomer(p_customer_id IN NUMBER) AS BEGIN DELETE FROM customers WHERE id = p_customer_id; EXCEPTION WHEN OTHERS THEN RAISE_APPLICATION_ERROR(-20001, 'Error deleting customer.'); END DeleteCustomer;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PROCEDURE DeleteCustomer(p_customer_id IN NUMBER) AS BEGIN DELETE FROM customers WHERE id = p_customer_id; EXCEPTION WHEN OTHERS THEN RAISE_APPLICATION_ERROR(?); END DeleteCustomer;", - "statement_metadata": { - "size": 40, - "tables": ["customers"], - "commands": ["CREATE", "BEGIN", "DELETE"], - "comments": [], - "procedures": ["DeleteCustomer"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-simple-stored-procedure.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-simple-stored-procedure.json deleted file mode 100644 index 3bc5dd56..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/create-simple-stored-procedure.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE PROCEDURE UpdateOrderStatus(p_order_id IN NUMBER, p_status IN VARCHAR2) AS BEGIN UPDATE orders SET status = p_status WHERE order_id = p_order_id; END UpdateOrderStatus;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PROCEDURE UpdateOrderStatus(p_order_id IN NUMBER, p_status IN VARCHAR?) 
AS BEGIN UPDATE orders SET status = p_status WHERE order_id = p_order_id; END UpdateOrderStatus;", - "statement_metadata": { - "size": 40, - "tables": ["orders"], - "commands": ["CREATE", "BEGIN", "UPDATE"], - "comments": [], - "procedures": ["UpdateOrderStatus"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/error-handling-exception.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/error-handling-exception.json deleted file mode 100644 index d225325d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/error-handling-exception.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE PROCEDURE update_test_amt(p_employee_id NUMBER, p_change NUMBER) AS BEGIN UPDATE employees SET test_amt = test_amt + p_change WHERE employee_id = p_employee_id; EXCEPTION WHEN OTHERS THEN RAISE_APPLICATION_ERROR(-20001, 'Invalid test_amt update'); END;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PROCEDURE update_test_amt(p_employee_id NUMBER, p_change NUMBER) AS BEGIN UPDATE employees SET test_amt = test_amt + p_change WHERE employee_id = p_employee_id; EXCEPTION WHEN OTHERS THEN RAISE_APPLICATION_ERROR(?); END;", - "statement_metadata": { - "size": 41, - "tables": ["employees"], - "commands": ["CREATE", "BEGIN", "UPDATE"], - "comments": [], - "procedures": ["update_test_amt"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/invoke-stored-procedure-with-exec.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/invoke-stored-procedure-with-exec.json deleted file mode 100644 index a1abd418..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/invoke-stored-procedure-with-exec.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "EXEC UpdateOrderStatus(123, 'Shipped');", - "outputs": [ - { - "expected": "EXEC UpdateOrderStatus(?);", - "statement_metadata": { - "size": 4, - "tables": [], - "commands": ["EXEC"], - "comments": [], - "procedures": [] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/invoke-stored-procedure.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/invoke-stored-procedure.json deleted file mode 100644 index 91ad0a1a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/invoke-stored-procedure.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "BEGIN UpdateOrderStatus(123, 'Shipped'); END;", - "outputs": [ - { - "expected": "BEGIN UpdateOrderStatus(?); END;", - "statement_metadata": { - "size": 5, - "tables": [], - 
"commands": ["BEGIN"], - "comments": [], - "procedures": [] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/packages.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/packages.json deleted file mode 100644 index aa27903d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/packages.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE PACKAGE mgmt AS PROCEDURE test_proc_1(p_name VARCHAR2); PROCEDURE test_proc_2(p_id NUMBER); END mgmt;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PACKAGE mgmt AS PROCEDURE test_proc_1(p_name VARCHAR?); PROCEDURE test_proc_2(p_id NUMBER); END mgmt;", - "statement_metadata": { - "size": 28, - "tables": [], - "commands": ["CREATE"], - "comments": [], - "procedures": ["test_proc_1", "test_proc_2"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/pipelined-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/pipelined-functions.json deleted file mode 100644 index 5ec81089..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/pipelined-functions.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE FUNCTION get_departments RETURN dept_t PIPELINED AS BEGIN FOR r IN (SELECT * FROM departments) LOOP PIPE ROW(r); END LOOP; RETURN; END;", - "outputs": [ - { - "expected": "CREATE OR REPLACE FUNCTION get_departments RETURN dept_t PIPELINED AS BEGIN FOR r IN (SELECT * FROM departments) LOOP PIPE ROW(r); END LOOP; RETURN; END;", - "statement_metadata": { - "size": 28, - "tables": ["departments"], - "commands": ["CREATE", "BEGIN", "SELECT"], - "comments": [], - "procedures": [] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/stored-procedures-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/stored-procedures-functions.json deleted file mode 100644 index a6ffffbe..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/stored-procedures-functions.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE PROCEDURE get_employee_count(p_dept_id IN NUMBER, p_count OUT NUMBER) AS BEGIN SELECT COUNT(*) INTO p_count FROM employees WHERE department_id = p_dept_id; END; BEGIN get_employee_count(10, :count); END;", - "outputs": [ - { - "expected": "CREATE OR REPLACE PROCEDURE get_employee_count(p_dept_id IN NUMBER, p_count OUT NUMBER) AS BEGIN SELECT COUNT(*) INTO p_count FROM employees WHERE department_id = p_dept_id; END; BEGIN get_employee_count(?, 
:count); END;", - "statement_metadata": { - "size": 51, - "tables": ["p_count", "employees"], - "commands": ["CREATE", "BEGIN", "SELECT"], - "comments": [], - "procedures": ["get_employee_count"] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/triggers.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/triggers.json deleted file mode 100644 index 8ff82d4d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/procedure/triggers.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "input": "CREATE OR REPLACE TRIGGER audit_table AFTER INSERT ON logs FOR EACH ROW BEGIN INSERT INTO audit_log (action) VALUES ('Inserted new log'); END;", - "outputs": [ - { - "expected": "CREATE OR REPLACE TRIGGER audit_table AFTER INSERT ON logs FOR EACH ROW BEGIN INSERT INTO audit_log (action) VALUES (?); END;", - "statement_metadata": { - "size": 26, - "tables": ["audit_log"], - "commands": ["CREATE", "INSERT", "BEGIN"], - "comments": [], - "procedures": [] - }, - "obfuscator_config": { - "replace_digits": true - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/complex-join-operations.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/complex-join-operations.json deleted file mode 100644 index f801075a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/complex-join-operations.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT e.employee_id, e.last_name, d.department_name FROM employees e JOIN departments d ON e.department_id = d.department_id WHERE e.test_amt > (SELECT AVG(test_amt) FROM employees WHERE department_id = e.department_id);", - "outputs": [ - { - "expected": "SELECT e.employee_id, e.last_name, d.department_name FROM employees e JOIN departments d ON e.department_id = d.department_id WHERE e.test_amt > ( SELECT AVG ( test_amt ) FROM employees WHERE department_id = e.department_id )", - "statement_metadata": { - "size": 30, - "tables": ["employees", "departments"], - "commands": ["SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/full-hint.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/full-hint.json deleted file mode 100644 index 89a6981c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/full-hint.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "SELECT /*+ FULL(e) */ employee_id, first_name, last_name FROM employees e WHERE department_id = 10;", - "outputs": [ - { - "expected": "SELECT employee_id, first_name, last_name FROM employees e WHERE department_id = ?;", - "statement_metadata": { - "size": 29, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": ["/*+ FULL(e) */"], - "procedures": [] - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - 
"collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/hierarchical-queries.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/hierarchical-queries.json deleted file mode 100644 index 3881a255..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/hierarchical-queries.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT employee_id, last_name, manager_id FROM employees START WITH manager_id IS NULL CONNECT BY PRIOR employee_id = manager_id;", - "outputs": [ - { - "expected": "SELECT employee_id, last_name, manager_id FROM employees START WITH manager_id IS ? CONNECT BY PRIOR employee_id = manager_id", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/index-hint.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/index-hint.json deleted file mode 100644 index cd6e00ac..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/index-hint.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "SELECT /*+ INDEX(e employee_index) */ employee_id, first_name, last_name FROM employees e WHERE department_id = 10;", - "outputs": [ - { - "expected": "SELECT employee_id, first_name, last_name FROM employees e WHERE department_id = ?;", - "statement_metadata": { - "size": 45, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": ["/*+ INDEX(e employee_index) */"], - "procedures": [] - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/large-objects-lobs.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/large-objects-lobs.json deleted file mode 100644 index eae0403c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/large-objects-lobs.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, DBMS_LOB.SUBSTR(blob_data, 2000, 1) as blob_content, DBMS_LOB.SUBSTR(clob_data, 2000, 1) as clob_content FROM lob_test WHERE id = 1;", - "outputs": [ - { - "expected": "SELECT id, DBMS_LOB.SUBSTR ( blob_data, ?, ? ), DBMS_LOB.SUBSTR ( clob_data, ?, ? 
) FROM lob_test WHERE id = ?", - "statement_metadata": { - "size": 14, - "tables": ["lob_test"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/multiple-hints.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/multiple-hints.json deleted file mode 100644 index deddd94d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/multiple-hints.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "SELECT /*+ LEADING(e) USE_HASH(d) */ e.employee_id, e.first_name, d.department_name FROM employees e, departments d WHERE e.department_id = d.department_id;", - "outputs": [ - { - "expected": "SELECT e.employee_id, e.first_name, d.department_name FROM employees e, departments d WHERE e.department_id = d.department_id;", - "statement_metadata": { - "size": 44, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": ["/*+ LEADING(e) USE_HASH(d) */"], - "procedures": [] - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/optimizer-mode-hint.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/optimizer-mode-hint.json deleted file mode 100644 index 6a19aca0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/optimizer-mode-hint.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "SELECT /*+ ALL_ROWS */ order_id, description FROM orders WHERE price > 100;", - "outputs": [ - { - "expected": "SELECT order_id, description FROM orders WHERE price > ?;", - "statement_metadata": { - "size": 27, - "tables": ["orders"], - "commands": ["SELECT"], - "comments": ["/*+ ALL_ROWS */"], - "procedures": [] - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/oracle-text.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/oracle-text.json deleted file mode 100644 index 33beecd0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/oracle-text.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT id, title FROM articles WHERE CONTAINS(text, 'Oracle', 1) > 0;", - "outputs": [ - { - "expected": "SELECT id, title FROM articles WHERE CONTAINS ( text, ?, ? 
) > ?", - "statement_metadata": { - "size": 14, - "tables": ["articles"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/quoted-identifiers-case-sensitive.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/quoted-identifiers-case-sensitive.json deleted file mode 100644 index bf49d9cd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/quoted-identifiers-case-sensitive.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "SELECT \"OrderId\", \"OrderDate\", \"CustomerName\" FROM \"Sales\".\"Orders\" WHERE \"OrderStatus\" = 'Shipped';", - "outputs": [ - { - "expected": "SELECT OrderId, OrderDate, CustomerName FROM Sales.Orders WHERE OrderStatus = ?", - "statement_metadata": { - "size": 18, - "tables": ["Sales.Orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "normalizer_config": { - "keep_identifier_quotation": true, - "Keep_trailing_semicolon": true - }, - "expected": "SELECT \"OrderId\", \"OrderDate\", \"CustomerName\" FROM \"Sales\".\"Orders\" WHERE \"OrderStatus\" = ?;" - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/quoted-identifiers-special-characters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/quoted-identifiers-special-characters.json deleted file mode 100644 index 89ac3e55..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/quoted-identifiers-special-characters.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "SELECT * FROM \"Sales\".\"Order-Details\" WHERE \"Product#Name\" LIKE '%Gadget%';", - "outputs": [ - { - "expected": "SELECT * FROM Sales.Order-Details WHERE Product#Name LIKE ?", - "statement_metadata": { - "size": 25, - "tables": ["Sales.Order-Details"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "normalizer_config": { - "keep_identifier_quotation": true, - "Keep_trailing_semicolon": true - }, - "expected": "SELECT * FROM \"Sales\".\"Order-Details\" WHERE \"Product#Name\" LIKE ?;" - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/recursive-cte.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/recursive-cte.json deleted file mode 100644 index 8524a7b2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/recursive-cte.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "WITH RECURSIVE subordinates AS (SELECT employee_id, manager_id FROM employees WHERE manager_id IS NULL UNION ALL SELECT e.employee_id, e.manager_id FROM employees e JOIN subordinates s ON e.manager_id = s.employee_id) SELECT * FROM subordinates;", - "outputs": [ - { - "expected": "WITH RECURSIVE subordinates AS ( SELECT employee_id, manager_id FROM employees WHERE manager_id IS ? 
UNION ALL SELECT e.employee_id, e.manager_id FROM employees e JOIN subordinates s ON e.manager_id = s.employee_id ) SELECT * FROM subordinates", - "statement_metadata": { - "size": 31, - "tables": ["employees", "subordinates"], - "commands": [ "SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-basic-conditions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-basic-conditions.json deleted file mode 100644 index d3b8b73f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-basic-conditions.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "SELECT id, name FROM users WHERE age > 30 AND status = 'active';", - "outputs": [ - { - "expected": "SELECT id, name FROM users WHERE age > ? AND status = ?", - "statement_metadata": { - "size": 11, - "tables": ["users"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT id, name FROM users WHERE age > ? AND status = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-hierarchical-query.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-hierarchical-query.json deleted file mode 100644 index 947494a6..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-hierarchical-query.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT employee_id, last_name, manager_id FROM employees START WITH manager_id IS NULL CONNECT BY PRIOR employee_id = manager_id;", - "outputs": [ - { - "expected": "SELECT employee_id, last_name, manager_id FROM employees START WITH manager_id IS ? CONNECT BY PRIOR employee_id = manager_id", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT employee_id, last_name, manager_id FROM employees START WITH manager_id IS NULL CONNECT BY PRIOR employee_id = manager_id;", - "normalizer_config": { - "keep_trailing_semicolon": true - }, - "obfuscator_config": { - "replace_boolean":false - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-using-oracle-text.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-using-oracle-text.json deleted file mode 100644 index 7878c10a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-using-oracle-text.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT id, title FROM articles WHERE CONTAINS(text, 'Oracle', 1) > 0;", - "outputs": [ - { - "expected": "SELECT id, title FROM articles WHERE CONTAINS ( text, ?, ? ) > ?", - "statement_metadata": { - "size": 14, - "tables": ["articles"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT id, title FROM articles WHERE CONTAINS(text, ?, ?) 
> ?;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-using-with-clause.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-using-with-clause.json deleted file mode 100644 index 6bb2fc58..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-using-with-clause.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "WITH dept_costs AS (SELECT department_id, SUM(test_amt) AS total_sal FROM employees GROUP BY department_id) SELECT * FROM dept_costs WHERE total_sal > (SELECT AVG(total_sal) FROM dept_costs);", - "outputs": [ - { - "expected": "WITH dept_costs AS ( SELECT department_id, SUM ( test_amt ) FROM employees GROUP BY department_id ) SELECT * FROM dept_costs WHERE total_sal > ( SELECT AVG ( total_sal ) FROM dept_costs )", - "statement_metadata": { - "size": 25, - "tables": ["employees", "dept_costs"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "WITH dept_costs AS (SELECT department_id, SUM(test_amt) FROM employees GROUP BY department_id) SELECT * FROM dept_costs WHERE total_sal > (SELECT AVG(total_sal) FROM dept_costs);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-flashback-query.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-flashback-query.json deleted file mode 100644 index 1c6c6d61..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-flashback-query.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT * FROM employees AS OF TIMESTAMP TO_TIMESTAMP('2023-03-15 08:30:00', 'YYYY-MM-DD HH24:MI:SS') WHERE department_id = 10;", - "outputs": [ - { - "expected": "SELECT * FROM employees AS OF TIMESTAMP TO_TIMESTAMP ( ? ) WHERE department_id = ?", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT * FROM employees AS OF TIMESTAMP TO_TIMESTAMP(?) WHERE department_id = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-model-clause.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-model-clause.json deleted file mode 100644 index 17f3fd95..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-model-clause.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT * FROM (SELECT year, product, amount FROM sales) MODEL DIMENSION BY (year) MEASURES (product, amount) RULES (amount['2023'] = amount['2022'] * 1.1);", - "outputs": [ - { - "expected": "SELECT * FROM ( SELECT year, product, amount FROM sales ) MODEL DIMENSION BY ( year ) MEASURES ( product, amount ) RULES ( amount [ ? ] = amount [ ? ] * ? )", - "statement_metadata": { - "size": 11, - "tables": ["sales"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT * FROM (SELECT year, product, amount FROM sales) MODEL DIMENSION BY (year) MEASURES (product, amount) RULES (amount [?] = amount [?] 
* ?);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-multi-line-comments.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-multi-line-comments.json deleted file mode 100644 index 2a9fd894..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-multi-line-comments.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "SELECT /* Multi-line\n comment */ id, name FROM users WHERE status = 'active';", - "outputs": [ - { - "expected": "SELECT id, name FROM users WHERE status = ?", - "statement_metadata": { - "size": 36, - "tables": ["users"], - "commands": ["SELECT"], - "comments": ["/* Multi-line\n comment */"], - "procedures": [] - } - }, - { - "expected": "SELECT id, name FROM users WHERE status = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-oracle-specific-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-oracle-specific-joins.json deleted file mode 100644 index 06b165af..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-oracle-specific-joins.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT e.employee_id, e.last_name, d.department_name FROM employees e, departments d WHERE e.department_id = d.department_id(+);", - "outputs": [ - { - "expected": "SELECT e.employee_id, e.last_name, d.department_name FROM employees e, departments d WHERE e.department_id = d.department_id ( + )", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT e.employee_id, e.last_name, d.department_name FROM employees e, departments d WHERE e.department_id = d.department_id(+);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-partition-by.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-partition-by.json deleted file mode 100644 index 86253a75..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-partition-by.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT department_id, last_name, test_amt, AVG(test_amt) OVER (PARTITION BY department_id) AS avg_dept_test_amt FROM employees;", - "outputs": [ - { - "expected": "SELECT department_id, last_name, test_amt, AVG ( test_amt ) OVER ( PARTITION BY department_id ) FROM employees", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT department_id, last_name, test_amt, AVG(test_amt) OVER (PARTITION BY department_id) FROM employees;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-pseudocolumns.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-pseudocolumns.json deleted file mode 100644 index a98bd682..00000000 --- 
a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-pseudocolumns.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "SELECT LEVEL, ROWNUM, employee_id, last_name FROM employees WHERE ROWNUM <= 10 CONNECT BY PRIOR employee_id = manager_id;", - "outputs": [ - { - "expected": "SELECT LEVEL, ROWNUM, employee_id, last_name FROM employees WHERE ROWNUM <= ? CONNECT BY PRIOR employee_id = manager_id", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT LEVEL, ROWNUM, employee_id, last_name FROM employees WHERE ROWNUM <= ? CONNECT BY PRIOR employee_id = manager_id;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-rollup-function.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-rollup-function.json deleted file mode 100644 index bc123857..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-rollup-function.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT department_id, job_id, SUM(test_amt) total_test_amt FROM employees GROUP BY ROLLUP (department_id, job_id);", - "outputs": [ - { - "expected": "SELECT department_id, job_id, SUM ( test_amt ) total_test_amt FROM employees GROUP BY ROLLUP ( department_id, job_id )", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT department_id, job_id, SUM(test_amt) total_test_amt FROM employees GROUP BY ROLLUP (department_id, job_id);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-sample-clause.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-sample-clause.json deleted file mode 100644 index 74cc06b7..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-sample-clause.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT * FROM employees SAMPLE (10);", - "outputs": [ - { - "expected": "SELECT * FROM employees SAMPLE ( ? 
)", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT * FROM employees SAMPLE (?);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-single-line-comments.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-single-line-comments.json deleted file mode 100644 index 7f32afdb..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-single-line-comments.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "SELECT id, name FROM users WHERE status = 'active'; -- Single-line comment explaining the query", - "outputs": [ - { - "expected": "SELECT id, name FROM users WHERE status = ?", - "statement_metadata": { - "size": 54, - "tables": ["users"], - "commands": ["SELECT"], - "comments": ["-- Single-line comment explaining the query"], - "procedures": [] - } - }, - { - "expected": "SELECT id, name FROM users WHERE status = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-skip-locked.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-skip-locked.json deleted file mode 100644 index 72326d19..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/select-with-skip-locked.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "SELECT * FROM orders WHERE order_status = 'PENDING' FOR UPDATE SKIP LOCKED;", - "outputs": [ - { - "expected": "SELECT * FROM orders WHERE order_status = ? FOR UPDATE SKIP LOCKED", - "statement_metadata": { - "size": 18, - "tables": ["orders"], - "commands": ["SELECT", "UPDATE"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT * FROM orders WHERE order_status = ? 
FOR UPDATE SKIP LOCKED;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/use-nl-hint.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/use-nl-hint.json deleted file mode 100644 index a84e0adc..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/use-nl-hint.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "SELECT /*+ USE_NL(e d) */ e.employee_id, e.first_name, d.department_name FROM employees e, departments d WHERE e.department_id = d.department_id;", - "outputs": [ - { - "expected": "SELECT e.employee_id, e.first_name, d.department_name FROM employees e, departments d WHERE e.department_id = d.department_id;", - "statement_metadata": { - "size": 33, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": ["/*+ USE_NL(e d) */"], - "procedures": [] - }, - "normalizer_config": { - "collect_tables": true, - "collect_commands": true, - "collect_comments": true, - "collect_procedure": true, - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/window-functions-analytics.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/window-functions-analytics.json deleted file mode 100644 index a0af2d82..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/select/window-functions-analytics.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "SELECT employee_id, test_amt, AVG(yoe) OVER (PARTITION BY department_id) AS avg_department_yoe FROM employees;", - "outputs": [ - { - "expected": "SELECT employee_id, test_amt, AVG ( yoe ) OVER ( PARTITION BY department_id ) FROM employees", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/conditional-update-with-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/conditional-update-with-case.json deleted file mode 100644 index 949243d5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/conditional-update-with-case.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "UPDATE employees SET test_amt = CASE WHEN job_id = 'XX' THEN test_amt * 1.10 WHEN job_id = 'YY' THEN test_amt * 1.20 ELSE test_amt END;", - "outputs": [ - { - "expected": "UPDATE employees SET test_amt = CASE WHEN job_id = ? THEN test_amt * ? WHEN job_id = ? THEN test_amt * ? ELSE test_amt END", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE employees SET test_amt = CASE WHEN job_id = ? THEN test_amt * ? WHEN job_id = ? THEN test_amt * ? 
ELSE test_amt END;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/conditional-update-with-decode.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/conditional-update-with-decode.json deleted file mode 100644 index 5f9e2398..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/conditional-update-with-decode.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "UPDATE order_items SET discount = DECODE(quantity, 10, 5, 20, 10, 0) WHERE order_id = 456;", - "outputs": [ - { - "expected": "UPDATE order_items SET discount = DECODE ( quantity, ?, ?, ?, ?, ? ) WHERE order_id = ?", - "statement_metadata": { - "size": 17, - "tables": ["order_items"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE order_items SET discount = DECODE(quantity, ?, ?, ?, ?, ?) WHERE order_id = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/dynamic-plsql.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/dynamic-plsql.json deleted file mode 100644 index 91cdcb8c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/dynamic-plsql.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "input": "BEGIN EXECUTE IMMEDIATE 'UPDATE logs SET retention = retention * 1.1'; END;", - "outputs": [ - { - "expected": "BEGIN EXECUTE IMMEDIATE ?; END", - "statement_metadata": { - "size": 12, - "tables": [], - "commands": ["BEGIN", "EXECUTE"], - "comments": [], - "procedures": [] - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-basic.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-basic.json deleted file mode 100644 index d5ee0289..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-basic.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "UPDATE employees SET test_amt = test_amt * 1.05 WHERE department_id = 3;", - "outputs": [ - { - "expected": "UPDATE employees SET test_amt = test_amt * ? WHERE department_id = ?", - "statement_metadata": { - "size": 15, - "tables": ["employees"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE employees SET test_amt = test_amt * ? 
WHERE department_id = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-oracle-specific-syntax.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-oracle-specific-syntax.json deleted file mode 100644 index a7f6baea..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-oracle-specific-syntax.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "UPDATE (SELECT e.test_amt, d.budget FROM employees e JOIN departments d ON e.department_id = d.id) t SET t.test_amt = t.test_amt * 1.05, t.budget = t.budget - 1000;", - "outputs": [ - { - "expected": "UPDATE ( SELECT e.test_amt, d.budget FROM employees e JOIN departments d ON e.department_id = d.id ) t SET t.test_amt = t.test_amt * ?, t.budget = t.budget - ?", - "statement_metadata": { - "size": 36, - "tables": ["employees", "departments"], - "commands": ["UPDATE", "SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE (SELECT e.test_amt, d.budget FROM employees e JOIN departments d ON e.department_id = d.id) t SET t.test_amt = t.test_amt * ?, t.budget = t.budget - ?;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-using-correlated-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-using-correlated-subquery.json deleted file mode 100644 index 3ba8e9d5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-using-correlated-subquery.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "UPDATE orders o SET o.status = 'DELAYED' WHERE EXISTS (SELECT 1 FROM shipments s WHERE s.order_id = o.id AND s.estimated_arrival < SYSDATE);", - "outputs": [ - { - "expected": "UPDATE orders o SET o.status = ? WHERE EXISTS ( SELECT ? FROM shipments s WHERE s.order_id = o.id AND s.estimated_arrival < SYSDATE )", - "statement_metadata": { - "size": 27, - "tables": ["orders", "shipments"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE orders o SET o.status = ? WHERE EXISTS (SELECT ? 
FROM shipments s WHERE s.order_id = o.id AND s.estimated_arrival < SYSDATE);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-using-join-syntax.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-using-join-syntax.json deleted file mode 100644 index 18d78d25..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-using-join-syntax.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "UPDATE (SELECT a.account_balance, t.transaction_amount FROM accounts a JOIN transactions t ON a.account_id = t.account_id) SET account_balance = account_balance + transaction_amount;", - "outputs": [ - { - "expected": "UPDATE ( SELECT a.account_balance, t.transaction_amount FROM accounts a JOIN transactions t ON a.account_id = t.account_id ) SET account_balance = account_balance + transaction_amount", - "statement_metadata": { - "size": 36, - "tables": ["accounts", "transactions"], - "commands": ["UPDATE", "SELECT", "JOIN"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE (SELECT a.account_balance, t.transaction_amount FROM accounts a JOIN transactions t ON a.account_id = t.account_id) SET account_balance = account_balance + transaction_amount;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-correlated-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-correlated-subquery.json deleted file mode 100644 index 5846dbc7..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-correlated-subquery.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "UPDATE customer_orders co SET total_amount = (SELECT SUM(oi.price * oi.quantity) FROM order_items oi WHERE oi.order_id = co.id) WHERE co.status = 'Pending';", - "outputs": [ - { - "expected": "UPDATE customer_orders co SET total_amount = ( SELECT SUM ( oi.price * oi.quantity ) FROM order_items oi WHERE oi.order_id = co.id ) WHERE co.status = ?", - "statement_metadata": { - "size": 38, - "tables": ["customer_orders", "order_items"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE customer_orders co SET total_amount = (SELECT SUM(oi.price * oi.quantity) FROM order_items oi WHERE oi.order_id = co.id) WHERE co.status = ?;", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-join.json deleted file mode 100644 index 24d901a8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-join.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "UPDATE products p SET p.price = p.price * 1.1 FROM suppliers s WHERE p.supplier_id = s.id AND s.rating > 4;", - "outputs": [ - { - "expected": "UPDATE products p SET p.price = p.price * ? 
FROM suppliers s WHERE p.supplier_id = s.id AND s.rating > ?", - "statement_metadata": { - "size": 23, - "tables": ["products", "suppliers"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE products p SET p.price = p.price * ? FROM suppliers s WHERE p.supplier_id = s.id AND s.rating > ?;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-returning-clause.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-returning-clause.json deleted file mode 100644 index 8f9c17fa..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-returning-clause.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "UPDATE orders SET order_status = 'Completed' WHERE order_id = 123 RETURNING customer_id, order_total INTO :cust_id, :total;", - "outputs": [ - { - "expected": "UPDATE orders SET order_status = ? WHERE order_id = ? RETURNING customer_id, order_total INTO :cust_id, :total", - "statement_metadata": { - "size": 12, - "tables": ["orders"], - "commands": ["UPDATE"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE orders SET order_status = ? WHERE order_id = ? RETURNING customer_id, order_total INTO :cust_id, :total;", - "normalizer_config": { - "keep_trailing_semicolon": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-subquery-in-set.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-subquery-in-set.json deleted file mode 100644 index d5a81c9e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-subquery-in-set.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "UPDATE products p SET (p.price, p.stock) = (SELECT s.discounted_price, s.quantity FROM sale_items s WHERE s.product_id = p.id) WHERE EXISTS (SELECT 1 FROM sale_items s WHERE s.product_id = p.id);", - "outputs": [ - { - "expected": "UPDATE products p SET ( p.price, p.stock ) = ( SELECT s.discounted_price, s.quantity FROM sale_items s WHERE s.product_id = p.id ) WHERE EXISTS ( SELECT ? FROM sale_items s WHERE s.product_id = p.id )", - "statement_metadata": { - "size": 30, - "tables": ["products", "sale_items"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE products p SET (p.price, p.stock) = (SELECT s.discounted_price, s.quantity FROM sale_items s WHERE s.product_id = p.id) WHERE EXISTS (SELECT ? FROM sale_items s WHERE s.product_id = p.id);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-subquery.json deleted file mode 100644 index 740712c5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/oracle/update/update-with-subquery.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "UPDATE products SET price = price * 0.9 WHERE id IN (SELECT product_id FROM inventory WHERE quantity > 100);", - "outputs": [ - { - "expected": "UPDATE products SET price = price * ? WHERE id IN ( SELECT product_id FROM inventory WHERE quantity > ? 
)", - "statement_metadata": { - "size": 29, - "tables": ["products", "inventory"], - "commands": ["UPDATE", "SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE products SET price = price * ? WHERE id IN (SELECT product_id FROM inventory WHERE quantity > ?);", - "normalizer_config": { - "keep_trailing_semicolon": true, - "remove_space_between_parentheses": true - } - } - ] - } - \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/delete-complex-subqueries-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/delete-complex-subqueries-joins.json deleted file mode 100644 index ef558339..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/delete-complex-subqueries-joins.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "DELETE FROM \n users u\nUSING \n orders o,\n order_items oi,\n products p\nWHERE \n u.id = o.user_id\nAND o.id = oi.order_id\nAND oi.product_id = p.id\nAND p.category = 'obsolete'\nAND o.order_date < NOW() - INTERVAL '5 years';", - "outputs": [ - { - "expected": "DELETE FROM users u USING orders o, order_items oi, products p WHERE u.id = o.user_id AND o.id = oi.order_id AND oi.product_id = p.id AND p.category = ? AND o.order_date < NOW ( ) - INTERVAL ?", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "DELETE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/insert-complex-select-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/insert-complex-select-joins.json deleted file mode 100644 index 841ce4b9..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/insert-complex-select-joins.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "input": "INSERT INTO order_summaries (order_id, product_count, total_amount, average_product_price)\nSELECT \n o.id,\n COUNT(p.id),\n SUM(oi.amount),\n AVG(p.price)\nFROM \n orders o\nJOIN order_items oi ON o.id = oi.order_id\nJOIN products p ON oi.product_id = p.id\nGROUP BY \n o.id\nHAVING \n SUM(oi.amount) > 1000;", - "outputs": [ - { - "expected": "INSERT INTO order_summaries ( order_id, product_count, total_amount, average_product_price ) SELECT o.id, COUNT ( p.id ), SUM ( oi.amount ), AVG ( p.price ) FROM orders o JOIN order_items oi ON o.id = oi.order_id JOIN products p ON oi.product_id = p.id GROUP BY o.id HAVING SUM ( oi.amount ) > ?", - "statement_metadata": { - "size": 56, - "tables": [ - "order_summaries", - "orders", - "order_items", - "products" - ], - "commands": [ - "INSERT", - "SELECT", - "JOIN" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-complex-aggregates-subqueries.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-complex-aggregates-subqueries.json deleted file mode 100644 index 1b2126a2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-complex-aggregates-subqueries.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "input": "SELECT \n u.id,\n u.name,\n (SELECT COUNT(*) FROM orders o WHERE o.user_id = u.id) AS order_count,\n (SELECT SUM(amount) FROM payments p WHERE p.user_id = u.id) AS total_payments,\n (SELECT AVG(rating) FROM reviews r WHERE r.user_id = u.id) AS average_rating\nFROM \n users u\nWHERE \n EXISTS 
(\n SELECT 1 FROM logins l WHERE l.user_id = u.id AND l.time > NOW() - INTERVAL '1 month'\n )\nAND u.status = 'active'\nORDER BY \n total_payments DESC, average_rating DESC, order_count DESC\nLIMIT 10;", - "outputs": [ - { - "expected": "SELECT u.id, u.name, ( SELECT COUNT ( * ) FROM orders o WHERE o.user_id = u.id ), ( SELECT SUM ( amount ) FROM payments p WHERE p.user_id = u.id ), ( SELECT AVG ( rating ) FROM reviews r WHERE r.user_id = u.id ) FROM users u WHERE EXISTS ( SELECT ? FROM logins l WHERE l.user_id = u.id AND l.time > NOW ( ) - INTERVAL ? ) AND u.status = ? ORDER BY total_payments DESC, average_rating DESC, order_count DESC LIMIT ?", - "statement_metadata": { - "size": 38, - "tables": [ - "orders", - "payments", - "reviews", - "users", - "logins" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-complex-joins-window-functions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-complex-joins-window-functions.json deleted file mode 100644 index 71029e52..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-complex-joins-window-functions.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "SELECT \n e1.name AS employee_name,\n e1.test_amt,\n e2.name AS manager_name,\n AVG(e2.test_amt) OVER (PARTITION BY e1.manager_id) AS avg_manager_test_amt,\n RANK() OVER (ORDER BY e1.test_amt DESC) AS test_amt_rank\nFROM \n employees e1\nLEFT JOIN employees e2 ON e1.manager_id = e2.id\nWHERE \n e1.department_id IN (SELECT id FROM departments WHERE name LIKE 'IT%')\nAND \n e1.hire_date > '2020-01-01'\nORDER BY \n test_amt_rank, avg_manager_test_amt DESC;", - "outputs": [ - { - "expected": "SELECT e?.name, e?.test_amt, e?.name, AVG ( e?.test_amt ) OVER ( PARTITION BY e?.manager_id ), RANK ( ) OVER ( ORDER BY e?.test_amt DESC ) FROM employees e? LEFT JOIN employees e? ON e?.manager_id = e?.id WHERE e?.department_id IN ( SELECT id FROM departments WHERE name LIKE ? ) AND e?.hire_date > ? ORDER BY test_amt_rank, avg_manager_test_amt DESC", - "statement_metadata": { - "size": 30, - "tables": [ - "employees", - "departments" - ], - "commands": [ - "SELECT", - "JOIN" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-nested-subqueries-aggregates-limits.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-nested-subqueries-aggregates-limits.json deleted file mode 100644 index d8642455..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/select-nested-subqueries-aggregates-limits.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT \n user_id,\n order_id,\n order_total,\n user_total\nFROM (\n SELECT \n o.user_id,\n o.id AS order_id,\n o.total AS order_total,\n (SELECT SUM(total) FROM orders WHERE user_id = o.user_id) AS user_total,\n RANK() OVER (PARTITION BY o.user_id ORDER BY o.total DESC) AS rnk\n FROM \n orders o\n) sub\nWHERE \n sub.rnk = 1\nAND user_total > (\n SELECT \n AVG(total) * 2 \n FROM orders\n);", - "outputs": [ - { - "expected": "SELECT user_id, order_id, order_total, user_total FROM ( SELECT o.user_id, o.id, o.total, ( SELECT SUM ( total ) FROM orders WHERE user_id = o.user_id ), RANK ( ) OVER ( PARTITION BY o.user_id ORDER BY o.total DESC ) FROM orders o ) sub WHERE sub.rnk = ? 
AND user_total > ( SELECT AVG ( total ) * ? FROM orders )", - "statement_metadata": { - "size": 12, - "tables": [ - "orders" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/update-complex-subquery-conditional.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/update-complex-subquery-conditional.json deleted file mode 100644 index 15d51e02..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/complex/update-complex-subquery-conditional.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "UPDATE \n products p\nSET \n price = CASE \n WHEN p.stock < 10 THEN p.price * 1.10\n WHEN p.stock BETWEEN 10 AND 50 THEN p.price\n ELSE p.price * 0.90\n END,\n last_updated = NOW()\nFROM (\n SELECT \n product_id, \n SUM(quantity) AS stock\n FROM \n inventory\n GROUP BY \n product_id\n) AS sub\nWHERE \n sub.product_id = p.id;", - "outputs": [ - { - "expected": "UPDATE products p SET price = CASE WHEN p.stock < ? THEN p.price * ? WHEN p.stock BETWEEN ? AND ? THEN p.price ELSE p.price * ? END, last_updated = NOW ( ) FROM ( SELECT product_id, SUM ( quantity ) FROM inventory GROUP BY product_id ) WHERE sub.product_id = p.id", - "statement_metadata": { - "size": 29, - "tables": [ - "products", - "inventory" - ], - "commands": [ - "UPDATE", - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-all-rows.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-all-rows.json deleted file mode 100644 index 96eb980f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-all-rows.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "DELETE FROM temp_table;", - "outputs": [ - { - "expected": "DELETE FROM temp_table", - "statement_metadata": { - "size": 16, - "tables": [ - "temp_table" - ], - "commands": [ - "DELETE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-returning.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-returning.json deleted file mode 100644 index 772ac106..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-returning.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "DELETE FROM orders WHERE id = 8 RETURNING *;", - "outputs": [ - { - "expected": "DELETE FROM orders WHERE id = ? 
RETURNING *", - "statement_metadata": { - "size": 12, - "tables": [ - "orders" - ], - "commands": [ - "DELETE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-simple.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-simple.json deleted file mode 100644 index 0ddcff7a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-simple.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "DELETE FROM users WHERE id = 7;", - "outputs": [ - { - "expected": "DELETE FROM users WHERE id = ?", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "DELETE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-using-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-using-join.json deleted file mode 100644 index 60d22f4a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-using-join.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "DELETE FROM user_logins USING users WHERE user_logins.user_id = users.id AND users.status = 'inactive';", - "outputs": [ - { - "expected": "DELETE FROM user_logins USING users WHERE user_logins.user_id = users.id AND users.status = ?", - "statement_metadata": { - "size": 17, - "tables": [ - "user_logins" - ], - "commands": [ - "DELETE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-with-cte.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-with-cte.json deleted file mode 100644 index e721079c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-with-cte.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "WITH deleted AS (\n DELETE FROM users WHERE last_login < NOW() - INTERVAL '2 years' RETURNING *\n)\nSELECT * FROM deleted;", - "outputs": [ - { - "expected": "WITH deleted AS ( DELETE FROM users WHERE last_login < NOW ( ) - INTERVAL ? RETURNING * ) SELECT * FROM deleted", - "statement_metadata": { - "size": 24, - "tables": [ - "users", - "deleted" - ], - "commands": [ - "DELETE", - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-with-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-with-subquery.json deleted file mode 100644 index 8857f3aa..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/delete/delete-with-subquery.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "DELETE FROM comments WHERE user_id IN (SELECT id FROM users WHERE status = 'banned');", - "outputs": [ - { - "expected": "DELETE FROM comments WHERE user_id IN ( SELECT id FROM users WHERE status = ? 
)", - "statement_metadata": { - "size": 25, - "tables": [ - "comments", - "users" - ], - "commands": [ - "DELETE", - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-that-raises-notice.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-that-raises-notice.json deleted file mode 100644 index 3bb0c7cd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-that-raises-notice.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "CREATE OR REPLACE FUNCTION log_activity(activity text) RETURNS void AS $func$\nBEGIN\n RAISE NOTICE 'Activity: %', activity;\nEND;\n$func$ LANGUAGE plpgsql;", - "outputs": [ - { - "expected": "CREATE OR REPLACE FUNCTION log_activity ( activity text ) RETURNS void AS $func$BEGIN RAISE NOTICE ?, activity; END$func$ LANGUAGE plpgsql" - }, - { - "obfuscator_config": { - "dollar_quoted_func": false - }, - "expected": "CREATE OR REPLACE FUNCTION log_activity ( activity text ) RETURNS void AS ? LANGUAGE plpgsql" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-dynamic-query.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-dynamic-query.json deleted file mode 100644 index 8804741e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-dynamic-query.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "CREATE OR REPLACE FUNCTION dynamic_query(sql_query text) RETURNS SETOF RECORD AS $func$\nBEGIN\n RETURN QUERY EXECUTE sql_query;\nEND;\n$func$ LANGUAGE plpgsql;", - "outputs": [ - { - "expected": "CREATE OR REPLACE FUNCTION dynamic_query ( sql_query text ) RETURNS SETOF RECORD AS $func$BEGIN RETURN QUERY EXECUTE sql_query; END$func$ LANGUAGE plpgsql" - }, - { - "obfuscator_config": { - "dollar_quoted_func": false - }, - "expected": "CREATE OR REPLACE FUNCTION dynamic_query ( sql_query text ) RETURNS SETOF RECORD AS ? LANGUAGE plpgsql" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-parameters.json deleted file mode 100644 index 18f8e7f1..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-parameters.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "CREATE OR REPLACE FUNCTION get_user_email(user_id integer) RETURNS text AS $func$\nBEGIN\n RETURN (SELECT email FROM users WHERE id = user_id);\nEND;\n$func$ LANGUAGE plpgsql;", - "outputs": [ - { - "expected": "CREATE OR REPLACE FUNCTION get_user_email ( user_id integer ) RETURNS text AS $func$BEGIN RETURN ( SELECT email FROM users WHERE id = user_id ); END$func$ LANGUAGE plpgsql" - }, - { - "obfuscator_config": { - "dollar_quoted_func": false - }, - "expected": "CREATE OR REPLACE FUNCTION get_user_email ( user_id integer ) RETURNS text AS ? 
LANGUAGE plpgsql" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-table-return.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-table-return.json deleted file mode 100644 index dd9092d1..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-function-with-table-return.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "CREATE OR REPLACE FUNCTION get_users() RETURNS TABLE(user_id integer, user_name text) AS $func$\nBEGIN\n RETURN QUERY SELECT id, name FROM users;\nEND;\n$func$ LANGUAGE plpgsql;", - "outputs": [ - { - "expected": "CREATE OR REPLACE FUNCTION get_users ( ) RETURNS TABLE ( user_id integer, user_name text ) AS $func$BEGIN RETURN QUERY SELECT id, name FROM users; END$func$ LANGUAGE plpgsql" - }, - { - "obfuscator_config": { - "dollar_quoted_func": false - }, - "expected": "CREATE OR REPLACE FUNCTION get_users ( ) RETURNS TABLE ( user_id integer, user_name text ) AS ? LANGUAGE plpgsql" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-simple-plpgsql-function.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-simple-plpgsql-function.json deleted file mode 100644 index 0147b7d2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/create-simple-plpgsql-function.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "CREATE OR REPLACE FUNCTION get_user_count() RETURNS integer AS $func$\nBEGIN\n RETURN (SELECT COUNT(*) FROM users);\nEND;\n$func$ LANGUAGE plpgsql;", - "outputs": [ - { - "expected": "CREATE OR REPLACE FUNCTION get_user_count ( ) RETURNS integer AS $func$BEGIN RETURN ( SELECT COUNT ( * ) FROM users ); END$func$ LANGUAGE plpgsql" - }, - { - "obfuscator_config": { - "dollar_quoted_func": false - }, - "expected": "CREATE OR REPLACE FUNCTION get_user_count ( ) RETURNS integer AS ? LANGUAGE plpgsql" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-positional-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-positional-parameters.json deleted file mode 100644 index fe57a79b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-positional-parameters.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "input": "SELECT calculate_discount($1, $2);", - "outputs": [ - { - "expected": "SELECT calculate_discount ( ? 
)" - }, - { - "obfuscator_config": { - "replace_positional_parameter": false - }, - "normalizer_config": { - "remove_space_between_parentheses": true - }, - "expected": "SELECT calculate_discount($1, $2)" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-returning-table.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-returning-table.json deleted file mode 100644 index 2b80ec4b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-returning-table.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "input": "SELECT * FROM get_users();", - "outputs": [ - { - "expected": "SELECT * FROM get_users ( )" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-that-raises-notice.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-that-raises-notice.json deleted file mode 100644 index 7e627ece..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-that-raises-notice.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "input": "SELECT log_activity('User logged in');", - "outputs": [ - { - "expected": "SELECT log_activity ( ? )" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-with-dynamic-query.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-with-dynamic-query.json deleted file mode 100644 index 492c03bc..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-with-dynamic-query.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "input": "SELECT * FROM dynamic_query('SELECT * FROM users WHERE id = 1') AS t(id integer, name text, email text);", - "outputs": [ - { - "expected": "SELECT * FROM dynamic_query ( ? ) AS t ( id integer, name text, email text )" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-with-parameter.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-with-parameter.json deleted file mode 100644 index 0c4d1d5d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-function-with-parameter.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "input": "SELECT get_user_email(1);", - "outputs": [ - { - "expected": "SELECT get_user_email ( ? 
)" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-simple-function.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-simple-function.json deleted file mode 100644 index 3af547cc..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/function/invoke-simple-function.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "input": "SELECT get_user_count();", - "outputs": [ - { - "expected": "SELECT get_user_count ( )" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-array-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-array-data.json deleted file mode 100644 index 0887b145..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-array-data.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "INSERT INTO users (name, favorite_numbers) VALUES ('Array User', ARRAY[3, 6, 9]);", - "outputs": [ - { - "expected": "INSERT INTO users ( name, favorite_numbers ) VALUES ( ?, ARRAY [ ? ] )", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "INSERT INTO users (name, favorite_numbers) VALUES (?, ARRAY [?])", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-json-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-json-data.json deleted file mode 100644 index 7a9c6559..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-json-data.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO events (data) VALUES ('{\"type\": \"user_signup\", \"user_id\": 1}');", - "outputs": [ - { - "expected": "INSERT INTO events ( data ) VALUES ( ? )", - "statement_metadata": { - "size": 12, - "tables": [ - "events" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-multiple-rows.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-multiple-rows.json deleted file mode 100644 index 7101dc7f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-multiple-rows.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO users (name, email) VALUES ('Jane Doe', 'jane@example.com'), ('Bob Smith', 'bob@example.com');", - "outputs": [ - { - "expected": "INSERT INTO users ( name, email ) VALUES ( ? ), ( ? )", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-positional-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-positional-parameters.json deleted file mode 100644 index b85086c5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-positional-parameters.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "INSERT INTO users (name, email, age) VALUES ($1, $2, $3);", - "outputs": [ - { - "expected": "INSERT INTO users ( name, email, age ) VALUES ( ? 
)" - }, - { - "obfuscator_config": { - "replace_positional_parameter": false - }, - "expected": "INSERT INTO users ( name, email, age ) VALUES ( $1, $2, $3 )" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-returning-positional-parameter.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-returning-positional-parameter.json deleted file mode 100644 index d9f4e220..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-returning-positional-parameter.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "INSERT INTO orders (product_id, quantity, total) VALUES ($1, $2, $3) RETURNING id;", - "outputs": [ - { - "expected": "INSERT INTO orders ( product_id, quantity, total ) VALUES ( ? ) RETURNING id" - }, - { - "obfuscator_config": { - "replace_positional_parameter": false - }, - "expected": "INSERT INTO orders ( product_id, quantity, total ) VALUES ( $1, $2, $3 ) RETURNING id" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-simple-row.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-simple-row.json deleted file mode 100644 index 88bbe702..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-simple-row.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO users (name, email) VALUES ('John Doe', 'john@example.com');", - "outputs": [ - { - "expected": "INSERT INTO users ( name, email ) VALUES ( ? )", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-conflict-do-nothing.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-conflict-do-nothing.json deleted file mode 100644 index 0372dab6..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-conflict-do-nothing.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO users (id, name, email) VALUES (1, 'Duplicate', 'duplicate@example.com') ON CONFLICT (id) DO NOTHING;", - "outputs": [ - { - "expected": "INSERT INTO users ( id, name, email ) VALUES ( ? ) ON CONFLICT ( id ) DO NOTHING", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-conflict-update.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-conflict-update.json deleted file mode 100644 index 428cebb5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-conflict-update.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "input": "INSERT INTO users (id, name, email) VALUES (1, 'Duplicate', 'duplicate@example.com') ON CONFLICT (id) DO UPDATE SET email = EXCLUDED.email;", - "outputs": [ - { - "expected": "INSERT INTO users ( id, name, email ) VALUES ( ? 
) ON CONFLICT ( id ) DO UPDATE SET email = EXCLUDED.email", - "statement_metadata": { - "size": 17, - "tables": [ - "users" - ], - "commands": [ - "INSERT", - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-default.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-default.json deleted file mode 100644 index 1f3a69f0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-default.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO products (name, price, description) VALUES ('New Product', 123, DEFAULT);", - "outputs": [ - { - "expected": "INSERT INTO products ( name, price, description ) VALUES ( ?, DEFAULT )", - "statement_metadata": { - "size": 14, - "tables": [ - "products" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-enum-type.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-enum-type.json deleted file mode 100644 index 0bb98b29..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-enum-type.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO shipments (status) VALUES ('delivered'::shipment_status);", - "outputs": [ - { - "expected": "INSERT INTO shipments ( status ) VALUES ( ? :: shipment_status )", - "statement_metadata": { - "size": 15, - "tables": [ - "shipments" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-geometric-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-geometric-data.json deleted file mode 100644 index bbae656c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-geometric-data.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO places (name, location) VALUES ('Point Place', point '(10, 20)');", - "outputs": [ - { - "expected": "INSERT INTO places ( name, location ) VALUES ( ?, point ? )", - "statement_metadata": { - "size": 12, - "tables": [ - "places" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-hstore-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-hstore-data.json deleted file mode 100644 index a71f5fba..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-hstore-data.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO user_profiles (profile) VALUES ('\"height\"=>\"2m\", \"weight\"=>\"70kg\"');", - "outputs": [ - { - "expected": "INSERT INTO user_profiles ( profile ) VALUES ( ? 
)", - "statement_metadata": { - "size": 19, - "tables": [ - "user_profiles" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-range-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-range-data.json deleted file mode 100644 index c17593e5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-range-data.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO reservations (during) VALUES ('[2023-01-01 14:00, 2023-01-01 15:00)');", - "outputs": [ - { - "expected": "INSERT INTO reservations ( during ) VALUES ( ? )", - "statement_metadata": { - "size": 18, - "tables": [ - "reservations" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-returning.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-returning.json deleted file mode 100644 index b75c50fd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-returning.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "INSERT INTO users (name, email) VALUES ('Alice Jones', 'alice@example.com') RETURNING id;", - "outputs": [ - { - "expected": "INSERT INTO users ( name, email ) VALUES ( ? ) RETURNING id", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "INSERT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-select.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-select.json deleted file mode 100644 index 5325d6b5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-select.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "INSERT INTO user_logins (user_id, login_time) SELECT id, NOW() FROM users WHERE active;", - "outputs": [ - { - "expected": "INSERT INTO user_logins ( user_id, login_time ) SELECT id, NOW ( ) FROM users WHERE active", - "statement_metadata": { - "size": 28, - "tables": [ - "user_logins", - "users" - ], - "commands": [ - "INSERT", - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-subquery-and-alias.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-subquery-and-alias.json deleted file mode 100644 index dfdbfe79..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/insert/insert-with-subquery-and-alias.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "INSERT INTO user_logins (user_id, login_time) SELECT u.id, NOW() FROM users u WHERE u.active;", - "outputs": [ - { - "expected": "INSERT INTO user_logins ( user_id, login_time ) SELECT u.id, NOW ( ) FROM users u WHERE u.active", - "statement_metadata": { - "size": 28, - "tables": [ - "user_logins", - "users" - ], - "commands": [ - "INSERT", - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/aggregate-functions-count.json 
b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/aggregate-functions-count.json deleted file mode 100644 index 3119a2c0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/aggregate-functions-count.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT COUNT(*) AS total_users FROM users;", - "outputs": [ - { - "expected": "SELECT COUNT ( * ) FROM users", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT COUNT(*) FROM users", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/basic_select_with_alias.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/basic_select_with_alias.json deleted file mode 100644 index 5fd8a336..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/basic_select_with_alias.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "input": "SELECT u.id AS user_id, u.name AS username FROM users u;", - "outputs": [ - { - "expected": "SELECT u.id, u.name FROM users u", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "normalizer_config": { - "keep_sql_alias": true - }, - "expected": "SELECT u.id AS user_id, u.name AS username FROM users u" - }, - { - "normalizer_config": { - "keep_trailing_semicolon": true - }, - "expected": "SELECT u.id, u.name FROM users u;" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/case-statements.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/case-statements.json deleted file mode 100644 index 6beb2c0a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/case-statements.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT name, CASE WHEN age < 18 THEN 'minor' ELSE 'adult' END FROM users;", - "outputs": [ - { - "expected": "SELECT name, CASE WHEN age < ? THEN ? ELSE ? END FROM users", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/common-table-expressions-cte.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/common-table-expressions-cte.json deleted file mode 100644 index 34a1eb9e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/common-table-expressions-cte.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "WITH recursive_subordinates AS (\n SELECT id, manager_id FROM employees WHERE id = 1\n UNION ALL\n SELECT e.id, e.manager_id FROM employees e INNER JOIN recursive_subordinates rs ON rs.id = e.manager_id\n)\nSELECT * FROM recursive_subordinates;", - "outputs": [ - { - "expected": "WITH recursive_subordinates AS ( SELECT id, manager_id FROM employees WHERE id = ? 
UNION ALL SELECT e.id, e.manager_id FROM employees e INNER JOIN recursive_subordinates rs ON rs.id = e.manager_id ) SELECT * FROM recursive_subordinates", - "statement_metadata": { - "size": 41, - "tables": [ - "employees", - "recursive_subordinates" - ], - "commands": [ - "SELECT", - "JOIN" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/cross-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/cross-joins.json deleted file mode 100644 index aeaa4a1f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/cross-joins.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "SELECT * FROM users CROSS JOIN cities;", - "outputs": [ - { - "expected": "SELECT * FROM users CROSS JOIN cities", - "statement_metadata": { - "size": 21, - "tables": [ - "users", - "cities" - ], - "commands": [ - "SELECT", - "JOIN" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/distinct-on-expressions.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/distinct-on-expressions.json deleted file mode 100644 index 25d8e90d..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/distinct-on-expressions.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT DISTINCT ON (location) location, time FROM events ORDER BY location, time DESC;", - "outputs": [ - { - "expected": "SELECT DISTINCT ON ( location ) location, time FROM events ORDER BY location, time DESC", - "statement_metadata": { - "size": 12, - "tables": [ - "events" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/fetch-first-clause.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/fetch-first-clause.json deleted file mode 100644 index a6b7f906..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/fetch-first-clause.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT * FROM users ORDER BY created_at DESC FETCH FIRST 10 ROWS ONLY;", - "outputs": [ - { - "expected": "SELECT * FROM users ORDER BY created_at DESC FETCH FIRST ? ROWS ONLY", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/for-update-of.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/for-update-of.json deleted file mode 100644 index b8299044..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/for-update-of.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "input": "SELECT * FROM users WHERE last_login < NOW() - INTERVAL '1 year' FOR UPDATE OF users;", - "outputs": [ - { - "expected": "SELECT * FROM users WHERE last_login < NOW ( ) - INTERVAL ? 
FOR UPDATE OF users", - "statement_metadata": { - "size": 17, - "tables": [ - "users" - ], - "commands": [ - "SELECT", - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/full-outer-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/full-outer-joins.json deleted file mode 100644 index f3329ff7..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/full-outer-joins.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "SELECT * FROM customers FULL OUTER JOIN orders ON customers.id = orders.customer_id;", - "outputs": [ - { - "expected": "SELECT * FROM customers FULL OUTER JOIN orders ON customers.id = orders.customer_id", - "statement_metadata": { - "size": 25, - "tables": [ - "customers", - "orders" - ], - "commands": [ - "SELECT", - "JOIN" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/group-by-having.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/group-by-having.json deleted file mode 100644 index 58b73c1b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/group-by-having.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT status, COUNT(*) FROM orders GROUP BY status HAVING COUNT(*) > 1;", - "outputs": [ - { - "expected": "SELECT status, COUNT ( * ) FROM orders GROUP BY status HAVING COUNT ( * ) > ?", - "statement_metadata": { - "size": 12, - "tables": [ - "orders" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT status, COUNT(*) FROM orders GROUP BY status HAVING COUNT(*) > ?", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/json-field-access.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/json-field-access.json deleted file mode 100644 index c4cdfdce..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/json-field-access.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT data->'customer'->>'name' AS customer_name FROM orders;", - "outputs": [ - { - "expected": "SELECT data -> ? ->> ? FROM orders", - "statement_metadata": { - "size": 12, - "tables": [ - "orders" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-array-elements-text.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-array-elements-text.json deleted file mode 100644 index 9cce21b8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-array-elements-text.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT jsonb_array_elements_text(data->'tags') AS tag FROM products;", - "outputs": [ - { - "expected": "SELECT jsonb_array_elements_text ( data -> ? ) FROM products", - "statement_metadata": { - "size": 14, - "tables": [ - "products" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT jsonb_array_elements_text(data -> ?) 
FROM products", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-array-length.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-array-length.json deleted file mode 100644 index f0113eea..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-array-length.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT jsonb_array_length(data->'tags') AS num_tags FROM products;", - "outputs": [ - { - "expected": "SELECT jsonb_array_length ( data -> ? ) FROM products", - "statement_metadata": { - "size": 14, - "tables": [ - "products" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT jsonb_array_length(data -> ?) FROM products", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contained-in-path.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contained-in-path.json deleted file mode 100644 index 27ecb7d9..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contained-in-path.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT * FROM events WHERE payload <@ '{\"events\": {\"type\": \"user_event\"}}';", - "outputs": [ - { - "expected": "SELECT * FROM events WHERE payload <@ ?", - "statement_metadata": { - "size": 12, - "tables": [ - "events" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contains-key.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contains-key.json deleted file mode 100644 index 1485e099..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contains-key.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT * FROM events WHERE payload ? 'user_id';", - "outputs": [ - { - "expected": "SELECT * FROM events WHERE payload ? 
?", - "statement_metadata": { - "size": 12, - "tables": [ - "events" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contains-object-at-top-level.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contains-object-at-top-level.json deleted file mode 100644 index 9fae952b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-contains-object-at-top-level.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT * FROM events WHERE payload @> '{\"type\": \"user_event\"}';", - "outputs": [ - { - "expected": "SELECT * FROM events WHERE payload @> ?", - "statement_metadata": { - "size": 12, - "tables": [ - "events" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-array-element.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-array-element.json deleted file mode 100644 index c224a8b5..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-array-element.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT data #- '{tags,0}' AS tags_without_first FROM products;", - "outputs": [ - { - "expected": "SELECT data #- ? FROM products", - "statement_metadata": { - "size": 14, - "tables": [ - "products" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-key.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-key.json deleted file mode 100644 index ce40e284..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-key.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT data - 'temporary_field' AS cleaned_data FROM user_profiles;", - "outputs": [ - { - "expected": "SELECT data - ? FROM user_profiles", - "statement_metadata": { - "size": 19, - "tables": [ - "user_profiles" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-path.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-path.json deleted file mode 100644 index f452fd1c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-delete-path.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "input": "SELECT jsonb_set(data, '{info,address}', NULL) AS removed_address FROM users;", - "outputs": [ - { - "expected": "SELECT jsonb_set ( data, ?, ? 
) FROM users", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT jsonb_set(data, ?, NULL) FROM users", - "obfuscator_config": { - "replace_null": false - }, - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-extract-path-text.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-extract-path-text.json deleted file mode 100644 index 9f7b5297..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-extract-path-text.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT jsonb_extract_path_text(data, 'user', 'name') AS user_name FROM user_profiles;", - "outputs": [ - { - "expected": "SELECT jsonb_extract_path_text ( data, ?, ? ) FROM user_profiles", - "statement_metadata": { - "size": 19, - "tables": [ - "user_profiles" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT jsonb_extract_path_text(data, ?, ?) FROM user_profiles", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-extract-path.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-extract-path.json deleted file mode 100644 index 79e00e1b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-extract-path.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT jsonb_extract_path(data, 'user', 'name') AS user_name FROM user_profiles;", - "outputs": [ - { - "expected": "SELECT jsonb_extract_path ( data, ?, ? ) FROM user_profiles", - "statement_metadata": { - "size": 19, - "tables": [ - "user_profiles" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT jsonb_extract_path(data, ?, ?) 
FROM user_profiles", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-pretty-print.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-pretty-print.json deleted file mode 100644 index 6b373c2e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-pretty-print.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT jsonb_pretty(data) AS pretty_data FROM logs;", - "outputs": [ - { - "expected": "SELECT jsonb_pretty ( data ) FROM logs", - "statement_metadata": { - "size": 10, - "tables": [ - "logs" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT jsonb_pretty(data) FROM logs", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-set-new-value.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-set-new-value.json deleted file mode 100644 index 433bac49..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/jsonb-set-new-value.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT jsonb_set(data, '{user,name}', '\"John Doe\"') AS updated_data FROM user_profiles;", - "outputs": [ - { - "expected": "SELECT jsonb_set ( data, ?, ? ) FROM user_profiles", - "statement_metadata": { - "size": 19, - "tables": [ - "user_profiles" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT jsonb_set(data, ?, ?) FROM user_profiles", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/lateral-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/lateral-joins.json deleted file mode 100644 index feb22135..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/lateral-joins.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "input": "SELECT u.name, json_agg(l) FROM users u, LATERAL (SELECT id, text FROM logs WHERE logs.user_id = u.id) AS l GROUP BY u.name;", - "outputs": [ - { - "expected": "SELECT u.name, json_agg ( l ) FROM users u, LATERAL ( SELECT id, text FROM logs WHERE logs.user_id = u.id ) GROUP BY u.name", - "statement_metadata": { - "size": 15, - "tables": [ - "users", - "logs" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT u.name, json_agg(l) FROM users u, LATERAL (SELECT id, text FROM logs WHERE logs.user_id = u.id) GROUP BY u.name", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/limit-and-offset.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/limit-and-offset.json deleted file mode 100644 index 6bdafb4c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/limit-and-offset.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT * FROM users ORDER BY created_at DESC LIMIT 10 OFFSET 20;", - "outputs": [ - { - "expected": "SELECT * FROM users ORDER BY created_at DESC LIMIT ? 
OFFSET ?", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/natural-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/natural-joins.json deleted file mode 100644 index 437be5db..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/natural-joins.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "SELECT * FROM users NATURAL JOIN user_profiles;", - "outputs": [ - { - "expected": "SELECT * FROM users NATURAL JOIN user_profiles", - "statement_metadata": { - "size": 28, - "tables": [ - "users", - "user_profiles" - ], - "commands": [ - "SELECT", - "JOIN" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/quoted-identifiers-case-sensitive.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/quoted-identifiers-case-sensitive.json deleted file mode 100644 index d9896d15..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/quoted-identifiers-case-sensitive.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "SELECT \"OrderId\", \"OrderDate\", \"CustomerName\" FROM \"Sales\".\"Orders\" WHERE \"OrderStatus\" = 'Shipped'", - "outputs": [ - { - "expected": "SELECT OrderId, OrderDate, CustomerName FROM Sales.Orders WHERE OrderStatus = ?", - "statement_metadata": { - "size": 18, - "tables": ["Sales.Orders"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "normalizer_config": { - "keep_identifier_quotation": true - }, - "expected": "SELECT \"OrderId\", \"OrderDate\", \"CustomerName\" FROM \"Sales\".\"Orders\" WHERE \"OrderStatus\" = ?" - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/quoted-identifiers-special-characters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/quoted-identifiers-special-characters.json deleted file mode 100644 index e7203e6f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/quoted-identifiers-special-characters.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "SELECT * FROM \"Sales\".\"Order-Details\" WHERE \"Product#Name\" LIKE '%Gadget%'", - "outputs": [ - { - "expected": "SELECT * FROM Sales.Order-Details WHERE Product#Name LIKE ?", - "statement_metadata": { - "size": 25, - "tables": ["Sales.Order-Details"], - "commands": ["SELECT"], - "comments": [], - "procedures": [] - } - }, - { - "normalizer_config": { - "keep_identifier_quotation": true - }, - "expected": "SELECT * FROM \"Sales\".\"Order-Details\" WHERE \"Product#Name\" LIKE ?" - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-in-clause-positional-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-in-clause-positional-parameters.json deleted file mode 100644 index 3c02bbca..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-in-clause-positional-parameters.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "SELECT * FROM orders WHERE status IN ($1, $2, $3);", - "outputs": [ - { - "expected": "SELECT * FROM orders WHERE status IN ( ? 
)" - }, - { - "obfuscator_config": { - "replace_positional_parameter": false - }, - "expected": "SELECT * FROM orders WHERE status IN ( $1, $2, $3 )" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-multiple-conditions-positional-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-multiple-conditions-positional-parameters.json deleted file mode 100644 index 08e468bb..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-multiple-conditions-positional-parameters.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "SELECT * FROM products WHERE category = $1 AND price < $2;", - "outputs": [ - { - "expected": "SELECT * FROM products WHERE category = ? AND price < ?" - }, - { - "obfuscator_config": { - "replace_positional_parameter": false - }, - "expected": "SELECT * FROM products WHERE category = $1 AND price < $2" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-with-positional-parameter.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-with-positional-parameter.json deleted file mode 100644 index 18007873..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/select-with-positional-parameter.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "SELECT * FROM users WHERE id = $1;", - "outputs": [ - { - "expected": "SELECT * FROM users WHERE id = ?" - }, - { - "obfuscator_config": { - "replace_positional_parameter": false - }, - "expected": "SELECT * FROM users WHERE id = $1" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/self-joins.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/self-joins.json deleted file mode 100644 index 9a34cbd9..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/self-joins.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT a.name, b.name FROM employees a, employees b WHERE a.manager_id = b.id;", - "outputs": [ - { - "expected": "SELECT a.name, b.name FROM employees a, employees b WHERE a.manager_id = b.id", - "statement_metadata": { - "size": 15, - "tables": [ - "employees" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-from.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-from.json deleted file mode 100644 index af14f66e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-from.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "SELECT user_data.name FROM (SELECT name FROM users WHERE active = true) AS user_data;", - "outputs": [ - { - "expected": "SELECT user_data.name FROM ( SELECT name FROM users WHERE active = ? 
)", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "SELECT user_data.name FROM ( SELECT name FROM users WHERE active = true )", - "obfuscator_config": { - "replace_boolean": false - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-select.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-select.json deleted file mode 100644 index a8f2cea3..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-select.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "input": "SELECT name, (SELECT COUNT(*) FROM orders WHERE orders.user_id = users.id) AS order_count FROM users;", - "outputs": [ - { - "expected": "SELECT name, ( SELECT COUNT ( * ) FROM orders WHERE orders.user_id = users.id ) FROM users", - "statement_metadata": { - "size": 17, - "tables": [ - "orders", - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-where.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-where.json deleted file mode 100644 index a8516ffd..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/subquery-in-where.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "input": "SELECT name FROM users WHERE id IN (SELECT user_id FROM orders WHERE total > 100);", - "outputs": [ - { - "expected": "SELECT name FROM users WHERE id IN ( SELECT user_id FROM orders WHERE total > ? )", - "statement_metadata": { - "size": 17, - "tables": [ - "users", - "orders" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/tablesample-bernoulli.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/tablesample-bernoulli.json deleted file mode 100644 index c7a9c533..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/select/tablesample-bernoulli.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT * FROM users TABLESAMPLE BERNOULLI (10);", - "outputs": [ - { - "expected": "SELECT * FROM users TABLESAMPLE BERNOULLI ( ? )", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-array-append.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-array-append.json deleted file mode 100644 index 907d8621..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-array-append.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "UPDATE users SET favorite_numbers = array_append(favorite_numbers, 42) WHERE id = 5;", - "outputs": [ - { - "expected": "UPDATE users SET favorite_numbers = array_append ( favorite_numbers, ? 
) WHERE id = ?", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-increment-numeric.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-increment-numeric.json deleted file mode 100644 index 7feabe9b..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-increment-numeric.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "UPDATE accounts SET balance = balance + 100.0 WHERE user_id = 4;", - "outputs": [ - { - "expected": "UPDATE accounts SET balance = balance + ? WHERE user_id = ?", - "statement_metadata": { - "size": 14, - "tables": [ - "accounts" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-json-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-json-data.json deleted file mode 100644 index 95ba8927..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-json-data.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "UPDATE events SET data = jsonb_set(data, '{location}', '\"New Location\"') WHERE data->>'event_id' = '123';", - "outputs": [ - { - "expected": "UPDATE events SET data = jsonb_set ( data, ?, ? ) WHERE data ->> ? = ?", - "statement_metadata": { - "size": 12, - "tables": [ - "events" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE events SET data = jsonb_set(data, ?, ?) WHERE data ->> ? = ?", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-multiple-fields-positional-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-multiple-fields-positional-parameters.json deleted file mode 100644 index 04a7d986..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-multiple-fields-positional-parameters.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "input": "DELETE FROM sessions WHERE user_id = $1 AND expired = true;", - "outputs": [ - { - "expected": "DELETE FROM sessions WHERE user_id = ? AND expired = ?" - }, - { - "obfuscator_config": { - "replace_positional_parameter": false, - "replace_boolean": false - }, - "expected": "DELETE FROM sessions WHERE user_id = $1 AND expired = true" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-positional-parameters.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-positional-parameters.json deleted file mode 100644 index 631b13c2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-positional-parameters.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "input": "UPDATE users SET email = $1 WHERE id = $2;", - "outputs": [ - { - "expected": "UPDATE users SET email = ? WHERE id = ?" 
- }, - { - "obfuscator_config": { - "replace_positional_parameter": false - }, - "expected": "UPDATE users SET email = $1 WHERE id = $2" - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-returning.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-returning.json deleted file mode 100644 index e65fc2ee..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-returning.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "input": "UPDATE users SET last_login = NOW() WHERE id = 3 RETURNING last_login;", - "outputs": [ - { - "expected": "UPDATE users SET last_login = NOW ( ) WHERE id = ? RETURNING last_login", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE users SET last_login = NOW() WHERE id = ? RETURNING last_login", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-set-multiple-columns.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-set-multiple-columns.json deleted file mode 100644 index 62d037b2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-set-multiple-columns.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "UPDATE users SET name = 'Jane Updated', email = 'jane.updated@example.com' WHERE id = 2;", - "outputs": [ - { - "expected": "UPDATE users SET name = ?, email = ? WHERE id = ?", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-set-single-column.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-set-single-column.json deleted file mode 100644 index 85972d0c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-set-single-column.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "UPDATE users SET name = 'John Updated' WHERE id = 1;", - "outputs": [ - { - "expected": "UPDATE users SET name = ? WHERE id = ?", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-using-join.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-using-join.json deleted file mode 100644 index 219899f2..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-using-join.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "input": "UPDATE orders SET total = total * 0.9 FROM users WHERE users.id = orders.user_id AND users.status = 'VIP';", - "outputs": [ - { - "expected": "UPDATE orders SET total = total * ? 
FROM users WHERE users.id = orders.user_id AND users.status = ?", - "statement_metadata": { - "size": 17, - "tables": [ - "orders", - "users" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-case.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-case.json deleted file mode 100644 index 82cdf51a..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-case.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "UPDATE users SET status = CASE WHEN last_login < NOW() - INTERVAL '1 year' THEN 'inactive' ELSE status END;", - "outputs": [ - { - "expected": "UPDATE users SET status = CASE WHEN last_login < NOW ( ) - INTERVAL ? THEN ? ELSE status END", - "statement_metadata": { - "size": 11, - "tables": [ - "users" - ], - "commands": [ - "UPDATE" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-cte.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-cte.json deleted file mode 100644 index 60fd0c4f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-cte.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "WITH updated AS (\n UPDATE users SET name = 'CTE Updated' WHERE id = 6 RETURNING *\n)\nSELECT * FROM updated;", - "outputs": [ - { - "expected": "WITH updated AS ( UPDATE users SET name = ? WHERE id = ? RETURNING * ) SELECT * FROM updated", - "statement_metadata": { - "size": 24, - "tables": [ - "users", - "updated" - ], - "commands": [ - "UPDATE", - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-subquery.json b/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-subquery.json deleted file mode 100644 index a49aec33..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/postgresql/update/update-with-subquery.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "input": "UPDATE products SET price = (SELECT MAX(price) FROM products) * 0.9 WHERE name = 'Old Product';", - "outputs": [ - { - "expected": "UPDATE products SET price = ( SELECT MAX ( price ) FROM products ) * ? WHERE name = ?", - "statement_metadata": { - "size": 20, - "tables": [ - "products" - ], - "commands": [ - "UPDATE", - "SELECT" - ], - "comments": [], - "procedures": [] - } - }, - { - "expected": "UPDATE products SET price = (SELECT MAX(price) FROM products) * ? 
WHERE name = ?", - "normalizer_config": { - "remove_space_between_parentheses": true - } - } - ] -} \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/data-clone.json b/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/data-clone.json deleted file mode 100644 index fa93a67e..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/data-clone.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "input": "CREATE TABLE new_table CLONE existing_table;", - "outputs": [ - { - "expected": "CREATE TABLE new_table CLONE existing_table", - "statement_metadata": { - "size": 34, - "tables": [ - "new_table", - "existing_table" - ], - "commands": [ - "CREATE", - "CLONE" - ], - "comments": [], - "procedures": [] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/external-data.json b/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/external-data.json deleted file mode 100644 index 931a73d8..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/external-data.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "CREATE EXTERNAL TABLE ext_sales_data (sale_date DATE, product_id STRING, quantity_sold NUMBER) WITH LOCATION = @my_external_stage/sales_data/ FILE_FORMAT = (TYPE = 'CSV' FIELD_OPTIONALLY_ENCLOSED_BY = '\"');", - "outputs": [ - { - "expected": "CREATE EXTERNAL TABLE ext_sales_data ( sale_date DATE, product_id STRING, quantity_sold NUMBER ) WITH LOCATION = @my_external_stage/sales_data/ FILE_FORMAT = ( TYPE = ? FIELD_OPTIONALLY_ENCLOSED_BY = ? )", - "statement_metadata": { - "size": 20, - "tables": [ - "ext_sales_data" - ], - "commands": [ - "CREATE" - ], - "comments": [], - "procedures": [] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/listagg.json b/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/listagg.json deleted file mode 100644 index 0c106807..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/listagg.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT LISTAGG(product_name, ', ') WITHIN GROUP (ORDER BY product_name) AS product_list FROM products WHERE category_id = 1;", - "outputs": [ - { - "expected": "SELECT LISTAGG ( product_name, ? 
) WITHIN GROUP ( ORDER BY product_name ) FROM products WHERE category_id = ?", - "statement_metadata": { - "size": 14, - "tables": [ - "products" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/materialized-view.json b/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/materialized-view.json deleted file mode 100644 index 3dbbfe4f..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/materialized-view.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "input": "CREATE MATERIALIZED VIEW mv_product_sales AS SELECT product_id, SUM(sales_amount) AS total_sales FROM sales GROUP BY product_id;", - "outputs": [ - { - "expected": "CREATE MATERIALIZED VIEW mv_product_sales AS SELECT product_id, SUM ( sales_amount ) FROM sales GROUP BY product_id", - "statement_metadata": { - "size": 17, - "tables": [ - "sales" - ], - "commands": [ - "CREATE", - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/semi-structured-data-types.json b/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/semi-structured-data-types.json deleted file mode 100644 index 3153b66c..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/semi-structured-data-types.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT metadata:customerID::string AS customer_id FROM orders WHERE metadata:orderDate::date = '2023-01-01';", - "outputs": [ - { - "expected": "SELECT metadata : customerID :: string FROM orders WHERE metadata : orderDate :: date = ?", - "statement_metadata": { - "size": 12, - "tables": [ - "orders" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/stream.json b/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/stream.json deleted file mode 100644 index 6b9aa761..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/stream.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "CREATE STREAM my_stream ON TABLE my_table;", - "outputs": [ - { - "expected": "CREATE STREAM my_stream ON TABLE my_table", - "statement_metadata": { - "size": 14, - "tables": [ - "my_table" - ], - "commands": [ - "CREATE" - ], - "comments": [], - "procedures": [] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/task.json b/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/task.json deleted file mode 100644 index 3f2b13a0..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/task.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "input": "CREATE TASK /* my comment */ my_task WAREHOUSE = my_warehouse SCHEDULE = '15 MINUTE' AS INSERT INTO summary_table SELECT * FROM new_data_view;", - "outputs": [ - { - "expected": "CREATE TASK my_task WAREHOUSE = my_warehouse SCHEDULE = ? 
AS INSERT INTO summary_table SELECT * FROM new_data_view", - "statement_metadata": { - "size": 60, - "tables": [ - "summary_table", - "new_data_view" - ], - "commands": [ - "CREATE", - "INSERT", - "SELECT" - ], - "comments": ["/* my comment */"], - "procedures": [] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/time-travel.json b/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/time-travel.json deleted file mode 100644 index 45415f39..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/time-travel.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "input": "SELECT * FROM my_table AT (TIMESTAMP => '2023-03-15 14:30:00');", - "outputs": [ - { - "expected": "SELECT * FROM my_table AT ( TIMESTAMP => ? )", - "statement_metadata": { - "size": 14, - "tables": [ - "my_table" - ], - "commands": [ - "SELECT" - ], - "comments": [], - "procedures": [] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/warehouse-controls.json b/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/warehouse-controls.json deleted file mode 100644 index 300cd2fe..00000000 --- a/vendor/github.com/DataDog/go-sqllexer/testdata/snowflake/test-cases/warehouse-controls.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "input": "ALTER WAREHOUSE my_warehouse SET WAREHOUSE_SIZE = 'X-LARGE';", - "outputs": [ - { - "expected": "ALTER WAREHOUSE my_warehouse SET WAREHOUSE_SIZE = ?", - "statement_metadata": { - "size": 5, - "tables": [ - ], - "commands": [ - "ALTER" - ], - "comments": [], - "procedures": [] - } - } - ] - } \ No newline at end of file diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der deleted file mode 100644 index 958f3cfa..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der deleted file mode 100644 index d2817641..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der deleted file mode 100644 index d8c3710c..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der deleted file mode 100644 index dae619c0..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der deleted file mode 100644 index ce7f8d31..00000000 Binary files 
a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der deleted file mode 100644 index 04b0d736..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der deleted file mode 100644 index d8c3710c..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem deleted file mode 100644 index 493a5a26..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem deleted file mode 100644 index 55a7f10c..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA 
-VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der deleted file mode 100644 index 04b0d736..00000000 Binary files a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der and /dev/null differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem deleted file mode 100644 index 0f98322c..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem deleted file mode 100644 index 81afea78..00000000 --- 
a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl -jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem deleted file mode 100644 index 493a5a26..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= 
------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem deleted file mode 100644 index 55a7f10c..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem deleted file mode 100644 index 0f98322c..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X 
-POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem deleted file mode 100644 index 81afea78..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl -jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem deleted file mode 100644 index 493a5a26..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 
-HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem deleted file mode 100644 index 55a7f10c..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem deleted file mode 100644 index 0f98322c..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC 
-AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem deleted file mode 100644 index 81afea78..00000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl -jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/testdata/client_cert.pem deleted file mode 100644 index 493a5a26..00000000 --- a/vendor/github.com/google/s2a-go/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN 
-MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/client_key.pem b/vendor/github.com/google/s2a-go/testdata/client_key.pem deleted file mode 100644 index 55a7f10c..00000000 --- a/vendor/github.com/google/s2a-go/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx -2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem deleted file mode 100644 index 60c4cf06..00000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDCDCCAfACFFlYsYCFit01ZpYmfjxpo7/6wMEbMA0GCSqGSIb3DQEBCwUAMEgx 
-CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UECgwGR29vZ2xlMRswGQYD -VQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwHhcNMjMwODIyMTY0NTE4WhcNNDMwODIy -MTY0NTE4WjA5MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExHTAbBgNVBAMMFHRl -c3QtczJhLW10bHMtY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEAqrQQMyxNtmdCB+uY3szgRsfPrKC+TV9Fusnd8PfaCVuGTGcSBKM018nV2TDn -3IYFQ1HgLpGwGwOFDBb3y0o9i2/l2VJySriX1GSNX6nDmVasQlO1wuOLCP7/LRmO -7b6Kise5W0IFhYaptKyWnekn2pS0tAjimqpfn2w0U6FDGtQUqg/trQQmGtTSJHjb -A+OFd0EFC18KGP8Q+jOMaMkJRmpeEiAPyHPDoMhqQNT26RApv9j2Uzo4SuXzHH6T -cAdm1+zG+EXY/UZKX9oDkSbwIJvN+gCmNyORLalJ12gsGYOCjMd8K0mlXBqrmmbO -VHVbUm9062lhE7x59AA8DK4DoQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCPOvtL -dq2hxFHlIy0YUK8jp/DtwJZPwzx1id5FtWwd0CxBS1StIgmkHMxtkJGz1iyQLplI -je+Msd4sTsb5zZi/8kGKehi8Wj4lghp4oP30cpob41OvM68M9RC/wSOVk9igSww+ -l3zof6wKRIswsi5VHrL16ruIVVoDlyFbKr8yk+cp9OPOV8hNNN7ewY9xC8OgnTt8 -YtdaLe6uTplKBLW+j3GtshigRhyfkGJyPFYL4LAeDJCHlC1qmBnkyP0ijMp6vneM -E8TLavnMTMcpihWTWpyKeRkO6HDRsP4AofQAp7VAiAdSOplga+w2qgrVICV+m8MK -BTq2PBvc59T6OFLq ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem deleted file mode 100644 index 9d112d1e..00000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCqtBAzLE22Z0IH -65jezOBGx8+soL5NX0W6yd3w99oJW4ZMZxIEozTXydXZMOfchgVDUeAukbAbA4UM -FvfLSj2Lb+XZUnJKuJfUZI1fqcOZVqxCU7XC44sI/v8tGY7tvoqKx7lbQgWFhqm0 -rJad6SfalLS0COKaql+fbDRToUMa1BSqD+2tBCYa1NIkeNsD44V3QQULXwoY/xD6 -M4xoyQlGal4SIA/Ic8OgyGpA1PbpECm/2PZTOjhK5fMcfpNwB2bX7Mb4Rdj9Rkpf -2gORJvAgm836AKY3I5EtqUnXaCwZg4KMx3wrSaVcGquaZs5UdVtSb3TraWETvHn0 -ADwMrgOhAgMBAAECggEAUccupZ1ZY4OHTi0PkNk8rpwFwTFGyeFVEf2ofkr24RnA -NnUAXEllxOUUNlcoFOz9s3kTeavg3qgqgpa0QmdAIb9LMXg+ec6CKkW7trMpGho8 -LxBUWNfSoU4sKEqAvyPT0lWJVo9D/up6/avbAi6TIbOw+Djzel4ZrlHTpabxc3WT -EilXzn4q54b3MzxCQeQjcnzTieW4Q5semG2kLiXFToHIY2di01P/O8awUjgrD+uW -/Cb6H49MnHm9VPkqea1iwZeMQd6Gh5FrC7RezsBjdB1JBcfsv6PFt2ySInjB8SF+ -XR5Gr3Cc5sh9s0LfprZ9Dq0rlSWmwasPMI1COK6SswKBgQDczgeWd3erQ1JX9LEI -wollawqC9y7uJhEsw1hrPqA3uqZYiLUc7Nmi4laZ12mcGoXNDS3R3XmD58qGmGaU -lxEVTb8KDVWBgw450VoBKzSMQnCP6zn4nZxTYxeqMKjDGf6TRB6TZc843qsG3eRC -k91yxrCQ/0HV6PT48C+lieDzLwKBgQDF6aNKiyrswr457undBnM1H8q/Y6xC5ZlK -UtiQdhuyBnicvz0U8WPxBY/8gha0OXWuSnBqq/z77iFVNv/zT6p9K7kM7nBGd8cB -8KO6FNbyaHWFrhCI5zNzRTH4oha0hfvUOoti09vqavCtWD4L+D/63ba1wNLKPO9o -4gWbCnUCLwKBgQC/vus372csgrnvR761LLrEJ8BpGt7WUJh5luoht7DKtHvgRleB -Vu1oVcV+s2Iy/ZVUDC3OIdZ0hcWKPK5YOxfKuEk+IXYvke+4peTTPwHTC59UW6Fs -FPK8N0FFuhvT0a8RlAY5WiAp8rPysp6WcnHMSl7qi8BQUozp4Sp/RsziYQKBgBXv -r4mzoy5a53rEYGd/L4XT4EUWZyGDEVqLlDVu4eL5lKTLDZokp08vrqXuRVX0iHap -CYzJQ2EpI8iuL/BoBB2bmwcz5n3pCMXORld5t9lmeqA2it6hwbIlGUTVsm6P6zm6 -w3hQwy9YaxTLkxUAjxbfPEEo/jQsTNzzMGve3NlBAoGAbgJExpDyMDnaD2Vi5eyr -63b54BsqeLHqxJmADifyRCj7G1SJMm3zMKkNNOS0vsXgoiId973STFf1XQiojiv8 -Slbxyv5rczcY0n3LOuQYcM5OzsjzpNFZsT2dDnMfNRUF3rx3Geu/FuJ9scF1b00r -fVMrcL3jSf/W1Xh4TgtyoU8= ------END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem deleted file mode 100644 index 44e436f6..00000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDcTCCAlmgAwIBAgIUDUkgI+2FZtuUHyUUi0ZBH7JvN00wDQYJKoZIhvcNAQEL -BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx -GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjEyMTI5MTVaFw00 
-MzA4MjEyMTI5MTVaMEgxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UE -CgwGR29vZ2xlMRswGQYDVQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCbFEQfpvla27bATedrN4BAWsI9GSwSnJLW -QWzXcnAk6cKxQBAhnaKHRxHY8ttLhNTtxQeub894CLzJvHE/0xDhuMzjtCCCZ7i2 -r08tKZ1KcEzPJCPNlxlzAXPA45XU3LRlbGvju/PBPhm6n1hCEKTNI/KETJ5DEaYg -Cf2LcXVsl/zW20MwDZ+e2w/9a2a6n6DdpW1ekOR550hXAUOIxvmXRBeYeGLFvp1n -rQgZBhRaxP03UB+PQD2oMi/4mfsS96uGCXdzzX8qV46O8m132HUbnA/wagIwboEe -d7Bx237dERDyHw5GFnll7orgA0FOtoEufXdeQxWVvTjO0+PVPgsvAgMBAAGjUzBR -MB0GA1UdDgQWBBRyMtg/yutV8hw8vOq0i8x0eBQi7DAfBgNVHSMEGDAWgBRyMtg/ -yutV8hw8vOq0i8x0eBQi7DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA -A4IBAQArN/gdqWMxd5Rvq2eJMTp6I4RepJOT7Go4sMsRsy1caJqqcoS2EvREDZMN -XNEBcyQBB5kYd6TCcZGoLnEtWYXQ4jjEiXG1g7/+rWxyqw0ZYuP7FWzuHg3Uor/x -fApbEKwptP5ywVc+33h4qreGcqXkVCCn+sAcstGgrqubdGZW2T5gazUMyammOOuN -9IWL1PbvXmgEKD+80NUIrk09zanYyrElGdU/zw/kUbZ3Jf6WUBtJGhTzRQ1qZeKa -VnpCbLoG3vObEB8mxDUAlIzwAtfvw4U32BVIZA8xrocz6OOoAnSW1bTlo3EOIo/G -MTV7jmY9TBPtfhRuO/cG650+F+cw ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem deleted file mode 100644 index 68c60613..00000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDbjCCAlagAwIBAgIUbexZ5sZl86Al9dsI2PkOgtqKnkgwDQYJKoZIhvcNAQEL -BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx -GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjIwMDMyMDRaFw00 -MzA4MjIwMDMyMDRaMDkxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEdMBsGA1UE -AwwUdGVzdC1zMmEtbXRscy1zZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw -ggEKAoIBAQCMEzybsGPqfh92GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvh -HkJVnTz9gwNBF3n5nUalqRzactlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5Qn -H76QlqD15oJreh7nSM8R4qj5KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAK -GYtFrB6buDn3Eg3Hsw6z7uj7CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJA -Ob66AjTmMbD16RGYZR4JsPx6CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFe -LoohtC8K7uTyjm/dROx6nHXdDt5TQYXHAgMBAAGjXzBdMBsGA1UdEQQUMBKHEAAA -AAAAAAAAAAAAAAAAAAAwHQYDVR0OBBYEFI3i2+tIk6YYn0MIxC0q93jk1VsUMB8G -A1UdIwQYMBaAFHIy2D/K61XyHDy86rSLzHR4FCLsMA0GCSqGSIb3DQEBCwUAA4IB -AQAUhk+s/lrIAULBbU7E22C8f93AzTxE1mhyHGNlfPPJP3t1Dl+h4X4WkFpkz5gT -EcNXB//Vvoq99HbEK5/92sxsIPexKdJBdcggeHXIgLDkOrEZEb0Nnh9eaAuU2QDn -JW44hMB+aF6mEaJvOHE6DRkQw3hwFYFisFKKHtlQ3TyOhw5CHGzSExPZusdSFNIe -2E7V/0QzGPJEFnEFUNe9N8nTH2P385Paoi+5+Iizlp/nztVXfzv0Cj/i+qGgtDUs -HB+gBU2wxMw8eYyuNzACH70wqGR1Parj8/JoyYhx0S4+Gjzy3JH3CcAMaxyfH/dI -4Wcvfz/isxgmH1UqIt3oc6ad ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem deleted file mode 100644 index b14ad0f7..00000000 --- a/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCMEzybsGPqfh92 -GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvhHkJVnTz9gwNBF3n5nUalqRza -ctlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5QnH76QlqD15oJreh7nSM8R4qj5 -KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAKGYtFrB6buDn3Eg3Hsw6z7uj7 -CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJAOb66AjTmMbD16RGYZR4JsPx6 -CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFeLoohtC8K7uTyjm/dROx6nHXd -Dt5TQYXHAgMBAAECggEAIB5zGdIG/yh/Z1GBqfuOFaxFGx5iJ5BVlLAVH9P9IXFz -yPnVRXEjbinFlSMSbqEBeIX9EpcVMXxHIPIP1RIGEy2IYr3kiqXyT771ahDDZh6/ -Spqz0UQatSPqyvW3H9uE0Uc12dvQm23JSCUmPRX5m7gbhDQBIChXzdzdcU4Yi59V 
-4xmJUvbsAcLw5CBM6kwV+1NGVH9+3mUdhrr9M6B6+sVB/xnaqMGEDfQGiwL8U7EY -QOuc46KXu3Pd/qCdVLn60IrdjSzDJKeC5UZZ+ejNAo+DfbtOovBj3qu3OCUg4XVy -0CDBJ1sTdLvUfF4Gb+crjPsd+qBbXcjVfqdadwhsoQKBgQDBF1Pys/NitW8okJwp -2fiDIASP3TiI+MthWHGyuoZGPvmXQ3H6iuLSm8c/iYI2WPTf53Xff1VcFm1GmQms -GCsYM8Ax94zCeO6Ei1sYYxwcBloEZfOeV37MPA4pjJF4Lt+n5nveNxP+lrsjksJz -wToSEgWPDT1b/xcdt4/5j9J85wKBgQC5tiLx+33mwH4DoaFRmSl0+VuSNYFw6DTQ -SQ+kWqWGH4NENc9wf4Dj2VUZQhpXNhXVSxj+aP2d/ck1NrTJAWqYEXCDtFQOGSa2 -cGPRr+Fhy5NIEaEvR7IXcMBZzx3koYmWVBHricyrXs5FvHrT3N14mGDUG8n24U3f -R799bau0IQKBgQC97UM+lHCPJCWNggiJRgSifcje9VtZp1btjoBvq/bNe74nYkjn -htsrC91Fiu1Qpdlfr50K1IXSyaB886VG6JLjAGxI+dUzqJ38M9LLvxj0G+9JKjsi -AbAQFfZcOg8QZxLJZPVsE0MQhZTXndC06VhEVAOxvPUg214Sde8hK61/+wKBgCRw -O10VhnePT2pw/VEgZ0T/ZFtEylgYB7zSiRIrgwzVBBGPKVueePC8BPmGwdpYz2Hh -cU8B1Ll6QU+Co2hJMdwSl+wPpup5PuJPHRbYlrV0lzpt0x2OyL/WrLcyb2Ab3f40 -EqwPhqwdVwXR3JvTW1U9OMqFhVQ+kuP7lPQMX8NhAoGBAJOgZ7Tokipc4Mi68Olw -SCaOPvjjy4sW2rTRuKyjc1wTAzy7SJ3vXHfGkkN99nTLJFwAyJhWUpnRdwAXGi+x -gyOa95ImsEfRSwEjbluWfF8/P0IU8GR+ZTqT4NnNCOsi8T/xst4Szd1ECJNnnZDe -1ChfPP1AH+/75MJCvu6wQBQv ------END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem deleted file mode 100644 index ad1bad59..00000000 --- a/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDITCCAgkCFBS8mLoytMpMWBwpAtnRaq3eIKnsMA0GCSqGSIb3DQEBCwUAME0x -CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UECgwEVGVzdDEiMCAGA1UE -AwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDAeFw0yMzA4MjIyMTE2MDFaFw00 -MzA4MjIyMTE2MDFaME0xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UE -CgwEVGVzdDEiMCAGA1UEAwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDCCASIw -DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKFFPsYasKZeCFLEXl3RpE/ZOXFe -2lhutIalSpZvCmso+mQGoZ4cHK7At+kDjBi5CrnXkYcw7quQAhHgU0frhWdj7tsW -HUUtq7T8eaGWKBnVD9fl+MjtAl1BmhXwV9qRBbj4EesSKGDSGpKf66dOtzw83JbB -cU7XlPAH1c1zo2GXC1himcZ+SVGHVrOjn4NmeFs8g94/Dke8dWkHwv5YTMVugFK4 -5KxKgSOKkr4ka7PCBzgxCnW4wYSZNRHcxrqkiArO2HAQq0ACr7u+fVDYH//9mP2Z -ADo/zch7O5yhkiNbjXJIRrptDWEuVYMRloYDhT773h7bV/Q0Wo0NQGtasJ8CAwEA -ATANBgkqhkiG9w0BAQsFAAOCAQEAPjbH0TMyegF/MDvglkc0sXr6DqlmTxDCZZmG -lYPZ5Xy062+rxIHghMARbvO4BxepiG37KsP2agvOldm4TtU8nQ8LyswmSIFm4BQ+ -XQWwdsWyYyd8l0d5sXAdaN6AXwy50fvqCepmEqyreMY6dtLzlwo9gVCBFB7QuAPt -Nc14phpEUZt/KPNuY6cUlB7bz3tmnFbwxUrWj1p0KBEYsr7+KEVZxR+z0wtlU7S9 -ZBrmUvx0fq5Ef7JWtHW0w4ofg1op742sdYl+53C26GZ76ts4MmqVz2/94DScgRaU -gT0GLVuuCZXRDVeTXqTb4mditRCfzFPe9cCegYhGhSqBs8yh5A== ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem deleted file mode 100644 index bcf08e4f..00000000 --- a/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQChRT7GGrCmXghS -xF5d0aRP2TlxXtpYbrSGpUqWbwprKPpkBqGeHByuwLfpA4wYuQq515GHMO6rkAIR -4FNH64VnY+7bFh1FLau0/HmhligZ1Q/X5fjI7QJdQZoV8FfakQW4+BHrEihg0hqS -n+unTrc8PNyWwXFO15TwB9XNc6NhlwtYYpnGfklRh1azo5+DZnhbPIPePw5HvHVp -B8L+WEzFboBSuOSsSoEjipK+JGuzwgc4MQp1uMGEmTUR3Ma6pIgKzthwEKtAAq+7 -vn1Q2B///Zj9mQA6P83IezucoZIjW41ySEa6bQ1hLlWDEZaGA4U++94e21f0NFqN -DUBrWrCfAgMBAAECggEAR8e8YwyqJ8KezcgdgIC5M9kp2i4v3UCZFX0or8CI0J2S -pUbWVLuKgLXCpfIwPyjNf15Vpei/spkMcsx4BQDthdFTFSzIpmvni0z9DlD5VFYj -ESOJElV7wepbHPy2/c+izmuL/ic81aturGiFyRgeMq+cN3WuaztFTXkPTrzzsZGF -p/Mx3gqm7Hoc3d2xlv+8L5GjCtEJPlQgZJV+s3ennBjOAd8CC7d9qJetE3Er46pn 
-r5jedV3bQRZYBzmooYNHjbAs26++wYac/jTE0/U6nKS17eWq4BQZUtlMXUw5N81B -7LKn7C03rj2KCn+Nf5uin9ALmoy888LXCDdvL/NZkQKBgQDduv1Heu+tOZuNYUdQ -Hswmd8sVNAAWGZxdxixHMv58zrgbLFXSX6K89X2l5Sj9XON8TH46MuSFdjSwwWw5 -fBrhVEhA5srcqpvVWIBE05yqPpt0s1NQktMWJKELWlG8jOhVKwM5OYDpdxtwehpz -1g70XJz+nF/LTV8RdTK+OWDDpQKBgQC6MhdbGHUz/56dY3gZpE5TXnN2hkNbZCgk -emr6z85VHhQflZbedhCzB9PUnZnCKWOGQHQdxRTtRfd46LVboZqCdYO1ZNQv6toP -ysS7dTpZZFy7CpQaW0Y6/jS65jW6xIDKR1W40vgltZ3sfpG37JaowpzWdw2WuOnw -Bg0rcJAf8wKBgQCqE+p/z97UwuF8eufWnyj9QNo382E1koOMspv4KTdnyLETtthF -vDH6O1wbykG8xmmASLRyM+NyNA+KnXNETNvZh2q8zctBpGRQK8iIAsGjHM7ln0AD -B/x+ea5GJQuZU4RK/+lDFca6TjBwAFkWDVX/PqL18kDQkxKfM4SuwRhmOQKBgDGh -eoJIsa0LnP787Z2AI3Srf4F/ZmLs/ppCm1OBotEjdF+64v0nYWonUvqgi8SqfaHi -elEZIGvis4ViGj1zhRjzNAlc+AZRxpBhDzGcnNIJI4Kj3jhsTfsZmXqcNIQ1LtM8 -Uogyi/yZPaA1WKg7Aym2vlGYaGHdplXZdxc2KOSrAoGABRkD9l2OVcwK7RyNgFxo -mjxx0tfUdDBhHIi2igih1FiHpeP9E+4/kE/K7PnU9DoDrL1jW1MTpXaYV4seOylk -k9z/9QfcRa9ePD2N4FqbHWSYp5n3aLoIcGq/9jyjTwayZbbIhWO+vNuHE9wIvecZ -8x3gNkxJRb4NaLIoNzAhCoo= ------END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/testdata/server_cert.pem deleted file mode 100644 index 0f98322c..00000000 --- a/vendor/github.com/google/s2a-go/testdata/server_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT -fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ -qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE -xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es -Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 -Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM -ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR -e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X -POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl -AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg -odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ -PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN -Dhm6uZM= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/server_key.pem b/vendor/github.com/google/s2a-go/testdata/server_key.pem deleted file mode 100644 index 81afea78..00000000 --- a/vendor/github.com/google/s2a-go/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs -8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO -QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk -XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA -Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc -gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf -LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl 
-jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 -4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q -Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P -nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 -drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE -duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 -L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG -06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm -eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD -uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 -lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL -a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb -hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ -7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j -r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 -eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD -B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz -7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go b/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go new file mode 100644 index 00000000..d4d6019f --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go @@ -0,0 +1,63 @@ +// Copyright 2024, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//go:build go1.23 + +// Package iterator contains helper for working with iterators. It is meant for +// internal use only by the Go Client Libraries. +package iterator + +import ( + "iter" + + otherit "google.golang.org/api/iterator" +) + +// RangeAdapter transforms client iterator type into a [iter.Seq2] that can +// be used with Go's range expressions. +// +// This is for internal use only. 
+func RangeAdapter[T any](next func() (T, error)) iter.Seq2[T, error] { + var err error + return func(yield func(T, error) bool) { + for { + if err != nil { + return + } + var resp T + resp, err = next() + if err == otherit.Done { + return + } + if !yield(resp, err) { + return + } + } + } +} diff --git a/vendor/github.com/philhofer/fwd/README.md b/vendor/github.com/philhofer/fwd/README.md index 62bd5c6d..4e995234 100644 --- a/vendor/github.com/philhofer/fwd/README.md +++ b/vendor/github.com/philhofer/fwd/README.md @@ -45,6 +45,15 @@ the write position by the length of the returned slice. This allows users to write directly to the end of the buffer. +## Portability + +Because it uses the unsafe package, there are theoretically +no promises about forward or backward portability. + +To stay compatible with tinygo 0.32, unsafestr() has been updated +to use unsafe.Slice() as suggested by +https://tinygo.org/docs/guides/compatibility, which also required +bumping go.mod to require at least go 1.20. ## Index diff --git a/vendor/github.com/philhofer/fwd/writer_tinygo.go b/vendor/github.com/philhofer/fwd/writer_tinygo.go index b060faf7..c98cd57f 100644 --- a/vendor/github.com/philhofer/fwd/writer_tinygo.go +++ b/vendor/github.com/philhofer/fwd/writer_tinygo.go @@ -4,16 +4,10 @@ package fwd import ( - "reflect" "unsafe" ) // unsafe cast string as []byte func unsafestr(b string) []byte { - l := uintptr(len(b)) - return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Len: l, - Cap: l, - Data: (*reflect.StringHeader)(unsafe.Pointer(&b)).Data, - })) + return unsafe.Slice(unsafe.StringData(b), len(b)) } diff --git a/vendor/github.com/tinylib/msgp/msgp/defs.go b/vendor/github.com/tinylib/msgp/msgp/defs.go index e265aa4f..47a8c183 100644 --- a/vendor/github.com/tinylib/msgp/msgp/defs.go +++ b/vendor/github.com/tinylib/msgp/msgp/defs.go @@ -32,6 +32,10 @@ const ( last5 = 0x1f first3 = 0xe0 last7 = 0x7f + + // recursionLimit is the limit of recursive calls. + // This limits the call depth of dynamic code, like Skip and interface conversions. + recursionLimit = 100000 ) func isfixint(b byte) bool { diff --git a/vendor/github.com/tinylib/msgp/msgp/errors.go b/vendor/github.com/tinylib/msgp/msgp/errors.go index 4f19359a..984cca32 100644 --- a/vendor/github.com/tinylib/msgp/msgp/errors.go +++ b/vendor/github.com/tinylib/msgp/msgp/errors.go @@ -13,6 +13,10 @@ var ( // contain the contents of the message ErrShortBytes error = errShort{} + // ErrRecursion is returned when the maximum recursion limit is reached for an operation. + // This should only realistically be seen on adversarial data trying to exhaust the stack. 
+ ErrRecursion error = errRecursion{} + // this error is only returned // if we reach code that should // be unreachable @@ -134,6 +138,11 @@ func (f errFatal) Resumable() bool { return false } func (f errFatal) withContext(ctx string) error { f.ctx = addCtx(f.ctx, ctx); return f } +type errRecursion struct{} + +func (e errRecursion) Error() string { return "msgp: recursion limit reached" } +func (e errRecursion) Resumable() bool { return false } + // ArrayError is an error returned // when decoding a fix-sized array // of the wrong size diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go index 0f2c3752..a6d91ede 100644 --- a/vendor/github.com/tinylib/msgp/msgp/file.go +++ b/vendor/github.com/tinylib/msgp/msgp/file.go @@ -1,5 +1,5 @@ -//go:build (linux || darwin || dragonfly || freebsd || netbsd || openbsd) && !appengine && !tinygo -// +build linux darwin dragonfly freebsd netbsd openbsd +//go:build (linux || darwin || dragonfly || freebsd || illumos || netbsd || openbsd) && !appengine && !tinygo +// +build linux darwin dragonfly freebsd illumos netbsd openbsd // +build !appengine // +build !tinygo diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go index 2bbb3ad1..dac0dba3 100644 --- a/vendor/github.com/tinylib/msgp/msgp/file_port.go +++ b/vendor/github.com/tinylib/msgp/msgp/file_port.go @@ -4,7 +4,7 @@ package msgp import ( - "io/ioutil" + "io" "os" ) @@ -21,7 +21,7 @@ func ReadFile(dst Unmarshaler, file *os.File) error { return u.DecodeMsg(NewReader(file)) } - data, err := ioutil.ReadAll(file) + data, err := io.ReadAll(file) if err != nil { return err } diff --git a/vendor/github.com/tinylib/msgp/msgp/integers.go b/vendor/github.com/tinylib/msgp/msgp/integers.go index f817d775..d07a5fba 100644 --- a/vendor/github.com/tinylib/msgp/msgp/integers.go +++ b/vendor/github.com/tinylib/msgp/msgp/integers.go @@ -1,5 +1,7 @@ package msgp +import "encoding/binary" + /* ---------------------------------- integer encoding utilities (inline-able) @@ -11,6 +13,8 @@ package msgp ---------------------------------- */ func putMint64(b []byte, i int64) { + _ = b[8] // bounds check elimination + b[0] = mint64 b[1] = byte(i >> 56) b[2] = byte(i >> 48) @@ -23,6 +27,8 @@ func putMint64(b []byte, i int64) { } func getMint64(b []byte) int64 { + _ = b[8] // bounds check elimination + return (int64(b[1]) << 56) | (int64(b[2]) << 48) | (int64(b[3]) << 40) | (int64(b[4]) << 32) | (int64(b[5]) << 24) | (int64(b[6]) << 16) | @@ -30,6 +36,8 @@ func getMint64(b []byte) int64 { } func putMint32(b []byte, i int32) { + _ = b[4] // bounds check elimination + b[0] = mint32 b[1] = byte(i >> 24) b[2] = byte(i >> 16) @@ -38,20 +46,28 @@ func putMint32(b []byte, i int32) { } func getMint32(b []byte) int32 { + _ = b[4] // bounds check elimination + return (int32(b[1]) << 24) | (int32(b[2]) << 16) | (int32(b[3]) << 8) | (int32(b[4])) } func putMint16(b []byte, i int16) { + _ = b[2] // bounds check elimination + b[0] = mint16 b[1] = byte(i >> 8) b[2] = byte(i) } func getMint16(b []byte) (i int16) { + _ = b[2] // bounds check elimination + return (int16(b[1]) << 8) | int16(b[2]) } func putMint8(b []byte, i int8) { + _ = b[1] // bounds check elimination + b[0] = mint8 b[1] = byte(i) } @@ -61,6 +77,8 @@ func getMint8(b []byte) (i int8) { } func putMuint64(b []byte, u uint64) { + _ = b[8] // bounds check elimination + b[0] = muint64 b[1] = byte(u >> 56) b[2] = byte(u >> 48) @@ -73,6 +91,8 @@ func putMuint64(b []byte, u 
uint64) { } func getMuint64(b []byte) uint64 { + _ = b[8] // bounds check elimination + return (uint64(b[1]) << 56) | (uint64(b[2]) << 48) | (uint64(b[3]) << 40) | (uint64(b[4]) << 32) | (uint64(b[5]) << 24) | (uint64(b[6]) << 16) | @@ -80,6 +100,8 @@ func getMuint64(b []byte) uint64 { } func putMuint32(b []byte, u uint32) { + _ = b[4] // bounds check elimination + b[0] = muint32 b[1] = byte(u >> 24) b[2] = byte(u >> 16) @@ -88,20 +110,28 @@ func putMuint32(b []byte, u uint32) { } func getMuint32(b []byte) uint32 { + _ = b[4] // bounds check elimination + return (uint32(b[1]) << 24) | (uint32(b[2]) << 16) | (uint32(b[3]) << 8) | (uint32(b[4])) } func putMuint16(b []byte, u uint16) { + _ = b[2] // bounds check elimination + b[0] = muint16 b[1] = byte(u >> 8) b[2] = byte(u) } func getMuint16(b []byte) uint16 { + _ = b[2] // bounds check elimination + return (uint16(b[1]) << 8) | uint16(b[2]) } func putMuint8(b []byte, u uint8) { + _ = b[1] // bounds check elimination + b[0] = muint8 b[1] = byte(u) } @@ -111,28 +141,15 @@ func getMuint8(b []byte) uint8 { } func getUnix(b []byte) (sec int64, nsec int32) { - sec = (int64(b[0]) << 56) | (int64(b[1]) << 48) | - (int64(b[2]) << 40) | (int64(b[3]) << 32) | - (int64(b[4]) << 24) | (int64(b[5]) << 16) | - (int64(b[6]) << 8) | (int64(b[7])) + sec = int64(binary.BigEndian.Uint64(b)) + nsec = int32(binary.BigEndian.Uint32(b[8:])) - nsec = (int32(b[8]) << 24) | (int32(b[9]) << 16) | (int32(b[10]) << 8) | (int32(b[11])) return } func putUnix(b []byte, sec int64, nsec int32) { - b[0] = byte(sec >> 56) - b[1] = byte(sec >> 48) - b[2] = byte(sec >> 40) - b[3] = byte(sec >> 32) - b[4] = byte(sec >> 24) - b[5] = byte(sec >> 16) - b[6] = byte(sec >> 8) - b[7] = byte(sec) - b[8] = byte(nsec >> 24) - b[9] = byte(nsec >> 16) - b[10] = byte(nsec >> 8) - b[11] = byte(nsec) + binary.BigEndian.PutUint64(b, uint64(sec)) + binary.BigEndian.PutUint32(b[8:], uint32(nsec)) } /* ----------------------------- @@ -141,12 +158,16 @@ func putUnix(b []byte, sec int64, nsec int32) { // write prefix and uint8 func prefixu8(b []byte, pre byte, sz uint8) { + _ = b[1] // bounds check elimination + b[0] = pre b[1] = byte(sz) } // write prefix and big-endian uint16 func prefixu16(b []byte, pre byte, sz uint16) { + _ = b[2] // bounds check elimination + b[0] = pre b[1] = byte(sz >> 8) b[2] = byte(sz) @@ -154,6 +175,8 @@ func prefixu16(b []byte, pre byte, sz uint16) { // write prefix and big-endian uint32 func prefixu32(b []byte, pre byte, sz uint32) { + _ = b[4] // bounds check elimination + b[0] = pre b[1] = byte(sz >> 24) b[2] = byte(sz >> 16) @@ -162,6 +185,8 @@ func prefixu32(b []byte, pre byte, sz uint32) { } func prefixu64(b []byte, pre byte, sz uint64) { + _ = b[8] // bounds check elimination + b[0] = pre b[1] = byte(sz >> 56) b[2] = byte(sz >> 48) diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go index 0e11e603..fe570373 100644 --- a/vendor/github.com/tinylib/msgp/msgp/json.go +++ b/vendor/github.com/tinylib/msgp/msgp/json.go @@ -109,6 +109,13 @@ func rwMap(dst jsWriter, src *Reader) (n int, err error) { return dst.WriteString("{}") } + // This is potentially a recursive call. + if done, err := src.recursiveCall(); err != nil { + return 0, err + } else { + defer done() + } + err = dst.WriteByte('{') if err != nil { return @@ -162,6 +169,13 @@ func rwArray(dst jsWriter, src *Reader) (n int, err error) { if err != nil { return } + // This is potentially a recursive call. 
+ if done, err := src.recursiveCall(); err != nil { + return 0, err + } else { + defer done() + } + var sz uint32 var nn int sz, err = src.ReadArrayHeader() diff --git a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go index e6162d0a..88ec6045 100644 --- a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go +++ b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go @@ -9,12 +9,12 @@ import ( "time" ) -var unfuns [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error) +var unfuns [_maxtype]func(jsWriter, []byte, []byte, int) ([]byte, []byte, error) func init() { // NOTE(pmh): this is best expressed as a jump table, // but gc doesn't do that yet. revisit post-go1.5. - unfuns = [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error){ + unfuns = [_maxtype]func(jsWriter, []byte, []byte, int) ([]byte, []byte, error){ StrType: rwStringBytes, BinType: rwBytesBytes, MapType: rwMapBytes, @@ -51,7 +51,7 @@ func UnmarshalAsJSON(w io.Writer, msg []byte) ([]byte, error) { dst = bufio.NewWriterSize(w, 512) } for len(msg) > 0 && err == nil { - msg, scratch, err = writeNext(dst, msg, scratch) + msg, scratch, err = writeNext(dst, msg, scratch, 0) } if !cast && err == nil { err = dst.(*bufio.Writer).Flush() @@ -59,7 +59,7 @@ func UnmarshalAsJSON(w io.Writer, msg []byte) ([]byte, error) { return msg, err } -func writeNext(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { +func writeNext(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { if len(msg) < 1 { return msg, scratch, ErrShortBytes } @@ -76,10 +76,13 @@ func writeNext(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { t = TimeType } } - return unfuns[t](w, msg, scratch) + return unfuns[t](w, msg, scratch, depth) } -func rwArrayBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { +func rwArrayBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + if depth >= recursionLimit { + return msg, scratch, ErrRecursion + } sz, msg, err := ReadArrayHeaderBytes(msg) if err != nil { return msg, scratch, err @@ -95,7 +98,7 @@ func rwArrayBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error return msg, scratch, err } } - msg, scratch, err = writeNext(w, msg, scratch) + msg, scratch, err = writeNext(w, msg, scratch, depth+1) if err != nil { return msg, scratch, err } @@ -104,7 +107,10 @@ func rwArrayBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error return msg, scratch, err } -func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { +func rwMapBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + if depth >= recursionLimit { + return msg, scratch, ErrRecursion + } sz, msg, err := ReadMapHeaderBytes(msg) if err != nil { return msg, scratch, err @@ -120,7 +126,7 @@ func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) return msg, scratch, err } } - msg, scratch, err = rwMapKeyBytes(w, msg, scratch) + msg, scratch, err = rwMapKeyBytes(w, msg, scratch, depth) if err != nil { return msg, scratch, err } @@ -128,7 +134,7 @@ func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) if err != nil { return msg, scratch, err } - msg, scratch, err = writeNext(w, msg, scratch) + msg, scratch, err = writeNext(w, msg, scratch, depth+1) if err != nil { return msg, scratch, err } @@ -137,17 +143,17 @@ func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) 
return msg, scratch, err } -func rwMapKeyBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { - msg, scratch, err := rwStringBytes(w, msg, scratch) +func rwMapKeyBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { + msg, scratch, err := rwStringBytes(w, msg, scratch, depth) if err != nil { if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { - return rwBytesBytes(w, msg, scratch) + return rwBytesBytes(w, msg, scratch, depth) } } return msg, scratch, err } -func rwStringBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { +func rwStringBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { str, msg, err := ReadStringZC(msg) if err != nil { return msg, scratch, err @@ -156,7 +162,7 @@ func rwStringBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, erro return msg, scratch, err } -func rwBytesBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { +func rwBytesBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { bts, msg, err := ReadBytesZC(msg) if err != nil { return msg, scratch, err @@ -180,7 +186,7 @@ func rwBytesBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error return msg, scratch, err } -func rwNullBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { +func rwNullBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { msg, err := ReadNilBytes(msg) if err != nil { return msg, scratch, err @@ -189,7 +195,7 @@ func rwNullBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) return msg, scratch, err } -func rwBoolBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { +func rwBoolBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { b, msg, err := ReadBoolBytes(msg) if err != nil { return msg, scratch, err @@ -202,7 +208,7 @@ func rwBoolBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) return msg, scratch, err } -func rwIntBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { +func rwIntBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { i, msg, err := ReadInt64Bytes(msg) if err != nil { return msg, scratch, err @@ -212,7 +218,7 @@ func rwIntBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) return msg, scratch, err } -func rwUintBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { +func rwUintBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { u, msg, err := ReadUint64Bytes(msg) if err != nil { return msg, scratch, err @@ -222,7 +228,7 @@ func rwUintBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) return msg, scratch, err } -func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { +func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { var f float32 var err error f, msg, err = ReadFloat32Bytes(msg) @@ -234,7 +240,7 @@ func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, err return msg, scratch, err } -func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { +func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { var f float64 var err error f, msg, err = ReadFloat64Bytes(msg) @@ -246,7 +252,7 @@ func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, err return 
msg, scratch, err } -func rwTimeBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { +func rwTimeBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { var t time.Time var err error t, msg, err = ReadTimeBytes(msg) @@ -261,7 +267,7 @@ func rwTimeBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) return msg, scratch, err } -func rwExtensionBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { +func rwExtensionBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []byte, error) { var err error var et int8 et, err = peekExtension(msg) diff --git a/vendor/github.com/tinylib/msgp/msgp/purego.go b/vendor/github.com/tinylib/msgp/msgp/purego.go index 2cd35c3e..fe872341 100644 --- a/vendor/github.com/tinylib/msgp/msgp/purego.go +++ b/vendor/github.com/tinylib/msgp/msgp/purego.go @@ -1,5 +1,5 @@ -//go:build purego || appengine -// +build purego appengine +//go:build (purego && !unsafe) || appengine +// +build purego,!unsafe appengine package msgp diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go index e6d72f17..0215a5b9 100644 --- a/vendor/github.com/tinylib/msgp/msgp/read.go +++ b/vendor/github.com/tinylib/msgp/msgp/read.go @@ -143,8 +143,9 @@ type Reader struct { // is stateless; all the // buffering is done // within R. - R *fwd.Reader - scratch []byte + R *fwd.Reader + scratch []byte + recursionDepth int } // Read implements `io.Reader` @@ -190,6 +191,11 @@ func (m *Reader) CopyNext(w io.Writer) (int64, error) { return n, io.ErrShortWrite } + if done, err := m.recursiveCall(); err != nil { + return n, err + } else { + defer done() + } // for maps and slices, read elements for x := uintptr(0); x < o; x++ { var n2 int64 @@ -202,6 +208,18 @@ func (m *Reader) CopyNext(w io.Writer) (int64, error) { return n, nil } +// recursiveCall will increment the recursion depth and return an error if it is exceeded. +// If a nil error is returned, done must be called to decrement the counter. +func (m *Reader) recursiveCall() (done func(), err error) { + if m.recursionDepth >= recursionLimit { + return func() {}, ErrRecursion + } + m.recursionDepth++ + return func() { + m.recursionDepth-- + }, nil +} + // ReadFull implements `io.ReadFull` func (m *Reader) ReadFull(p []byte) (int, error) { return m.R.ReadFull(p) @@ -332,7 +350,12 @@ func (m *Reader) Skip() error { return err } - // for maps and slices, skip elements + // for maps and slices, skip elements with recursive call + if done, err := m.recursiveCall(); err != nil { + return err + } else { + defer done() + } for x := uintptr(0); x < o; x++ { err = m.Skip() if err != nil { @@ -1266,7 +1289,7 @@ func (m *Reader) ReadTime() (t time.Time, err error) { return } -// ReadIntf reads out the next object as a raw interface{}. +// ReadIntf reads out the next object as a raw interface{}/any. // Arrays are decoded as []interface{}, and maps are decoded // as map[string]interface{}. Integers are decoded as int64 // and unsigned integers are decoded as uint64. @@ -1333,6 +1356,13 @@ func (m *Reader) ReadIntf() (i interface{}, err error) { return case MapType: + // This can call back here, so treat as recursive call. 
+ if done, err := m.recursiveCall(); err != nil { + return nil, err + } else { + defer done() + } + mp := make(map[string]interface{}) err = m.ReadMapStrIntf(mp) i = mp @@ -1358,6 +1388,13 @@ func (m *Reader) ReadIntf() (i interface{}, err error) { if err != nil { return } + + if done, err := m.recursiveCall(); err != nil { + return nil, err + } else { + defer done() + } + out := make([]interface{}, int(sz)) for j := range out { out[j], err = m.ReadIntf() diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go index a204ac4b..948faf1d 100644 --- a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go +++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go @@ -1095,6 +1095,15 @@ func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) { // out of 'b' and returns the map and remaining bytes. // If 'old' is non-nil, the values will be read into that map. func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]interface{}, o []byte, err error) { + return readMapStrIntfBytesDepth(b, old, 0) +} + +func readMapStrIntfBytesDepth(b []byte, old map[string]interface{}, depth int) (v map[string]interface{}, o []byte, err error) { + if depth >= recursionLimit { + err = ErrRecursion + return + } + var sz uint32 o = b sz, o, err = ReadMapHeaderBytes(o) @@ -1123,7 +1132,7 @@ func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]int return } var val interface{} - val, o, err = ReadIntfBytes(o) + val, o, err = readIntfBytesDepth(o, depth) if err != nil { return } @@ -1136,6 +1145,14 @@ func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]int // the next object out of 'b' as a raw interface{} and // return the remaining bytes. func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) { + return readIntfBytesDepth(b, 0) +} + +func readIntfBytesDepth(b []byte, depth int) (i interface{}, o []byte, err error) { + if depth >= recursionLimit { + err = ErrRecursion + return + } if len(b) < 1 { err = ErrShortBytes return @@ -1145,7 +1162,7 @@ func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) { switch k { case MapType: - i, o, err = ReadMapStrIntfBytes(b, nil) + i, o, err = readMapStrIntfBytesDepth(b, nil, depth+1) return case ArrayType: @@ -1157,7 +1174,7 @@ func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) { j := make([]interface{}, int(sz)) i = j for d := range j { - j[d], o, err = ReadIntfBytes(o) + j[d], o, err = readIntfBytesDepth(o, depth+1) if err != nil { return } @@ -1245,7 +1262,15 @@ func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) { // // - [ErrShortBytes] (not enough bytes in b) // - [InvalidPrefixError] (bad encoding) +// - [ErrRecursion] (too deeply nested data) func Skip(b []byte) ([]byte, error) { + return skipDepth(b, 0) +} + +func skipDepth(b []byte, depth int) ([]byte, error) { + if depth >= recursionLimit { + return b, ErrRecursion + } sz, asz, err := getSize(b) if err != nil { return b, err @@ -1255,7 +1280,7 @@ func Skip(b []byte) ([]byte, error) { } b = b[sz:] for asz > 0 { - b, err = Skip(b) + b, err = skipDepth(b, depth+1) if err != nil { return b, err } diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go index 06e8d843..7d36bfb1 100644 --- a/vendor/github.com/tinylib/msgp/msgp/unsafe.go +++ b/vendor/github.com/tinylib/msgp/msgp/unsafe.go @@ -1,5 +1,5 @@ -//go:build !purego && !appengine -// +build !purego,!appengine +//go:build (!purego && !appengine) 
|| (!appengine && purego && unsafe) +// +build !purego,!appengine !appengine,purego,unsafe package msgp diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go index 676a6efe..12606cc2 100644 --- a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go +++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go @@ -1,6 +1,7 @@ package msgp import ( + "errors" "math" "reflect" "time" @@ -342,10 +343,10 @@ func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) { // provided []byte. 'i' must be one of the following: // - 'nil' // - A bool, float, string, []byte, int, uint, or complex -// - A map[string]interface{} or map[string]string +// - A map[string]T where T is another supported type // - A []T, where T is another supported type // - A *T, where T is another supported type -// - A type that satisfieds the msgp.Marshaler interface +// - A type that satisfies the msgp.Marshaler interface // - A type that satisfies the msgp.Extension interface func AppendIntf(b []byte, i interface{}) ([]byte, error) { if i == nil { @@ -395,6 +396,8 @@ func AppendIntf(b []byte, i interface{}) ([]byte, error) { return AppendUint64(b, i), nil case time.Time: return AppendTime(b, i), nil + case time.Duration: + return AppendDuration(b, i), nil case map[string]interface{}: return AppendMapStrIntf(b, i) case map[string]string: @@ -414,6 +417,21 @@ func AppendIntf(b []byte, i interface{}) ([]byte, error) { var err error v := reflect.ValueOf(i) switch v.Kind() { + case reflect.Map: + if v.Type().Key().Kind() != reflect.String { + return b, errors.New("msgp: map keys must be strings") + } + ks := v.MapKeys() + b = AppendMapHeader(b, uint32(len(ks))) + for _, key := range ks { + val := v.MapIndex(key) + b = AppendString(b, key.String()) + b, err = AppendIntf(b, val.Interface()) + if err != nil { + return nil, err + } + } + return b, nil case reflect.Array, reflect.Slice: l := v.Len() b = AppendArrayHeader(b, uint32(l)) diff --git a/vendor/github.com/xanzy/go-gitlab/audit_events.go b/vendor/github.com/xanzy/go-gitlab/audit_events.go index de312e56..f51415c7 100644 --- a/vendor/github.com/xanzy/go-gitlab/audit_events.go +++ b/vendor/github.com/xanzy/go-gitlab/audit_events.go @@ -14,6 +14,7 @@ type AuditEvent struct { AuthorID int `json:"author_id"` EntityID int `json:"entity_id"` EntityType string `json:"entity_type"` + EventName string `json:"event_name"` Details AuditEventDetails `json:"details"` CreatedAt *time.Time `json:"created_at"` EventType string `json:"event_type"` @@ -42,6 +43,7 @@ type AuditEventDetails struct { IPAddress string `json:"ip_address"` EntityPath string `json:"entity_path"` FailedLogin string `json:"failed_login"` + EventName string `json:"event_name"` } // AuditEventsService handles communication with the project/group/instance diff --git a/vendor/github.com/xanzy/go-gitlab/environments.go b/vendor/github.com/xanzy/go-gitlab/environments.go index b6d902f8..7ec0fa63 100644 --- a/vendor/github.com/xanzy/go-gitlab/environments.go +++ b/vendor/github.com/xanzy/go-gitlab/environments.go @@ -34,16 +34,20 @@ type EnvironmentsService struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/environments.html type Environment struct { - ID int `json:"id"` - Name string `json:"name"` - Slug string `json:"slug"` - State string `json:"state"` - Tier string `json:"tier"` - ExternalURL string `json:"external_url"` - Project *Project `json:"project"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time 
`json:"updated_at"` - LastDeployment *Deployment `json:"last_deployment"` + ID int `json:"id"` + Name string `json:"name"` + Slug string `json:"slug"` + Description string `json:"description"` + State string `json:"state"` + Tier string `json:"tier"` + ExternalURL string `json:"external_url"` + Project *Project `json:"project"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + LastDeployment *Deployment `json:"last_deployment"` + ClusterAgent *Agent `json:"cluster_agent"` + KubernetesNamespace string `json:"kubernetes_namespace"` + FluxResourcePath string `json:"flux_resource_path"` } func (env Environment) String() string { @@ -117,9 +121,13 @@ func (s *EnvironmentsService) GetEnvironment(pid interface{}, environment int, o // GitLab API docs: // https://docs.gitlab.com/ee/api/environments.html#create-a-new-environment type CreateEnvironmentOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` - Tier *string `url:"tier,omitempty" json:"tier,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` + Tier *string `url:"tier,omitempty" json:"tier,omitempty"` + ClusterAgentID *int `url:"cluster_agent_id,omitempty" json:"cluster_agent_id,omitempty"` + KubernetesNamespace *string `url:"kubernetes_namespace,omitempty" json:"kubernetes_namespace,omitempty"` + FluxResourcePath *string `url:"flux_resource_path,omitempty" json:"flux_resource_path,omitempty"` } // CreateEnvironment adds an environment to a project. This is an idempotent @@ -155,9 +163,13 @@ func (s *EnvironmentsService) CreateEnvironment(pid interface{}, opt *CreateEnvi // GitLab API docs: // https://docs.gitlab.com/ee/api/environments.html#update-an-existing-environment type EditEnvironmentOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` - Tier *string `url:"tier,omitempty" json:"tier,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` + Tier *string `url:"tier,omitempty" json:"tier,omitempty"` + ClusterAgentID *int `url:"cluster_agent_id,omitempty" json:"cluster_agent_id,omitempty"` + KubernetesNamespace *string `url:"kubernetes_namespace,omitempty" json:"kubernetes_namespace,omitempty"` + FluxResourcePath *string `url:"flux_resource_path,omitempty" json:"flux_resource_path,omitempty"` } // EditEnvironment updates a project team environment to a specified access level.. 
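For reference, a minimal sketch of how the new CreateEnvironmentOptions fields introduced by this go-gitlab bump could be used. It is not part of the patch itself; the token, project ID, and cluster agent ID are placeholder values, and it assumes the gitlab.Ptr pointer helper available in this release.

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// Placeholder token; replace with a real personal access token.
	client, err := gitlab.NewClient("glpat-example-token")
	if err != nil {
		log.Fatal(err)
	}

	// Description, ClusterAgentID and KubernetesNamespace correspond to the
	// fields added to CreateEnvironmentOptions in the environments.go change
	// above (FluxResourcePath is available as well). Project ID 1234 and
	// agent ID 42 are placeholders.
	env, _, err := client.Environments.CreateEnvironment(1234, &gitlab.CreateEnvironmentOptions{
		Name:                gitlab.Ptr("production"),
		Description:         gitlab.Ptr("Primary production environment"),
		Tier:                gitlab.Ptr("production"),
		ClusterAgentID:      gitlab.Ptr(42),
		KubernetesNamespace: gitlab.Ptr("prod"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created environment %d (%s)", env.ID, env.Slug)
}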
diff --git a/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go b/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go index c4a8e4ae..7266836a 100644 --- a/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go +++ b/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go @@ -721,6 +721,10 @@ type MergeEvent struct { Previous int `json:"previous"` Current int `json:"current"` } `json:"last_edited_by_id"` + MergeStatus struct { + Previous string `json:"previous"` + Current string `json:"current"` + } `json:"merge_status"` MilestoneID struct { Previous int `json:"previous"` Current int `json:"current"` diff --git a/vendor/github.com/xanzy/go-gitlab/group_members.go b/vendor/github.com/xanzy/go-gitlab/group_members.go index cdf225c3..c18b180e 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_members.go +++ b/vendor/github.com/xanzy/go-gitlab/group_members.go @@ -30,17 +30,6 @@ type GroupMembersService struct { client *Client } -// GroupMemberSAMLIdentity represents the SAML Identity link for the group member. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project -// Gitlab MR for API change: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/20357 -// Gitlab MR for API Doc change: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/25652 -type GroupMemberSAMLIdentity struct { - ExternUID string `json:"extern_uid"` - Provider string `json:"provider"` - SAMLProviderID int `json:"saml_provider_id"` -} - // GroupMember represents a GitLab group member. // // GitLab API docs: https://docs.gitlab.com/ee/api/members.html @@ -59,6 +48,50 @@ type GroupMember struct { MemberRole *MemberRole `json:"member_role"` } +// GroupMemberSAMLIdentity represents the SAML Identity link for the group member. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project +type GroupMemberSAMLIdentity struct { + ExternUID string `json:"extern_uid"` + Provider string `json:"provider"` + SAMLProviderID int `json:"saml_provider_id"` +} + +// BillableGroupMember represents a GitLab billable group member. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/members.html#list-all-billable-members-of-a-group +type BillableGroupMember struct { + ID int `json:"id"` + Username string `json:"username"` + Name string `json:"name"` + State string `json:"state"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` + Email string `json:"email"` + LastActivityOn *ISOTime `json:"last_activity_on"` + MembershipType string `json:"membership_type"` + Removable bool `json:"removable"` + CreatedAt *time.Time `json:"created_at"` + IsLastOwner bool `json:"is_last_owner"` + LastLoginAt *time.Time `json:"last_login_at"` +} + +// BillableUserMembership represents a Membership of a billable user of a group +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/members.html#list-memberships-for-a-billable-member-of-a-group +type BillableUserMembership struct { + ID int `json:"id"` + SourceID int `json:"source_id"` + SourceFullName string `json:"source_full_name"` + SourceMembersURL string `json:"source_members_url"` + CreatedAt *time.Time `json:"created_at"` + ExpiresAt *time.Time `json:"expires_at"` + AccessLevel *AccessLevelDetails `json:"access_level"` +} + // ListGroupMembersOptions represents the available ListGroupMembers() and // ListAllGroupMembers() options. 
// @@ -184,25 +217,6 @@ func (s *GroupMembersService) GetInheritedGroupMember(gid interface{}, user int, return gm, resp, err } -// BillableGroupMember represents a GitLab billable group member. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/members.html#list-all-billable-members-of-a-group -type BillableGroupMember struct { - ID int `json:"id"` - Username string `json:"username"` - Name string `json:"name"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - Email string `json:"email"` - LastActivityOn *ISOTime `json:"last_activity_on"` - MembershipType string `json:"membership_type"` - Removable bool `json:"removable"` - CreatedAt *time.Time `json:"created_at"` - IsLastOwner bool `json:"is_last_owner"` - LastLoginAt *time.Time `json:"last_login_at"` -} - // ListBillableGroupMembersOptions represents the available ListBillableGroupMembers() options. // // GitLab API docs: @@ -239,6 +253,32 @@ func (s *GroupsService) ListBillableGroupMembers(gid interface{}, opt *ListBilla return bgm, resp, nil } +// ListMembershipsForBillableGroupMember Gets a list of memberships for a billable member of a group. +// Lists all projects and groups a user is a member of. Only projects and groups within the group hierarchy are included. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/members.html#list-memberships-for-a-billable-member-of-a-group +func (s *GroupsService) ListMembershipsForBillableGroupMember(gid interface{}, user int, options ...RequestOptionFunc) ([]*BillableUserMembership, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/billable_members/%d/memberships", PathEscape(group), user) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var bum []*BillableUserMembership + resp, err := s.client.Do(req, &bum) + if err != nil { + return nil, resp, err + } + + return bum, resp, nil +} + // RemoveBillableGroupMember removes a given group members that count as billable. // // GitLab API docs: diff --git a/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go b/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go index 3ccb41b5..1360057a 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go +++ b/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go @@ -31,18 +31,60 @@ type GroupServiceAccount struct { UserName string `json:"username"` } -// CreateServiceAccount create a new service account user for a group. +// ListServiceAccountsOptions represents the available ListServiceAccounts() options. // -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-service-account-user -func (s *GroupsService) CreateServiceAccount(gid interface{}, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) { +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#list-service-account-users +type ListServiceAccountsOptions struct { + ListOptions + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` +} + +// ListServiceAccounts gets a list of service acxcounts. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#list-service-account-users +func (s *GroupsService) ListServiceAccounts(gid interface{}, opt *ListServiceAccountsOptions, options ...RequestOptionFunc) ([]*GroupServiceAccount, *Response, error) { group, err := parseID(gid) if err != nil { return nil, nil, err } u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group)) - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var sa []*GroupServiceAccount + resp, err := s.client.Do(req, &sa) + if err != nil { + return nil, resp, err + } + + return sa, resp, nil +} + +// CreateServiceAccountOptions represents the available CreateServiceAccount() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-service-account-user +type CreateServiceAccountOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` +} + +// Creates a service account user. +// +// This API endpoint works on top-level groups only. It does not work on subgroups. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#create-service-account-user +func (s *GroupsService) CreateServiceAccount(gid interface{}, opt *CreateServiceAccountOptions, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) if err != nil { return nil, nil, err } @@ -60,7 +102,7 @@ func (s *GroupsService) CreateServiceAccount(gid interface{}, options ...Request // CreateServiceAccountPersonalAccessToken() options. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-personal-access-token-for-service-account-user +// https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-personal-access-token-for-a-service-account-user type CreateServiceAccountPersonalAccessTokenOptions struct { Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` Name *string `url:"name,omitempty" json:"name,omitempty"` @@ -71,7 +113,7 @@ type CreateServiceAccountPersonalAccessTokenOptions struct { // service account user for a group. // // GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-personal-access-token-for-service-account-user +// https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-personal-access-token-for-a-service-account-user func (s *GroupsService) CreateServiceAccountPersonalAccessToken(gid interface{}, serviceAccount int, opt *CreateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { group, err := parseID(gid) if err != nil { @@ -117,3 +159,23 @@ func (s *GroupsService) RotateServiceAccountPersonalAccessToken(gid interface{}, return pat, resp, nil } + +// DeleteServiceAccount Deletes a service account user. +// +// This API endpoint works on top-level groups only. It does not work on subgroups. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#delete-a-service-account-user +func (s *GroupsService) DeleteServiceAccount(gid interface{}, serviceAccount int, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/service_accounts/%d", PathEscape(group), serviceAccount) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/groups.go b/vendor/github.com/xanzy/go-gitlab/groups.go index 52fd13f6..0e85961d 100644 --- a/vendor/github.com/xanzy/go-gitlab/groups.go +++ b/vendor/github.com/xanzy/go-gitlab/groups.go @@ -39,42 +39,38 @@ type GroupsService struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/groups.html type Group struct { - ID int `json:"id"` - Name string `json:"name"` - Path string `json:"path"` - Description string `json:"description"` - MembershipLock bool `json:"membership_lock"` - Visibility VisibilityValue `json:"visibility"` - LFSEnabled bool `json:"lfs_enabled"` - DefaultBranchProtectionDefaults struct { - AllowedToPush []*GroupAccessLevel `json:"allowed_to_push"` - AllowForcePush bool `json:"allow_force_push"` - AllowedToMerge []*GroupAccessLevel `json:"allowed_to_merge"` - DeveloperCanInitialPush bool `json:"developer_can_initial_push"` - } `json:"default_branch_protection_defaults"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - RequestAccessEnabled bool `json:"request_access_enabled"` - RepositoryStorage string `json:"repository_storage"` - FullName string `json:"full_name"` - FullPath string `json:"full_path"` - FileTemplateProjectID int `json:"file_template_project_id"` - ParentID int `json:"parent_id"` - Projects []*Project `json:"projects"` - Statistics *Statistics `json:"statistics"` - CustomAttributes []*CustomAttribute `json:"custom_attributes"` - ShareWithGroupLock bool `json:"share_with_group_lock"` - RequireTwoFactorAuth bool `json:"require_two_factor_authentication"` - TwoFactorGracePeriod int `json:"two_factor_grace_period"` - ProjectCreationLevel ProjectCreationLevelValue `json:"project_creation_level"` - AutoDevopsEnabled bool `json:"auto_devops_enabled"` - SubGroupCreationLevel SubGroupCreationLevelValue `json:"subgroup_creation_level"` - EmailsEnabled bool `json:"emails_enabled"` - MentionsDisabled bool `json:"mentions_disabled"` - RunnersToken string `json:"runners_token"` - SharedProjects []*Project `json:"shared_projects"` - SharedRunnersSetting SharedRunnersSettingValue `json:"shared_runners_setting"` - SharedWithGroups []struct { + ID int `json:"id"` + Name string `json:"name"` + Path string `json:"path"` + Description string `json:"description"` + MembershipLock bool `json:"membership_lock"` + Visibility VisibilityValue `json:"visibility"` + LFSEnabled bool `json:"lfs_enabled"` + DefaultBranch string `json:"default_branch"` + DefaultBranchProtectionDefaults *BranchProtectionDefaults `json:"default_branch_protection_defaults"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` + RequestAccessEnabled bool `json:"request_access_enabled"` + RepositoryStorage string `json:"repository_storage"` + FullName string `json:"full_name"` + FullPath string `json:"full_path"` + FileTemplateProjectID int `json:"file_template_project_id"` + ParentID int `json:"parent_id"` + Projects []*Project `json:"projects"` + Statistics *Statistics 
`json:"statistics"` + CustomAttributes []*CustomAttribute `json:"custom_attributes"` + ShareWithGroupLock bool `json:"share_with_group_lock"` + RequireTwoFactorAuth bool `json:"require_two_factor_authentication"` + TwoFactorGracePeriod int `json:"two_factor_grace_period"` + ProjectCreationLevel ProjectCreationLevelValue `json:"project_creation_level"` + AutoDevopsEnabled bool `json:"auto_devops_enabled"` + SubGroupCreationLevel SubGroupCreationLevelValue `json:"subgroup_creation_level"` + EmailsEnabled bool `json:"emails_enabled"` + MentionsDisabled bool `json:"mentions_disabled"` + RunnersToken string `json:"runners_token"` + SharedProjects []*Project `json:"shared_projects"` + SharedRunnersSetting SharedRunnersSettingValue `json:"shared_runners_setting"` + SharedWithGroups []struct { GroupID int `json:"group_id"` GroupName string `json:"group_name"` GroupFullPath string `json:"group_full_path"` @@ -100,6 +96,17 @@ type Group struct { DefaultBranchProtection int `json:"default_branch_protection"` } +// BranchProtectionDefaults represents default Git protected branch permissions. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults +type BranchProtectionDefaults struct { + AllowedToPush []*GroupAccessLevel `json:"allowed_to_push,omitempty"` + AllowForcePush bool `json:"allow_force_push,omitempty"` + AllowedToMerge []*GroupAccessLevel `json:"allowed_to_merge,omitempty"` + DeveloperCanInitialPush bool `json:"developer_can_initial_push,omitempty"` +} + // GroupAccessLevel represents default branch protection defaults access levels. // // GitLab API docs: @@ -358,6 +365,7 @@ type CreateGroupOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` Path *string `url:"path,omitempty" json:"path,omitempty"` Avatar *GroupAvatar `url:"-" json:"-"` + DefaultBranch *string `url:"default_branch,omitempty" json:"default_branch,omitempty"` Description *string `url:"description,omitempty" json:"description,omitempty"` MembershipLock *bool `url:"membership_lock,omitempty" json:"membership_lock,omitempty"` Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` @@ -393,7 +401,7 @@ type CreateGroupOptions struct { type DefaultBranchProtectionDefaultsOptions struct { AllowedToPush *[]*GroupAccessLevel `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"` - AllowedToMerge *[]*GroupAccessLevel `url:"allowed_to_merge.omitempty" json:"allowed_to_merge.omitempty"` + AllowedToMerge *[]*GroupAccessLevel `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` DeveloperCanInitialPush *bool `url:"developer_can_initial_push,omitempty" json:"developer_can_initial_push,omitempty"` } @@ -502,6 +510,7 @@ type UpdateGroupOptions struct { Name *string `url:"name,omitempty" json:"name,omitempty"` Path *string `url:"path,omitempty" json:"path,omitempty"` Avatar *GroupAvatar `url:"-" json:"avatar,omitempty"` + DefaultBranch *string `url:"default_branch,omitempty" json:"default_branch,omitempty"` Description *string `url:"description,omitempty" json:"description,omitempty"` MembershipLock *bool `url:"membership_lock,omitempty" json:"membership_lock,omitempty"` Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` diff --git a/vendor/github.com/xanzy/go-gitlab/pages.go b/vendor/github.com/xanzy/go-gitlab/pages.go index 617b0ba4..81be8ba6 100644 --- 
a/vendor/github.com/xanzy/go-gitlab/pages.go +++ b/vendor/github.com/xanzy/go-gitlab/pages.go @@ -19,12 +19,33 @@ package gitlab import ( "fmt" "net/http" + "time" ) type PagesService struct { client *Client } +// Pages represents the Pages of a project. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/pages.html +type Pages struct { + URL string `json:"url"` + IsUniqueDomainEnabled bool `json:"is_unique_domain_enabled"` + ForceHTTPS bool `json:"force_https"` + Deployments []*PagesDeployment `json:"deployments"` +} + +// PagesDeployment represents a Pages deployment. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/pages.html +type PagesDeployment struct { + CreatedAt time.Time `json:"created_at"` + URL string `json:"url"` + PathPrefix string `json:"path_prefix"` + RootDirectory string `json:"root_directory"` +} + // UnpublishPages unpublished pages. The user must have admin privileges. // // GitLab API docs: @@ -43,3 +64,29 @@ func (s *PagesService) UnpublishPages(gid interface{}, options ...RequestOptionF return s.client.Do(req, nil) } + +// GetPages lists Pages settings for a project. The user must have at least +// maintainer privileges. +// +// GitLab API Docs: +// https://docs.gitlab.com/ee/api/pages.html#get-pages-settings-for-a-project +func (s *PagesService) GetPages(gid interface{}, options ...RequestOptionFunc) (*Pages, *Response, error) { + project, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/pages", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + p := new(Pages) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/projects.go b/vendor/github.com/xanzy/go-gitlab/projects.go index 6df4eff2..424196b5 100644 --- a/vendor/github.com/xanzy/go-gitlab/projects.go +++ b/vendor/github.com/xanzy/go-gitlab/projects.go @@ -66,6 +66,7 @@ type Project struct { ContainerRegistryAccessLevel AccessControlValue `json:"container_registry_access_level"` ContainerRegistryImagePrefix string `json:"container_registry_image_prefix,omitempty"` CreatedAt *time.Time `json:"created_at,omitempty"` + UpdatedAt *time.Time `json:"updated_at,omitempty"` LastActivityAt *time.Time `json:"last_activity_at,omitempty"` CreatorID int `json:"creator_id"` Namespace *ProjectNamespace `json:"namespace"` @@ -83,6 +84,7 @@ type Project struct { StarCount int `json:"star_count"` RunnersToken string `json:"runners_token"` AllowMergeOnSkippedPipeline bool `json:"allow_merge_on_skipped_pipeline"` + AllowPipelineTriggerApproveDeployment bool `json:"allow_pipeline_trigger_approve_deployment"` OnlyAllowMergeIfPipelineSucceeds bool `json:"only_allow_merge_if_pipeline_succeeds"` OnlyAllowMergeIfAllDiscussionsAreResolved bool `json:"only_allow_merge_if_all_discussions_are_resolved"` RemoveSourceBranchAfterMerge bool `json:"remove_source_branch_after_merge"` @@ -309,6 +311,7 @@ type ProjectApprovalRule struct { ID int `json:"id"` Name string `json:"name"` RuleType string `json:"rule_type"` + ReportType string `json:"report_type"` EligibleApprovers []*BasicUser `json:"eligible_approvers"` ApprovalsRequired int `json:"approvals_required"` Users []*BasicUser `json:"users"` @@ -1043,6 +1046,44 @@ func (s *ProjectsService) StarProject(pid interface{}, options ...RequestOptionF return p, resp, nil } +// ListProjectInvidedGroupOptions represents the available +// 
ListProjectsInvitedGroups() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#list-a-projects-invited-groups +type ListProjectInvidedGroupOptions struct { + ListOptions + Search *string `url:"search,omitempty" json:"search,omitempty"` + MinAccessLevel *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"` + Relation *[]string `url:"relation,omitempty" json:"relation,omitempty"` + WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` +} + +// ListProjectsInvitedGroups lists invited groups of a project +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#list-a-projects-invited-groups +func (s *ProjectsService) ListProjectsInvitedGroups(pid interface{}, opt *ListProjectInvidedGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/invited_groups", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var pg []*ProjectGroup + resp, err := s.client.Do(req, &pg) + if err != nil { + return nil, resp, err + } + + return pg, resp, nil +} + // UnstarProject unstars a given project. // // GitLab API docs: @@ -1120,18 +1161,28 @@ func (s *ProjectsService) UnarchiveProject(pid interface{}, options ...RequestOp return p, resp, nil } +// DeleteProjectOptions represents the available DeleteProject() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#delete-project +type DeleteProjectOptions struct { + FullPath *string `url:"full_path" json:"full_path"` + PermanentlyRemove *bool `url:"permanently_remove" json:"permanently_remove"` +} + // DeleteProject removes a project including all associated resources // (issues, merge requests etc.) // -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#delete-project -func (s *ProjectsService) DeleteProject(pid interface{}, options ...RequestOptionFunc) (*Response, error) { +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#delete-project +func (s *ProjectsService) DeleteProject(pid interface{}, opt *DeleteProjectOptions, options ...RequestOptionFunc) (*Response, error) { project, err := parseID(pid) if err != nil { return nil, err } u := fmt.Sprintf("projects/%s", PathEscape(project)) - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) if err != nil { return nil, err } @@ -1139,7 +1190,7 @@ func (s *ProjectsService) DeleteProject(pid interface{}, options ...RequestOptio return s.client.Do(req, nil) } -// ShareWithGroupOptions represents options to share project with groups +// ShareWithGroupOptions represents the available SharedWithGroup() options. 
// // GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#share-project-with-group type ShareWithGroupOptions struct { @@ -1232,6 +1283,7 @@ type ProjectHook struct { DeploymentEvents bool `json:"deployment_events"` ReleasesEvents bool `json:"releases_events"` EnableSSLVerification bool `json:"enable_ssl_verification"` + AlertStatus string `json:"alert_status"` CreatedAt *time.Time `json:"created_at"` ResourceAccessTokenEvents bool `json:"resource_access_token_events"` CustomWebhookTemplate string `json:"custom_webhook_template"` @@ -1442,8 +1494,8 @@ func (s *ProjectsService) TriggerTestProjectHook(pid interface{}, hook int, even return s.client.Do(req, nil) } -// SetHookCustomHeaderOptions represents a project or group hook custom header. -// If the header isn't present, it will be created. +// SetHookCustomHeaderOptions represents the available SetProjectCustomHeader() +// options. // // GitLab API docs: // https://docs.gitlab.com/ee/api/projects.html#set-a-custom-header @@ -1888,9 +1940,11 @@ func (s *ProjectsService) ChangeApprovalConfiguration(pid interface{}, opt *Chan return pa, resp, nil } -// GetProjectApprovalRulesListsOptions represents the available GetProjectApprovalRules() options. +// GetProjectApprovalRulesListsOptions represents the available +// GetProjectApprovalRules() options. // -// GitLab API docs: https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-project-level-rules +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-project-level-rules type GetProjectApprovalRulesListsOptions ListOptions // GetProjectApprovalRules looks up the list of project level approver rules. @@ -2143,7 +2197,8 @@ func (s *ProjectsService) StartMirroringProject(pid interface{}, options ...Requ // TransferProjectOptions represents the available TransferProject() options. 
// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#transfer-a-project-to-a-new-namespace +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#transfer-a-project-to-a-new-namespace type TransferProjectOptions struct { Namespace interface{} `url:"namespace,omitempty" json:"namespace,omitempty"` } diff --git a/vendor/github.com/xanzy/go-gitlab/protected_branches.go b/vendor/github.com/xanzy/go-gitlab/protected_branches.go index d13f57a6..e88c7aea 100644 --- a/vendor/github.com/xanzy/go-gitlab/protected_branches.go +++ b/vendor/github.com/xanzy/go-gitlab/protected_branches.go @@ -54,6 +54,7 @@ type BranchAccessDescription struct { ID int `json:"id"` AccessLevel AccessLevelValue `json:"access_level"` AccessLevelDescription string `json:"access_level_description"` + DeployKeyID int `json:"deploy_key_id"` UserID int `json:"user_id"` GroupID int `json:"group_id"` } diff --git a/vendor/github.com/xanzy/go-gitlab/runners.go b/vendor/github.com/xanzy/go-gitlab/runners.go index 5224cf91..8c255f79 100644 --- a/vendor/github.com/xanzy/go-gitlab/runners.go +++ b/vendor/github.com/xanzy/go-gitlab/runners.go @@ -52,19 +52,20 @@ type Runner struct { // // GitLab API docs: https://docs.gitlab.com/ee/api/runners.html type RunnerDetails struct { - Paused bool `json:"paused"` - Architecture string `json:"architecture"` - Description string `json:"description"` - ID int `json:"id"` - IPAddress string `json:"ip_address"` - IsShared bool `json:"is_shared"` - RunnerType string `json:"runner_type"` - ContactedAt *time.Time `json:"contacted_at"` - Name string `json:"name"` - Online bool `json:"online"` - Status string `json:"status"` - Platform string `json:"platform"` - Projects []struct { + Paused bool `json:"paused"` + Architecture string `json:"architecture"` + Description string `json:"description"` + ID int `json:"id"` + IPAddress string `json:"ip_address"` + IsShared bool `json:"is_shared"` + RunnerType string `json:"runner_type"` + ContactedAt *time.Time `json:"contacted_at"` + MaintenanceNote string `json:"maintenance_note"` + Name string `json:"name"` + Online bool `json:"online"` + Status string `json:"status"` + Platform string `json:"platform"` + Projects []struct { ID int `json:"id"` Name string `json:"name"` NameWithNamespace string `json:"name_with_namespace"` @@ -173,13 +174,14 @@ func (s *RunnersService) GetRunnerDetails(rid interface{}, options ...RequestOpt // GitLab API docs: // https://docs.gitlab.com/ee/api/runners.html#update-runners-details type UpdateRunnerDetailsOptions struct { - Description *string `url:"description,omitempty" json:"description,omitempty"` - Paused *bool `url:"paused,omitempty" json:"paused,omitempty"` - TagList *[]string `url:"tag_list[],omitempty" json:"tag_list,omitempty"` - RunUntagged *bool `url:"run_untagged,omitempty" json:"run_untagged,omitempty"` - Locked *bool `url:"locked,omitempty" json:"locked,omitempty"` - AccessLevel *string `url:"access_level,omitempty" json:"access_level,omitempty"` - MaximumTimeout *int `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Paused *bool `url:"paused,omitempty" json:"paused,omitempty"` + TagList *[]string `url:"tag_list[],omitempty" json:"tag_list,omitempty"` + RunUntagged *bool `url:"run_untagged,omitempty" json:"run_untagged,omitempty"` + Locked *bool `url:"locked,omitempty" json:"locked,omitempty"` + AccessLevel *string `url:"access_level,omitempty" json:"access_level,omitempty"` + 
MaximumTimeout *int `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"` + MaintenanceNote *string `url:"maintenance_note,omitempty" json:"maintenance_note,omitempty"` // Deprecated: Use Paused instead. (Deprecated in GitLab 14.8) Active *bool `url:"active,omitempty" json:"active,omitempty"` diff --git a/vendor/github.com/xanzy/go-gitlab/services.go b/vendor/github.com/xanzy/go-gitlab/services.go index a7eeb72d..56eeca86 100644 --- a/vendor/github.com/xanzy/go-gitlab/services.go +++ b/vendor/github.com/xanzy/go-gitlab/services.go @@ -767,6 +767,103 @@ func (s *ServicesService) DeleteGithubService(pid interface{}, options ...Reques return s.client.Do(req, nil) } +// HarborService represents the Harbor service settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#harbor +type HarborService struct { + Service + Properties *HarborServiceProperties `json:"properties"` +} + +// HarborServiceProperties represents Harbor specific properties. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#harbor +type HarborServiceProperties struct { + URL string `json:"url"` + ProjectName string `json:"project_name"` + Username string `json:"username"` + Password string `json:"password"` + UseInheritedSettings bool `json:"use_inherited_settings"` +} + +// GetHarborService gets Harbor service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-harbor-settings +func (s *ServicesService) GetHarborService(pid interface{}, options ...RequestOptionFunc) (*HarborService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/integrations/harbor", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(HarborService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetHarborServiceOptions represents the available SetHarborService() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-harbor +type SetHarborServiceOptions struct { + URL *string `url:"url,omitempty" json:"url,omitempty"` + ProjectName *string `url:"project_name,omitempty" json:"project_name,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + Password *string `url:"password,omitempty" json:"password,omitempty"` + UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"` +} + +// SetHarborService sets Harbor service for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-harbor +func (s *ServicesService) SetHarborService(pid interface{}, opt *SetHarborServiceOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/integrations/harbor", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteHarborService deletes Harbor service for a project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#disable-harbor +func (s *ServicesService) DeleteHarborService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/integrations/harbor", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + // SlackApplication represents GitLab for slack application settings. // // GitLab API docs: @@ -1102,12 +1199,22 @@ type JiraService struct { // GitLab API docs: // https://docs.gitlab.com/ee/api/integrations.html#jira type JiraServiceProperties struct { - URL string `json:"url"` - APIURL string `json:"api_url"` - ProjectKeys []string `json:"project_keys" ` - Username string `json:"username" ` - Password string `json:"password" ` - JiraIssueTransitionID string `json:"jira_issue_transition_id"` + URL string `json:"url"` + APIURL string `json:"api_url"` + Username string `json:"username" ` + Password string `json:"password" ` + Active bool `json:"active"` + JiraAuthType int `json:"jira_auth_type"` + JiraIssuePrefix string `json:"jira_issue_prefix"` + JiraIssueRegex string `json:"jira_issue_regex"` + JiraIssueTransitionAutomatic bool `json:"jira_issue_transition_automatic"` + JiraIssueTransitionID string `json:"jira_issue_transition_id"` + CommitEvents bool `json:"commit_events"` + MergeRequestsEvents bool `json:"merge_requests_events"` + CommentOnEventEnabled bool `json:"comment_on_event_enabled"` + IssuesEnabled bool `json:"issues_enabled"` + ProjectKeys []string `json:"project_keys" ` + UseInheritedSettings bool `json:"use_inherited_settings"` // Deprecated: This parameter was removed in GitLab 17.0 ProjectKey string `json:"project_key" ` @@ -1152,7 +1259,7 @@ func (s *ServicesService) GetJiraService(pid interface{}, options ...RequestOpti if err != nil { return nil, nil, err } - u := fmt.Sprintf("projects/%s/services/jira", PathEscape(project)) + u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) req, err := s.client.NewRequest(http.MethodGet, u, nil, options) if err != nil { @@ -1204,7 +1311,7 @@ func (s *ServicesService) SetJiraService(pid interface{}, opt *SetJiraServiceOpt if err != nil { return nil, err } - u := fmt.Sprintf("projects/%s/services/jira", PathEscape(project)) + u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) req, err := s.client.NewRequest(http.MethodPut, u, opt, options) if err != nil { @@ -1223,7 +1330,7 @@ func (s *ServicesService) DeleteJiraService(pid interface{}, options ...RequestO if err != nil { return nil, err } - u := fmt.Sprintf("projects/%s/services/jira", PathEscape(project)) + u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project)) req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) if err != nil { @@ -1748,6 +1855,101 @@ func (s *ServicesService) DeletePrometheusService(pid interface{}, options ...Re return s.client.Do(req, nil) } +// RedmineService represents the Redmine service settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#redmine +type RedmineService struct { + Service + Properties *RedmineServiceProperties `json:"properties"` +} + +// RedmineServiceProperties represents Redmine specific properties. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#redmine +type RedmineServiceProperties struct { + NewIssueURL string `json:"new_issue_url"` + ProjectURL string `json:"project_url"` + IssuesURL string `json:"issues_url"` + UseInheritedSettings BoolValue `json:"use_inherited_settings"` +} + +// GetRedmineService gets Redmine service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-redmine-settings +func (s *ServicesService) GetRedmineService(pid interface{}, options ...RequestOptionFunc) (*RedmineService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/integrations/redmine", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(RedmineService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetRedmineServiceOptions represents the available SetRedmineService(). +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-redmine +type SetRedmineServiceOptions struct { + NewIssueURL *string `url:"new_issue_url,omitempty" json:"new_issue_url,omitempty"` + ProjectURL *string `url:"project_url,omitempty" json:"project_url,omitempty"` + IssuesURL *string `url:"issues_url,omitempty" json:"issues_url,omitempty"` + UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"` +} + +// SetRedmineService sets Redmine service for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-redmine +func (s *ServicesService) SetRedmineService(pid interface{}, opt *SetRedmineServiceOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/integrations/redmine", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteRedmineService deletes Redmine service for project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#disable-redmine +func (s *ServicesService) DeleteRedmineService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/integrations/redmine", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + // SlackService represents Slack service settings. 
// // GitLab API docs: diff --git a/vendor/github.com/xanzy/go-gitlab/settings.go b/vendor/github.com/xanzy/go-gitlab/settings.go index f4d67a4f..d2c319f3 100644 --- a/vendor/github.com/xanzy/go-gitlab/settings.go +++ b/vendor/github.com/xanzy/go-gitlab/settings.go @@ -43,408 +43,408 @@ type SettingsService struct { // https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/lib/ee/api/helpers/settings_helpers.rb#L10 // https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/app/helpers/ee/application_settings_helper.rb#L20 type Settings struct { - ID int `json:"id"` - AbuseNotificationEmail string `json:"abuse_notification_email"` - AdminMode bool `json:"admin_mode"` - AfterSignOutPath string `json:"after_sign_out_path"` - AfterSignUpText string `json:"after_sign_up_text"` - AkismetAPIKey string `json:"akismet_api_key"` - AkismetEnabled bool `json:"akismet_enabled"` - AllowAccountDeletion bool `json:"allow_account_deletion"` - AllowGroupOwnersToManageLDAP bool `json:"allow_group_owners_to_manage_ldap"` - AllowLocalRequestsFromSystemHooks bool `json:"allow_local_requests_from_system_hooks"` - AllowLocalRequestsFromWebHooksAndServices bool `json:"allow_local_requests_from_web_hooks_and_services"` - AllowProjectCreationForGuestAndBelow bool `json:"allow_project_creation_for_guest_and_below"` - AllowRunnerRegistrationToken bool `json:"allow_runner_registration_token"` - ArchiveBuildsInHumanReadable string `json:"archive_builds_in_human_readable"` - ASCIIDocMaxIncludes int `json:"asciidoc_max_includes"` - AssetProxyAllowlist []string `json:"asset_proxy_allowlist"` - AssetProxyEnabled bool `json:"asset_proxy_enabled"` - AssetProxyURL string `json:"asset_proxy_url"` - AssetProxySecretKey string `json:"asset_proxy_secret_key"` - AuthorizedKeysEnabled bool `json:"authorized_keys_enabled"` - AutoBanUserOnExcessiveProjectsDownload bool `json:"auto_ban_user_on_excessive_projects_download"` - AutoDevOpsDomain string `json:"auto_devops_domain"` - AutoDevOpsEnabled bool `json:"auto_devops_enabled"` - AutomaticPurchasedStorageAllocation bool `json:"automatic_purchased_storage_allocation"` - BulkImportConcurrentPipelineBatchLimit int `json:"bulk_import_concurrent_pipeline_batch_limit"` - BulkImportEnabled bool `json:"bulk_import_enabled"` - BulkImportMaxDownloadFileSize int `json:"bulk_import_max_download_file_size"` - CanCreateGroup bool `json:"can_create_group"` - CheckNamespacePlan bool `json:"check_namespace_plan"` - CIMaxIncludes int `json:"ci_max_includes"` - CIMaxTotalYAMLSizeBytes int `json:"ci_max_total_yaml_size_bytes"` - CommitEmailHostname string `json:"commit_email_hostname"` - ConcurrentBitbucketImportJobsLimit int `json:"concurrent_bitbucket_import_jobs_limit"` - ConcurrentBitbucketServerImportJobsLimit int `json:"concurrent_bitbucket_server_import_jobs_limit"` - ConcurrentGitHubImportJobsLimit int `json:"concurrent_github_import_jobs_limit"` - ContainerExpirationPoliciesEnableHistoricEntries bool `json:"container_expiration_policies_enable_historic_entries"` - ContainerRegistryCleanupTagsServiceMaxListSize int `json:"container_registry_cleanup_tags_service_max_list_size"` - ContainerRegistryDeleteTagsServiceTimeout int `json:"container_registry_delete_tags_service_timeout"` - ContainerRegistryExpirationPoliciesCaching bool `json:"container_registry_expiration_policies_caching"` - ContainerRegistryExpirationPoliciesWorkerCapacity int `json:"container_registry_expiration_policies_worker_capacity"` - ContainerRegistryImportCreatedBefore *time.Time 
`json:"container_registry_import_created_before"` - ContainerRegistryImportMaxRetries int `json:"container_registry_import_max_retries"` - ContainerRegistryImportMaxStepDuration int `json:"container_registry_import_max_step_duration"` - ContainerRegistryImportMaxTagsCount int `json:"container_registry_import_max_tags_count"` - ContainerRegistryImportStartMaxRetries int `json:"container_registry_import_start_max_retries"` - ContainerRegistryImportTargetPlan string `json:"container_registry_import_target_plan"` - ContainerRegistryTokenExpireDelay int `json:"container_registry_token_expire_delay"` - CreatedAt *time.Time `json:"created_at"` - CustomHTTPCloneURLRoot string `json:"custom_http_clone_url_root"` - DNSRebindingProtectionEnabled bool `json:"dns_rebinding_protection_enabled"` - DSAKeyRestriction int `json:"dsa_key_restriction"` - DeactivateDormantUsers bool `json:"deactivate_dormant_users"` - DeactivateDormantUsersPeriod int `json:"deactivate_dormant_users_period"` - DecompressArchiveFileTimeout int `json:"decompress_archive_file_timeout"` - DefaultArtifactsExpireIn string `json:"default_artifacts_expire_in"` - DefaultBranchName string `json:"default_branch_name"` - DefaultBranchProtection int `json:"default_branch_protection"` - DefaultBranchProtectionDefaults BranchProtectionDefaults `json:"default_branch_protection_defaults,omitempty"` - DefaultCiConfigPath string `json:"default_ci_config_path"` - DefaultGroupVisibility VisibilityValue `json:"default_group_visibility"` - DefaultPreferredLanguage string `json:"default_preferred_language"` - DefaultProjectCreation int `json:"default_project_creation"` - DefaultProjectDeletionProtection bool `json:"default_project_deletion_protection"` - DefaultProjectVisibility VisibilityValue `json:"default_project_visibility"` - DefaultProjectsLimit int `json:"default_projects_limit"` - DefaultSnippetVisibility VisibilityValue `json:"default_snippet_visibility"` - DefaultSyntaxHighlightingTheme int `json:"default_syntax_highlighting_theme"` - DelayedGroupDeletion bool `json:"delayed_group_deletion"` - DelayedProjectDeletion bool `json:"delayed_project_deletion"` - DeleteInactiveProjects bool `json:"delete_inactive_projects"` - DeleteUnconfirmedUsers bool `json:"delete_unconfirmed_users"` - DeletionAdjournedPeriod int `json:"deletion_adjourned_period"` - DiagramsnetEnabled bool `json:"diagramsnet_enabled"` - DiagramsnetURL string `json:"diagramsnet_url"` - DiffMaxFiles int `json:"diff_max_files"` - DiffMaxLines int `json:"diff_max_lines"` - DiffMaxPatchBytes int `json:"diff_max_patch_bytes"` - DisableAdminOAuthScopes bool `json:"disable_admin_oauth_scopes"` - DisableFeedToken bool `json:"disable_feed_token"` - DisableOverridingApproversPerMergeRequest bool `json:"disable_overriding_approvers_per_merge_request"` - DisablePersonalAccessTokens bool `json:"disable_personal_access_tokens"` - DisabledOauthSignInSources []string `json:"disabled_oauth_sign_in_sources"` - DomainAllowlist []string `json:"domain_allowlist"` - DomainDenylist []string `json:"domain_denylist"` - DomainDenylistEnabled bool `json:"domain_denylist_enabled"` - DownstreamPipelineTriggerLimitPerProjectUserSHA int `json:"downstream_pipeline_trigger_limit_per_project_user_sha"` - DuoFeaturesEnabled bool `json:"duo_features_enabled"` - ECDSAKeyRestriction int `json:"ecdsa_key_restriction"` - ECDSASKKeyRestriction int `json:"ecdsa_sk_key_restriction"` - EKSAccessKeyID string `json:"eks_access_key_id"` - EKSAccountID string `json:"eks_account_id"` - EKSIntegrationEnabled bool 
`json:"eks_integration_enabled"` - EKSSecretAccessKey string `json:"eks_secret_access_key"` - Ed25519KeyRestriction int `json:"ed25519_key_restriction"` - Ed25519SKKeyRestriction int `json:"ed25519_sk_key_restriction"` - ElasticsearchAWS bool `json:"elasticsearch_aws"` - ElasticsearchAWSAccessKey string `json:"elasticsearch_aws_access_key"` - ElasticsearchAWSRegion string `json:"elasticsearch_aws_region"` - ElasticsearchAWSSecretAccessKey string `json:"elasticsearch_aws_secret_access_key"` - ElasticsearchAnalyzersKuromojiEnabled bool `json:"elasticsearch_analyzers_kuromoji_enabled"` - ElasticsearchAnalyzersKuromojiSearch bool `json:"elasticsearch_analyzers_kuromoji_search"` - ElasticsearchAnalyzersSmartCNEnabled bool `json:"elasticsearch_analyzers_smartcn_enabled"` - ElasticsearchAnalyzersSmartCNSearch bool `json:"elasticsearch_analyzers_smartcn_search"` - ElasticsearchClientRequestTimeout int `json:"elasticsearch_client_request_timeout"` - ElasticsearchIndexedFieldLengthLimit int `json:"elasticsearch_indexed_field_length_limit"` - ElasticsearchIndexedFileSizeLimitKB int `json:"elasticsearch_indexed_file_size_limit_kb"` - ElasticsearchIndexing bool `json:"elasticsearch_indexing"` - ElasticsearchLimitIndexing bool `json:"elasticsearch_limit_indexing"` - ElasticsearchMaxBulkConcurrency int `json:"elasticsearch_max_bulk_concurrency"` - ElasticsearchMaxBulkSizeMB int `json:"elasticsearch_max_bulk_size_mb"` - ElasticsearchMaxCodeIndexingConcurrency int `json:"elasticsearch_max_code_indexing_concurrency"` - ElasticsearchNamespaceIDs []int `json:"elasticsearch_namespace_ids"` - ElasticsearchPassword string `json:"elasticsearch_password"` - ElasticsearchPauseIndexing bool `json:"elasticsearch_pause_indexing"` - ElasticsearchProjectIDs []int `json:"elasticsearch_project_ids"` - ElasticsearchReplicas int `json:"elasticsearch_replicas"` - ElasticsearchRequeueWorkers bool `json:"elasticsearch_requeue_workers"` - ElasticsearchSearch bool `json:"elasticsearch_search"` - ElasticsearchShards int `json:"elasticsearch_shards"` - ElasticsearchURL []string `json:"elasticsearch_url"` - ElasticsearchUsername string `json:"elasticsearch_username"` - ElasticsearchWorkerNumberOfShards int `json:"elasticsearch_worker_number_of_shards"` - EmailAdditionalText string `json:"email_additional_text"` - EmailAuthorInBody bool `json:"email_author_in_body"` - EmailConfirmationSetting string `json:"email_confirmation_setting"` - EmailRestrictions string `json:"email_restrictions"` - EmailRestrictionsEnabled bool `json:"email_restrictions_enabled"` - EnableArtifactExternalRedirectWarningPage bool `json:"enable_artifact_external_redirect_warning_page"` - EnabledGitAccessProtocol string `json:"enabled_git_access_protocol"` - EnforceNamespaceStorageLimit bool `json:"enforce_namespace_storage_limit"` - EnforcePATExpiration bool `json:"enforce_pat_expiration"` - EnforceSSHKeyExpiration bool `json:"enforce_ssh_key_expiration"` - EnforceTerms bool `json:"enforce_terms"` - ExternalAuthClientCert string `json:"external_auth_client_cert"` - ExternalAuthClientKey string `json:"external_auth_client_key"` - ExternalAuthClientKeyPass string `json:"external_auth_client_key_pass"` - ExternalAuthorizationServiceDefaultLabel string `json:"external_authorization_service_default_label"` - ExternalAuthorizationServiceEnabled bool `json:"external_authorization_service_enabled"` - ExternalAuthorizationServiceTimeout float64 `json:"external_authorization_service_timeout"` - ExternalAuthorizationServiceURL string 
`json:"external_authorization_service_url"` - ExternalPipelineValidationServiceTimeout int `json:"external_pipeline_validation_service_timeout"` - ExternalPipelineValidationServiceToken string `json:"external_pipeline_validation_service_token"` - ExternalPipelineValidationServiceURL string `json:"external_pipeline_validation_service_url"` - FailedLoginAttemptsUnlockPeriodInMinutes int `json:"failed_login_attempts_unlock_period_in_minutes"` - FileTemplateProjectID int `json:"file_template_project_id"` - FirstDayOfWeek int `json:"first_day_of_week"` - FlocEnabled bool `json:"floc_enabled"` - GeoNodeAllowedIPs string `json:"geo_node_allowed_ips"` - GeoStatusTimeout int `json:"geo_status_timeout"` - GitRateLimitUsersAlertlist []string `json:"git_rate_limit_users_alertlist"` - GitTwoFactorSessionExpiry int `json:"git_two_factor_session_expiry"` - GitalyTimeoutDefault int `json:"gitaly_timeout_default"` - GitalyTimeoutFast int `json:"gitaly_timeout_fast"` - GitalyTimeoutMedium int `json:"gitaly_timeout_medium"` - GitlabDedicatedInstance bool `json:"gitlab_dedicated_instance"` - GitlabEnvironmentToolkitInstance bool `json:"gitlab_environment_toolkit_instance"` - GitlabShellOperationLimit int `json:"gitlab_shell_operation_limit"` - GitpodEnabled bool `json:"gitpod_enabled"` - GitpodURL string `json:"gitpod_url"` - GitRateLimitUsersAllowlist []string `json:"git_rate_limit_users_allowlist"` - GloballyAllowedIPs string `json:"globally_allowed_ips"` - GrafanaEnabled bool `json:"grafana_enabled"` - GrafanaURL string `json:"grafana_url"` - GravatarEnabled bool `json:"gravatar_enabled"` - GroupDownloadExportLimit int `json:"group_download_export_limit"` - GroupExportLimit int `json:"group_export_limit"` - GroupImportLimit int `json:"group_import_limit"` - GroupOwnersCanManageDefaultBranchProtection bool `json:"group_owners_can_manage_default_branch_protection"` - GroupRunnerTokenExpirationInterval int `json:"group_runner_token_expiration_interval"` - HTMLEmailsEnabled bool `json:"html_emails_enabled"` - HashedStorageEnabled bool `json:"hashed_storage_enabled"` - HelpPageDocumentationBaseURL string `json:"help_page_documentation_base_url"` - HelpPageHideCommercialContent bool `json:"help_page_hide_commercial_content"` - HelpPageSupportURL string `json:"help_page_support_url"` - HelpPageText string `json:"help_page_text"` - HelpText string `json:"help_text"` - HideThirdPartyOffers bool `json:"hide_third_party_offers"` - HomePageURL string `json:"home_page_url"` - HousekeepingBitmapsEnabled bool `json:"housekeeping_bitmaps_enabled"` - HousekeepingEnabled bool `json:"housekeeping_enabled"` - HousekeepingFullRepackPeriod int `json:"housekeeping_full_repack_period"` - HousekeepingGcPeriod int `json:"housekeeping_gc_period"` - HousekeepingIncrementalRepackPeriod int `json:"housekeeping_incremental_repack_period"` - HousekeepingOptimizeRepositoryPeriod int `json:"housekeeping_optimize_repository_period"` - ImportSources []string `json:"import_sources"` - InactiveProjectsDeleteAfterMonths int `json:"inactive_projects_delete_after_months"` - InactiveProjectsMinSizeMB int `json:"inactive_projects_min_size_mb"` - InactiveProjectsSendWarningEmailAfterMonths int `json:"inactive_projects_send_warning_email_after_months"` - IncludeOptionalMetricsInServicePing bool `json:"include_optional_metrics_in_service_ping"` - InProductMarketingEmailsEnabled bool `json:"in_product_marketing_emails_enabled"` - InvisibleCaptchaEnabled bool `json:"invisible_captcha_enabled"` - IssuesCreateLimit int `json:"issues_create_limit"` - 
JiraConnectApplicationKey string `json:"jira_connect_application_key"` - JiraConnectPublicKeyStorageEnabled bool `json:"jira_connect_public_key_storage_enabled"` - JiraConnectProxyURL string `json:"jira_connect_proxy_url"` - KeepLatestArtifact bool `json:"keep_latest_artifact"` - KrokiEnabled bool `json:"kroki_enabled"` - KrokiFormats map[string]bool `json:"kroki_formats"` - KrokiURL string `json:"kroki_url"` - LocalMarkdownVersion int `json:"local_markdown_version"` - LockDuoFeaturesEnabled bool `json:"lock_duo_features_enabled"` - LockMembershipsToLDAP bool `json:"lock_memberships_to_ldap"` - LoginRecaptchaProtectionEnabled bool `json:"login_recaptcha_protection_enabled"` - MailgunEventsEnabled bool `json:"mailgun_events_enabled"` - MailgunSigningKey string `json:"mailgun_signing_key"` - MaintenanceMode bool `json:"maintenance_mode"` - MaintenanceModeMessage string `json:"maintenance_mode_message"` - MavenPackageRequestsForwarding bool `json:"maven_package_requests_forwarding"` - MaxArtifactsSize int `json:"max_artifacts_size"` - MaxAttachmentSize int `json:"max_attachment_size"` - MaxDecompressedArchiveSize int `json:"max_decompressed_archive_size"` - MaxExportSize int `json:"max_export_size"` - MaxImportRemoteFileSize int `json:"max_import_remote_file_size"` - MaxImportSize int `json:"max_import_size"` - MaxLoginAttempts int `json:"max_login_attempts"` - MaxNumberOfRepositoryDownloads int `json:"max_number_of_repository_downloads"` - MaxNumberOfRepositoryDownloadsWithinTimePeriod int `json:"max_number_of_repository_downloads_within_time_period"` - MaxPagesSize int `json:"max_pages_size"` - MaxPersonalAccessTokenLifetime int `json:"max_personal_access_token_lifetime"` - MaxSSHKeyLifetime int `json:"max_ssh_key_lifetime"` - MaxTerraformStateSizeBytes int `json:"max_terraform_state_size_bytes"` - MaxYAMLDepth int `json:"max_yaml_depth"` - MaxYAMLSizeBytes int `json:"max_yaml_size_bytes"` - MetricsMethodCallThreshold int `json:"metrics_method_call_threshold"` - MinimumPasswordLength int `json:"minimum_password_length"` - MirrorAvailable bool `json:"mirror_available"` - MirrorCapacityThreshold int `json:"mirror_capacity_threshold"` - MirrorMaxCapacity int `json:"mirror_max_capacity"` - MirrorMaxDelay int `json:"mirror_max_delay"` - NPMPackageRequestsForwarding bool `json:"npm_package_requests_forwarding"` - NotesCreateLimit int `json:"notes_create_limit"` - NotifyOnUnknownSignIn bool `json:"notify_on_unknown_sign_in"` - NugetSkipMetadataURLValidation bool `json:"nuget_skip_metadata_url_validation"` - OutboundLocalRequestsAllowlistRaw string `json:"outbound_local_requests_allowlist_raw"` - OutboundLocalRequestsWhitelist []string `json:"outbound_local_requests_whitelist"` - PackageMetadataPURLTypes []int `json:"package_metadata_purl_types"` - PackageRegistryAllowAnyoneToPullOption bool `json:"package_registry_allow_anyone_to_pull_option"` - PackageRegistryCleanupPoliciesWorkerCapacity int `json:"package_registry_cleanup_policies_worker_capacity"` - PagesDomainVerificationEnabled bool `json:"pages_domain_verification_enabled"` - PasswordAuthenticationEnabledForGit bool `json:"password_authentication_enabled_for_git"` - PasswordAuthenticationEnabledForWeb bool `json:"password_authentication_enabled_for_web"` - PasswordNumberRequired bool `json:"password_number_required"` - PasswordSymbolRequired bool `json:"password_symbol_required"` - PasswordUppercaseRequired bool `json:"password_uppercase_required"` - PasswordLowercaseRequired bool `json:"password_lowercase_required"` - 
PerformanceBarAllowedGroupID int `json:"performance_bar_allowed_group_id"` - PerformanceBarAllowedGroupPath string `json:"performance_bar_allowed_group_path"` - PerformanceBarEnabled bool `json:"performance_bar_enabled"` - PersonalAccessTokenPrefix string `json:"personal_access_token_prefix"` - PipelineLimitPerProjectUserSha int `json:"pipeline_limit_per_project_user_sha"` - PlantumlEnabled bool `json:"plantuml_enabled"` - PlantumlURL string `json:"plantuml_url"` - PollingIntervalMultiplier float64 `json:"polling_interval_multiplier,string"` - PreventMergeRequestsAuthorApproval bool `json:"prevent_merge_request_author_approval"` - PreventMergeRequestsCommittersApproval bool `json:"prevent_merge_request_committers_approval"` - ProjectDownloadExportLimit int `json:"project_download_export_limit"` - ProjectExportEnabled bool `json:"project_export_enabled"` - ProjectExportLimit int `json:"project_export_limit"` - ProjectImportLimit int `json:"project_import_limit"` - ProjectJobsAPIRateLimit int `json:"project_jobs_api_rate_limit"` - ProjectRunnerTokenExpirationInterval int `json:"project_runner_token_expiration_interval"` - ProjectsAPIRateLimitUnauthenticated int `json:"projects_api_rate_limit_unauthenticated"` - PrometheusMetricsEnabled bool `json:"prometheus_metrics_enabled"` - ProtectedCIVariables bool `json:"protected_ci_variables"` - PseudonymizerEnabled bool `json:"pseudonymizer_enabled"` - PushEventActivitiesLimit int `json:"push_event_activities_limit"` - PushEventHooksLimit int `json:"push_event_hooks_limit"` - PyPIPackageRequestsForwarding bool `json:"pypi_package_requests_forwarding"` - RSAKeyRestriction int `json:"rsa_key_restriction"` - RateLimitingResponseText string `json:"rate_limiting_response_text"` - RawBlobRequestLimit int `json:"raw_blob_request_limit"` - RecaptchaEnabled bool `json:"recaptcha_enabled"` - RecaptchaPrivateKey string `json:"recaptcha_private_key"` - RecaptchaSiteKey string `json:"recaptcha_site_key"` - ReceiveMaxInputSize int `json:"receive_max_input_size"` - ReceptiveClusterAgentsEnabled bool `json:"receptive_cluster_agents_enabled"` - RememberMeEnabled bool `json:"remember_me_enabled"` - RepositoryChecksEnabled bool `json:"repository_checks_enabled"` - RepositorySizeLimit int `json:"repository_size_limit"` - RepositoryStorages []string `json:"repository_storages"` - RepositoryStoragesWeighted map[string]int `json:"repository_storages_weighted"` - RequireAdminApprovalAfterUserSignup bool `json:"require_admin_approval_after_user_signup"` - RequireAdminTwoFactorAuthentication bool `json:"require_admin_two_factor_authentication"` - RequirePersonalAccessTokenExpiry bool `json:"require_personal_access_token_expiry"` - RequireTwoFactorAuthentication bool `json:"require_two_factor_authentication"` - RestrictedVisibilityLevels []VisibilityValue `json:"restricted_visibility_levels"` - RunnerTokenExpirationInterval int `json:"runner_token_expiration_interval"` - SearchRateLimit int `json:"search_rate_limit"` - SearchRateLimitUnauthenticated int `json:"search_rate_limit_unauthenticated"` - SecretDetectionRevocationTokenTypesURL string `json:"secret_detection_revocation_token_types_url"` - SecretDetectionTokenRevocationEnabled bool `json:"secret_detection_token_revocation_enabled"` - SecretDetectionTokenRevocationToken string `json:"secret_detection_token_revocation_token"` - SecretDetectionTokenRevocationURL string `json:"secret_detection_token_revocation_url"` - SecurityApprovalPoliciesLimit int `json:"security_approval_policies_limit"` - 
SecurityPolicyGlobalGroupApproversEnabled bool `json:"security_policy_global_group_approvers_enabled"` - SecurityTXTContent string `json:"security_txt_content"` - SendUserConfirmationEmail bool `json:"send_user_confirmation_email"` - SentryClientsideDSN string `json:"sentry_clientside_dsn"` - SentryDSN string `json:"sentry_dsn"` - SentryEnabled bool `json:"sentry_enabled"` - SentryEnvironment string `json:"sentry_environment"` - ServiceAccessTokensExpirationEnforced bool `json:"service_access_tokens_expiration_enforced"` - SessionExpireDelay int `json:"session_expire_delay"` - SharedRunnersEnabled bool `json:"shared_runners_enabled"` - SharedRunnersMinutes int `json:"shared_runners_minutes"` - SharedRunnersText string `json:"shared_runners_text"` - SidekiqJobLimiterCompressionThresholdBytes int `json:"sidekiq_job_limiter_compression_threshold_bytes"` - SidekiqJobLimiterLimitBytes int `json:"sidekiq_job_limiter_limit_bytes"` - SidekiqJobLimiterMode string `json:"sidekiq_job_limiter_mode"` - SignInText string `json:"sign_in_text"` - SignupEnabled bool `json:"signup_enabled"` - SilentAdminExportsEnabled bool `json:"silent_admin_exports_enabled"` - SilentModeEnabled bool `json:"silent_mode_enabled"` - SlackAppEnabled bool `json:"slack_app_enabled"` - SlackAppID string `json:"slack_app_id"` - SlackAppSecret string `json:"slack_app_secret"` - SlackAppSigningSecret string `json:"slack_app_signing_secret"` - SlackAppVerificationToken string `json:"slack_app_verification_token"` - SnippetSizeLimit int `json:"snippet_size_limit"` - SnowplowAppID string `json:"snowplow_app_id"` - SnowplowCollectorHostname string `json:"snowplow_collector_hostname"` - SnowplowCookieDomain string `json:"snowplow_cookie_domain"` - SnowplowDatabaseCollectorHostname string `json:"snowplow_database_collector_hostname"` - SnowplowEnabled bool `json:"snowplow_enabled"` - SourcegraphEnabled bool `json:"sourcegraph_enabled"` - SourcegraphPublicOnly bool `json:"sourcegraph_public_only"` - SourcegraphURL string `json:"sourcegraph_url"` - SpamCheckAPIKey string `json:"spam_check_api_key"` - SpamCheckEndpointEnabled bool `json:"spam_check_endpoint_enabled"` - SpamCheckEndpointURL string `json:"spam_check_endpoint_url"` - StaticObjectsExternalStorageAuthToken string `json:"static_objects_external_storage_auth_token"` - StaticObjectsExternalStorageURL string `json:"static_objects_external_storage_url"` - SuggestPipelineEnabled bool `json:"suggest_pipeline_enabled"` - TerminalMaxSessionTime int `json:"terminal_max_session_time"` - Terms string `json:"terms"` - ThrottleAuthenticatedAPIEnabled bool `json:"throttle_authenticated_api_enabled"` - ThrottleAuthenticatedAPIPeriodInSeconds int `json:"throttle_authenticated_api_period_in_seconds"` - ThrottleAuthenticatedAPIRequestsPerPeriod int `json:"throttle_authenticated_api_requests_per_period"` - ThrottleAuthenticatedDeprecatedAPIEnabled bool `json:"throttle_authenticated_deprecated_api_enabled"` - ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_authenticated_deprecated_api_period_in_seconds"` - ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_authenticated_deprecated_api_requests_per_period"` - ThrottleAuthenticatedFilesAPIEnabled bool `json:"throttle_authenticated_files_api_enabled"` - ThrottleAuthenticatedFilesAPIPeriodInSeconds int `json:"throttle_authenticated_files_api_period_in_seconds"` - ThrottleAuthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_authenticated_files_api_requests_per_period"` - ThrottleAuthenticatedGitLFSEnabled 
bool `json:"throttle_authenticated_git_lfs_enabled"` - ThrottleAuthenticatedGitLFSPeriodInSeconds int `json:"throttle_authenticated_git_lfs_period_in_seconds"` - ThrottleAuthenticatedGitLFSRequestsPerPeriod int `json:"throttle_authenticated_git_lfs_requests_per_period"` - ThrottleAuthenticatedPackagesAPIEnabled bool `json:"throttle_authenticated_packages_api_enabled"` - ThrottleAuthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_authenticated_packages_api_period_in_seconds"` - ThrottleAuthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_authenticated_packages_api_requests_per_period"` - ThrottleAuthenticatedWebEnabled bool `json:"throttle_authenticated_web_enabled"` - ThrottleAuthenticatedWebPeriodInSeconds int `json:"throttle_authenticated_web_period_in_seconds"` - ThrottleAuthenticatedWebRequestsPerPeriod int `json:"throttle_authenticated_web_requests_per_period"` - ThrottleIncidentManagementNotificationEnabled bool `json:"throttle_incident_management_notification_enabled"` - ThrottleIncidentManagementNotificationPerPeriod int `json:"throttle_incident_management_notification_per_period"` - ThrottleIncidentManagementNotificationPeriodInSeconds int `json:"throttle_incident_management_notification_period_in_seconds"` - ThrottleProtectedPathsEnabled bool `json:"throttle_protected_paths_enabled"` - ThrottleProtectedPathsPeriodInSeconds int `json:"throttle_protected_paths_period_in_seconds"` - ThrottleProtectedPathsRequestsPerPeriod int `json:"throttle_protected_paths_requests_per_period"` - ThrottleUnauthenticatedAPIEnabled bool `json:"throttle_unauthenticated_api_enabled"` - ThrottleUnauthenticatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_api_period_in_seconds"` - ThrottleUnauthenticatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_api_requests_per_period"` - ThrottleUnauthenticatedDeprecatedAPIEnabled bool `json:"throttle_unauthenticated_deprecated_api_enabled"` - ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_deprecated_api_period_in_seconds"` - ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_deprecated_api_requests_per_period"` - ThrottleUnauthenticatedFilesAPIEnabled bool `json:"throttle_unauthenticated_files_api_enabled"` - ThrottleUnauthenticatedFilesAPIPeriodInSeconds int `json:"throttle_unauthenticated_files_api_period_in_seconds"` - ThrottleUnauthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_files_api_requests_per_period"` - ThrottleUnauthenticatedGitLFSEnabled bool `json:"throttle_unauthenticated_git_lfs_enabled"` - ThrottleUnauthenticatedGitLFSPeriodInSeconds int `json:"throttle_unauthenticated_git_lfs_period_in_seconds"` - ThrottleUnauthenticatedGitLFSRequestsPerPeriod int `json:"throttle_unauthenticated_git_lfs_requests_per_period"` - ThrottleUnauthenticatedPackagesAPIEnabled bool `json:"throttle_unauthenticated_packages_api_enabled"` - ThrottleUnauthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_unauthenticated_packages_api_period_in_seconds"` - ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_packages_api_requests_per_period"` - ThrottleUnauthenticatedWebEnabled bool `json:"throttle_unauthenticated_web_enabled"` - ThrottleUnauthenticatedWebPeriodInSeconds int `json:"throttle_unauthenticated_web_period_in_seconds"` - ThrottleUnauthenticatedWebRequestsPerPeriod int `json:"throttle_unauthenticated_web_requests_per_period"` - TimeTrackingLimitToHours bool 
`json:"time_tracking_limit_to_hours"` - TwoFactorGracePeriod int `json:"two_factor_grace_period"` - UnconfirmedUsersDeleteAfterDays int `json:"unconfirmed_users_delete_after_days"` - UniqueIPsLimitEnabled bool `json:"unique_ips_limit_enabled"` - UniqueIPsLimitPerUser int `json:"unique_ips_limit_per_user"` - UniqueIPsLimitTimeWindow int `json:"unique_ips_limit_time_window"` - UpdateRunnerVersionsEnabled bool `json:"update_runner_versions_enabled"` - UpdatedAt *time.Time `json:"updated_at"` - UpdatingNameDisabledForUsers bool `json:"updating_name_disabled_for_users"` - UsagePingEnabled bool `json:"usage_ping_enabled"` - UsagePingFeaturesEnabled bool `json:"usage_ping_features_enabled"` - UseClickhouseForAnalytics bool `json:"use_clickhouse_for_analytics"` - UserDeactivationEmailsEnabled bool `json:"user_deactivation_emails_enabled"` - UserDefaultExternal bool `json:"user_default_external"` - UserDefaultInternalRegex string `json:"user_default_internal_regex"` - UserDefaultsToPrivateProfile bool `json:"user_defaults_to_private_profile"` - UserOauthApplications bool `json:"user_oauth_applications"` - UserShowAddSSHKeyMessage bool `json:"user_show_add_ssh_key_message"` - UsersGetByIDLimit int `json:"users_get_by_id_limit"` - UsersGetByIDLimitAllowlistRaw string `json:"users_get_by_id_limit_allowlist_raw"` - ValidRunnerRegistrars []string `json:"valid_runner_registrars"` - VersionCheckEnabled bool `json:"version_check_enabled"` - WebIDEClientsidePreviewEnabled bool `json:"web_ide_clientside_preview_enabled"` - WhatsNewVariant string `json:"whats_new_variant"` - WikiPageMaxContentBytes int `json:"wiki_page_max_content_bytes"` + ID int `json:"id"` + AbuseNotificationEmail string `json:"abuse_notification_email"` + AdminMode bool `json:"admin_mode"` + AfterSignOutPath string `json:"after_sign_out_path"` + AfterSignUpText string `json:"after_sign_up_text"` + AkismetAPIKey string `json:"akismet_api_key"` + AkismetEnabled bool `json:"akismet_enabled"` + AllowAccountDeletion bool `json:"allow_account_deletion"` + AllowGroupOwnersToManageLDAP bool `json:"allow_group_owners_to_manage_ldap"` + AllowLocalRequestsFromSystemHooks bool `json:"allow_local_requests_from_system_hooks"` + AllowLocalRequestsFromWebHooksAndServices bool `json:"allow_local_requests_from_web_hooks_and_services"` + AllowProjectCreationForGuestAndBelow bool `json:"allow_project_creation_for_guest_and_below"` + AllowRunnerRegistrationToken bool `json:"allow_runner_registration_token"` + ArchiveBuildsInHumanReadable string `json:"archive_builds_in_human_readable"` + ASCIIDocMaxIncludes int `json:"asciidoc_max_includes"` + AssetProxyAllowlist []string `json:"asset_proxy_allowlist"` + AssetProxyEnabled bool `json:"asset_proxy_enabled"` + AssetProxyURL string `json:"asset_proxy_url"` + AssetProxySecretKey string `json:"asset_proxy_secret_key"` + AuthorizedKeysEnabled bool `json:"authorized_keys_enabled"` + AutoBanUserOnExcessiveProjectsDownload bool `json:"auto_ban_user_on_excessive_projects_download"` + AutoDevOpsDomain string `json:"auto_devops_domain"` + AutoDevOpsEnabled bool `json:"auto_devops_enabled"` + AutomaticPurchasedStorageAllocation bool `json:"automatic_purchased_storage_allocation"` + BulkImportConcurrentPipelineBatchLimit int `json:"bulk_import_concurrent_pipeline_batch_limit"` + BulkImportEnabled bool `json:"bulk_import_enabled"` + BulkImportMaxDownloadFileSize int `json:"bulk_import_max_download_file_size"` + CanCreateGroup bool `json:"can_create_group"` + CheckNamespacePlan bool `json:"check_namespace_plan"` + 
CIMaxIncludes int `json:"ci_max_includes"` + CIMaxTotalYAMLSizeBytes int `json:"ci_max_total_yaml_size_bytes"` + CommitEmailHostname string `json:"commit_email_hostname"` + ConcurrentBitbucketImportJobsLimit int `json:"concurrent_bitbucket_import_jobs_limit"` + ConcurrentBitbucketServerImportJobsLimit int `json:"concurrent_bitbucket_server_import_jobs_limit"` + ConcurrentGitHubImportJobsLimit int `json:"concurrent_github_import_jobs_limit"` + ContainerExpirationPoliciesEnableHistoricEntries bool `json:"container_expiration_policies_enable_historic_entries"` + ContainerRegistryCleanupTagsServiceMaxListSize int `json:"container_registry_cleanup_tags_service_max_list_size"` + ContainerRegistryDeleteTagsServiceTimeout int `json:"container_registry_delete_tags_service_timeout"` + ContainerRegistryExpirationPoliciesCaching bool `json:"container_registry_expiration_policies_caching"` + ContainerRegistryExpirationPoliciesWorkerCapacity int `json:"container_registry_expiration_policies_worker_capacity"` + ContainerRegistryImportCreatedBefore *time.Time `json:"container_registry_import_created_before"` + ContainerRegistryImportMaxRetries int `json:"container_registry_import_max_retries"` + ContainerRegistryImportMaxStepDuration int `json:"container_registry_import_max_step_duration"` + ContainerRegistryImportMaxTagsCount int `json:"container_registry_import_max_tags_count"` + ContainerRegistryImportStartMaxRetries int `json:"container_registry_import_start_max_retries"` + ContainerRegistryImportTargetPlan string `json:"container_registry_import_target_plan"` + ContainerRegistryTokenExpireDelay int `json:"container_registry_token_expire_delay"` + CreatedAt *time.Time `json:"created_at"` + CustomHTTPCloneURLRoot string `json:"custom_http_clone_url_root"` + DNSRebindingProtectionEnabled bool `json:"dns_rebinding_protection_enabled"` + DSAKeyRestriction int `json:"dsa_key_restriction"` + DeactivateDormantUsers bool `json:"deactivate_dormant_users"` + DeactivateDormantUsersPeriod int `json:"deactivate_dormant_users_period"` + DecompressArchiveFileTimeout int `json:"decompress_archive_file_timeout"` + DefaultArtifactsExpireIn string `json:"default_artifacts_expire_in"` + DefaultBranchName string `json:"default_branch_name"` + DefaultBranchProtection int `json:"default_branch_protection"` + DefaultBranchProtectionDefaults *BranchProtectionDefaults `json:"default_branch_protection_defaults,omitempty"` + DefaultCiConfigPath string `json:"default_ci_config_path"` + DefaultGroupVisibility VisibilityValue `json:"default_group_visibility"` + DefaultPreferredLanguage string `json:"default_preferred_language"` + DefaultProjectCreation int `json:"default_project_creation"` + DefaultProjectDeletionProtection bool `json:"default_project_deletion_protection"` + DefaultProjectVisibility VisibilityValue `json:"default_project_visibility"` + DefaultProjectsLimit int `json:"default_projects_limit"` + DefaultSnippetVisibility VisibilityValue `json:"default_snippet_visibility"` + DefaultSyntaxHighlightingTheme int `json:"default_syntax_highlighting_theme"` + DelayedGroupDeletion bool `json:"delayed_group_deletion"` + DelayedProjectDeletion bool `json:"delayed_project_deletion"` + DeleteInactiveProjects bool `json:"delete_inactive_projects"` + DeleteUnconfirmedUsers bool `json:"delete_unconfirmed_users"` + DeletionAdjournedPeriod int `json:"deletion_adjourned_period"` + DiagramsnetEnabled bool `json:"diagramsnet_enabled"` + DiagramsnetURL string `json:"diagramsnet_url"` + DiffMaxFiles int `json:"diff_max_files"` + 
DiffMaxLines int `json:"diff_max_lines"` + DiffMaxPatchBytes int `json:"diff_max_patch_bytes"` + DisableAdminOAuthScopes bool `json:"disable_admin_oauth_scopes"` + DisableFeedToken bool `json:"disable_feed_token"` + DisableOverridingApproversPerMergeRequest bool `json:"disable_overriding_approvers_per_merge_request"` + DisablePersonalAccessTokens bool `json:"disable_personal_access_tokens"` + DisabledOauthSignInSources []string `json:"disabled_oauth_sign_in_sources"` + DomainAllowlist []string `json:"domain_allowlist"` + DomainDenylist []string `json:"domain_denylist"` + DomainDenylistEnabled bool `json:"domain_denylist_enabled"` + DownstreamPipelineTriggerLimitPerProjectUserSHA int `json:"downstream_pipeline_trigger_limit_per_project_user_sha"` + DuoFeaturesEnabled bool `json:"duo_features_enabled"` + ECDSAKeyRestriction int `json:"ecdsa_key_restriction"` + ECDSASKKeyRestriction int `json:"ecdsa_sk_key_restriction"` + EKSAccessKeyID string `json:"eks_access_key_id"` + EKSAccountID string `json:"eks_account_id"` + EKSIntegrationEnabled bool `json:"eks_integration_enabled"` + EKSSecretAccessKey string `json:"eks_secret_access_key"` + Ed25519KeyRestriction int `json:"ed25519_key_restriction"` + Ed25519SKKeyRestriction int `json:"ed25519_sk_key_restriction"` + ElasticsearchAWS bool `json:"elasticsearch_aws"` + ElasticsearchAWSAccessKey string `json:"elasticsearch_aws_access_key"` + ElasticsearchAWSRegion string `json:"elasticsearch_aws_region"` + ElasticsearchAWSSecretAccessKey string `json:"elasticsearch_aws_secret_access_key"` + ElasticsearchAnalyzersKuromojiEnabled bool `json:"elasticsearch_analyzers_kuromoji_enabled"` + ElasticsearchAnalyzersKuromojiSearch bool `json:"elasticsearch_analyzers_kuromoji_search"` + ElasticsearchAnalyzersSmartCNEnabled bool `json:"elasticsearch_analyzers_smartcn_enabled"` + ElasticsearchAnalyzersSmartCNSearch bool `json:"elasticsearch_analyzers_smartcn_search"` + ElasticsearchClientRequestTimeout int `json:"elasticsearch_client_request_timeout"` + ElasticsearchIndexedFieldLengthLimit int `json:"elasticsearch_indexed_field_length_limit"` + ElasticsearchIndexedFileSizeLimitKB int `json:"elasticsearch_indexed_file_size_limit_kb"` + ElasticsearchIndexing bool `json:"elasticsearch_indexing"` + ElasticsearchLimitIndexing bool `json:"elasticsearch_limit_indexing"` + ElasticsearchMaxBulkConcurrency int `json:"elasticsearch_max_bulk_concurrency"` + ElasticsearchMaxBulkSizeMB int `json:"elasticsearch_max_bulk_size_mb"` + ElasticsearchMaxCodeIndexingConcurrency int `json:"elasticsearch_max_code_indexing_concurrency"` + ElasticsearchNamespaceIDs []int `json:"elasticsearch_namespace_ids"` + ElasticsearchPassword string `json:"elasticsearch_password"` + ElasticsearchPauseIndexing bool `json:"elasticsearch_pause_indexing"` + ElasticsearchProjectIDs []int `json:"elasticsearch_project_ids"` + ElasticsearchReplicas int `json:"elasticsearch_replicas"` + ElasticsearchRequeueWorkers bool `json:"elasticsearch_requeue_workers"` + ElasticsearchSearch bool `json:"elasticsearch_search"` + ElasticsearchShards int `json:"elasticsearch_shards"` + ElasticsearchURL []string `json:"elasticsearch_url"` + ElasticsearchUsername string `json:"elasticsearch_username"` + ElasticsearchWorkerNumberOfShards int `json:"elasticsearch_worker_number_of_shards"` + EmailAdditionalText string `json:"email_additional_text"` + EmailAuthorInBody bool `json:"email_author_in_body"` + EmailConfirmationSetting string `json:"email_confirmation_setting"` + EmailRestrictions string `json:"email_restrictions"` + 
EmailRestrictionsEnabled bool `json:"email_restrictions_enabled"` + EnableArtifactExternalRedirectWarningPage bool `json:"enable_artifact_external_redirect_warning_page"` + EnabledGitAccessProtocol string `json:"enabled_git_access_protocol"` + EnforceNamespaceStorageLimit bool `json:"enforce_namespace_storage_limit"` + EnforcePATExpiration bool `json:"enforce_pat_expiration"` + EnforceSSHKeyExpiration bool `json:"enforce_ssh_key_expiration"` + EnforceTerms bool `json:"enforce_terms"` + ExternalAuthClientCert string `json:"external_auth_client_cert"` + ExternalAuthClientKey string `json:"external_auth_client_key"` + ExternalAuthClientKeyPass string `json:"external_auth_client_key_pass"` + ExternalAuthorizationServiceDefaultLabel string `json:"external_authorization_service_default_label"` + ExternalAuthorizationServiceEnabled bool `json:"external_authorization_service_enabled"` + ExternalAuthorizationServiceTimeout float64 `json:"external_authorization_service_timeout"` + ExternalAuthorizationServiceURL string `json:"external_authorization_service_url"` + ExternalPipelineValidationServiceTimeout int `json:"external_pipeline_validation_service_timeout"` + ExternalPipelineValidationServiceToken string `json:"external_pipeline_validation_service_token"` + ExternalPipelineValidationServiceURL string `json:"external_pipeline_validation_service_url"` + FailedLoginAttemptsUnlockPeriodInMinutes int `json:"failed_login_attempts_unlock_period_in_minutes"` + FileTemplateProjectID int `json:"file_template_project_id"` + FirstDayOfWeek int `json:"first_day_of_week"` + FlocEnabled bool `json:"floc_enabled"` + GeoNodeAllowedIPs string `json:"geo_node_allowed_ips"` + GeoStatusTimeout int `json:"geo_status_timeout"` + GitRateLimitUsersAlertlist []string `json:"git_rate_limit_users_alertlist"` + GitTwoFactorSessionExpiry int `json:"git_two_factor_session_expiry"` + GitalyTimeoutDefault int `json:"gitaly_timeout_default"` + GitalyTimeoutFast int `json:"gitaly_timeout_fast"` + GitalyTimeoutMedium int `json:"gitaly_timeout_medium"` + GitlabDedicatedInstance bool `json:"gitlab_dedicated_instance"` + GitlabEnvironmentToolkitInstance bool `json:"gitlab_environment_toolkit_instance"` + GitlabShellOperationLimit int `json:"gitlab_shell_operation_limit"` + GitpodEnabled bool `json:"gitpod_enabled"` + GitpodURL string `json:"gitpod_url"` + GitRateLimitUsersAllowlist []string `json:"git_rate_limit_users_allowlist"` + GloballyAllowedIPs string `json:"globally_allowed_ips"` + GrafanaEnabled bool `json:"grafana_enabled"` + GrafanaURL string `json:"grafana_url"` + GravatarEnabled bool `json:"gravatar_enabled"` + GroupDownloadExportLimit int `json:"group_download_export_limit"` + GroupExportLimit int `json:"group_export_limit"` + GroupImportLimit int `json:"group_import_limit"` + GroupOwnersCanManageDefaultBranchProtection bool `json:"group_owners_can_manage_default_branch_protection"` + GroupRunnerTokenExpirationInterval int `json:"group_runner_token_expiration_interval"` + HTMLEmailsEnabled bool `json:"html_emails_enabled"` + HashedStorageEnabled bool `json:"hashed_storage_enabled"` + HelpPageDocumentationBaseURL string `json:"help_page_documentation_base_url"` + HelpPageHideCommercialContent bool `json:"help_page_hide_commercial_content"` + HelpPageSupportURL string `json:"help_page_support_url"` + HelpPageText string `json:"help_page_text"` + HelpText string `json:"help_text"` + HideThirdPartyOffers bool `json:"hide_third_party_offers"` + HomePageURL string `json:"home_page_url"` + HousekeepingBitmapsEnabled bool 
`json:"housekeeping_bitmaps_enabled"` + HousekeepingEnabled bool `json:"housekeeping_enabled"` + HousekeepingFullRepackPeriod int `json:"housekeeping_full_repack_period"` + HousekeepingGcPeriod int `json:"housekeeping_gc_period"` + HousekeepingIncrementalRepackPeriod int `json:"housekeeping_incremental_repack_period"` + HousekeepingOptimizeRepositoryPeriod int `json:"housekeeping_optimize_repository_period"` + ImportSources []string `json:"import_sources"` + InactiveProjectsDeleteAfterMonths int `json:"inactive_projects_delete_after_months"` + InactiveProjectsMinSizeMB int `json:"inactive_projects_min_size_mb"` + InactiveProjectsSendWarningEmailAfterMonths int `json:"inactive_projects_send_warning_email_after_months"` + IncludeOptionalMetricsInServicePing bool `json:"include_optional_metrics_in_service_ping"` + InProductMarketingEmailsEnabled bool `json:"in_product_marketing_emails_enabled"` + InvisibleCaptchaEnabled bool `json:"invisible_captcha_enabled"` + IssuesCreateLimit int `json:"issues_create_limit"` + JiraConnectApplicationKey string `json:"jira_connect_application_key"` + JiraConnectPublicKeyStorageEnabled bool `json:"jira_connect_public_key_storage_enabled"` + JiraConnectProxyURL string `json:"jira_connect_proxy_url"` + KeepLatestArtifact bool `json:"keep_latest_artifact"` + KrokiEnabled bool `json:"kroki_enabled"` + KrokiFormats map[string]bool `json:"kroki_formats"` + KrokiURL string `json:"kroki_url"` + LocalMarkdownVersion int `json:"local_markdown_version"` + LockDuoFeaturesEnabled bool `json:"lock_duo_features_enabled"` + LockMembershipsToLDAP bool `json:"lock_memberships_to_ldap"` + LoginRecaptchaProtectionEnabled bool `json:"login_recaptcha_protection_enabled"` + MailgunEventsEnabled bool `json:"mailgun_events_enabled"` + MailgunSigningKey string `json:"mailgun_signing_key"` + MaintenanceMode bool `json:"maintenance_mode"` + MaintenanceModeMessage string `json:"maintenance_mode_message"` + MavenPackageRequestsForwarding bool `json:"maven_package_requests_forwarding"` + MaxArtifactsSize int `json:"max_artifacts_size"` + MaxAttachmentSize int `json:"max_attachment_size"` + MaxDecompressedArchiveSize int `json:"max_decompressed_archive_size"` + MaxExportSize int `json:"max_export_size"` + MaxImportRemoteFileSize int `json:"max_import_remote_file_size"` + MaxImportSize int `json:"max_import_size"` + MaxLoginAttempts int `json:"max_login_attempts"` + MaxNumberOfRepositoryDownloads int `json:"max_number_of_repository_downloads"` + MaxNumberOfRepositoryDownloadsWithinTimePeriod int `json:"max_number_of_repository_downloads_within_time_period"` + MaxPagesSize int `json:"max_pages_size"` + MaxPersonalAccessTokenLifetime int `json:"max_personal_access_token_lifetime"` + MaxSSHKeyLifetime int `json:"max_ssh_key_lifetime"` + MaxTerraformStateSizeBytes int `json:"max_terraform_state_size_bytes"` + MaxYAMLDepth int `json:"max_yaml_depth"` + MaxYAMLSizeBytes int `json:"max_yaml_size_bytes"` + MetricsMethodCallThreshold int `json:"metrics_method_call_threshold"` + MinimumPasswordLength int `json:"minimum_password_length"` + MirrorAvailable bool `json:"mirror_available"` + MirrorCapacityThreshold int `json:"mirror_capacity_threshold"` + MirrorMaxCapacity int `json:"mirror_max_capacity"` + MirrorMaxDelay int `json:"mirror_max_delay"` + NPMPackageRequestsForwarding bool `json:"npm_package_requests_forwarding"` + NotesCreateLimit int `json:"notes_create_limit"` + NotifyOnUnknownSignIn bool `json:"notify_on_unknown_sign_in"` + NugetSkipMetadataURLValidation bool 
`json:"nuget_skip_metadata_url_validation"` + OutboundLocalRequestsAllowlistRaw string `json:"outbound_local_requests_allowlist_raw"` + OutboundLocalRequestsWhitelist []string `json:"outbound_local_requests_whitelist"` + PackageMetadataPURLTypes []int `json:"package_metadata_purl_types"` + PackageRegistryAllowAnyoneToPullOption bool `json:"package_registry_allow_anyone_to_pull_option"` + PackageRegistryCleanupPoliciesWorkerCapacity int `json:"package_registry_cleanup_policies_worker_capacity"` + PagesDomainVerificationEnabled bool `json:"pages_domain_verification_enabled"` + PasswordAuthenticationEnabledForGit bool `json:"password_authentication_enabled_for_git"` + PasswordAuthenticationEnabledForWeb bool `json:"password_authentication_enabled_for_web"` + PasswordNumberRequired bool `json:"password_number_required"` + PasswordSymbolRequired bool `json:"password_symbol_required"` + PasswordUppercaseRequired bool `json:"password_uppercase_required"` + PasswordLowercaseRequired bool `json:"password_lowercase_required"` + PerformanceBarAllowedGroupID int `json:"performance_bar_allowed_group_id"` + PerformanceBarAllowedGroupPath string `json:"performance_bar_allowed_group_path"` + PerformanceBarEnabled bool `json:"performance_bar_enabled"` + PersonalAccessTokenPrefix string `json:"personal_access_token_prefix"` + PipelineLimitPerProjectUserSha int `json:"pipeline_limit_per_project_user_sha"` + PlantumlEnabled bool `json:"plantuml_enabled"` + PlantumlURL string `json:"plantuml_url"` + PollingIntervalMultiplier float64 `json:"polling_interval_multiplier,string"` + PreventMergeRequestsAuthorApproval bool `json:"prevent_merge_request_author_approval"` + PreventMergeRequestsCommittersApproval bool `json:"prevent_merge_request_committers_approval"` + ProjectDownloadExportLimit int `json:"project_download_export_limit"` + ProjectExportEnabled bool `json:"project_export_enabled"` + ProjectExportLimit int `json:"project_export_limit"` + ProjectImportLimit int `json:"project_import_limit"` + ProjectJobsAPIRateLimit int `json:"project_jobs_api_rate_limit"` + ProjectRunnerTokenExpirationInterval int `json:"project_runner_token_expiration_interval"` + ProjectsAPIRateLimitUnauthenticated int `json:"projects_api_rate_limit_unauthenticated"` + PrometheusMetricsEnabled bool `json:"prometheus_metrics_enabled"` + ProtectedCIVariables bool `json:"protected_ci_variables"` + PseudonymizerEnabled bool `json:"pseudonymizer_enabled"` + PushEventActivitiesLimit int `json:"push_event_activities_limit"` + PushEventHooksLimit int `json:"push_event_hooks_limit"` + PyPIPackageRequestsForwarding bool `json:"pypi_package_requests_forwarding"` + RSAKeyRestriction int `json:"rsa_key_restriction"` + RateLimitingResponseText string `json:"rate_limiting_response_text"` + RawBlobRequestLimit int `json:"raw_blob_request_limit"` + RecaptchaEnabled bool `json:"recaptcha_enabled"` + RecaptchaPrivateKey string `json:"recaptcha_private_key"` + RecaptchaSiteKey string `json:"recaptcha_site_key"` + ReceiveMaxInputSize int `json:"receive_max_input_size"` + ReceptiveClusterAgentsEnabled bool `json:"receptive_cluster_agents_enabled"` + RememberMeEnabled bool `json:"remember_me_enabled"` + RepositoryChecksEnabled bool `json:"repository_checks_enabled"` + RepositorySizeLimit int `json:"repository_size_limit"` + RepositoryStorages []string `json:"repository_storages"` + RepositoryStoragesWeighted map[string]int `json:"repository_storages_weighted"` + RequireAdminApprovalAfterUserSignup bool `json:"require_admin_approval_after_user_signup"` + 
RequireAdminTwoFactorAuthentication bool `json:"require_admin_two_factor_authentication"` + RequirePersonalAccessTokenExpiry bool `json:"require_personal_access_token_expiry"` + RequireTwoFactorAuthentication bool `json:"require_two_factor_authentication"` + RestrictedVisibilityLevels []VisibilityValue `json:"restricted_visibility_levels"` + RunnerTokenExpirationInterval int `json:"runner_token_expiration_interval"` + SearchRateLimit int `json:"search_rate_limit"` + SearchRateLimitUnauthenticated int `json:"search_rate_limit_unauthenticated"` + SecretDetectionRevocationTokenTypesURL string `json:"secret_detection_revocation_token_types_url"` + SecretDetectionTokenRevocationEnabled bool `json:"secret_detection_token_revocation_enabled"` + SecretDetectionTokenRevocationToken string `json:"secret_detection_token_revocation_token"` + SecretDetectionTokenRevocationURL string `json:"secret_detection_token_revocation_url"` + SecurityApprovalPoliciesLimit int `json:"security_approval_policies_limit"` + SecurityPolicyGlobalGroupApproversEnabled bool `json:"security_policy_global_group_approvers_enabled"` + SecurityTXTContent string `json:"security_txt_content"` + SendUserConfirmationEmail bool `json:"send_user_confirmation_email"` + SentryClientsideDSN string `json:"sentry_clientside_dsn"` + SentryDSN string `json:"sentry_dsn"` + SentryEnabled bool `json:"sentry_enabled"` + SentryEnvironment string `json:"sentry_environment"` + ServiceAccessTokensExpirationEnforced bool `json:"service_access_tokens_expiration_enforced"` + SessionExpireDelay int `json:"session_expire_delay"` + SharedRunnersEnabled bool `json:"shared_runners_enabled"` + SharedRunnersMinutes int `json:"shared_runners_minutes"` + SharedRunnersText string `json:"shared_runners_text"` + SidekiqJobLimiterCompressionThresholdBytes int `json:"sidekiq_job_limiter_compression_threshold_bytes"` + SidekiqJobLimiterLimitBytes int `json:"sidekiq_job_limiter_limit_bytes"` + SidekiqJobLimiterMode string `json:"sidekiq_job_limiter_mode"` + SignInText string `json:"sign_in_text"` + SignupEnabled bool `json:"signup_enabled"` + SilentAdminExportsEnabled bool `json:"silent_admin_exports_enabled"` + SilentModeEnabled bool `json:"silent_mode_enabled"` + SlackAppEnabled bool `json:"slack_app_enabled"` + SlackAppID string `json:"slack_app_id"` + SlackAppSecret string `json:"slack_app_secret"` + SlackAppSigningSecret string `json:"slack_app_signing_secret"` + SlackAppVerificationToken string `json:"slack_app_verification_token"` + SnippetSizeLimit int `json:"snippet_size_limit"` + SnowplowAppID string `json:"snowplow_app_id"` + SnowplowCollectorHostname string `json:"snowplow_collector_hostname"` + SnowplowCookieDomain string `json:"snowplow_cookie_domain"` + SnowplowDatabaseCollectorHostname string `json:"snowplow_database_collector_hostname"` + SnowplowEnabled bool `json:"snowplow_enabled"` + SourcegraphEnabled bool `json:"sourcegraph_enabled"` + SourcegraphPublicOnly bool `json:"sourcegraph_public_only"` + SourcegraphURL string `json:"sourcegraph_url"` + SpamCheckAPIKey string `json:"spam_check_api_key"` + SpamCheckEndpointEnabled bool `json:"spam_check_endpoint_enabled"` + SpamCheckEndpointURL string `json:"spam_check_endpoint_url"` + StaticObjectsExternalStorageAuthToken string `json:"static_objects_external_storage_auth_token"` + StaticObjectsExternalStorageURL string `json:"static_objects_external_storage_url"` + SuggestPipelineEnabled bool `json:"suggest_pipeline_enabled"` + TerminalMaxSessionTime int `json:"terminal_max_session_time"` + Terms string 
`json:"terms"` + ThrottleAuthenticatedAPIEnabled bool `json:"throttle_authenticated_api_enabled"` + ThrottleAuthenticatedAPIPeriodInSeconds int `json:"throttle_authenticated_api_period_in_seconds"` + ThrottleAuthenticatedAPIRequestsPerPeriod int `json:"throttle_authenticated_api_requests_per_period"` + ThrottleAuthenticatedDeprecatedAPIEnabled bool `json:"throttle_authenticated_deprecated_api_enabled"` + ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_authenticated_deprecated_api_period_in_seconds"` + ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_authenticated_deprecated_api_requests_per_period"` + ThrottleAuthenticatedFilesAPIEnabled bool `json:"throttle_authenticated_files_api_enabled"` + ThrottleAuthenticatedFilesAPIPeriodInSeconds int `json:"throttle_authenticated_files_api_period_in_seconds"` + ThrottleAuthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_authenticated_files_api_requests_per_period"` + ThrottleAuthenticatedGitLFSEnabled bool `json:"throttle_authenticated_git_lfs_enabled"` + ThrottleAuthenticatedGitLFSPeriodInSeconds int `json:"throttle_authenticated_git_lfs_period_in_seconds"` + ThrottleAuthenticatedGitLFSRequestsPerPeriod int `json:"throttle_authenticated_git_lfs_requests_per_period"` + ThrottleAuthenticatedPackagesAPIEnabled bool `json:"throttle_authenticated_packages_api_enabled"` + ThrottleAuthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_authenticated_packages_api_period_in_seconds"` + ThrottleAuthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_authenticated_packages_api_requests_per_period"` + ThrottleAuthenticatedWebEnabled bool `json:"throttle_authenticated_web_enabled"` + ThrottleAuthenticatedWebPeriodInSeconds int `json:"throttle_authenticated_web_period_in_seconds"` + ThrottleAuthenticatedWebRequestsPerPeriod int `json:"throttle_authenticated_web_requests_per_period"` + ThrottleIncidentManagementNotificationEnabled bool `json:"throttle_incident_management_notification_enabled"` + ThrottleIncidentManagementNotificationPerPeriod int `json:"throttle_incident_management_notification_per_period"` + ThrottleIncidentManagementNotificationPeriodInSeconds int `json:"throttle_incident_management_notification_period_in_seconds"` + ThrottleProtectedPathsEnabled bool `json:"throttle_protected_paths_enabled"` + ThrottleProtectedPathsPeriodInSeconds int `json:"throttle_protected_paths_period_in_seconds"` + ThrottleProtectedPathsRequestsPerPeriod int `json:"throttle_protected_paths_requests_per_period"` + ThrottleUnauthenticatedAPIEnabled bool `json:"throttle_unauthenticated_api_enabled"` + ThrottleUnauthenticatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_api_period_in_seconds"` + ThrottleUnauthenticatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_api_requests_per_period"` + ThrottleUnauthenticatedDeprecatedAPIEnabled bool `json:"throttle_unauthenticated_deprecated_api_enabled"` + ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_deprecated_api_period_in_seconds"` + ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_deprecated_api_requests_per_period"` + ThrottleUnauthenticatedFilesAPIEnabled bool `json:"throttle_unauthenticated_files_api_enabled"` + ThrottleUnauthenticatedFilesAPIPeriodInSeconds int `json:"throttle_unauthenticated_files_api_period_in_seconds"` + ThrottleUnauthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_files_api_requests_per_period"` + 
ThrottleUnauthenticatedGitLFSEnabled bool `json:"throttle_unauthenticated_git_lfs_enabled"` + ThrottleUnauthenticatedGitLFSPeriodInSeconds int `json:"throttle_unauthenticated_git_lfs_period_in_seconds"` + ThrottleUnauthenticatedGitLFSRequestsPerPeriod int `json:"throttle_unauthenticated_git_lfs_requests_per_period"` + ThrottleUnauthenticatedPackagesAPIEnabled bool `json:"throttle_unauthenticated_packages_api_enabled"` + ThrottleUnauthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_unauthenticated_packages_api_period_in_seconds"` + ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_packages_api_requests_per_period"` + ThrottleUnauthenticatedWebEnabled bool `json:"throttle_unauthenticated_web_enabled"` + ThrottleUnauthenticatedWebPeriodInSeconds int `json:"throttle_unauthenticated_web_period_in_seconds"` + ThrottleUnauthenticatedWebRequestsPerPeriod int `json:"throttle_unauthenticated_web_requests_per_period"` + TimeTrackingLimitToHours bool `json:"time_tracking_limit_to_hours"` + TwoFactorGracePeriod int `json:"two_factor_grace_period"` + UnconfirmedUsersDeleteAfterDays int `json:"unconfirmed_users_delete_after_days"` + UniqueIPsLimitEnabled bool `json:"unique_ips_limit_enabled"` + UniqueIPsLimitPerUser int `json:"unique_ips_limit_per_user"` + UniqueIPsLimitTimeWindow int `json:"unique_ips_limit_time_window"` + UpdateRunnerVersionsEnabled bool `json:"update_runner_versions_enabled"` + UpdatedAt *time.Time `json:"updated_at"` + UpdatingNameDisabledForUsers bool `json:"updating_name_disabled_for_users"` + UsagePingEnabled bool `json:"usage_ping_enabled"` + UsagePingFeaturesEnabled bool `json:"usage_ping_features_enabled"` + UseClickhouseForAnalytics bool `json:"use_clickhouse_for_analytics"` + UserDeactivationEmailsEnabled bool `json:"user_deactivation_emails_enabled"` + UserDefaultExternal bool `json:"user_default_external"` + UserDefaultInternalRegex string `json:"user_default_internal_regex"` + UserDefaultsToPrivateProfile bool `json:"user_defaults_to_private_profile"` + UserOauthApplications bool `json:"user_oauth_applications"` + UserShowAddSSHKeyMessage bool `json:"user_show_add_ssh_key_message"` + UsersGetByIDLimit int `json:"users_get_by_id_limit"` + UsersGetByIDLimitAllowlistRaw string `json:"users_get_by_id_limit_allowlist_raw"` + ValidRunnerRegistrars []string `json:"valid_runner_registrars"` + VersionCheckEnabled bool `json:"version_check_enabled"` + WebIDEClientsidePreviewEnabled bool `json:"web_ide_clientside_preview_enabled"` + WhatsNewVariant string `json:"whats_new_variant"` + WikiPageMaxContentBytes int `json:"wiki_page_max_content_bytes"` // Deprecated: Use AbuseNotificationEmail instead. AdminNotificationEmail string `json:"admin_notification_email"` @@ -462,17 +462,6 @@ type Settings struct { UserEmailLookupLimit int `json:"user_email_lookup_limit"` } -// BranchProtectionDefaults represents default Git protected branch permissions. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults -type BranchProtectionDefaults struct { - AllowedToPush []int `json:"allowed_to_push,omitempty"` - AllowForcePush bool `json:"allow_force_push,omitempty"` - AllowedToMerge []int `json:"allowed_to_merge,omitempty"` - DeveloperCanInitialPush bool `json:"developer_can_initial_push,omitempty"` -} - // Settings requires a custom unmarshaller in order to properly unmarshal // `container_registry_import_created_before` which is either a time.Time or // an empty string if no value is set. 
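Aside: the reshuffled Settings struct above is consumed through the go-gitlab client's settings service. Below is a minimal, hedged sketch of reading a couple of the fields touched by this update (DefaultBranchProtectionDefaults, SignupEnabled) via client.Settings.GetSettings(); the token and base URL are placeholders, the application-settings endpoint is administrator-scoped, and error handling is kept to a minimum. This is an illustration of how the updated struct is reached, not code from this repository.

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// Placeholder credentials and host; substitute real values.
	client, err := gitlab.NewClient(
		"glpat-placeholder-token",
		gitlab.WithBaseURL("https://gitlab.example.com/api/v4"),
	)
	if err != nil {
		log.Fatalf("creating client: %v", err)
	}

	// GetSettings returns the instance-level application settings,
	// i.e. the Settings struct shown in the diff above.
	settings, _, err := client.Settings.GetSettings()
	if err != nil {
		log.Fatalf("fetching settings: %v", err)
	}

	// DefaultBranchProtectionDefaults is a pointer-typed field,
	// so guard against nil before dereferencing it.
	if d := settings.DefaultBranchProtectionDefaults; d != nil {
		fmt.Println("developer can initial push:", d.DeveloperCanInitialPush)
	}
	fmt.Println("signup enabled:", settings.SignupEnabled)
}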
diff --git a/vendor/github.com/xanzy/go-gitlab/types.go b/vendor/github.com/xanzy/go-gitlab/types.go index 9ce13d73..5b7f74eb 100644 --- a/vendor/github.com/xanzy/go-gitlab/types.go +++ b/vendor/github.com/xanzy/go-gitlab/types.go @@ -89,6 +89,11 @@ func AccessLevel(v AccessLevelValue) *AccessLevelValue { return Ptr(v) } +type AccessLevelDetails struct { + IntegerValue AccessLevelValue `json:"integer_value"` + StringValue string `json:"string_value"` +} + // UserIDValue represents a user ID value within GitLab. type UserIDValue string diff --git a/vendor/github.com/xanzy/go-gitlab/users.go b/vendor/github.com/xanzy/go-gitlab/users.go index f463952a..f8566780 100644 --- a/vendor/github.com/xanzy/go-gitlab/users.go +++ b/vendor/github.com/xanzy/go-gitlab/users.go @@ -61,6 +61,16 @@ type BasicUser struct { WebURL string `json:"web_url"` } +// ServiceAccount represents a GitLab service account. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/user_service_accounts.html +type ServiceAccount struct { + ID int `json:"id"` + Username string `json:"username"` + Name string `json:"name"` +} + // User represents a GitLab user. // // GitLab API docs: https://docs.gitlab.com/ee/api/users.html @@ -1543,9 +1553,10 @@ func (s *UsersService) CreateUserRunner(opts *CreateUserRunnerOptions, options . return r, resp, nil } -// CreateServiceAccountUser creates a new service account user. Note only administrators can create new service account users. +// CreateServiceAccountUser creates a new service account user. // -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#create-service-account-user +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#create-service-account-user func (s *UsersService) CreateServiceAccountUser(options ...RequestOptionFunc) (*User, *Response, error) { req, err := s.client.NewRequest(http.MethodPost, "service_accounts", nil, options) if err != nil { @@ -1561,6 +1572,25 @@ func (s *UsersService) CreateServiceAccountUser(options ...RequestOptionFunc) (* return usr, resp, nil } +// ListServiceAccounts lists all service accounts. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#create-service-account-user +func (s *UsersService) ListServiceAccounts(opt *ListServiceAccountsOptions, options ...RequestOptionFunc) ([]*ServiceAccount, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "service_accounts", opt, options) + if err != nil { + return nil, nil, err + } + + var sas []*ServiceAccount + resp, err := s.client.Do(req, &sas) + if err != nil { + return nil, resp, err + } + + return sas, resp, nil +} + // UploadAvatar uploads an avatar to the current user. // // GitLab API docs: diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index a199b36b..18436eae 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -30,7 +30,7 @@ const ( type InterceptorFilter func(*InterceptorInfo) bool // Filter is a predicate used to determine whether a given request in -// should be instrumented by the attatched RPC tag info. +// should be instrumented by the attached RPC tag info. // A Filter must return true if the request should be instrumented. 
type Filter func(*stats.RPCTagInfo) bool @@ -42,6 +42,8 @@ type config struct { TracerProvider trace.TracerProvider MeterProvider metric.MeterProvider SpanStartOptions []trace.SpanStartOption + SpanAttributes []attribute.KeyValue + MetricAttributes []attribute.KeyValue ReceivedEvent bool SentEvent bool @@ -257,3 +259,29 @@ func (o spanStartOption) apply(c *config) { func WithSpanOptions(opts ...trace.SpanStartOption) Option { return spanStartOption{opts} } + +type spanAttributesOption struct{ a []attribute.KeyValue } + +func (o spanAttributesOption) apply(c *config) { + if o.a != nil { + c.SpanAttributes = o.a + } +} + +// WithSpanAttributes returns an Option to add custom attributes to the spans. +func WithSpanAttributes(a ...attribute.KeyValue) Option { + return spanAttributesOption{a: a} +} + +type metricAttributesOption struct{ a []attribute.KeyValue } + +func (o metricAttributesOption) apply(c *config) { + if o.a != nil { + c.MetricAttributes = o.a + } +} + +// WithMetricAttributes returns an Option to add custom attributes to the metrics. +func WithMetricAttributes(a ...attribute.KeyValue) Option { + return metricAttributesOption{a: a} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go index 7f19058e..7d5ed058 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -7,6 +7,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md import ( "context" + "errors" "io" "net" "strconv" @@ -136,7 +137,7 @@ func (w *clientStream) RecvMsg(m interface{}) error { if err == nil && !w.desc.ServerStreams { w.endSpan(nil) - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { w.endSpan(nil) } else if err != nil { w.endSpan(err) @@ -333,7 +334,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { elapsedTime := float64(time.Since(before)) / float64(time.Millisecond) metricAttrs = append(metricAttrs, grpcStatusCodeAttr) - cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) return resp, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index fad58733..fbcbfb84 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -62,11 +62,11 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), record: true, } if h.config.Filter != nil { @@ -102,11 +102,11 @@ func (h *clientHandler) TagRPC(ctx 
context.Context, info *stats.RPCTagInfo) cont ctx, name, trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), record: true, } if h.config.Filter != nil { @@ -151,7 +151,7 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.InPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesReceived, 1) - c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.ReceivedEvent { @@ -167,7 +167,7 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.OutPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesSent, 1) - c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.SentEvent { @@ -204,14 +204,17 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool span.End() metricAttrs = append(metricAttrs, rpcStatusAttr) + // Allocate vararg slice once. + recordOpts := []metric.RecordOption{metric.WithAttributeSet(attribute.NewSet(metricAttrs...))} // Use floating point division here for higher precision (instead of Millisecond method). + // Measure right before calling Record() to capture as much elapsed time as possible. elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond) - c.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) if gctx != nil { - c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...)) - c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...)) + c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), recordOpts...) + c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), recordOpts...) } default: return diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index 3f9cfda5..04f425ed 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,7 +5,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.52.0" + return "0.54.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index deea1496..6aae83bf 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -12,7 +12,7 @@ import ( ) // DefaultClient is the default Client and is used by Get, Head, Post and PostForm. 
-// Please be careful of intitialization order - for example, if you change +// Please be careful of initialization order - for example, if you change // the global propagator, the DefaultClient might still be using the old one. var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 214acaf5..5d6e6156 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,13 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Server HTTP metrics. -const ( - serverRequestSize = "http.server.request.size" // Incoming request bytes total - serverResponseSize = "http.server.response.size" // Incoming response bytes total - serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds -) - // Client HTTP metrics. const ( clientRequestSize = "http.client.request.size" // Outgoing request bytes total diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index c1015a9e..a01bfafb 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -8,6 +8,8 @@ import ( "net/http" "net/http/httptrace" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" @@ -33,8 +35,9 @@ type config struct { SpanNameFormatter func(string, *http.Request) string ClientTrace func(context.Context) *httptrace.ClientTrace - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + MetricAttributesFn func(*http.Request) []attribute.KeyValue } // Option interface used for setting optional config properties. @@ -100,7 +103,7 @@ func WithPublicEndpoint() Option { }) } -// WithPublicEndpointFn runs with every request, and allows conditionnally +// WithPublicEndpointFn runs with every request, and allows conditionally // configuring the Handler to link the span with an incoming span context. If // this option is not provided or returns false, then the association is a // child association instead of a link. @@ -194,3 +197,11 @@ func WithServerName(server string) Option { c.ServerName = server }) } + +// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue. +// These attributes will be included in metrics for every request. 
+func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option { + return optionFunc(func(c *config) { + c.MetricAttributesFn = metricAttributesFn + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index c64f8bec..33580a35 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -9,10 +9,9 @@ import ( "github.com/felixge/httpsnoop" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -23,7 +22,6 @@ type middleware struct { server string tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption readEvent bool @@ -33,10 +31,7 @@ type middleware struct { publicEndpoint bool publicEndpointFn func(*http.Request) bool - traceSemconv semconv.HTTPServer - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - serverLatencyMeasure metric.Float64Histogram + semconv semconv.HTTPServer } func defaultHandlerFormatter(operation string, _ *http.Request) string { @@ -55,8 +50,6 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { h := middleware{ operation: operation, - - traceSemconv: semconv.NewHTTPServer(), } defaultOpts := []Option{ @@ -66,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han c := newConfig(append(defaultOpts, opts...)...) h.configure(c) - h.createMeasures() return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -77,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han func (h *middleware) configure(c *config) { h.tracer = c.Tracer - h.meter = c.Meter h.propagators = c.Propagators h.spanStartOptions = c.SpanStartOptions h.readEvent = c.ReadEvent @@ -87,6 +78,7 @@ func (h *middleware) configure(c *config) { h.publicEndpoint = c.PublicEndpoint h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName + h.semconv = semconv.NewHTTPServer(c.Meter) } func handleErr(err error) { @@ -95,30 +87,6 @@ func handleErr(err error) { } } -func (h *middleware) createMeasures() { - var err error - h.requestBytesCounter, err = h.meter.Int64Counter( - serverRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - h.responseBytesCounter, err = h.meter.Int64Counter( - serverResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - h.serverLatencyMeasure, err = h.meter.Float64Histogram( - serverDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of inbound HTTP requests."), - ) - handleErr(err) -} - // serveHTTP sets up tracing and calls the given next http.Handler with the span // context injected into the request context. 
func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { @@ -133,7 +101,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...), + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), } opts = append(opts, h.spanStartOptions...) @@ -165,14 +133,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, readRecordFunc) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - bw.record = readRecordFunc - r.Body = &bw + r.Body = bw } writeRecordFunc := func(int64) {} @@ -182,13 +148,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - rww := &respWriterWrapper{ - ResponseWriter: w, - record: writeRecordFunc, - ctx: ctx, - props: h.propagators, - statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything - } + rww := request.NewRespWriterWrapper(w, writeRecordFunc) // Wrap w to use our ResponseWriter methods while also exposing // other interfaces that w may implement (http.CloseNotifier, @@ -204,41 +164,47 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { return rww.WriteHeader }, + Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { + return rww.Flush + }, }) - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } next.ServeHTTP(w, r.WithContext(ctx)) - span.SetStatus(semconv.ServerStatus(rww.statusCode)) - span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ - StatusCode: rww.statusCode, - ReadBytes: bw.read.Load(), - ReadError: bw.err, - WriteBytes: rww.written, - WriteError: rww.err, + statusCode := rww.StatusCode() + bytesWritten := rww.BytesWritten() + span.SetStatus(h.semconv.Status(statusCode)) + span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: statusCode, + ReadBytes: bw.BytesRead(), + ReadError: bw.Error(), + WriteBytes: bytesWritten, + WriteError: rww.Error(), })...) - // Add metrics - attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) - if rww.statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) - } - o := metric.WithAttributes(attributes...) - h.requestBytesCounter.Add(ctx, bw.read.Load(), o) - h.responseBytesCounter.Add(ctx, rww.written, o) - // Use floating point division here for higher precision (instead of Millisecond method). 
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.serverLatencyMeasure.Record(ctx, elapsedTime, o) + h.semconv.RecordMetrics(ctx, semconv.MetricData{ + ServerName: h.server, + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + RequestSize: bw.BytesRead(), + ResponseSize: bytesWritten, + ElapsedTime: elapsedTime, + }) } // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. func WithRouteTag(route string, h http.Handler) http.Handler { - attr := semconv.NewHTTPServer().Route(route) + attr := semconv.NewHTTPServer(nil).Route(route) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { span := trace.SpanFromContext(r.Context()) span.SetAttributes(attr) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go new file mode 100644 index 00000000..a945f556 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "io" + "sync" +) + +var _ io.ReadCloser = &BodyWrapper{} + +// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. +type BodyWrapper struct { + io.ReadCloser + OnRead func(n int64) // must not be nil + + mu sync.Mutex + read int64 + err error +} + +// NewBodyWrapper creates a new BodyWrapper. +// +// The onRead attribute is a callback that will be called every time the data +// is read, with the number of bytes being read. +func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper { + return &BodyWrapper{ + ReadCloser: body, + OnRead: onRead, + } +} + +// Read reads the data from the io.ReadCloser, and stores the number of bytes +// read and the error. +func (w *BodyWrapper) Read(b []byte) (int, error) { + n, err := w.ReadCloser.Read(b) + n1 := int64(n) + + w.updateReadData(n1, err) + w.OnRead(n1) + return n, err +} + +func (w *BodyWrapper) updateReadData(n int64, err error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.read += n + if err != nil { + w.err = err + } +} + +// Closes closes the io.ReadCloser. +func (w *BodyWrapper) Close() error { + return w.ReadCloser.Close() +} + +// BytesRead returns the number of bytes read up to this point. +func (w *BodyWrapper) BytesRead() int64 { + w.mu.Lock() + defer w.mu.Unlock() + + return w.read +} + +// Error returns the last error. 
+func (w *BodyWrapper) Error() error { + w.mu.Lock() + defer w.mu.Unlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go new file mode 100644 index 00000000..aea171fb --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -0,0 +1,112 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "net/http" + "sync" +) + +var _ http.ResponseWriter = &RespWriterWrapper{} + +// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of +// bytes written, the last error, and to catch the first written statusCode. +// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional +// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc) +// that may be useful when using it in real life situations. +type RespWriterWrapper struct { + http.ResponseWriter + OnWrite func(n int64) // must not be nil + + mu sync.RWMutex + written int64 + statusCode int + err error + wroteHeader bool +} + +// NewRespWriterWrapper creates a new RespWriterWrapper. +// +// The onWrite attribute is a callback that will be called every time the data +// is written, with the number of bytes that were written. +func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper { + return &RespWriterWrapper{ + ResponseWriter: w, + OnWrite: onWrite, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } +} + +// Write writes the bytes array into the [ResponseWriter], and tracks the +// number of bytes written and last error. +func (w *RespWriterWrapper) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(http.StatusOK) + + n, err := w.ResponseWriter.Write(p) + n1 := int64(n) + w.OnWrite(n1) + w.written += n1 + w.err = err + return n, err +} + +// WriteHeader persists initial statusCode for span attribution. +// All calls to WriteHeader will be propagated to the underlying ResponseWriter +// and will persist the statusCode from the first call. +// Blocking consecutive calls to WriteHeader alters expected behavior and will +// remove warning logs from net/http where developers will notice incorrect handler implementations. +func (w *RespWriterWrapper) WriteHeader(statusCode int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(statusCode) +} + +// writeHeader persists the status code for span attribution, and propagates +// the call to the underlying ResponseWriter. +// It does not acquire a lock, and therefore assumes that is being handled by a +// parent method. +func (w *RespWriterWrapper) writeHeader(statusCode int) { + if !w.wroteHeader { + w.wroteHeader = true + w.statusCode = statusCode + } + w.ResponseWriter.WriteHeader(statusCode) +} + +// Flush implements [http.Flusher]. +func (w *RespWriterWrapper) Flush() { + w.WriteHeader(http.StatusOK) + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} + +// BytesWritten returns the number of bytes written. +func (w *RespWriterWrapper) BytesWritten() int64 { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.written +} + +// BytesWritten returns the HTTP status code that was sent. 
+func (w *RespWriterWrapper) StatusCode() int { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.statusCode +} + +// Error returns the last error. +func (w *RespWriterWrapper) Error() error { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 9be3feef..9cae4cab 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -4,11 +4,15 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( + "context" "fmt" "net/http" + "os" + "strings" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" ) type ResponseTelemetry struct { @@ -19,46 +23,57 @@ type ResponseTelemetry struct { WriteError error } -type HTTPServer interface { - // RequestTraceAttrs returns trace attributes for an HTTP request received by a - // server. - // - // The server must be the primary server name if it is known. For example this - // would be the ServerName directive - // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache - // server, and the server_name directive - // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an - // nginx server. More generically, the primary server name would be the host - // header value that matches the default virtual host of an HTTP server. It - // should include the host identifier and if a port is used to route to the - // server that port identifier should be included as an appropriate port - // suffix. - // - // If the primary server name is not known, server should be an empty string. - // The req Host will be used to determine the server instead. - RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue - - // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. - // - // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. - ResponseTraceAttrs(ResponseTelemetry) []attribute.KeyValue - - // Route returns the attribute for the route. - Route(string) attribute.KeyValue +type HTTPServer struct { + duplicate bool + + // Old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram } -// var warnOnce = sync.Once{} +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. 
+func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + } + return oldHTTPServer{}.RequestTraceAttrs(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + } + return oldHTTPServer{}.ResponseTraceAttrs(resp) +} -func NewHTTPServer() HTTPServer { - // TODO (#5331): Detect version based on environment variable OTEL_HTTP_CLIENT_COMPATIBILITY_MODE. - // TODO (#5331): Add warning of use of a deprecated version of Semantic Versions. - return oldHTTPServer{} +// Route returns the attribute for the route. +func (s HTTPServer) Route(route string) attribute.KeyValue { + return oldHTTPServer{}.Route(route) } -// ServerStatus returns a span status code and message for an HTTP status code +// Status returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. -func ServerStatus(code int) (codes.Code, string) { +func (s HTTPServer) Status(code int) (codes.Code, string) { if code < 100 || code >= 600 { return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) } @@ -67,3 +82,84 @@ func ServerStatus(code int) (codes.Code, string) { } return codes.Unset, "" } + +type MetricData struct { + ServerName string + Req *http.Request + StatusCode int + AdditionalAttributes []attribute.KeyValue + + RequestSize int64 + ResponseSize int64 + ElapsedTime float64 +} + +func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { + if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { + // This will happen if an HTTPServer{} is used insted of NewHTTPServer. + return + } + + attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) + s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + + // TODO: Duplicate Metrics +} + +func NewHTTPServer(meter metric.Meter) HTTPServer { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + duplicate := env == "http/dup" + server := HTTPServer{ + duplicate: duplicate, + } + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + return server +} + +type HTTPClient struct { + duplicate bool +} + +func NewHTTPClient() HTTPClient { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + return HTTPClient{duplicate: env == "http/dup"} +} + +// RequestTraceAttrs returns attributes for an HTTP request made by a client. +func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) 
+ } + return oldHTTPClient{}.RequestTraceAttrs(req) +} + +// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. +func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + } + + return oldHTTPClient{}.ResponseTraceAttrs(resp) +} + +func (c HTTPClient) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +func (c HTTPClient) ErrorType(err error) attribute.KeyValue { + if c.duplicate { + return newHTTPClient{}.ErrorType(err) + } + + return attribute.KeyValue{} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go new file mode 100644 index 00000000..745b8c67 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -0,0 +1,348 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "fmt" + "net/http" + "reflect" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type newHTTPServer struct{} + +// TraceRequest returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + count := 3 // ServerAddress, Method, Scheme + + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + count++ + } + + method, methodOriginal := n.method(req.Method) + if methodOriginal != (attribute.KeyValue{}) { + count++ + } + + scheme := n.scheme(req.TLS != nil) + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. 
+ count++ + if peerPort > 0 { + count++ + } + } + + useragent := req.UserAgent() + if useragent != "" { + count++ + } + + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + count++ + } + + if req.URL != nil && req.URL.Path != "" { + count++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + count++ + } + if protoVersion != "" { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + attrs = append(attrs, + semconvNew.ServerAddress(host), + method, + scheme, + ) + + if hostPort > 0 { + attrs = append(attrs, semconvNew.ServerPort(hostPort)) + } + if methodOriginal != (attribute.KeyValue{}) { + attrs = append(attrs, methodOriginal) + } + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) + if peerPort > 0 { + attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort)) + } + } + + if useragent := req.UserAgent(); useragent != "" { + attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) + } + + if clientIP != "" { + attrs = append(attrs, semconvNew.ClientAddress(clientIP)) + } + + if req.URL != nil && req.URL.Path != "" { + attrs = append(attrs, semconvNew.URLPath(req.URL.Path)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + +// TraceResponse returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + var count int + + if resp.ReadBytes > 0 { + count++ + } + if resp.WriteBytes > 0 { + count++ + } + if resp.StatusCode > 0 { + count++ + } + + attributes := make([]attribute.KeyValue, 0, count) + + if resp.ReadBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)), + ) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)), + ) + } + if resp.StatusCode > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseStatusCode(resp.StatusCode), + ) + } + + return attributes +} + +// Route returns the attribute for the route. +func (n newHTTPServer) Route(route string) attribute.KeyValue { + return semconvNew.HTTPRoute(route) +} + +type newHTTPClient struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. 
+func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + /* + below attributes are returned: + - http.request.method + - http.request.method.original + - url.full + - server.address + - server.port + - network.protocol.name + - network.protocol.version + */ + numOfAttributes := 3 // URL, server address, proto, and method. + + var urlHost string + if req.URL != nil { + urlHost = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{urlHost, req.Header.Get("Host")} { + requestHost, requestPort = splitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if eligiblePort > 0 { + numOfAttributes++ + } + useragent := req.UserAgent() + if useragent != "" { + numOfAttributes++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + numOfAttributes++ + } + if protoVersion != "" { + numOfAttributes++ + } + + method, originalMethod := n.method(req.Method) + if originalMethod != (attribute.KeyValue{}) { + numOfAttributes++ + } + + attrs := make([]attribute.KeyValue, 0, numOfAttributes) + + attrs = append(attrs, method) + if originalMethod != (attribute.KeyValue{}) { + attrs = append(attrs, originalMethod) + } + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, semconvNew.URLFull(u)) + + attrs = append(attrs, semconvNew.ServerAddress(requestHost)) + if eligiblePort > 0 { + attrs = append(attrs, semconvNew.ServerPort(eligiblePort)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. +func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + /* + below attributes are returned: + - http.response.status_code + - error.type + */ + var count int + if resp.StatusCode > 0 { + count++ + } + + if isErrorStatusCode(resp.StatusCode) { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + if resp.StatusCode > 0 { + attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode)) + } + + if isErrorStatusCode(resp.StatusCode) { + errorType := strconv.Itoa(resp.StatusCode) + attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType)) + } + return attrs +} + +func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return semconvNew.ErrorTypeOther + } + + return semconvNew.ErrorTypeKey.String(value) +} + +func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func isErrorStatusCode(code int) bool { + return code >= 400 || code < 100 +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index c92076bc..e6e14924 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -5,8 +5,13 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/ import ( "net" + "net/http" "strconv" "strings" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) // splitHostPort splits a network address hostport of the form "host", @@ -45,5 +50,49 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Byte size checked 16 above. +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} + +var methodLookup = map[string]attribute.KeyValue{ + http.MethodConnect: semconvNew.HTTPRequestMethodConnect, + http.MethodDelete: semconvNew.HTTPRequestMethodDelete, + http.MethodGet: semconvNew.HTTPRequestMethodGet, + http.MethodHead: semconvNew.HTTPRequestMethodHead, + http.MethodOptions: semconvNew.HTTPRequestMethodOptions, + http.MethodPatch: semconvNew.HTTPRequestMethodPatch, + http.MethodPost: semconvNew.HTTPRequestMethodPost, + http.MethodPut: semconvNew.HTTPRequestMethodPut, + http.MethodTrace: semconvNew.HTTPRequestMethodTrace, +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index d753083b..c999b05e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -4,18 +4,21 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( + "errors" "io" "net/http" + "slices" + "strings" 
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) type oldHTTPServer struct{} -var _ HTTPServer = oldHTTPServer{} - // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. // @@ -45,7 +48,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke if resp.ReadBytes > 0 { attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) } - if resp.ReadError != nil && resp.ReadError != io.EOF { + if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { // This is not in the semantic conventions, but is historically provided attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) } @@ -55,7 +58,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke if resp.StatusCode > 0 { attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) } - if resp.WriteError != nil && resp.WriteError != io.EOF { + if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { // This is not in the semantic conventions, but is historically provided attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) } @@ -73,3 +76,117 @@ func (o oldHTTPServer) Route(route string) attribute.KeyValue { func HTTPStatusCode(status int) attribute.KeyValue { return semconv.HTTPStatusCode(status) } + +// Server HTTP metrics. +const ( + serverRequestSize = "http.server.request.size" // Incoming request bytes total + serverResponseSize = "http.server.response.size" // Incoming response bytes total + serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds +) + +func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + var err error + requestBytesCounter, err := meter.Int64Counter( + serverRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + serverResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + serverLatencyMeasure, err := meter.Float64Histogram( + serverDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of inbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, serverLatencyMeasure +} + +func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + n := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + o.methodMetric(req.Method), + o.scheme(req.TLS != nil), + semconv.NetHostName(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.NetHostPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} + +func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconv.HTTPSchemeHTTPS + } + return semconv.HTTPSchemeHTTP +} + +type oldHTTPClient struct{} + +func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req) +} + +func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index d5c0093f..b80a1db6 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -92,7 +92,7 @@ func (c *netConv) Host(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { - attrs = append(attrs, c.HostPort(int(p))) + attrs = append(attrs, c.HostPort(p)) } return attrs } @@ -138,7 +138,7 @@ func (c *netConv) Peer(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { - attrs = append(attrs, c.PeerPort(int(p))) + attrs = append(attrs, c.PeerPort(p)) } return attrs } @@ -195,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Bitsize checked to be 16 above. 
} func netProtocol(proto string) (name string, version string) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go index 1548b2db..ea504e39 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -37,8 +37,12 @@ type labelerContextKeyType int const lablelerContextKey labelerContextKeyType = 0 -func injectLabeler(ctx context.Context, l *Labeler) context.Context { - return context.WithValue(ctx, lablelerContextKey, l) +// ContextWithLabeler returns a new context with the provided Labeler instance. +// Attributes added to the specified labeler will be injected into metrics +// emitted by the instrumentation. Only one labeller can be injected into the +// context. Injecting it multiple times will override the previous calls. +func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { + return context.WithValue(parent, lablelerContextKey, l) } // LabelerFromContext retrieves a Labeler instance from the provided context if diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 8a25e586..b4119d34 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -11,15 +11,16 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/otel/metric" - + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" ) // Transport implements the http.RoundTripper interface and wraps @@ -27,14 +28,16 @@ import ( type Transport struct { rt http.RoundTripper - tracer trace.Tracer - meter metric.Meter - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - filters []Filter - spanNameFormatter func(string, *http.Request) string - clientTrace func(context.Context) *httptrace.ClientTrace + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace + metricAttributesFn func(*http.Request) []attribute.KeyValue + semconv semconv.HTTPClient requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter latencyMeasure metric.Float64Histogram @@ -54,7 +57,8 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ - rt: base, + rt: base, + semconv: semconv.NewHTTPClient(), } defaultOpts := []Option{ @@ -77,6 +81,7 @@ func (t *Transport) applyConfig(c *config) { t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.metricAttributesFn = c.MetricAttributesFn } func (t *Transport) createMeasures() { @@ -137,49 
+142,56 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) } - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. - // use a body wrapper to determine the request size - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, func(int64) {}) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - // noop to prevent nil panic. not using this record fun yet. - bw.record = func(int64) {} - r.Body = &bw + r.Body = bw } - span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + span.SetAttributes(t.semconv.RequestTraceAttrs(r)...) t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) res, err := t.rt.RoundTrip(r) if err != nil { - span.RecordError(err) + // set error type attribute if the error is part of the predefined + // error types. + // otherwise, record it as an exception + if errType := t.semconv.ErrorType(err); errType.Valid() { + span.SetAttributes(errType) + } else { + span.RecordError(err) + } + span.SetStatus(codes.Error, err.Error()) span.End() return res, err } // metrics - metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...) + metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) if res.StatusCode > 0 { metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) } - o := metric.WithAttributes(metricAttrs...) - t.requestBytesCounter.Add(ctx, bw.read.Load(), o) + o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + + t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { t.responseBytesCounter.Add(ctx, n, o) } // traces - span.SetAttributes(semconvutil.HTTPClientResponse(res)...) - span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) + span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...) + span.SetStatus(t.semconv.Status(res.StatusCode)) res.Body = newWrappedBody(span, readRecordFunc, res.Body) @@ -191,6 +203,14 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { return res, err } +func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if t.metricAttributesFn != nil { + attributeForRequest = t.metricAttributesFn(r) + } + return attributeForRequest +} + // newWrappedBody returns a new and appropriately scoped *wrappedBody as an // io.ReadCloser. If the passed body implements io.Writer, the returned value // will implement io.ReadWriteCloser. 
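// --- Illustrative usage sketch (not part of the vendored diff) --------------
// The otelhttp changes above export ContextWithLabeler/LabelerFromContext,
// route server metrics through semconv.NewHTTPServer(meter), and add the
// WithMetricAttributesFn option consumed by the client Transport. The snippet
// below is a minimal, hypothetical example of how a consumer of the upgraded
// module could use those additions; the handler path, attribute keys, and
// listen address are invented for illustration only and do not come from this
// repository.
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Server side: attributes added through the request-scoped Labeler are
	// appended to the recorded server metrics (they become the
	// AdditionalAttributes passed to RecordMetrics in the handler above).
	mux := http.NewServeMux()
	mux.HandleFunc("/h/example", func(w http.ResponseWriter, r *http.Request) {
		labeler, _ := otelhttp.LabelerFromContext(r.Context()) // injected by the middleware
		labeler.Add(attribute.String("webhook.provider", "example")) // hypothetical attribute key
		w.WriteHeader(http.StatusOK)
	})
	server := otelhttp.NewHandler(mux, "webhook")

	// Client side: WithMetricAttributesFn derives extra attributes from each
	// outgoing request and attaches them to the client metrics emitted by the
	// instrumented Transport.
	client := &http.Client{
		Transport: otelhttp.NewTransport(http.DefaultTransport,
			otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
				return []attribute.KeyValue{attribute.String("peer.host", r.URL.Host)} // hypothetical attribute key
			}),
		),
	}
	_ = client // ready for outgoing webhook deliveries

	// Setting OTEL_SEMCONV_STABILITY_OPT_IN=http/dup makes the new semconv
	// layer emit both the old and the new HTTP semantic convention trace
	// attributes, as implemented in env.go above.
	_ = http.ListenAndServe(":8080", server) // error ignored for brevity in this sketch
}
// --- End of illustrative sketch ----------------------------------------------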
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 22e485dd..502c1bda 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.52.0" + return "0.54.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go deleted file mode 100644 index 2f4cc124..00000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - -import ( - "context" - "io" - "net/http" - "sync/atomic" - - "go.opentelemetry.io/otel/propagation" -) - -var _ io.ReadCloser = &bodyWrapper{} - -// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number -// of bytes read and the last error. -type bodyWrapper struct { - io.ReadCloser - record func(n int64) // must not be nil - - read atomic.Int64 - err error -} - -func (w *bodyWrapper) Read(b []byte) (int, error) { - n, err := w.ReadCloser.Read(b) - n1 := int64(n) - w.read.Add(n1) - w.err = err - w.record(n1) - return n, err -} - -func (w *bodyWrapper) Close() error { - return w.ReadCloser.Close() -} - -var _ http.ResponseWriter = &respWriterWrapper{} - -// respWriterWrapper wraps a http.ResponseWriter in order to track the number of -// bytes written, the last error, and to catch the first written statusCode. -// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional -// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) -// that may be useful when using it in real life situations. -type respWriterWrapper struct { - http.ResponseWriter - record func(n int64) // must not be nil - - // used to inject the header - ctx context.Context - - props propagation.TextMapPropagator - - written int64 - statusCode int - err error - wroteHeader bool -} - -func (w *respWriterWrapper) Header() http.Header { - return w.ResponseWriter.Header() -} - -func (w *respWriterWrapper) Write(p []byte) (int, error) { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - n, err := w.ResponseWriter.Write(p) - n1 := int64(n) - w.record(n1) - w.written += n1 - w.err = err - return n, err -} - -// WriteHeader persists initial statusCode for span attribution. -// All calls to WriteHeader will be propagated to the underlying ResponseWriter -// and will persist the statusCode from the first call. -// Blocking consecutive calls to WriteHeader alters expected behavior and will -// remove warning logs from net/http where developers will notice incorrect handler implementations. 
-func (w *respWriterWrapper) WriteHeader(statusCode int) { - if !w.wroteHeader { - w.wroteHeader = true - w.statusCode = statusCode - } - w.ResponseWriter.WriteHeader(statusCode) -} diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 6d9c8b64..d9abe194 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,6 +9,8 @@ linters: disable-all: true # Specifically enable linters we want to use. enable: + - asasalint + - bodyclose - depguard - errcheck - errorlint diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index c01e6998..6107c17b 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,64 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + + + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. 
(#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + ## [1.28.0/0.50.0/0.4.0] 2024-07-02 ### Added @@ -49,6 +107,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Fix stale timestamps reported by the last-value aggregation. (#5517) - Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) - Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) +- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) ## [1.27.0/0.49.0/0.3.0] 2024-05-21 @@ -175,7 +234,7 @@ The next release will require at least [Go 1.21]. 
This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state, it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) -- ARM64 platform to the compatibility testing suite. (#4994) +- Add ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -3003,7 +3062,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...HEAD +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 @@ -3086,6 +3146,9 @@ It contains api and sdk for trace and meter. [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + +[Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 [Go 1.20]: https://go.dev/doc/go1.20 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 20255493..5904bb70 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,7 +5,7 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md # # # Learn about CODEOWNERS file format: diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index b86572f5..b7402576 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -650,7 +650,7 @@ should be canceled. ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). [Approver]: #approvers [Maintainer]: #maintainers diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index f33619f7..070b1e57 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -178,17 +178,14 @@ test-coverage: $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that directory if a filter is not specified. -BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark -benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) -BENCHMARK_FILTER = . -# You can override the filter for a particular directory by adding a rule here. -benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." 
\ + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ && cd $* \ - $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 5a890931..657df347 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -47,20 +47,29 @@ stop ensuring compatibility with these versions in the following manner: Currently, this project supports the following environments. -| OS | Go Version | Architecture | -|---------|------------|--------------| -| Ubuntu | 1.22 | amd64 | -| Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.22 | 386 | -| Ubuntu | 1.21 | 386 | -| Linux | 1.22 | arm64 | -| Linux | 1.21 | arm64 | -| MacOS | 1.22 | amd64 | -| MacOS | 1.21 | amd64 | -| Windows | 1.22 | amd64 | -| Windows | 1.21 | amd64 | -| Windows | 1.22 | 386 | -| Windows | 1.21 | 386 | +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.21 | amd64 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Ubuntu | 1.21 | 386 | +| Linux | 1.23 | arm64 | +| Linux | 1.22 | arm64 | +| Linux | 1.21 | arm64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS 13 | 1.21 | amd64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| macOS | 1.21 | arm64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.21 | amd64 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | +| Windows | 1.21 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 940f57f3..59992984 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). + - Make sure the new section is under the comment for released section, like ``, so it is protected from being overwritten in the future. - Update all the appropriate links at the bottom. 4. Push the changes to upstream and create a Pull Request on GitHub. diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index c40c896c..b3569e95 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -44,9 +44,15 @@ type Property struct { // NewKeyProperty returns a new Property for key. // +// The passed key must be valid, non-empty UTF-8 string. // If key is invalid, an error will be returned. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key. 
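For illustration, a minimal sketch of how these key constraints play out when building baggage with the public API of `go.opentelemetry.io/otel/baggage` (the key, value, and property names below are invented for the example, not part of this change):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// An alpha-numeric key is both a valid OpenTelemetry baggage name and a
	// valid W3C Baggage token, so it survives header encoding unchanged.
	prop, err := baggage.NewKeyProperty("flag1")
	if err != nil {
		panic(err)
	}

	// NewMemberRaw accepts a raw (not percent-encoded) UTF-8 value.
	m, err := baggage.NewMemberRaw("userId", "42 alpha", prop)
	if err != nil {
		panic(err)
	}

	b, err := baggage.New(m)
	if err != nil {
		panic(err)
	}

	// Renders the W3C Baggage header form, e.g. userId=42%20alpha;flag1.
	fmt.Println(b.String())
}
```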
func NewKeyProperty(key string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } @@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) { // Notice: Consider using [NewKeyValuePropertyRaw] instead // that does not require percent-encoding of the value. func NewKeyValueProperty(key, value string) (Property, error) { + if !validateKey(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) { // NewKeyValuePropertyRaw returns a new Property for key with value. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key. func NewKeyValuePropertyRaw(key, value string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } + if !validateBaggageValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } p := Property{ key: key, @@ -115,12 +134,15 @@ func (p Property) validate() error { return fmt.Errorf("invalid property: %w", err) } - if !validateKey(p.key) { + if !validateBaggageName(p.key) { return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) } if !p.hasValue && p.value != "" { return errFunc(errors.New("inconsistent value")) } + if p.hasValue && !validateBaggageValue(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } return nil } @@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) { // String encodes Property into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(p.key) { + return "" + } + if p.hasValue { return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) } @@ -203,9 +233,14 @@ func (p properties) validate() error { // String encodes properties into a header string compliant with the W3C Baggage // specification. func (p properties) String() string { - props := make([]string, len(p)) - for i, prop := range p { - props[i] = prop.String() + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignored empty properties. + if s != "" { + props = append(props, s) + } } return strings.Join(props, propertyDelimiter) } @@ -230,6 +265,10 @@ type Member struct { // Notice: Consider using [NewMemberRaw] instead // that does not require percent-encoding of the value. 
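A compact sketch of the difference the notice above describes, assuming the documented behavior that `NewMember` percent-decodes its value while `NewMemberRaw` takes it as-is (the key and values are made up for the example):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// NewMember expects a percent-encoded value; NewMemberRaw takes the raw value.
	legacy, _ := baggage.NewMember("greeting", "Hello%20World")
	raw, _ := baggage.NewMemberRaw("greeting", "Hello World")

	fmt.Println(legacy.Value() == raw.Value()) // true: both hold "Hello World"
	fmt.Println(raw.String())                  // re-encoded, e.g. greeting=Hello%20World
}
```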
func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) { // NewMemberRaw returns a new Member from the passed arguments. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage key. +// For example, the W3C Baggage specification restricts the baggage keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alpha-numeric value are strongly recommended to be used as baggage key. func NewMemberRaw(key, value string, props ...Property) (Member, error) { m := Member{ key: key, @@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) } - val := strings.TrimSpace(v) - if !validateValue(val) { + rawVal := strings.TrimSpace(v) + if !validateValue(rawVal) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } // Decode a percent-encoded value. - value, err := url.PathUnescape(val) + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } + + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) return Member{key: key, value: value, properties: props, hasData: true}, nil } +// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. +func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { + if utf8.ValidString(unescapeVal) { + return unescapeVal + } + // W3C baggage spec: + // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 + + var b strings.Builder + b.Grow(cap) + for i := 0; i < len(unescapeVal); { + r, size := utf8.DecodeRuneInString(unescapeVal[i:]) + if r == utf8.RuneError && size == 1 { + // Invalid UTF-8 sequence found, replace it with '�' + _, _ = b.WriteString("�") + } else { + _, _ = b.WriteRune(r) + } + i += size + } + + return b.String() +} + // validate ensures m conforms to the W3C Baggage specification. // A key must be an ASCII string, returning an error otherwise. func (m Member) validate() error { @@ -314,9 +385,12 @@ func (m Member) validate() error { return fmt.Errorf("%w: %q", errInvalidMember, m) } - if !validateKey(m.key) { + if !validateBaggageName(m.key) { return fmt.Errorf("%w: %q", errInvalidKey, m.key) } + if !validateBaggageValue(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } return m.properties.validate() } @@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() } // String encodes Member into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (m Member) String() string { - // A key is just an ASCII string. 
A value is restricted to be - // US-ASCII characters excluding CTLs, whitespace, - // DQUOTE, comma, semicolon, and backslash. + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { s += propertyDelimiter + m.properties.String() @@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member { } // Members returns all the baggage list-members. -// The order of the returned list-members does not have significance. +// The order of the returned list-members is not significant. // // The returned members are not validated, as we assume the validation happened // when they were added to the Baggage. @@ -469,8 +548,8 @@ func (b Baggage) Members() []Member { return members } -// SetMember returns a copy the Baggage with the member included. If the -// baggage contains a Member with the same key the existing Member is +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is // replaced. // // If member is invalid according to the W3C Baggage specification, an error @@ -528,14 +607,22 @@ func (b Baggage) Len() int { // String encodes Baggage into a header string compliant with the W3C Baggage // specification. +// It would ignore members where the member key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (b Baggage) String() string { members := make([]string, 0, len(b.list)) for k, v := range b.list { - members = append(members, Member{ + s := Member{ key: k, value: v.Value, properties: fromInternalProperties(v.Properties), - }.String()) + }.String() + + // Ignored empty members. + if s != "" { + members = append(members, s) + } } return strings.Join(members, listDelimiter) } @@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) { } // Decode a percent-encoded value. - value, err := url.PathUnescape(s[valueStart:valueEnd]) + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) ok = true p.key = s[keyStart:keyEnd] @@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{ '~': true, } +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// Baggage value is a valid UTF-8 strings. +// Empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. func validateKey(s string) bool { if len(s) == 0 { return false @@ -738,6 +845,7 @@ func validateKeyChar(c int32) bool { return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } +// validateValue checks if the string is a valid W3C Baggage value. 
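A sketch of the decoding behavior added above: percent-encoded octets that form valid UTF-8 are kept, while invalid sequences are replaced with U+FFFD when parsing a baggage header (the header string here is invented for the example):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// %F0%9F%98%80 decodes to a valid UTF-8 code point and is kept as-is;
	// %80 decodes to a lone continuation byte, which is invalid UTF-8 and
	// is replaced with the replacement character.
	b, err := baggage.Parse("emoji=%F0%9F%98%80,broken=%80")
	if err != nil {
		panic(err)
	}
	fmt.Println(b.Member("emoji").Value())  // 😀
	fmt.Println(b.Member("broken").Value()) // �
}
```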
func validateValue(s string) bool { for _, c := range s { if !validateValueChar(c) { diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index df29d96a..2acbac35 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -83,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return fmt.Errorf("invalid code: %q", ci) } - *c = Code(ci) + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. return nil } return fmt.Errorf("invalid code: %q", string(b)) diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go index 441c5950..921f8596 100644 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace. To read more about metrics, see go.opentelemetry.io/otel/metric. +To read more about logs, see go.opentelemetry.io/otel/log. + To read more about propagation, see go.opentelemetry.io/otel/propagation and go.opentelemetry.io/otel/baggage. */ diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index 3e7bb3b3..9b1da2c0 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -24,7 +24,8 @@ func Int64ToRaw(i int64) uint64 { } func RawToInt64(r uint64) int64 { - return int64(r) + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec } func Float64ToRaw(f float64) uint64 { @@ -36,9 +37,11 @@ func RawToFloat64(r uint64) float64 { } func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) + // Assumes original was a valid *float64 (overflow not checked). + return (*float64)(unsafe.Pointer(r)) // nolint: gosec } func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) + // Assumes original was a valid *int64 (overflow not checked). + return (*int64)(unsafe.Pointer(r)) // nolint: gosec } diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 6a7991e0..14e08c24 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -52,6 +52,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational @@ -61,6 +62,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a @@ -70,6 +72,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64Gauge returns a new Int64Gauge instrument identified by name and // configured with options. 
The instrument is used to synchronously record // instantaneous int64 measurements during a computational operation. @@ -78,6 +81,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -92,6 +96,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record int64 measurements once per @@ -106,6 +111,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous int64 measurements once per a @@ -130,6 +136,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational @@ -139,6 +146,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a @@ -148,6 +156,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64Gauge returns a new Float64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous float64 measurements during a computational operation. @@ -156,6 +165,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record increasing float64 @@ -170,6 +180,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. 
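As a usage sketch of creating and recording with a few of the instruments this interface documents (the meter name, instrument names, units, and attributes are illustrative; without an SDK-registered MeterProvider these calls are no-ops):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	ctx := context.Background()
	meter := otel.Meter("example.com/hypothetical/instrumentation")

	// Synchronous instruments record measurements inline with application code.
	requests, _ := meter.Int64Counter("app.requests",
		metric.WithDescription("Number of handled requests"),
		metric.WithUnit("{request}"))
	latency, _ := meter.Float64Histogram("app.request.duration",
		metric.WithUnit("s"))
	queueLen, _ := meter.Int64Gauge("app.queue.length")

	requests.Add(ctx, 1, metric.WithAttributes(attribute.String("route", "/hello")))
	latency.Record(ctx, 0.042)
	queueLen.Record(ctx, 7)
}
```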
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and // configured with options. The instrument is used to asynchronously record @@ -184,6 +195,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous float64 measurements once per a @@ -242,6 +254,7 @@ type Observer interface { // ObserveFloat64 records the float64 value for obsrv. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) + // ObserveInt64 records the int64 value for obsrv. ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 00000000..ef85cb70 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. 
Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 00000000..d3aa476e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. 
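A short usage sketch of the Tracer and Span surface relocated into these new files (the tracer name, span name, attributes, and error are invented for the example; without an SDK-registered TracerProvider the calls are no-ops):

```go
package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

func handle(ctx context.Context) error {
	tracer := otel.Tracer("example.com/hypothetical/instrumentation")

	// Start returns a child of any span already in ctx, or a root span.
	ctx, span := tracer.Start(ctx, "handle",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(attribute.String("route", "/hello")))
	defer span.End() // every started span must be ended

	if err := doWork(ctx); err != nil {
		// RecordError only adds an exception event; the status is set separately.
		span.RecordError(err)
		span.SetStatus(codes.Error, "request failed")
		return err
	}
	span.SetStatus(codes.Ok, "")
	return nil
}

func doWork(context.Context) error { return errors.New("downstream failure") }

func main() { _ = handle(context.Background()) }
```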
The returned SpanContext + // is usable even after the End method has been called for the Span. + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. + SetName(name string) + + // SetAttributes sets kv as attributes of the Span. If a key from kv + // already exists for an attribute of the Span it will be overwritten with + // the value contained in kv. + SetAttributes(kv ...attribute.KeyValue) + + // TracerProvider returns a TracerProvider that can be used to generate + // additional Spans on the same telemetry pipeline as the current Span. + TracerProvider() TracerProvider +} + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +// +// For example, a Link is used in the following situations: +// +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an in incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. +type Link struct { + // SpanContext of the linked Span. + SpanContext SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue +} + +// LinkFromContext returns a link encapsulating the SpanContext in the provided +// ctx. +func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { + return Link{ + SpanContext: SpanContextFromContext(ctx), + Attributes: attrs, + } +} + +// SpanKind is the role a Span plays in a Trace. +type SpanKind int + +// As a convenience, these match the proto definition, see +// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 +// +// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` +// to coerce a span kind to a valid value. +const ( + // SpanKindUnspecified is an unspecified SpanKind and is not a valid + // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. 
A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 28877d4a..d49adf67 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( "bytes" - "context" "encoding/hex" "encoding/json" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { Remote: sc.remote, }) } - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Span interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Span - - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // AddLink adds a link. - // Adding links at span creation using WithLinks is preferred to calling AddLink - // later, for contexts that are available during span creation, because head - // sampling decisions can only consider information present during span creation. - AddLink(link Link) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. 
An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. 
- SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. 
How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. -// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 00000000..77952d2a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. 
See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. + Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index 20b5cf24..dc5e34ca 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string { return "" } +// Walk walks all key value pairs in the TraceState by calling f +// Iteration stops if f returns false. +func (ts TraceState) Walk(f func(key, value string) bool) { + for _, m := range ts.list { + if !f(m.Key, m.Value) { + break + } + } +} + // Insert adds a new list-member defined by the key/value pair to the // TraceState. If a list-member already exists for the given key, that // list-member's value is updated. The new or updated list-member is always diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh new file mode 100644 index 00000000..c9b7cdbb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +TARGET="${1:?Must provide target ref}" + +FILE="CHANGELOG.md" +TEMP_DIR=$(mktemp -d) +echo "Temp folder: $TEMP_DIR" + +# Only the latest commit of the feature branch is available +# automatically. To diff with the base branch, we need to +# fetch that too (and we only need its latest commit). +git fetch origin "${TARGET}" --depth=1 + +# Checkout the previous version on the base branch of the changelog to tmpfolder +git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE + +PREVIOUS_FILE="$TEMP_DIR/$FILE" +CURRENT_FILE="$FILE" +PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md" +CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md" + +# Extract released sections from the previous version +awk '/^/ {flag=1} /^/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE" + +# Extract released sections from the current version +awk '/^/ {flag=1} /^/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE" + +# Compare the released sections +if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then + echo "Error: The released sections of the changelog file have been modified." + diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE" + rm -rf "$TEMP_DIR" + false +fi + +rm -rf "$TEMP_DIR" +echo "The released sections remain unchanged." 
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index ab289605..f67039ed 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.28.0" + return "1.29.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 241cfc82..3ba611d7 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.28.0 + version: v1.29.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -29,15 +29,16 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.50.0 + version: v0.51.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.4.0 + version: v0.5.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: @@ -46,4 +47,3 @@ module-sets: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s index 731d2ac6..fd5ee845 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s @@ -1,2715 +1,9762 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. +// Code generated by command: go run chacha20poly1305_amd64_asm.go -out ../chacha20poly1305_amd64.s -pkg chacha20poly1305. DO NOT EDIT. 
//go:build gc && !purego #include "textflag.h" -// General register allocation -#define oup DI -#define inp SI -#define inl BX -#define adp CX // free to reuse, after we hash the additional data -#define keyp R8 // free to reuse, when we copy the key to stack -#define itr2 R9 // general iterator -#define itr1 CX // general iterator -#define acc0 R10 -#define acc1 R11 -#define acc2 R12 -#define t0 R13 -#define t1 R14 -#define t2 R15 -#define t3 R8 -// Register and stack allocation for the SSE code -#define rStore (0*16)(BP) -#define sStore (1*16)(BP) -#define state1Store (2*16)(BP) -#define state2Store (3*16)(BP) -#define tmpStore (4*16)(BP) -#define ctr0Store (5*16)(BP) -#define ctr1Store (6*16)(BP) -#define ctr2Store (7*16)(BP) -#define ctr3Store (8*16)(BP) -#define A0 X0 -#define A1 X1 -#define A2 X2 -#define B0 X3 -#define B1 X4 -#define B2 X5 -#define C0 X6 -#define C1 X7 -#define C2 X8 -#define D0 X9 -#define D1 X10 -#define D2 X11 -#define T0 X12 -#define T1 X13 -#define T2 X14 -#define T3 X15 -#define A3 T0 -#define B3 T1 -#define C3 T2 -#define D3 T3 -// Register and stack allocation for the AVX2 code -#define rsStoreAVX2 (0*32)(BP) -#define state1StoreAVX2 (1*32)(BP) -#define state2StoreAVX2 (2*32)(BP) -#define ctr0StoreAVX2 (3*32)(BP) -#define ctr1StoreAVX2 (4*32)(BP) -#define ctr2StoreAVX2 (5*32)(BP) -#define ctr3StoreAVX2 (6*32)(BP) -#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack -#define AA0 Y0 -#define AA1 Y5 -#define AA2 Y6 -#define AA3 Y7 -#define BB0 Y14 -#define BB1 Y9 -#define BB2 Y10 -#define BB3 Y11 -#define CC0 Y12 -#define CC1 Y13 -#define CC2 Y8 -#define CC3 Y15 -#define DD0 Y4 -#define DD1 Y1 -#define DD2 Y2 -#define DD3 Y3 -#define TT0 DD3 -#define TT1 AA3 -#define TT2 BB3 -#define TT3 CC3 -// ChaCha20 constants -DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865 -DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e -DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32 -DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574 -DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865 -DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e -DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32 -DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574 -// <<< 16 with PSHUFB -DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 -DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A -DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302 -DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A -// <<< 8 with PSHUFB -DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 -DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B -DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003 -DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B - -DATA ·avx2InitMask<>+0x00(SB)/8, $0x0 -DATA ·avx2InitMask<>+0x08(SB)/8, $0x0 -DATA ·avx2InitMask<>+0x10(SB)/8, $0x1 -DATA ·avx2InitMask<>+0x18(SB)/8, $0x0 - -DATA ·avx2IncMask<>+0x00(SB)/8, $0x2 -DATA ·avx2IncMask<>+0x08(SB)/8, $0x0 -DATA ·avx2IncMask<>+0x10(SB)/8, $0x2 -DATA ·avx2IncMask<>+0x18(SB)/8, $0x0 -// Poly1305 key clamp -DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF -DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF - -DATA ·sseIncMask<>+0x00(SB)/8, $0x1 -DATA ·sseIncMask<>+0x08(SB)/8, $0x0 -// To load/store the last < 16 bytes in a buffer -DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff -DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff -DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff -DATA ·andMask<>+0x28(SB)/8, 
$0x0000000000000000 -DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff -DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff -DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff -DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff -DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff -DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff -DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff -DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff -DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff -DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff -DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff - -GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32 -GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32 -GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32 -GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16 -GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 -// No PALIGNR in Go ASM yet (but VPALIGNR is present). -#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3 -#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4 -#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5 -#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13 -#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6 -#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7 -#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8 -#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14 -#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9 -#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10 -#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11 -#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15 -#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3 -#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4 -#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5 -#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR 
$12, X13, X13 -#define shiftC0Right shiftC0Left -#define shiftC1Right shiftC1Left -#define shiftC2Right shiftC2Left -#define shiftC3Right shiftC3Left -#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9 -#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 -#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 -#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 - -// Some macros - -// ROL rotates the uint32s in register R left by N bits, using temporary T. -#define ROL(N, R, T) \ - MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R - -// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. -#ifdef GOAMD64_v2 -#define ROL16(R, T) PSHUFB ·rol16<>(SB), R -#else -#define ROL16(R, T) ROL(16, R, T) -#endif - -// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. -#ifdef GOAMD64_v2 -#define ROL8(R, T) PSHUFB ·rol8<>(SB), R -#else -#define ROL8(R, T) ROL(8, R, T) -#endif - -#define chachaQR(A, B, C, D, T) \ - PADDD B, A; PXOR A, D; ROL16(D, T) \ - PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ - PADDD B, A; PXOR A, D; ROL8(D, T) \ - PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B - -#define chachaQR_AVX2(A, B, C, D, T) \ - VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \ - VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \ - VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \ - VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B - -#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2 -#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2 -#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX -#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3 -#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t3, t2; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2 - -#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2 -#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3 -#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3 - -#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage -#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage -// ---------------------------------------------------------------------------- + +// func polyHashADInternal<>() TEXT polyHashADInternal<>(SB), NOSPLIT, $0 - // adp points to beginning of additional data - // itr2 holds ad length - XORQ acc0, acc0 - XORQ acc1, acc1 - XORQ acc2, acc2 - CMPQ itr2, $13 - JNE hashADLoop - -openFastTLSAD: - // Special treatment for the TLS case of 13 bytes - MOVQ (adp), acc0 - MOVQ 5(adp), acc1 - SHRQ $24, acc1 - MOVQ $1, acc2 - polyMul + // Hack: 
Must declare #define macros inside of a function due to Avo constraints + // ROL rotates the uint32s in register R left by N bits, using temporary T. + #define ROL(N, R, T) \ + MOVO R, T; \ + PSLLL $(N), T; \ + PSRLL $(32-(N)), R; \ + PXOR T, R + + // ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. + #ifdef GOAMD64_v2 + #define ROL8(R, T) PSHUFB ·rol8<>(SB), R + #else + #define ROL8(R, T) ROL(8, R, T) + #endif + + // ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. + #ifdef GOAMD64_v2 + #define ROL16(R, T) PSHUFB ·rol16<>(SB), R + #else + #define ROL16(R, T) ROL(16, R, T) + #endif + XORQ R10, R10 + XORQ R11, R11 + XORQ R12, R12 + CMPQ R9, $0x0d + JNE hashADLoop + MOVQ (CX), R10 + MOVQ 5(CX), R11 + SHRQ $0x18, R11 + MOVQ $0x00000001, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 RET hashADLoop: // Hash in 16 byte chunks - CMPQ itr2, $16 - JB hashADTail - polyAdd(0(adp)) - LEAQ (1*16)(adp), adp - SUBQ $16, itr2 - polyMul - JMP hashADLoop + CMPQ R9, $0x10 + JB hashADTail + ADDQ (CX), R10 + ADCQ 8(CX), R11 + ADCQ $0x01, R12 + LEAQ 16(CX), CX + SUBQ $0x10, R9 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + JMP hashADLoop hashADTail: - CMPQ itr2, $0 + CMPQ R9, $0x00 JE hashADDone // Hash last < 16 byte tail - XORQ t0, t0 - XORQ t1, t1 - XORQ t2, t2 - ADDQ itr2, adp + XORQ R13, R13 + XORQ R14, R14 + XORQ R15, R15 + ADDQ R9, CX hashADTailLoop: - SHLQ $8, t0, t1 - SHLQ $8, t0 - MOVB -1(adp), t2 - XORQ t2, t0 - DECQ adp - DECQ itr2 - JNE hashADTailLoop - -hashADTailFinish: - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul - - // Finished AD + SHLQ $0x08, R13, R14 + SHLQ $0x08, R13 + MOVB -1(CX), R15 + XORQ R15, R13 + DECQ CX + DECQ R9 + JNE hashADTailLoop + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + hashADDone: RET -// 
---------------------------------------------------------------------------- -// func chacha20Poly1305Open(dst, key, src, ad []byte) bool -TEXT ·chacha20Poly1305Open(SB), 0, $288-97 +// func chacha20Poly1305Open(dst []byte, key []uint32, src []byte, ad []byte) bool +// Requires: AVX, AVX2, BMI2, CMOV, SSE2 +TEXT ·chacha20Poly1305Open(SB), $288-97 // For aligned stack access MOVQ SP, BP - ADDQ $32, BP + ADDQ $0x20, BP ANDQ $-32, BP - MOVQ dst+0(FP), oup - MOVQ key+24(FP), keyp - MOVQ src+48(FP), inp - MOVQ src_len+56(FP), inl - MOVQ ad+72(FP), adp + MOVQ dst_base+0(FP), DI + MOVQ key_base+24(FP), R8 + MOVQ src_base+48(FP), SI + MOVQ src_len+56(FP), BX + MOVQ ad_base+72(FP), CX // Check for AVX2 support - CMPB ·useAVX2(SB), $1 + CMPB ·useAVX2+0(SB), $0x01 JE chacha20Poly1305Open_AVX2 // Special optimization, for very short buffers - CMPQ inl, $128 - JBE openSSE128 // About 16% faster + CMPQ BX, $0x80 + JBE openSSE128 // For long buffers, prepare the poly key first - MOVOU ·chacha20Constants<>(SB), A0 - MOVOU (1*16)(keyp), B0 - MOVOU (2*16)(keyp), C0 - MOVOU (3*16)(keyp), D0 - MOVO D0, T1 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X9, X13 // Store state on stack for future use - MOVO B0, state1Store - MOVO C0, state2Store - MOVO D0, ctr3Store - MOVQ $10, itr2 + MOVO X3, 32(BP) + MOVO X6, 48(BP) + MOVO X9, 128(BP) + MOVQ $0x0000000a, R9 openSSEPreparePolyKey: - chachaQR(A0, B0, C0, D0, T0) - shiftB0Left; shiftC0Left; shiftD0Left - chachaQR(A0, B0, C0, D0, T0) - shiftB0Right; shiftC0Right; shiftD0Right - DECQ itr2 - JNE openSSEPreparePolyKey + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + DECQ R9 + JNE openSSEPreparePolyKey // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0 + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL 32(BP), X3 // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVO A0, rStore; MOVO B0, sStore + PAND ·polyClampMask<>+0(SB), X0 + MOVO X0, (BP) + MOVO X3, 16(BP) // Hash AAD - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) openSSEMainLoop: - CMPQ inl, $256 + CMPQ BX, $0x00000100 JB openSSEMainLoopDone // Load state, increment counter blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL 
·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) - // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16 - MOVQ $4, itr1 - MOVQ inp, itr2 + // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash + // 2 blocks, and for the remaining 4 only 1 block - for a total of 16 + MOVQ $0x00000004, CX + MOVQ SI, R9 openSSEInternalLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyAdd(0(itr2)) - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - LEAQ (2*8)(itr2), itr2 - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - polyMulStage3 - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr1 - JGE openSSEInternalLoop - - polyAdd(0(itr2)) - polyMul - LEAQ (2*8)(itr2), itr2 - - CMPQ itr1, $-6 - JG openSSEInternalLoop + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE 
$0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + LEAQ 16(R9), R9 + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + 
BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ CX + JGE openSSEInternalLoop + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + CMPQ CX, $-6 + JG openSSEInternalLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 // Load - xor - store - MOVO D3, tmpStore - MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup) - MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup) - MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup) - MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup) - MOVOU (5*16)(inp), D0; PXOR D0, B1; MOVOU B1, (5*16)(oup) - MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup) - MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup) - MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup) - MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup) - MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup) - MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup) - MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup) - MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup) - MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup) - LEAQ 256(inp), inp - LEAQ 256(oup), oup - SUBQ $256, inl + MOVO X15, 64(BP) + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU X0, (DI) + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU X3, 16(DI) + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU X6, 32(DI) + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X9, 48(DI) + MOVOU 64(SI), X9 + PXOR X9, X1 + MOVOU X1, 64(DI) + MOVOU 80(SI), X9 + PXOR X9, X4 + MOVOU X4, 80(DI) + MOVOU 96(SI), X9 + PXOR X9, X7 + MOVOU X7, 96(DI) + MOVOU 112(SI), X9 + PXOR X9, X10 + MOVOU X10, 112(DI) + MOVOU 128(SI), X9 + PXOR X9, X2 + MOVOU X2, 128(DI) + MOVOU 144(SI), X9 + PXOR X9, X5 + MOVOU X5, 144(DI) + MOVOU 160(SI), X9 + PXOR X9, X8 + MOVOU X8, 160(DI) + MOVOU 176(SI), X9 + PXOR X9, X11 + MOVOU X11, 176(DI) + MOVOU 192(SI), X9 + PXOR X9, X12 + MOVOU X12, 192(DI) + MOVOU 208(SI), X9 + PXOR X9, X13 + MOVOU X13, 208(DI) + MOVOU 
224(SI), X9 + PXOR X9, X14 + MOVOU X14, 224(DI) + MOVOU 240(SI), X9 + PXOR 64(BP), X9 + MOVOU X9, 240(DI) + LEAQ 256(SI), SI + LEAQ 256(DI), DI + SUBQ $0x00000100, BX JMP openSSEMainLoop openSSEMainLoopDone: // Handle the various tail sizes efficiently - TESTQ inl, inl + TESTQ BX, BX JE openSSEFinalize - CMPQ inl, $64 + CMPQ BX, $0x40 JBE openSSETail64 - CMPQ inl, $128 + CMPQ BX, $0x80 JBE openSSETail128 - CMPQ inl, $192 + CMPQ BX, $0xc0 JBE openSSETail192 JMP openSSETail256 openSSEFinalize: // Hash in the PT, AAD lengths - ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2 - polyMul + ADDQ ad_len+80(FP), R10 + ADCQ src_len+56(FP), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Final reduce - MOVQ acc0, t0 - MOVQ acc1, t1 - MOVQ acc2, t2 - SUBQ $-5, acc0 - SBBQ $-1, acc1 - SBBQ $3, acc2 - CMOVQCS t0, acc0 - CMOVQCS t1, acc1 - CMOVQCS t2, acc2 + MOVQ R10, R13 + MOVQ R11, R14 + MOVQ R12, R15 + SUBQ $-5, R10 + SBBQ $-1, R11 + SBBQ $0x03, R12 + CMOVQCS R13, R10 + CMOVQCS R14, R11 + CMOVQCS R15, R12 // Add in the "s" part of the key - ADDQ 0+sStore, acc0 - ADCQ 8+sStore, acc1 + ADDQ 16(BP), R10 + ADCQ 24(BP), R11 // Finally, constant time compare to the tag at the end of the message XORQ AX, AX - MOVQ $1, DX - XORQ (0*8)(inp), acc0 - XORQ (1*8)(inp), acc1 - ORQ acc1, acc0 + MOVQ $0x00000001, DX + XORQ (SI), R10 + XORQ 8(SI), R11 + ORQ R11, R10 CMOVQEQ DX, AX // Return true iff tags are equal MOVB AX, ret+96(FP) RET -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 129 bytes openSSE128: - // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks - MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 - MOVQ $10, itr2 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X3, X13 + MOVO X6, X14 + MOVO X10, X15 + MOVQ $0x0000000a, R9 openSSE128InnerCipherLoop: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftB1Left; shiftB2Left - shiftC0Left; shiftC1Left; shiftC2Left - shiftD0Left; shiftD1Left; shiftD2Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftB1Right; shiftB2Right - shiftC0Right; shiftC1Right; shiftC2Right - shiftD0Right; shiftD1Right; shiftD2Right - DECQ itr2 - JNE openSSE128InnerCipherLoop + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL 
$0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ R9 + JNE openSSE128InnerCipherLoop // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 - PADDL T2, C1; PADDL T2, C2 - PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 
+ PADDL ·chacha20Constants<>+0(SB), X2 + PADDL X13, X3 + PADDL X13, X4 + PADDL X13, X5 + PADDL X14, X7 + PADDL X14, X8 + PADDL X15, X10 + PADDL ·sseIncMask<>+0(SB), X15 + PADDL X15, X11 // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVOU A0, rStore; MOVOU B0, sStore + PAND ·polyClampMask<>+0(SB), X0 + MOVOU X0, (BP) + MOVOU X3, 16(BP) // Hash - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) openSSE128Open: - CMPQ inl, $16 + CMPQ BX, $0x10 JB openSSETail16 - SUBQ $16, inl + SUBQ $0x10, BX // Load for hashing - polyAdd(0(inp)) + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 // Load for decryption - MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - polyMul + MOVOU (SI), X12 + PXOR X12, X1 + MOVOU X1, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Shift the stream "left" - MOVO B1, A1 - MOVO C1, B1 - MOVO D1, C1 - MOVO A2, D1 - MOVO B2, A2 - MOVO C2, B2 - MOVO D2, C2 + MOVO X4, X1 + MOVO X7, X4 + MOVO X10, X7 + MOVO X2, X10 + MOVO X5, X2 + MOVO X8, X5 + MOVO X11, X8 JMP openSSE128Open openSSETail16: - TESTQ inl, inl + TESTQ BX, BX JE openSSEFinalize // We can safely load the CT from the end, because it is padded with the MAC - MOVQ inl, itr2 - SHLQ $4, itr2 - LEAQ ·andMask<>(SB), t0 - MOVOU (inp), T0 - ADDQ inl, inp - PAND -16(t0)(itr2*1), T0 - MOVO T0, 0+tmpStore - MOVQ T0, t0 - MOVQ 8+tmpStore, t1 - PXOR A1, T0 + MOVQ BX, R9 + SHLQ $0x04, R9 + LEAQ ·andMask<>+0(SB), R13 + MOVOU (SI), X12 + ADDQ BX, SI + PAND -16(R13)(R9*1), X12 + MOVO X12, 64(BP) + MOVQ X12, R13 + MOVQ 72(BP), R14 + PXOR X1, X12 // We can only store one byte at a time, since plaintext can be shorter than 16 bytes openSSETail16Store: - MOVQ T0, t3 - MOVB t3, (oup) - PSRLDQ $1, T0 - INCQ oup - DECQ inl + MOVQ X12, R8 + MOVB R8, (DI) + PSRLDQ $0x01, X12 + INCQ DI + DECQ BX JNE openSSETail16Store - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 JMP openSSEFinalize -// ---------------------------------------------------------------------------- -// Special optimization for the last 64 bytes of ciphertext openSSETail64: - // Need to decrypt up to 64 bytes - prepare single block - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, 
ctr0Store - XORQ itr2, itr2 - MOVQ inl, itr1 - CMPQ itr1, $16 - JB openSSETail64LoopB + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + XORQ R9, R9 + MOVQ BX, CX + CMPQ CX, $0x10 + JB openSSETail64LoopB openSSETail64LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul - SUBQ $16, itr1 + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX openSSETail64LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0) - shiftB0Left; shiftC0Left; shiftD0Left - chachaQR(A0, B0, C0, D0, T0) - shiftB0Right; shiftC0Right; shiftD0Right - - CMPQ itr1, $16 - JAE openSSETail64LoopA - - CMPQ itr2, $160 - JNE openSSETail64LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0 + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + CMPQ CX, $0x10 + JAE openSSETail64LoopA + CMPQ R9, $0xa0 + JNE openSSETail64LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL 32(BP), X3 + PADDL 48(BP), X6 + PADDL 80(BP), X9 openSSETail64DecLoop: - CMPQ inl, $16 + CMPQ BX, $0x10 JB openSSETail64DecLoopDone - SUBQ $16, inl - MOVOU (inp), T0 - PXOR T0, A0 - MOVOU A0, (oup) - LEAQ 16(inp), inp - LEAQ 16(oup), oup - MOVO B0, A0 - MOVO C0, B0 - MOVO D0, C0 + SUBQ $0x10, BX + MOVOU (SI), X12 + PXOR X12, X0 + MOVOU X0, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + MOVO X3, X0 + MOVO X6, X3 + MOVO X9, X6 JMP openSSETail64DecLoop openSSETail64DecLoopDone: - MOVO A0, A1 + MOVO X0, X1 JMP openSSETail16 -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext openSSETail128: - // Need to decrypt up to 128 bytes - prepare two blocks - MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL 
·sseIncMask<>(SB), D1; MOVO D1, ctr0Store - MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store - XORQ itr2, itr2 - MOVQ inl, itr1 - ANDQ $-16, itr1 + MOVO ·chacha20Constants<>+0(SB), X1 + MOVO 32(BP), X4 + MOVO 48(BP), X7 + MOVO 128(BP), X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 80(BP) + MOVO X1, X0 + MOVO X4, X3 + MOVO X7, X6 + MOVO X10, X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 96(BP) + XORQ R9, R9 + MOVQ BX, CX + ANDQ $-16, CX openSSETail128LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openSSETail128LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - - CMPQ itr2, itr1 - JB openSSETail128LoopA - - CMPQ itr2, $160 - JNE openSSETail128LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B0; PADDL state1Store, B1 - PADDL state2Store, C0; PADDL state2Store, C1 - PADDL ctr1Store, D0; PADDL ctr0Store, D1 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) - - SUBQ $64, inl - LEAQ 64(inp), inp - LEAQ 64(oup), oup - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 192 bytes of ciphertext + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL 
$0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + CMPQ R9, CX + JB openSSETail128LoopA + CMPQ R9, $0xa0 + JNE openSSETail128LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 96(BP), X9 + PADDL 80(BP), X10 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, (DI) + MOVOU X4, 16(DI) + MOVOU X7, 32(DI) + MOVOU X10, 48(DI) + SUBQ $0x40, BX + LEAQ 64(SI), SI + LEAQ 64(DI), DI + JMP openSSETail64DecLoop + openSSETail192: - // Need to decrypt up to 192 bytes - prepare three blocks - MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store - MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store - MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store - - MOVQ inl, itr1 - MOVQ $160, itr2 - CMPQ itr1, $160 - CMOVQGT itr2, itr1 - ANDQ $-16, itr1 - XORQ itr2, itr2 + MOVO ·chacha20Constants<>+0(SB), X2 + MOVO 32(BP), X5 + MOVO 48(BP), X8 + MOVO 128(BP), X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X11, 80(BP) + MOVO X2, X1 + MOVO X5, X4 + MOVO X8, X7 + MOVO X11, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) + MOVO X1, X0 + MOVO X4, X3 + MOVO X7, X6 + MOVO X10, X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 112(BP) + MOVQ BX, CX + MOVQ $0x000000a0, R9 + CMPQ CX, $0xa0 + CMOVQGT R9, CX + ANDQ $-16, CX + XORQ R9, R9 openSSLTail192LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openSSLTail192LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - shiftB2Left; 
shiftC2Left; shiftD2Left - - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - shiftB2Right; shiftC2Right; shiftD2Right - - CMPQ itr2, itr1 - JB openSSLTail192LoopA - - CMPQ itr2, $160 - JNE openSSLTail192LoopB - - CMPQ inl, $176 - JB openSSLTail192Store - - polyAdd(160(inp)) - polyMul - - CMPQ inl, $192 - JB openSSLTail192Store - - polyAdd(176(inp)) - polyMul + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c 
+ BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + CMPQ R9, CX + JB openSSLTail192LoopA + CMPQ R9, $0xa0 + JNE openSSLTail192LoopB + CMPQ BX, $0xb0 + JB openSSLTail192Store + ADDQ 160(SI), R10 + ADCQ 168(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + CMPQ BX, $0xc0 + JB openSSLTail192Store + ADDQ 176(SI), R10 + ADCQ 184(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openSSLTail192Store: - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 - PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 - PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2 - MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup) - - MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - - SUBQ $128, inl - LEAQ 128(inp), inp - LEAQ 128(oup), oup - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 32(BP), X5 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 48(BP), X8 + PADDL 112(BP), X9 + PADDL 96(BP), X10 + PADDL 80(BP), X11 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X2 + PXOR X13, X5 + PXOR X14, X8 + PXOR X15, X11 + MOVOU X2, (DI) + MOVOU X5, 16(DI) + MOVOU X8, 32(DI) + MOVOU X11, 48(DI) + MOVOU 64(SI), X12 + MOVOU 80(SI), X13 + MOVOU 96(SI), X14 + MOVOU 112(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + SUBQ $0x80, BX + LEAQ 128(SI), SI + LEAQ 128(DI), DI + JMP openSSETail64DecLoop + openSSETail256: - // Need to decrypt up to 256 
bytes - prepare four blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store - XORQ itr2, itr2 + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) + XORQ R9, R9 openSSETail256Loop: - // This loop inteleaves 8 ChaCha quarter rounds with 1 poly multiplication - polyAdd(0(inp)(itr2*1)) - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulStage3 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - ADDQ $2*8, itr2 - CMPQ itr2, $160 - JB openSSETail256Loop - MOVQ inl, itr1 - ANDQ $-16, itr1 + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + 
BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + 
BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + ADDQ $0x10, R9 + CMPQ R9, $0xa0 + JB openSSETail256Loop + MOVQ BX, CX + ANDQ $-16, CX openSSETail256HashLoop: - polyAdd(0(inp)(itr2*1)) - polyMul - ADDQ $2*8, itr2 - CMPQ itr2, itr1 - JB openSSETail256HashLoop + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ $0x10, R9 + CMPQ R9, CX + JB openSSETail256HashLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 - MOVO D3, tmpStore + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 + MOVO X15, 64(BP) // Load - xor - store - MOVOU (0*16)(inp), D3; PXOR D3, A0 - MOVOU (1*16)(inp), D3; PXOR D3, B0 - MOVOU (2*16)(inp), D3; PXOR D3, C0 - MOVOU (3*16)(inp), D3; PXOR D3, D0 - MOVOU A0, (0*16)(oup) - MOVOU B0, (1*16)(oup) - MOVOU C0, (2*16)(oup) - MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) - LEAQ 192(inp), inp - LEAQ 192(oup), oup - SUBQ $192, inl - MOVO A3, A0 - MOVO B3, B0 - MOVO C3, C0 - MOVO tmpStore, D0 - - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// ------------------------- AVX2 Code ---------------------------------------- + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVOU 64(SI), X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), X6 + MOVOU 112(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVOU 128(SI), X0 + MOVOU 144(SI), X3 + MOVOU 160(SI), X6 + MOVOU 176(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 128(DI) + MOVOU X5, 
144(DI) + MOVOU X8, 160(DI) + MOVOU X11, 176(DI) + LEAQ 192(SI), SI + LEAQ 192(DI), DI + SUBQ $0xc0, BX + MOVO X12, X0 + MOVO X13, X3 + MOVO X14, X6 + MOVO 64(BP), X9 + JMP openSSETail64DecLoop + chacha20Poly1305Open_AVX2: VZEROUPPER - VMOVDQU ·chacha20Constants<>(SB), AA0 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 - BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 - VPADDD ·avx2InitMask<>(SB), DD0, DD0 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x70 + BYTE $0x10 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x20 + BYTE $0xc4 + BYTE $0xc2 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x30 + VPADDD ·avx2InitMask<>+0(SB), Y4, Y4 // Special optimization, for very short buffers - CMPQ inl, $192 + CMPQ BX, $0xc0 JBE openAVX2192 - CMPQ inl, $320 + CMPQ BX, $0x00000140 JBE openAVX2320 // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream - VMOVDQA BB0, state1StoreAVX2 - VMOVDQA CC0, state2StoreAVX2 - VMOVDQA DD0, ctr3StoreAVX2 - MOVQ $10, itr2 + VMOVDQA Y14, 32(BP) + VMOVDQA Y12, 64(BP) + VMOVDQA Y4, 192(BP) + MOVQ $0x0000000a, R9 openAVX2PreparePolyKey: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - DECQ itr2 - JNE openAVX2PreparePolyKey - - VPADDD ·chacha20Constants<>(SB), AA0, AA0 - VPADDD state1StoreAVX2, BB0, BB0 - VPADDD state2StoreAVX2, CC0, CC0 - VPADDD ctr3StoreAVX2, DD0, DD0 - - VPERM2I128 $0x02, AA0, BB0, TT0 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + DECQ R9 + JNE openAVX2PreparePolyKey + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD 32(BP), Y14, Y14 + VPADDD 64(BP), Y12, Y12 + VPADDD 192(BP), Y4, Y4 + VPERM2I128 $0x02, Y0, Y14, Y3 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for the first 64 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 // Hash AD + first 64 bytes - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 + XORQ CX, CX openAVX2InitialHash64: - polyAdd(0(inp)(itr1*1)) - polyMulAVX2 - ADDQ $16, itr1 - 
CMPQ itr1, $64 - JNE openAVX2InitialHash64 + ADDQ (SI)(CX*1), R10 + ADCQ 8(SI)(CX*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ $0x10, CX + CMPQ CX, $0x40 + JNE openAVX2InitialHash64 // Decrypt the first 64 bytes - VPXOR (0*32)(inp), AA0, AA0 - VPXOR (1*32)(inp), BB0, BB0 - VMOVDQU AA0, (0*32)(oup) - VMOVDQU BB0, (1*32)(oup) - LEAQ (2*32)(inp), inp - LEAQ (2*32)(oup), oup - SUBQ $64, inl + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y14, Y14 + VMOVDQU Y0, (DI) + VMOVDQU Y14, 32(DI) + LEAQ 64(SI), SI + LEAQ 64(DI), DI + SUBQ $0x40, BX openAVX2MainLoop: - CMPQ inl, $512 + CMPQ BX, $0x00000200 JB openAVX2MainLoopDone // Load state, increment counter blocks, store the incremented counters - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - XORQ itr1, itr1 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + XORQ CX, CX openAVX2InternalLoop: - // Lets just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications - // Effectively per 512 bytes of stream we hash 480 bytes of ciphertext - polyAdd(0*8(inp)(itr1*1)) - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage1_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulStage2_AVX2 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyMulStage3_AVX2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR 
AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - polyAdd(2*8(inp)(itr1*1)) - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage1_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage2_AVX2 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage3_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulReduceStage - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(4*8(inp)(itr1*1)) - LEAQ (6*8)(itr1), itr1 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage1_AVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - polyMulStage2_AVX2 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage3_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - CMPQ itr1, $480 + ADDQ (SI)(CX*1), R10 + ADCQ 8(SI)(CX*1), R11 + ADCQ $0x01, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB 
·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + ADDQ 16(SI)(CX*1), R10 + ADCQ 24(SI)(CX*1), R11 + ADCQ $0x01, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 32(SI)(CX*1), R10 + ADCQ 40(SI)(CX*1), R11 + ADCQ $0x01, R12 + LEAQ 48(CX), CX + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD 
$0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + CMPQ CX, $0x000001e0 JNE openAVX2InternalLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) // We only hashed 480 of the 512 bytes available - hash the remaining 32 here - polyAdd(480(inp)) - polyMulAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; 
VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + ADDQ 480(SI), R10 + ADCQ 488(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) // and here - polyAdd(496(inp)) - polyMulAVX2 - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 - VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) - LEAQ (32*16)(inp), inp - LEAQ (32*16)(oup), oup - SUBQ $(32*16), inl + ADDQ 496(SI), R10 + ADCQ 504(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + VPXOR 384(SI), Y0, Y0 + VPXOR 416(SI), Y14, Y14 + VPXOR 448(SI), Y12, Y12 + VPXOR 480(SI), Y4, Y4 + VMOVDQU Y0, 384(DI) + VMOVDQU Y14, 416(DI) + VMOVDQU Y12, 448(DI) + VMOVDQU Y4, 480(DI) + LEAQ 512(SI), SI + LEAQ 512(DI), DI + SUBQ $0x00000200, BX JMP openAVX2MainLoop 
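NOTE (reviewer sketch, not part of the vendored file): the regenerated assembly above inlines what the old polyAdd/polyMulAVX2 macros expressed. Each ADDQ/ADCQ pair folds 16 bytes of ciphertext into the Poly1305 accumulator held in R10/R11/R12, and the following MULXQ/IMULQ run multiplies by the clamped key r stored at (BP)/8(BP), with the SHRQ/ANDQ sequence performing the partial reduction modulo 2^130-5. A minimal Go sketch of that per-block update, written with math/big for clarity (polyBlock and reverse are illustrative names, not x/crypto API; the real code uses 64-bit limb arithmetic with lazy reduction):

package poly1305sketch

import "math/big"

// p = 2^130 - 5, the Poly1305 prime.
var p = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

// polyBlock folds one 16-byte ciphertext block into the accumulator h and
// multiplies by the clamped key r, mirroring the ADDQ/ADCQ (accumulate, plus
// the implicit 0x01 high byte) and MULXQ/reduction run in the assembly.
func polyBlock(h, r *big.Int, block []byte) {
	m := new(big.Int).SetBytes(reverse(block)) // blocks are little-endian
	m.SetBit(m, 8*len(block), 1)               // the ADCQ $0x01 into the top limb
	h.Add(h, m)
	h.Mul(h, r)
	h.Mod(h, p)
}

// reverse returns b in reversed byte order so SetBytes (big-endian) can be used.
func reverse(b []byte) []byte {
	out := make([]byte, len(b))
	for i, v := range b {
		out[len(b)-1-i] = v
	}
	return out
}

In the 512-byte main loop this update is interleaved with the ChaCha20 quarter-rounds, so 480 of every 512 bytes are hashed while the keystream is being computed; the remaining 32 bytes are hashed after the state additions, exactly as the "We only hashed 480 of the 512 bytes available" comment in the hunk describes.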
openAVX2MainLoopDone: // Handle the various tail sizes efficiently - TESTQ inl, inl + TESTQ BX, BX JE openSSEFinalize - CMPQ inl, $128 + CMPQ BX, $0x80 JBE openAVX2Tail128 - CMPQ inl, $256 + CMPQ BX, $0x00000100 JBE openAVX2Tail256 - CMPQ inl, $384 + CMPQ BX, $0x00000180 JBE openAVX2Tail384 JMP openAVX2Tail512 -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 193 bytes openAVX2192: - // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks - VMOVDQA AA0, AA1 - VMOVDQA BB0, BB1 - VMOVDQA CC0, CC1 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2 - VMOVDQA BB0, BB2 - VMOVDQA CC0, CC2 - VMOVDQA DD0, DD2 - VMOVDQA DD1, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VMOVDQA Y4, Y2 + VMOVDQA Y1, Y15 + MOVQ $0x0000000a, R9 openAVX2192InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ R9 JNE openAVX2192InnerCipherLoop - VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 - VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 - VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 - VPADDD DD2, DD0, DD0; VPADDD 
TT3, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 + VPADDD Y6, Y0, Y0 + VPADDD Y6, Y5, Y5 + VPADDD Y10, Y14, Y14 + VPADDD Y10, Y9, Y9 + VPADDD Y8, Y12, Y12 + VPADDD Y8, Y13, Y13 + VPADDD Y2, Y4, Y4 + VPADDD Y15, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 192 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 openAVX2ShortOpen: // Hash - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) openAVX2ShortOpenLoop: - CMPQ inl, $32 + CMPQ BX, $0x20 JB openAVX2ShortTail32 - SUBQ $32, inl + SUBQ $0x20, BX // Load for hashing - polyAdd(0*8(inp)) - polyMulAVX2 - polyAdd(2*8(inp)) - polyMulAVX2 + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(SI), R10 + ADCQ 24(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Load for decryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp - LEAQ (1*32)(oup), oup + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI + LEAQ 32(DI), DI // Shift stream left - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 - VMOVDQA AA1, DD0 - VMOVDQA BB1, AA1 - VMOVDQA CC1, BB1 - VMOVDQA DD1, CC1 - VMOVDQA AA2, DD1 - VMOVDQA BB2, AA2 + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 + VMOVDQA Y5, Y4 + VMOVDQA Y9, Y5 + VMOVDQA Y13, Y9 + VMOVDQA Y1, Y13 + VMOVDQA Y6, Y1 + VMOVDQA Y10, Y6 JMP openAVX2ShortOpenLoop openAVX2ShortTail32: - CMPQ inl, $16 - VMOVDQA A0, A1 + CMPQ BX, $0x10 + VMOVDQA X0, X1 JB openAVX2ShortDone - - SUBQ $16, inl + SUBQ $0x10, BX // Load for hashing - polyAdd(0*8(inp)) - polyMulAVX2 + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 
+ ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Load for decryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 + VPXOR (SI), X0, X12 + VMOVDQU X12, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, X1 openAVX2ShortDone: VZEROUPPER JMP openSSETail16 -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 321 bytes openAVX2320: - // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks - VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y14, Y7 + VMOVDQA Y12, Y11 + VMOVDQA Y4, Y15 + MOVQ $0x0000000a, R9 openAVX2320InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, 
Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ R9 JNE openAVX2320InnerCipherLoop - - VMOVDQA ·chacha20Constants<>(SB), TT0 - VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 - VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 - VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 - VMOVDQA ·avx2IncMask<>(SB), TT0 - VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD2, DD2 + VMOVDQA ·chacha20Constants<>+0(SB), Y3 + VPADDD Y3, Y0, Y0 + VPADDD Y3, Y5, Y5 + VPADDD Y3, Y6, Y6 + VPADDD Y7, Y14, Y14 + VPADDD Y7, Y9, Y9 + VPADDD Y7, Y10, Y10 + VPADDD Y11, Y12, Y12 + VPADDD Y11, Y13, Y13 + VPADDD Y11, Y8, Y8 + VMOVDQA ·avx2IncMask<>+0(SB), Y3 + VPADDD Y15, Y4, Y4 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y1, Y1 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y2, Y2 // Clamp and store poly key - VPERM2I128 $0x02, AA0, BB0, TT0 - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 320 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 - VPERM2I128 $0x02, AA2, BB2, CC1 - VPERM2I128 $0x02, CC2, DD2, DD1 - VPERM2I128 $0x13, AA2, BB2, AA2 - VPERM2I128 $0x13, CC2, DD2, BB2 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 + VPERM2I128 $0x02, Y6, Y10, Y13 + VPERM2I128 $0x02, Y8, Y2, Y1 + VPERM2I128 $0x13, Y6, Y10, Y6 + VPERM2I128 $0x13, Y8, Y2, Y10 JMP openAVX2ShortOpen -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext openAVX2Tail128: // Need to decrypt up to 128 bytes - prepare two blocks - VMOVDQA ·chacha20Constants<>(SB), AA1 - VMOVDQA state1StoreAVX2, BB1 - VMOVDQA state2StoreAVX2, CC1 - VMOVDQA ctr3StoreAVX2, DD1 - VPADDD ·avx2IncMask<>(SB), DD1, DD1 - VMOVDQA DD1, DD0 - - XORQ itr2, itr2 - MOVQ inl, itr1 - ANDQ $-16, itr1 - TESTQ itr1, itr1 - JE openAVX2Tail128LoopB + VMOVDQA ·chacha20Constants<>+0(SB), Y5 + VMOVDQA 32(BP), Y9 + VMOVDQA 64(BP), Y13 + VMOVDQA 192(BP), Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y1 + 
VMOVDQA Y1, Y4 + XORQ R9, R9 + MOVQ BX, CX + ANDQ $-16, CX + TESTQ CX, CX + JE openAVX2Tail128LoopB openAVX2Tail128LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMulAVX2 + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openAVX2Tail128LoopB: - ADDQ $16, itr2 - chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD1, DD1, DD1 - CMPQ itr2, itr1 - JB openAVX2Tail128LoopA - CMPQ itr2, $160 - JNE openAVX2Tail128LoopB - - VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC1, CC1 - VPADDD DD0, DD1, DD1 - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + ADDQ $0x10, R9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + CMPQ R9, CX + JB openAVX2Tail128LoopA + CMPQ R9, $0xa0 + JNE openAVX2Tail128LoopB + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y13, Y13 + VPADDD Y4, Y1, Y1 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 openAVX2TailLoop: - CMPQ inl, $32 + CMPQ BX, $0x20 JB openAVX2Tail - SUBQ $32, inl + SUBQ $0x20, BX // Load for decryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp - LEAQ (1*32)(oup), oup - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI + LEAQ 32(DI), DI + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 JMP openAVX2TailLoop openAVX2Tail: - CMPQ inl, $16 - VMOVDQA A0, A1 + CMPQ BX, $0x10 + VMOVDQA X0, X1 JB openAVX2TailDone - SUBQ $16, inl + SUBQ $0x10, BX // Load for decryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 + VPXOR (SI), X0, X12 + VMOVDQU X12, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, 
X1 openAVX2TailDone: VZEROUPPER JMP openSSETail16 -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext openAVX2Tail256: - // Need to decrypt up to 256 bytes - prepare four blocks - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA DD0, TT1 - VMOVDQA DD1, TT2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, Y7 + VMOVDQA Y1, Y11 // Compute the number of iterations that will hash data - MOVQ inl, tmpStoreAVX2 - MOVQ inl, itr1 - SUBQ $128, itr1 - SHRQ $4, itr1 - MOVQ $10, itr2 - CMPQ itr1, $10 - CMOVQGT itr2, itr1 - MOVQ inp, inl - XORQ itr2, itr2 + MOVQ BX, 224(BP) + MOVQ BX, CX + SUBQ $0x80, CX + SHRQ $0x04, CX + MOVQ $0x0000000a, R9 + CMPQ CX, $0x0a + CMOVQGT R9, CX + MOVQ SI, BX + XORQ R9, R9 openAVX2Tail256LoopA: - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl + ADDQ (BX), R10 + ADCQ 8(BX), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX - // Perform ChaCha rounds, while hashing the remaining input openAVX2Tail256LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - INCQ itr2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - CMPQ itr2, itr1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + INCQ R9 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD 
$0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + CMPQ R9, CX JB openAVX2Tail256LoopA + CMPQ R9, $0x0a + JNE openAVX2Tail256LoopB + MOVQ BX, R9 + SUBQ SI, BX + MOVQ BX, CX + MOVQ 224(BP), BX - CMPQ itr2, $10 - JNE openAVX2Tail256LoopB - - MOVQ inl, itr2 - SUBQ inp, inl - MOVQ inl, itr1 - MOVQ tmpStoreAVX2, inl - - // Hash the remainder of data (if any) openAVX2Tail256Hash: - ADDQ $16, itr1 - CMPQ itr1, inl - JGT openAVX2Tail256HashEnd - polyAdd (0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - JMP openAVX2Tail256Hash - -// Store 128 bytes safely, then go to store loop + ADDQ $0x10, CX + CMPQ CX, BX + JGT openAVX2Tail256HashEnd + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + JMP openAVX2Tail256Hash + openAVX2Tail256HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; VPERM2I128 $0x13, CC0, DD0, DD2 - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - - VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2 - VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup) - LEAQ (4*32)(inp), inp - LEAQ (4*32)(oup), oup - SUBQ $4*32, inl - - JMP openAVX2TailLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 384 bytes of ciphertext + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y6 + VPERM2I128 $0x02, Y12, Y4, Y10 + VPERM2I128 $0x13, Y0, Y14, Y8 + VPERM2I128 $0x13, Y12, Y4, Y2 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR (SI), Y6, Y6 + VPXOR 32(SI), Y10, Y10 + 
VPXOR 64(SI), Y8, Y8 + VPXOR 96(SI), Y2, Y2 + VMOVDQU Y6, (DI) + VMOVDQU Y10, 32(DI) + VMOVDQU Y8, 64(DI) + VMOVDQU Y2, 96(DI) + LEAQ 128(SI), SI + LEAQ 128(DI), DI + SUBQ $0x80, BX + JMP openAVX2TailLoop + openAVX2Tail384: // Need to decrypt up to 384 bytes - prepare six blocks - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA DD0, ctr0StoreAVX2 - VMOVDQA DD1, ctr1StoreAVX2 - VMOVDQA DD2, ctr2StoreAVX2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) // Compute the number of iterations that will hash two blocks of data - MOVQ inl, tmpStoreAVX2 - MOVQ inl, itr1 - SUBQ $256, itr1 - SHRQ $4, itr1 - ADDQ $6, itr1 - MOVQ $10, itr2 - CMPQ itr1, $10 - CMOVQGT itr2, itr1 - MOVQ inp, inl - XORQ itr2, itr2 - - // Perform ChaCha rounds, while hashing the remaining input + MOVQ BX, 224(BP) + MOVQ BX, CX + SUBQ $0x00000100, CX + SHRQ $0x04, CX + ADDQ $0x06, CX + MOVQ $0x0000000a, R9 + CMPQ CX, $0x0a + CMOVQGT R9, CX + MOVQ SI, BX + XORQ R9, R9 + openAVX2Tail384LoopB: - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl + ADDQ (BX), R10 + ADCQ 8(BX), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX openAVX2Tail384LoopA: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl - INCQ itr2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - - CMPQ itr2, itr1 - JB openAVX2Tail384LoopB - - CMPQ itr2, $10 - JNE openAVX2Tail384LoopA - - MOVQ inl, itr2 - SUBQ inp, inl - MOVQ inl, itr1 - MOVQ tmpStoreAVX2, inl + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + 
VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + ADDQ (BX), R10 + ADCQ 8(BX), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX + INCQ R9 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + CMPQ R9, CX + JB openAVX2Tail384LoopB + CMPQ R9, $0x0a + JNE openAVX2Tail384LoopA + MOVQ BX, R9 + SUBQ SI, BX + MOVQ BX, CX + MOVQ 224(BP), BX openAVX2Tail384Hash: - ADDQ $16, itr1 - CMPQ itr1, inl - JGT openAVX2Tail384HashEnd - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - JMP openAVX2Tail384Hash - -// Store 256 bytes safely, then go to store loop + ADDQ $0x10, CX + CMPQ CX, BX + JGT openAVX2Tail384HashEnd + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, 
R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + JMP openAVX2Tail384Hash + openAVX2Tail384HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2 - VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3 - VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 - VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - LEAQ (8*32)(inp), inp - LEAQ (8*32)(oup), oup - SUBQ $8*32, inl + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y3 + VPERM2I128 $0x02, Y13, Y1, Y7 + VPERM2I128 $0x13, Y5, Y9, Y11 + VPERM2I128 $0x13, Y13, Y1, Y15 + VPXOR 128(SI), Y3, Y3 + VPXOR 160(SI), Y7, Y7 + VPXOR 192(SI), Y11, Y11 + VPXOR 224(SI), Y15, Y15 + VMOVDQU Y3, 128(DI) + VMOVDQU Y7, 160(DI) + VMOVDQU Y11, 192(DI) + VMOVDQU Y15, 224(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + LEAQ 256(SI), SI + LEAQ 256(DI), DI + SUBQ $0x00000100, BX JMP openAVX2TailLoop -// ---------------------------------------------------------------------------- -// Special optimization for the last 512 bytes of ciphertext openAVX2Tail512: - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, 
CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - XORQ itr1, itr1 - MOVQ inp, itr2 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + XORQ CX, CX + MOVQ SI, R9 openAVX2Tail512LoopB: - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ (2*8)(itr2), itr2 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 openAVX2Tail512LoopA: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyAdd(0*8(itr2)) - polyMulAVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; 
VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(2*8(itr2)) - polyMulAVX2 - LEAQ (4*8)(itr2), itr2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - INCQ itr1 - CMPQ itr1, $4 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + 
VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 16(R9), R10 + ADCQ 24(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(R9), R9 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + INCQ CX + CMPQ CX, $0x04 JLT openAVX2Tail512LoopB 
- - CMPQ itr1, $10 - JNE openAVX2Tail512LoopA - - MOVQ inl, itr1 - SUBQ $384, itr1 - ANDQ $-16, itr1 + CMPQ CX, $0x0a + JNE openAVX2Tail512LoopA + MOVQ BX, CX + SUBQ $0x00000180, CX + ANDQ $-16, CX openAVX2Tail512HashLoop: - TESTQ itr1, itr1 + TESTQ CX, CX JE openAVX2Tail512HashEnd - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - SUBQ $16, itr1 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + SUBQ $0x10, CX JMP openAVX2Tail512HashLoop openAVX2Tail512HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - - LEAQ (12*32)(inp), inp - LEAQ (12*32)(oup), oup - SUBQ $12*32, inl - - JMP openAVX2TailLoop - -// ---------------------------------------------------------------------------- -// ---------------------------------------------------------------------------- -// func chacha20Poly1305Seal(dst, key, src, ad []byte) -TEXT ·chacha20Poly1305Seal(SB), 0, $288-96 - // For aligned stack access + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), 
Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + LEAQ 384(SI), SI + LEAQ 384(DI), DI + SUBQ $0x00000180, BX + JMP openAVX2TailLoop + +DATA ·chacha20Constants<>+0(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+4(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+8(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+12(SB)/4, $0x6b206574 +DATA ·chacha20Constants<>+16(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+20(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+24(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+28(SB)/4, $0x6b206574 +GLOBL ·chacha20Constants<>(SB), RODATA|NOPTR, $32 + +DATA ·polyClampMask<>+0(SB)/8, $0x0ffffffc0fffffff +DATA ·polyClampMask<>+8(SB)/8, $0x0ffffffc0ffffffc +DATA ·polyClampMask<>+16(SB)/8, $0xffffffffffffffff +DATA ·polyClampMask<>+24(SB)/8, $0xffffffffffffffff +GLOBL ·polyClampMask<>(SB), RODATA|NOPTR, $32 + +DATA ·sseIncMask<>+0(SB)/8, $0x0000000000000001 +DATA ·sseIncMask<>+8(SB)/8, $0x0000000000000000 +GLOBL ·sseIncMask<>(SB), RODATA|NOPTR, $16 + +DATA ·andMask<>+0(SB)/8, $0x00000000000000ff +DATA ·andMask<>+8(SB)/8, $0x0000000000000000 +DATA ·andMask<>+16(SB)/8, $0x000000000000ffff +DATA ·andMask<>+24(SB)/8, $0x0000000000000000 +DATA ·andMask<>+32(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+40(SB)/8, $0x0000000000000000 +DATA ·andMask<>+48(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+56(SB)/8, $0x0000000000000000 +DATA ·andMask<>+64(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+72(SB)/8, $0x0000000000000000 +DATA ·andMask<>+80(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+88(SB)/8, $0x0000000000000000 +DATA ·andMask<>+96(SB)/8, $0x00ffffffffffffff +DATA ·andMask<>+104(SB)/8, $0x0000000000000000 +DATA ·andMask<>+112(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+120(SB)/8, $0x0000000000000000 +DATA ·andMask<>+128(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+136(SB)/8, $0x00000000000000ff +DATA ·andMask<>+144(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+152(SB)/8, $0x000000000000ffff +DATA ·andMask<>+160(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+168(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+176(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+184(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+192(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+200(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+208(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+216(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+224(SB)/8, 
$0xffffffffffffffff +DATA ·andMask<>+232(SB)/8, $0x00ffffffffffffff +GLOBL ·andMask<>(SB), RODATA|NOPTR, $240 + +DATA ·avx2InitMask<>+0(SB)/8, $0x0000000000000000 +DATA ·avx2InitMask<>+8(SB)/8, $0x0000000000000000 +DATA ·avx2InitMask<>+16(SB)/8, $0x0000000000000001 +DATA ·avx2InitMask<>+24(SB)/8, $0x0000000000000000 +GLOBL ·avx2InitMask<>(SB), RODATA|NOPTR, $32 + +DATA ·rol16<>+0(SB)/8, $0x0504070601000302 +DATA ·rol16<>+8(SB)/8, $0x0d0c0f0e09080b0a +DATA ·rol16<>+16(SB)/8, $0x0504070601000302 +DATA ·rol16<>+24(SB)/8, $0x0d0c0f0e09080b0a +GLOBL ·rol16<>(SB), RODATA|NOPTR, $32 + +DATA ·rol8<>+0(SB)/8, $0x0605040702010003 +DATA ·rol8<>+8(SB)/8, $0x0e0d0c0f0a09080b +DATA ·rol8<>+16(SB)/8, $0x0605040702010003 +DATA ·rol8<>+24(SB)/8, $0x0e0d0c0f0a09080b +GLOBL ·rol8<>(SB), RODATA|NOPTR, $32 + +DATA ·avx2IncMask<>+0(SB)/8, $0x0000000000000002 +DATA ·avx2IncMask<>+8(SB)/8, $0x0000000000000000 +DATA ·avx2IncMask<>+16(SB)/8, $0x0000000000000002 +DATA ·avx2IncMask<>+24(SB)/8, $0x0000000000000000 +GLOBL ·avx2IncMask<>(SB), RODATA|NOPTR, $32 + +// func chacha20Poly1305Seal(dst []byte, key []uint32, src []byte, ad []byte) +// Requires: AVX, AVX2, BMI2, CMOV, SSE2 +TEXT ·chacha20Poly1305Seal(SB), $288-96 MOVQ SP, BP - ADDQ $32, BP + ADDQ $0x20, BP ANDQ $-32, BP - MOVQ dst+0(FP), oup - MOVQ key+24(FP), keyp - MOVQ src+48(FP), inp - MOVQ src_len+56(FP), inl - MOVQ ad+72(FP), adp - - CMPB ·useAVX2(SB), $1 + MOVQ dst_base+0(FP), DI + MOVQ key_base+24(FP), R8 + MOVQ src_base+48(FP), SI + MOVQ src_len+56(FP), BX + MOVQ ad_base+72(FP), CX + CMPB ·useAVX2+0(SB), $0x01 JE chacha20Poly1305Seal_AVX2 // Special optimization, for very short buffers - CMPQ inl, $128 - JBE sealSSE128 // About 15% faster + CMPQ BX, $0x80 + JBE sealSSE128 // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration - MOVOU ·chacha20Constants<>(SB), A0 - MOVOU (1*16)(keyp), B0 - MOVOU (2*16)(keyp), C0 - MOVOU (3*16)(keyp), D0 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 // Store state on stack for future use - MOVO B0, state1Store - MOVO C0, state2Store + MOVO X3, 32(BP) + MOVO X6, 48(BP) // Load state, increment counter blocks - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store - MOVQ $10, itr2 + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) + MOVQ $0x0000000a, R9 sealSSEIntroLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Right; shiftB1Right; 
shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr2 - JNE sealSSEIntroLoop + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + 
PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ R9 + JNE sealSSEIntroLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVO A0, rStore - MOVO B0, sStore + PAND ·polyClampMask<>+0(SB), X0 + MOVO X0, (BP) + MOVO X3, 16(BP) // Hash AAD - MOVQ ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) - MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup) - - MOVQ $128, itr1 - SUBQ $128, inl - LEAQ 128(inp), inp - - MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1 - - CMPQ inl, $64 - JBE sealSSE128SealHash - - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 - MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup) - - ADDQ $64, itr1 - SUBQ $64, inl - LEAQ 64(inp), inp - - MOVQ $2, itr1 - MOVQ $8, itr2 - - CMPQ inl, $64 - JBE sealSSETail64 - CMPQ inl, $128 - JBE sealSSETail128 - CMPQ inl, $192 - JBE sealSSETail192 + MOVQ ad_len+80(FP), R9 + CALL polyHashADInternal<>(SB) + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, (DI) + MOVOU X4, 16(DI) + MOVOU X7, 32(DI) + MOVOU X10, 48(DI) + MOVOU 64(SI), X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), 
X6 + MOVOU 112(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 64(DI) + MOVOU X5, 80(DI) + MOVOU X8, 96(DI) + MOVOU X11, 112(DI) + MOVQ $0x00000080, CX + SUBQ $0x80, BX + LEAQ 128(SI), SI + MOVO X12, X1 + MOVO X13, X4 + MOVO X14, X7 + MOVO X15, X10 + CMPQ BX, $0x40 + JBE sealSSE128SealHash + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X12 + PXOR X3, X13 + PXOR X6, X14 + PXOR X9, X15 + MOVOU X12, 128(DI) + MOVOU X13, 144(DI) + MOVOU X14, 160(DI) + MOVOU X15, 176(DI) + ADDQ $0x40, CX + SUBQ $0x40, BX + LEAQ 64(SI), SI + MOVQ $0x00000002, CX + MOVQ $0x00000008, R9 + CMPQ BX, $0x40 + JBE sealSSETail64 + CMPQ BX, $0x80 + JBE sealSSETail128 + CMPQ BX, $0xc0 + JBE sealSSETail192 sealSSEMainLoop: // Load state, increment counter blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) sealSSEInnerLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyAdd(0(oup)) - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - LEAQ (2*8)(oup), oup - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - polyMulStage3 - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr2 - JGE sealSSEInnerLoop - polyAdd(0(oup)) - polyMul - LEAQ (2*8)(oup), oup - DECQ itr1 - JG sealSSEInnerLoop + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + 
MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + LEAQ 16(DI), DI + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + 
BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ R9 + JGE sealSSEInnerLoop + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + DECQ CX + JG sealSSEInnerLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 - MOVO D3, tmpStore + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 + MOVO X15, 64(BP) // Load - xor - store - MOVOU (0*16)(inp), D3; PXOR D3, A0 - MOVOU (1*16)(inp), D3; PXOR D3, B0 - MOVOU (2*16)(inp), D3; PXOR D3, C0 - MOVOU (3*16)(inp), D3; PXOR D3, D0 - MOVOU A0, (0*16)(oup) - MOVOU B0, (1*16)(oup) - MOVOU C0, (2*16)(oup) - MOVOU D0, (3*16)(oup) - MOVO tmpStore, D3 - - MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) - ADDQ $192, inp - MOVQ $192, itr1 - SUBQ $192, inl - MOVO A3, A1 - MOVO B3, B1 - MOVO C3, C1 - MOVO D3, D1 - CMPQ inl, $64 + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVO 64(BP), X15 + MOVOU 64(SI), 
X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), X6 + MOVOU 112(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVOU 128(SI), X0 + MOVOU 144(SI), X3 + MOVOU 160(SI), X6 + MOVOU 176(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 128(DI) + MOVOU X5, 144(DI) + MOVOU X8, 160(DI) + MOVOU X11, 176(DI) + ADDQ $0xc0, SI + MOVQ $0x000000c0, CX + SUBQ $0xc0, BX + MOVO X12, X1 + MOVO X13, X4 + MOVO X14, X7 + MOVO X15, X10 + CMPQ BX, $0x40 JBE sealSSE128SealHash - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 - MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup) - LEAQ 64(inp), inp - SUBQ $64, inl - MOVQ $6, itr1 - MOVQ $4, itr2 - CMPQ inl, $192 + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X12 + PXOR X3, X13 + PXOR X6, X14 + PXOR X9, X15 + MOVOU X12, 192(DI) + MOVOU X13, 208(DI) + MOVOU X14, 224(DI) + MOVOU X15, 240(DI) + LEAQ 64(SI), SI + SUBQ $0x40, BX + MOVQ $0x00000006, CX + MOVQ $0x00000004, R9 + CMPQ BX, $0xc0 JG sealSSEMainLoop - - MOVQ inl, itr1 - TESTQ inl, inl + MOVQ BX, CX + TESTQ BX, BX JE sealSSE128SealHash - MOVQ $6, itr1 - CMPQ inl, $64 + MOVQ $0x00000006, CX + CMPQ BX, $0x40 JBE sealSSETail64 - CMPQ inl, $128 + CMPQ BX, $0x80 JBE sealSSETail128 JMP sealSSETail192 -// ---------------------------------------------------------------------------- -// Special optimization for the last 64 bytes of plaintext sealSSETail64: - // Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), A1 - MOVO state1Store, B1 - MOVO state2Store, C1 - MOVO ctr3Store, D1 - PADDL ·sseIncMask<>(SB), D1 - MOVO D1, ctr0Store + MOVO ·chacha20Constants<>+0(SB), X1 + MOVO 32(BP), X4 + MOVO 48(BP), X7 + MOVO 128(BP), X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 80(BP) sealSSETail64LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealSSETail64LoopB: - chachaQR(A1, B1, C1, D1, T1) - shiftB1Left; shiftC1Left; shiftD1Left - chachaQR(A1, B1, C1, D1, T1) - shiftB1Right; shiftC1Right; shiftD1Right - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - - DECQ itr1 - JG sealSSETail64LoopA - - DECQ itr2 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x0c, X13 + PSRLL $0x14, X4 + PXOR X13, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x07, X13 + PSRLL $0x19, X4 + PXOR X13, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 
+ BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x0c, X13 + PSRLL $0x14, X4 + PXOR X13, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x07, X13 + PSRLL $0x19, X4 + PXOR X13, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + DECQ CX + JG sealSSETail64LoopA + DECQ R9 JGE sealSSETail64LoopB - PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B1 - PADDL state2Store, C1 - PADDL ctr0Store, D1 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X4 + PADDL 48(BP), X7 + PADDL 80(BP), X10 + JMP sealSSE128Seal - JMP sealSSE128Seal - -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of plaintext sealSSETail128: - // Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) sealSSETail128LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealSSETail128LoopB: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - - DECQ itr1 - JG sealSSETail128LoopA - - DECQ itr2 
- JGE sealSSETail128LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B0; PADDL state1Store, B1 - PADDL state2Store, C0; PADDL state2Store, C1 - PADDL ctr0Store, D0; PADDL ctr1Store, D1 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 - MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) - - MOVQ $64, itr1 - LEAQ 64(inp), inp - SUBQ $64, inl - - JMP sealSSE128SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 192 bytes of plaintext + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + DECQ CX + JG sealSSETail128LoopA + DECQ R9 + JGE 
sealSSETail128LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 80(BP), X9 + PADDL 96(BP), X10 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X0 + PXOR X13, X3 + PXOR X14, X6 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVQ $0x00000040, CX + LEAQ 64(SI), SI + SUBQ $0x40, BX + JMP sealSSE128SealHash + sealSSETail192: - // Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X11, 112(BP) sealSSETail192LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealSSETail192LoopB: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - shiftB2Left; shiftC2Left; shiftD2Left - - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - shiftB2Right; shiftC2Right; shiftD2Right - - DECQ itr1 - JG sealSSETail192LoopA - - DECQ itr2 - JGE sealSSETail192LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 - PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 - PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 - MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - - MOVO A2, A1 - MOVO B2, B1 - MOVO C2, 
C1 - MOVO D2, D1 - MOVQ $128, itr1 - LEAQ 128(inp), inp - SUBQ $128, inl - - JMP sealSSE128SealHash - -// ---------------------------------------------------------------------------- -// Special seal optimization for buffers smaller than 129 bytes + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE 
$0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ CX + JG sealSSETail192LoopA + DECQ R9 + JGE sealSSETail192LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 32(BP), X5 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 48(BP), X8 + PADDL 80(BP), X9 + PADDL 96(BP), X10 + PADDL 112(BP), X11 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X0 + PXOR X13, X3 + PXOR X14, X6 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVOU 64(SI), X12 + MOVOU 80(SI), X13 + MOVOU 96(SI), X14 + MOVOU 112(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVO X2, X1 + MOVO X5, X4 + MOVO X8, X7 + MOVO X11, X10 + MOVQ $0x00000080, CX + LEAQ 128(SI), SI + SUBQ $0x80, BX + JMP sealSSE128SealHash + sealSSE128: - // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks - MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 - MOVQ $10, itr2 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X3, X13 + MOVO X6, X14 + MOVO X10, X15 + MOVQ $0x0000000a, R9 sealSSE128InnerCipherLoop: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftB1Left; shiftB2Left - shiftC0Left; shiftC1Left; shiftC2Left - shiftD0Left; shiftD1Left; shiftD2Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftB1Right; shiftB2Right - shiftC0Right; shiftC1Right; shiftC2Right - shiftD0Right; shiftD1Right; shiftD2Right - DECQ itr2 - JNE sealSSE128InnerCipherLoop + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, 
X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ R9 + JNE sealSSE128InnerCipherLoop // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 - PADDL T2, C1; PADDL T2, C2 - PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 - PAND ·polyClampMask<>(SB), A0 - MOVOU A0, rStore - MOVOU B0, sStore + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL X13, X3 + PADDL X13, X4 + PADDL X13, X5 + PADDL X14, X7 + PADDL X14, X8 + PADDL X15, X10 + PADDL ·sseIncMask<>+0(SB), X15 + PADDL X15, X11 + PAND ·polyClampMask<>+0(SB), X0 + MOVOU X0, (BP) + MOVOU X3, 16(BP) // Hash - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 + XORQ CX, CX sealSSE128SealHash: - // itr1 holds the number of bytes encrypted but not yet hashed - CMPQ itr1, $16 - JB sealSSE128Seal - polyAdd(0(oup)) - polyMul - - SUBQ $16, itr1 - ADDQ $16, 
oup - - JMP sealSSE128SealHash + CMPQ CX, $0x10 + JB sealSSE128Seal + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX + ADDQ $0x10, DI + JMP sealSSE128SealHash sealSSE128Seal: - CMPQ inl, $16 + CMPQ BX, $0x10 JB sealSSETail - SUBQ $16, inl + SUBQ $0x10, BX // Load for decryption - MOVOU (inp), T0 - PXOR T0, A1 - MOVOU A1, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup + MOVOU (SI), X12 + PXOR X12, X1 + MOVOU X1, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI // Extract for hashing - MOVQ A1, t0 - PSRLDQ $8, A1 - MOVQ A1, t1 - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul + MOVQ X1, R13 + PSRLDQ $0x08, X1 + MOVQ X1, R14 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Shift the stream "left" - MOVO B1, A1 - MOVO C1, B1 - MOVO D1, C1 - MOVO A2, D1 - MOVO B2, A2 - MOVO C2, B2 - MOVO D2, C2 + MOVO X4, X1 + MOVO X7, X4 + MOVO X10, X7 + MOVO X2, X10 + MOVO X5, X2 + MOVO X8, X5 + MOVO X11, X8 JMP sealSSE128Seal sealSSETail: - TESTQ inl, inl + TESTQ BX, BX JE sealSSEFinalize // We can only load the PT one byte at a time to avoid read after end of buffer - MOVQ inl, itr2 - SHLQ $4, itr2 - LEAQ ·andMask<>(SB), t0 - MOVQ inl, itr1 - LEAQ -1(inp)(inl*1), inp - XORQ t2, t2 - XORQ t3, t3 + MOVQ BX, R9 + SHLQ $0x04, R9 + LEAQ ·andMask<>+0(SB), R13 + MOVQ BX, CX + LEAQ -1(SI)(BX*1), SI + XORQ R15, R15 + XORQ R8, R8 XORQ AX, AX sealSSETailLoadLoop: - SHLQ $8, t2, t3 - SHLQ $8, t2 - MOVB (inp), AX - XORQ AX, t2 - LEAQ -1(inp), inp - DECQ itr1 + SHLQ $0x08, R15, R8 + SHLQ $0x08, R15 + MOVB (SI), AX + XORQ AX, R15 + LEAQ -1(SI), SI + DECQ CX JNE sealSSETailLoadLoop - MOVQ t2, 0+tmpStore - MOVQ t3, 8+tmpStore - PXOR 0+tmpStore, A1 - MOVOU A1, (oup) - MOVOU -16(t0)(itr2*1), T0 - PAND T0, A1 - MOVQ A1, t0 - PSRLDQ $8, A1 - MOVQ A1, t1 - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul - - ADDQ inl, oup + MOVQ R15, 64(BP) + MOVQ R8, 72(BP) + PXOR 64(BP), X1 + MOVOU X1, (DI) + MOVOU -16(R13)(R9*1), X12 + PAND X12, X1 + MOVQ X1, R13 + PSRLDQ $0x08, X1 + MOVQ X1, R14 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, 
R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ BX, DI sealSSEFinalize: // Hash in the buffer lengths - ADDQ ad_len+80(FP), acc0 - ADCQ src_len+56(FP), acc1 - ADCQ $1, acc2 - polyMul + ADDQ ad_len+80(FP), R10 + ADCQ src_len+56(FP), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Final reduce - MOVQ acc0, t0 - MOVQ acc1, t1 - MOVQ acc2, t2 - SUBQ $-5, acc0 - SBBQ $-1, acc1 - SBBQ $3, acc2 - CMOVQCS t0, acc0 - CMOVQCS t1, acc1 - CMOVQCS t2, acc2 + MOVQ R10, R13 + MOVQ R11, R14 + MOVQ R12, R15 + SUBQ $-5, R10 + SBBQ $-1, R11 + SBBQ $0x03, R12 + CMOVQCS R13, R10 + CMOVQCS R14, R11 + CMOVQCS R15, R12 // Add in the "s" part of the key - ADDQ 0+sStore, acc0 - ADCQ 8+sStore, acc1 + ADDQ 16(BP), R10 + ADCQ 24(BP), R11 // Finally store the tag at the end of the message - MOVQ acc0, (0*8)(oup) - MOVQ acc1, (1*8)(oup) + MOVQ R10, (DI) + MOVQ R11, 8(DI) RET -// ---------------------------------------------------------------------------- -// ------------------------- AVX2 Code ---------------------------------------- chacha20Poly1305Seal_AVX2: VZEROUPPER - VMOVDQU ·chacha20Constants<>(SB), AA0 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 - BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 - VPADDD ·avx2InitMask<>(SB), DD0, DD0 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x70 + BYTE $0x10 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x20 + BYTE $0xc4 + BYTE $0xc2 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x30 + VPADDD ·avx2InitMask<>+0(SB), Y4, Y4 // Special optimizations, for very short buffers - CMPQ inl, $192 - JBE seal192AVX2 // 33% faster - CMPQ inl, $320 - JBE seal320AVX2 // 17% faster + CMPQ BX, $0x000000c0 + JBE seal192AVX2 + CMPQ BX, $0x00000140 + JBE seal320AVX2 // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream - VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2 - VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2 - VMOVDQA DD3, ctr3StoreAVX2 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA Y14, 32(BP) + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + 
VMOVDQA Y12, 64(BP) + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, 96(BP) + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y1, 128(BP) + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + MOVQ $0x0000000a, R9 sealAVX2IntroLoop: - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 - VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 - VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 - VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 - - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 - VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 - VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 - DECQ itr2 - JNE sealAVX2IntroLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - - VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127 - VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key - VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95 + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD 
Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y3, Y3, Y3 + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ R9 + JNE sealAVX2IntroLoop + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPERM2I128 $0x02, Y0, Y14, Y4 + VPERM2I128 $0x13, Y0, Y14, Y0 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), DD0, DD0 - VMOVDQA DD0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y4, Y4 + VMOVDQA Y4, (BP) // Hash AD - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) // Can store at least 320 bytes - VPXOR (0*32)(inp), AA0, AA0 - VPXOR 
(1*32)(inp), CC0, CC0 - VMOVDQU AA0, (0*32)(oup) - VMOVDQU CC0, (1*32)(oup) - - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0 - VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, (4*32)(oup); VMOVDQU DD0, (5*32)(oup) - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0 - VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup) - - MOVQ $320, itr1 - SUBQ $320, inl - LEAQ 320(inp), inp - - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0 - CMPQ inl, $128 + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y12, Y12 + VMOVDQU Y0, (DI) + VMOVDQU Y12, 32(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 64(SI), Y0, Y0 + VPXOR 96(SI), Y14, Y14 + VPXOR 128(SI), Y12, Y12 + VPXOR 160(SI), Y4, Y4 + VMOVDQU Y0, 64(DI) + VMOVDQU Y14, 96(DI) + VMOVDQU Y12, 128(DI) + VMOVDQU Y4, 160(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 192(SI), Y0, Y0 + VPXOR 224(SI), Y14, Y14 + VPXOR 256(SI), Y12, Y12 + VPXOR 288(SI), Y4, Y4 + VMOVDQU Y0, 192(DI) + VMOVDQU Y14, 224(DI) + VMOVDQU Y12, 256(DI) + VMOVDQU Y4, 288(DI) + MOVQ $0x00000140, CX + SUBQ $0x00000140, BX + LEAQ 320(SI), SI + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, Y15, Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, Y15, Y3, Y4 + CMPQ BX, $0x80 JBE sealAVX2SealHash - - VPXOR (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0 - VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup) - SUBQ $128, inl - LEAQ 128(inp), inp - - MOVQ $8, itr1 - MOVQ $2, itr2 - - CMPQ inl, $128 - JBE sealAVX2Tail128 - CMPQ inl, $256 - JBE sealAVX2Tail256 - CMPQ inl, $384 - JBE sealAVX2Tail384 - CMPQ inl, $512 - JBE sealAVX2Tail512 + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y14, Y14 + VPXOR 64(SI), Y12, Y12 + VPXOR 96(SI), Y4, Y4 + VMOVDQU Y0, 320(DI) + VMOVDQU Y14, 352(DI) + VMOVDQU Y12, 384(DI) + VMOVDQU Y4, 416(DI) + SUBQ $0x80, BX + LEAQ 128(SI), SI + MOVQ $0x00000008, CX + MOVQ $0x00000002, R9 + CMPQ BX, $0x80 + JBE sealAVX2Tail128 + CMPQ BX, $0x00000100 + JBE sealAVX2Tail256 + CMPQ BX, $0x00000180 + JBE sealAVX2Tail384 + CMPQ BX, $0x00000200 + JBE sealAVX2Tail512 // We have 448 bytes to hash, but main loop hashes 512 bytes at a time - perform some rounds, before the main loop - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - - VMOVDQA CC3, tmpStoreAVX2 - 
chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 - VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 - VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 - VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 - - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 - VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 - VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - - SUBQ $16, oup // Adjust the pointer - MOVQ $9, itr1 - JMP sealAVX2InternalLoopStart + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + 
VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y3, Y3, Y3 + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR 
Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + SUBQ $0x10, DI + MOVQ $0x00000009, CX + JMP sealAVX2InternalLoopStart sealAVX2MainLoop: - // Load state, increment counter blocks, store the incremented counters - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - MOVQ $10, itr1 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + MOVQ $0x0000000a, CX sealAVX2InternalLoop: - polyAdd(0*8(oup)) - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage1_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulStage2_AVX2 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyMulStage3_AVX2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + 
MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 sealAVX2InternalLoopStart: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - polyAdd(2*8(oup)) - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage1_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage2_AVX2 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage3_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulReduceStage - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(4*8(oup)) - LEAQ (6*8)(oup), oup - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage1_AVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - polyMulStage2_AVX2 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage3_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - DECQ itr1 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 
+ VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 32(DI), R10 + ADCQ 40(DI), R11 + ADCQ $0x01, R12 + LEAQ 48(DI), DI + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + 
VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ CX JNE sealAVX2InternalLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) // We only hashed 480 of the 512 bytes available - hash the remaining 32 here - polyAdd(0*8(oup)) - polyMulAVX2 - LEAQ (4*8)(oup), oup - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 
64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) // and here - polyAdd(-2*8(oup)) - polyMulAVX2 - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 - VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) - LEAQ (32*16)(inp), inp - SUBQ $(32*16), inl - CMPQ inl, $512 + ADDQ -16(DI), R10 + ADCQ -8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + VPXOR 384(SI), Y0, Y0 + VPXOR 416(SI), Y14, Y14 + VPXOR 448(SI), Y12, Y12 + VPXOR 480(SI), Y4, Y4 + VMOVDQU Y0, 384(DI) + VMOVDQU Y14, 416(DI) + VMOVDQU Y12, 448(DI) + VMOVDQU Y4, 480(DI) + LEAQ 512(SI), SI + SUBQ $0x00000200, BX + CMPQ BX, $0x00000200 JG sealAVX2MainLoop // Tail can only hash 480 bytes - polyAdd(0*8(oup)) - polyMulAVX2 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ 32(oup), oup - - MOVQ $10, itr1 - MOVQ $0, itr2 - CMPQ inl, $128 - JBE sealAVX2Tail128 - CMPQ inl, $256 - JBE sealAVX2Tail256 - CMPQ inl, $384 - JBE sealAVX2Tail384 - JMP sealAVX2Tail512 - -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 193 bytes + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(DI), R10 + ADCQ 
24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + MOVQ $0x0000000a, CX + MOVQ $0x00000000, R9 + CMPQ BX, $0x80 + JBE sealAVX2Tail128 + CMPQ BX, $0x00000100 + JBE sealAVX2Tail256 + CMPQ BX, $0x00000180 + JBE sealAVX2Tail384 + JMP sealAVX2Tail512 + seal192AVX2: - // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks - VMOVDQA AA0, AA1 - VMOVDQA BB0, BB1 - VMOVDQA CC0, CC1 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2 - VMOVDQA BB0, BB2 - VMOVDQA CC0, CC2 - VMOVDQA DD0, DD2 - VMOVDQA DD1, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VMOVDQA Y4, Y2 + VMOVDQA Y1, Y15 + MOVQ $0x0000000a, R9 sealAVX2192InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR 
$0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ R9 JNE sealAVX2192InnerCipherLoop - VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 - VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 - VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 - VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 + VPADDD Y6, Y0, Y0 + VPADDD Y6, Y5, Y5 + VPADDD Y10, Y14, Y14 + VPADDD Y10, Y9, Y9 + VPADDD Y8, Y12, Y12 + VPADDD Y8, Y13, Y13 + VPADDD Y2, Y4, Y4 + VPADDD Y15, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 192 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 sealAVX2ShortSeal: // Hash aad - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 + XORQ CX, CX sealAVX2SealHash: // itr1 holds the number of bytes encrypted but not yet hashed - CMPQ itr1, $16 - JB sealAVX2ShortSealLoop - polyAdd(0(oup)) - polyMul - SUBQ $16, itr1 - ADDQ $16, oup - JMP sealAVX2SealHash + CMPQ CX, $0x10 + JB sealAVX2ShortSealLoop + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX + ADDQ $0x10, DI + JMP sealAVX2SealHash sealAVX2ShortSealLoop: - CMPQ inl, $32 + CMPQ BX, $0x20 JB sealAVX2ShortTail32 - SUBQ $32, inl + SUBQ $0x20, BX // Load for encryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI // Now can hash - polyAdd(0*8(oup)) - polyMulAVX2 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ (1*32)(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + 
MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI // Shift stream left - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 - VMOVDQA AA1, DD0 - VMOVDQA BB1, AA1 - VMOVDQA CC1, BB1 - VMOVDQA DD1, CC1 - VMOVDQA AA2, DD1 - VMOVDQA BB2, AA2 + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 + VMOVDQA Y5, Y4 + VMOVDQA Y9, Y5 + VMOVDQA Y13, Y9 + VMOVDQA Y1, Y13 + VMOVDQA Y6, Y1 + VMOVDQA Y10, Y6 JMP sealAVX2ShortSealLoop sealAVX2ShortTail32: - CMPQ inl, $16 - VMOVDQA A0, A1 + CMPQ BX, $0x10 + VMOVDQA X0, X1 JB sealAVX2ShortDone - - SUBQ $16, inl + SUBQ $0x10, BX // Load for encryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp + VPXOR (SI), X0, X12 + VMOVDQU X12, (DI) + LEAQ 16(SI), SI // Hash - polyAdd(0*8(oup)) - polyMulAVX2 - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, X1 sealAVX2ShortDone: VZEROUPPER JMP sealSSETail -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 321 bytes seal320AVX2: - // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks - VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y14, Y7 + VMOVDQA Y12, Y11 + VMOVDQA Y4, Y15 + MOVQ $0x0000000a, R9 sealAVX2320InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 
+ VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ R9 JNE sealAVX2320InnerCipherLoop - - VMOVDQA ·chacha20Constants<>(SB), TT0 - VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 - VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 - VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 - VMOVDQA ·avx2IncMask<>(SB), TT0 - VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD2, DD2 + VMOVDQA ·chacha20Constants<>+0(SB), Y3 + VPADDD Y3, Y0, Y0 + VPADDD Y3, Y5, Y5 + VPADDD Y3, Y6, Y6 + VPADDD Y7, Y14, Y14 + VPADDD Y7, Y9, Y9 + VPADDD Y7, Y10, Y10 + VPADDD Y11, Y12, Y12 + VPADDD Y11, Y13, Y13 + VPADDD Y11, Y8, Y8 + VMOVDQA ·avx2IncMask<>+0(SB), Y3 + VPADDD Y15, Y4, Y4 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y1, Y1 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y2, Y2 // Clamp and store poly key - VPERM2I128 $0x02, AA0, BB0, TT0 - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 320 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - 
VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 - VPERM2I128 $0x02, AA2, BB2, CC1 - VPERM2I128 $0x02, CC2, DD2, DD1 - VPERM2I128 $0x13, AA2, BB2, AA2 - VPERM2I128 $0x13, CC2, DD2, BB2 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 + VPERM2I128 $0x02, Y6, Y10, Y13 + VPERM2I128 $0x02, Y8, Y2, Y1 + VPERM2I128 $0x13, Y6, Y10, Y6 + VPERM2I128 $0x13, Y8, Y2, Y10 JMP sealAVX2ShortSeal -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext sealAVX2Tail128: - // Need to decrypt up to 128 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0 - VMOVDQA state1StoreAVX2, BB0 - VMOVDQA state2StoreAVX2, CC0 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VMOVDQA DD0, DD1 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA 32(BP), Y14 + VMOVDQA 64(BP), Y12 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VMOVDQA Y4, Y1 sealAVX2Tail128LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail128LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0 - VPALIGNR $8, CC0, CC0, CC0 - VPALIGNR $12, DD0, DD0, DD0 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0 - VPALIGNR $8, CC0, CC0, CC0 - VPALIGNR $4, DD0, DD0, DD0 - DECQ itr1 - JG sealAVX2Tail128LoopA - DECQ itr2 - JGE sealAVX2Tail128LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA1 - VPADDD state1StoreAVX2, BB0, BB1 - VPADDD state2StoreAVX2, CC0, CC1 - VPADDD DD1, DD0, DD1 - - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + 
ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + DECQ CX + JG sealAVX2Tail128LoopA + DECQ R9 + JGE sealAVX2Tail128LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y5 + VPADDD 32(BP), Y14, Y9 + VPADDD 64(BP), Y12, Y13 + VPADDD Y1, Y4, Y1 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 JMP sealAVX2ShortSealLoop -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext sealAVX2Tail256: - // Need to decrypt up to 256 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA DD0, TT1 - VMOVDQA DD1, TT2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA ·chacha20Constants<>+0(SB), Y5 + VMOVDQA 32(BP), Y14 + VMOVDQA 32(BP), Y9 + VMOVDQA 64(BP), Y12 + VMOVDQA 64(BP), Y13 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, Y7 + VMOVDQA Y1, Y11 sealAVX2Tail256LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ 
R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail256LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr1 - JG sealAVX2Tail256LoopA - DECQ itr2 - JGE sealAVX2Tail256LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 - VPERM2I128 $0x02, CC0, DD0, TT1 - VPERM2I128 $0x13, AA0, BB0, TT2 - VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - MOVQ $128, itr1 - LEAQ 128(inp), inp - SUBQ $128, inl - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 - - JMP sealAVX2SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 384 bytes of ciphertext + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + 
VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ CX + JG sealAVX2Tail256LoopA + DECQ R9 + JGE sealAVX2Tail256LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + MOVQ $0x00000080, CX + LEAQ 128(SI), SI + SUBQ $0x80, BX + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + JMP sealAVX2SealHash + sealAVX2Tail384: - // Need to decrypt up to 384 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y4, Y7 + VMOVDQA Y1, Y11 + VMOVDQA Y2, Y15 sealAVX2Tail384LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 
+ ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail384LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr1 - JG sealAVX2Tail384LoopA - DECQ itr2 - JGE sealAVX2Tail384LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2 - VPERM2I128 $0x02, AA0, BB0, TT0 - VPERM2I128 $0x02, CC0, DD0, TT1 - VPERM2I128 $0x13, AA0, BB0, TT2 - VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, TT0 - VPERM2I128 $0x02, CC1, DD1, TT1 - VPERM2I128 $0x13, AA1, BB1, TT2 - VPERM2I128 $0x13, CC1, DD1, TT3 - VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 - VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) - MOVQ $256, itr1 - LEAQ 256(inp), inp - SUBQ $256, inl - VPERM2I128 $0x02, AA2, BB2, AA0 - VPERM2I128 $0x02, CC2, DD2, BB0 - VPERM2I128 $0x13, AA2, BB2, CC0 - VPERM2I128 $0x13, CC2, DD2, DD0 - - JMP sealAVX2SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 512 bytes of ciphertext + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + 
VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ CX + JG sealAVX2Tail384LoopA + DECQ R9 + JGE sealAVX2Tail384LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD 
32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPADDD Y15, Y2, Y2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y3 + VPERM2I128 $0x02, Y13, Y1, Y7 + VPERM2I128 $0x13, Y5, Y9, Y11 + VPERM2I128 $0x13, Y13, Y1, Y15 + VPXOR 128(SI), Y3, Y3 + VPXOR 160(SI), Y7, Y7 + VPXOR 192(SI), Y11, Y11 + VPXOR 224(SI), Y15, Y15 + VMOVDQU Y3, 128(DI) + VMOVDQU Y7, 160(DI) + VMOVDQU Y11, 192(DI) + VMOVDQU Y15, 224(DI) + MOVQ $0x00000100, CX + LEAQ 256(SI), SI + SUBQ $0x00000100, BX + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + JMP sealAVX2SealHash + sealAVX2Tail512: - // Need to decrypt up to 512 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) sealAVX2Tail512LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail512LoopB: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR 
CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyAdd(0*8(oup)) - polyMulAVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ (4*8)(oup), oup - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - - DECQ itr1 - JG 
sealAVX2Tail512LoopA - DECQ itr2 - JGE sealAVX2Tail512LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3 - VPXOR (0*32)(inp), CC3, CC3 - VMOVDQU CC3, (0*32)(oup) - VPERM2I128 $0x02, CC0, DD0, CC3 - VPXOR (1*32)(inp), CC3, CC3 - VMOVDQU CC3, (1*32)(oup) - VPERM2I128 $0x13, AA0, BB0, CC3 - VPXOR (2*32)(inp), CC3, CC3 - VMOVDQU CC3, (2*32)(oup) - VPERM2I128 $0x13, CC0, DD0, CC3 - VPXOR (3*32)(inp), CC3, CC3 - VMOVDQU CC3, (3*32)(oup) - - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) - - VPERM2I128 $0x02, AA2, BB2, AA0 - VPERM2I128 $0x02, CC2, DD2, BB0 - VPERM2I128 $0x13, AA2, BB2, CC0 - VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - - MOVQ $384, itr1 - LEAQ 384(inp), inp - SUBQ $384, inl - VPERM2I128 $0x02, AA3, BB3, AA0 - VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0 - VPERM2I128 $0x13, AA3, BB3, CC0 - VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - - JMP sealAVX2SealHash + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + 
VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ CX + JG sealAVX2Tail512LoopA + DECQ R9 + JGE 
sealAVX2Tail512LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) + VPERM2I128 $0x02, Y0, Y14, Y15 + VPXOR (SI), Y15, Y15 + VMOVDQU Y15, (DI) + VPERM2I128 $0x02, Y12, Y4, Y15 + VPXOR 32(SI), Y15, Y15 + VMOVDQU Y15, 32(DI) + VPERM2I128 $0x13, Y0, Y14, Y15 + VPXOR 64(SI), Y15, Y15 + VMOVDQU Y15, 64(DI) + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y15, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + MOVQ $0x00000180, CX + LEAQ 384(SI), SI + SUBQ $0x00000180, BX + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + JMP sealAVX2SealHash diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s index e0d3c647..13375738 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s @@ -1,108 +1,93 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run sum_amd64_asm.go -out ../sum_amd64.s -pkg poly1305. DO NOT EDIT. 
//go:build gc && !purego -#include "textflag.h" - -#define POLY1305_ADD(msg, h0, h1, h2) \ - ADDQ 0(msg), h0; \ - ADCQ 8(msg), h1; \ - ADCQ $1, h2; \ - LEAQ 16(msg), msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ - MOVQ r0, AX; \ - MULQ h0; \ - MOVQ AX, t0; \ - MOVQ DX, t1; \ - MOVQ r0, AX; \ - MULQ h1; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ r0, t2; \ - IMULQ h2, t2; \ - ADDQ DX, t2; \ - \ - MOVQ r1, AX; \ - MULQ h0; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ DX, h0; \ - MOVQ r1, t3; \ - IMULQ h2, t3; \ - MOVQ r1, AX; \ - MULQ h1; \ - ADDQ AX, t2; \ - ADCQ DX, t3; \ - ADDQ h0, t2; \ - ADCQ $0, t3; \ - \ - MOVQ t0, h0; \ - MOVQ t1, h1; \ - MOVQ t2, h2; \ - ANDQ $3, h2; \ - MOVQ t2, t0; \ - ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ - ADDQ t0, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2; \ - SHRQ $2, t3, t2; \ - SHRQ $2, t3; \ - ADDQ t2, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2 - -// func update(state *[7]uint64, msg []byte) +// func update(state *macState, msg []byte) TEXT ·update(SB), $0-32 MOVQ state+0(FP), DI MOVQ msg_base+8(FP), SI MOVQ msg_len+16(FP), R15 - - MOVQ 0(DI), R8 // h0 - MOVQ 8(DI), R9 // h1 - MOVQ 16(DI), R10 // h2 - MOVQ 24(DI), R11 // r0 - MOVQ 32(DI), R12 // r1 - - CMPQ R15, $16 + MOVQ (DI), R8 + MOVQ 8(DI), R9 + MOVQ 16(DI), R10 + MOVQ 24(DI), R11 + MOVQ 32(DI), R12 + CMPQ R15, $0x10 JB bytes_between_0_and_15 loop: - POLY1305_ADD(SI, R8, R9, R10) + ADDQ (SI), R8 + ADCQ 8(SI), R9 + ADCQ $0x01, R10 + LEAQ 16(SI), SI multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) - SUBQ $16, R15 - CMPQ R15, $16 - JAE loop + MOVQ R11, AX + MULQ R8 + MOVQ AX, BX + MOVQ DX, CX + MOVQ R11, AX + MULQ R9 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ R11, R13 + IMULQ R10, R13 + ADDQ DX, R13 + MOVQ R12, AX + MULQ R8 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ DX, R8 + MOVQ R12, R14 + IMULQ R10, R14 + MOVQ R12, AX + MULQ R9 + ADDQ AX, R13 + ADCQ DX, R14 + ADDQ R8, R13 + ADCQ $0x00, R14 + MOVQ BX, R8 + MOVQ CX, R9 + MOVQ R13, R10 + ANDQ $0x03, R10 + MOVQ R13, BX + ANDQ $-4, BX + ADDQ BX, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SHRQ $0x02, R14, R13 + SHRQ $0x02, R14 + ADDQ R13, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SUBQ $0x10, R15 + CMPQ R15, $0x10 + JAE loop bytes_between_0_and_15: TESTQ R15, R15 JZ done - MOVQ $1, BX + MOVQ $0x00000001, BX XORQ CX, CX XORQ R13, R13 ADDQ R15, SI flush_buffer: - SHLQ $8, BX, CX - SHLQ $8, BX + SHLQ $0x08, BX, CX + SHLQ $0x08, BX MOVB -1(SI), R13 XORQ R13, BX DECQ SI DECQ R15 JNZ flush_buffer - ADDQ BX, R8 ADCQ CX, R9 - ADCQ $0, R10 - MOVQ $16, R15 + ADCQ $0x00, R10 + MOVQ $0x00000010, R15 JMP multiply done: - MOVQ R8, 0(DI) + MOVQ R8, (DI) MOVQ R9, 8(DI) MOVQ R10, 16(DI) RET diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go new file mode 100644 index 00000000..de58dfb8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config.go @@ -0,0 +1,122 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "math" + "net/http" + "time" +) + +// http2Config is a package-internal version of net/http.HTTP2Config. +// +// http.HTTP2Config was added in Go 1.24. +// When running with a version of net/http that includes HTTP2Config, +// we merge the configuration with the fields in Transport or Server +// to produce an http2Config. +// +// Zero valued fields in http2Config are interpreted as in the +// net/http.HTTPConfig documentation. 
+// +// Precedence order for reconciling configurations is: +// +// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero. +// - Otherwise use the http2.{Server,Transport} value. +// - If the resulting value is zero or out of range, use a default. +type http2Config struct { + MaxConcurrentStreams uint32 + MaxDecoderHeaderTableSize uint32 + MaxEncoderHeaderTableSize uint32 + MaxReadFrameSize uint32 + MaxUploadBufferPerConnection int32 + MaxUploadBufferPerStream int32 + SendPingTimeout time.Duration + PingTimeout time.Duration + WriteByteTimeout time.Duration + PermitProhibitedCipherSuites bool + CountError func(errType string) +} + +// configFromServer merges configuration settings from +// net/http.Server.HTTP2Config and http2.Server. +func configFromServer(h1 *http.Server, h2 *Server) http2Config { + conf := http2Config{ + MaxConcurrentStreams: h2.MaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection, + MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, + CountError: h2.CountError, + } + fillNetHTTPServerConfig(&conf, h1) + setConfigDefaults(&conf, true) + return conf +} + +// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2 +// (the net/http Transport). +func configFromTransport(h2 *Transport) http2Config { + conf := http2Config{ + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + } + + // Unlike most config fields, where out-of-range values revert to the default, + // Transport.MaxReadFrameSize clips. + if conf.MaxReadFrameSize < minMaxFrameSize { + conf.MaxReadFrameSize = minMaxFrameSize + } else if conf.MaxReadFrameSize > maxFrameSize { + conf.MaxReadFrameSize = maxFrameSize + } + + if h2.t1 != nil { + fillNetHTTPTransportConfig(&conf, h2.t1) + } + setConfigDefaults(&conf, false) + return conf +} + +func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) { + if *v < minval || *v > maxval { + *v = defval + } +} + +func setConfigDefaults(conf *http2Config, server bool) { + setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams) + setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + if server { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow) + } + if server { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow) + } + setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize) + setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second) +} + +// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header +// to an HTTP/2 MAX_HEADER_LIST_SIZE value.
+func adjustHTTP1MaxHeaderSize(n int64) int64 { + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return n + typicalHeaders*perFieldOverhead +} diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go new file mode 100644 index 00000000..e3784123 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.24 + +package http2 + +import "net/http" + +// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { + fillNetHTTPConfig(conf, srv.HTTP2) +} + +// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { + fillNetHTTPConfig(conf, tr.HTTP2) +} + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go new file mode 100644 index 00000000..060fd6c6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_pre_go124.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.24 + +package http2 + +import "net/http" + +// Pre-Go 1.24 fallback. +// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
+ +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} + +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 003e649f..7688c356 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -19,8 +19,9 @@ import ( "bufio" "context" "crypto/tls" + "errors" "fmt" - "io" + "net" "net/http" "os" "sort" @@ -237,13 +238,19 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. type bufferedWriter struct { - _ incomparable - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered + _ incomparable + group synctestGroupInterface // immutable + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} +func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { + return &bufferedWriter{ + group: group, + conn: conn, + byteTimeout: timeout, + } } // bufWriterPoolBufferSize is the size of bufio.Writer's @@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int { func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) + bw.Reset((*bufferedWriterTimeoutWriter)(w)) w.bw = bw } return w.bw.Write(p) @@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error { return err } +type bufferedWriterTimeoutWriter bufferedWriter + +func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { + return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) +} + +// writeWithByteTimeout writes to conn. +// If more than timeout passes without any bytes being written to the connection, +// the write fails. +func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { + if timeout <= 0 { + return conn.Write(p) + } + for { + var now time.Time + if group == nil { + now = time.Now() + } else { + now = group.Now() + } + conn.SetWriteDeadline(now.Add(timeout)) + nn, err := conn.Write(p[n:]) + n += nn + if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { + // Either we finished the write, made no progress, or hit the deadline. + // Whichever it is, we're done now. + conn.SetWriteDeadline(time.Time{}) + return n, err + } + } +} + func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 6c349f3e..617b4a47 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -29,6 +29,7 @@ import ( "bufio" "bytes" "context" + "crypto/rand" "crypto/tls" "errors" "fmt" @@ -52,10 +53,14 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? 
+ + // maxQueuedControlFrames is the maximum number of control frames like + // SETTINGS, PING and RST_STREAM that will be queued for writing before + // the connection is closed to prevent memory exhaustion attacks. maxQueuedControlFrames = 10000 ) @@ -127,6 +132,22 @@ type Server struct { // If zero or negative, there is no timeout. IdleTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // If zero, no health check is performed. + ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + // If zero or negative, there is no timeout. + WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer { return timeTimer{time.AfterFunc(d, f)} } -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. 
- return maxQueuedControlFrames -} - type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -440,13 +410,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -456,9 +428,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -491,15 +466,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -532,7 +507,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." 
@@ -569,7 +544,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -609,6 +584,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -628,6 +604,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -638,9 +615,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode shutdownTimer timer // nil until used idleTimer timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -655,11 +637,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -923,7 +901,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -937,18 +915,18 @@ func (sc *serverConn) serve() { sc.writeFrame(FrameWriteRequest{ write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxFrameSize, conf.MaxReadFrameSize}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, }, }) sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. 
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -968,11 +946,18 @@ func (sc *serverConn) serve() { defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := sc.srv.now() loopNum := 0 for { loopNum++ @@ -986,6 +971,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = sc.srv.now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1017,6 +1003,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -1039,7 +1027,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. - if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1055,12 +1043,39 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.vlogf("timeout waiting for PING response") + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := sc.srv.now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh. 
var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1068,6 +1083,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1320,6 +1336,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1594,6 +1614,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil @@ -2160,7 +2185,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -3301,7 +3326,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 61f511f9..0c5f64aa 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" "sort" "strconv" "strings" @@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co } func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } + } + if n <= 0 { return 10 << 20 } - if t.MaxHeaderListSize == 0xffffffff { + if n >= 0xffffffff { return 0 } - return t.MaxHeaderListSize -} - -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer - } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize - } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize - } - return t.MaxReadFrameSize + return uint32(n) } func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second - } - return t.PingTimeout - -} - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. 
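Editor's note: with the maxHeaderListSize change above, a transport wired up through ConfigureTransport now derives its advertised SETTINGS_MAX_HEADER_LIST_SIZE from the HTTP/1 transport's MaxResponseHeaderBytes (padded by adjustHTTP1MaxHeaderSize) when that is set, so the limit no longer has to be duplicated in MaxHeaderListSize. A minimal sketch of that configuration, assuming the public ConfigureTransport entry point:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	t1 := &http.Transport{
		// HTTP/1 response-header cap; per the diff, the HTTP/2 side now
		// derives SETTINGS_MAX_HEADER_LIST_SIZE from this value as well.
		MaxResponseHeaderBytes: 1 << 20,
	}
	if err := http2.ConfigureTransport(t1); err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: t1}
	_ = client // use client.Get(...) etc. as usual
}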
// @@ -370,11 +355,14 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() { } type stickyErrWriter struct { + group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -508,22 +497,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. - continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -758,44 +734,36 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), - } + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + } + var group synctestGroupInterface if t.transportTestHooks != nil { t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn + group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -807,24 +775,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ + group: group, conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize if cs, ok := c.(connectionStater); ok { @@ -834,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -848,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -867,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. 
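Editor's note: the ClientConn now copies readIdleTimeout and pingTimeout out of the shared http2Config (SendPingTimeout/PingTimeout), and healthCheck and the read loop around this point consume the per-connection copies instead of re-reading Transport fields. On the public API these knobs are the long-standing Transport.ReadIdleTimeout, Transport.PingTimeout, and Transport.WriteByteTimeout; a minimal client-side sketch:

package main

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t2 := &http2.Transport{
		// Send a keepalive PING if no frame is read for 30s...
		ReadIdleTimeout: 30 * time.Second,
		// ...and drop the connection if the PING is not answered within 15s.
		PingTimeout: 15 * time.Second,
		// Per-chunk write deadline enforced by the sticky error writer above.
		WriteByteTimeout: 5 * time.Second,
	}
	client := &http.Client{Transport: t2}
	_ = client
}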
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) @@ -2199,7 +2164,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2345,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout + readIdleTimeout := cc.readIdleTimeout var t timer if readIdleTimeout != 0 { t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 33f61398..6ff6bee7 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb3321..109997d7 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. raw interface{} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index ec07aab0..02609d5b 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -201,6 +201,25 @@ var S390X struct { _ CacheLinePad } +// RISCV64 contains the supported CPU features and performance characteristics for riscv64 +// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate +// the presence of RISC-V extensions. +// +// It is safe to assume that all the RV64G extensions are supported and so they are omitted from +// this structure. As riscv64 Go programs require at least RV64G, the code that populates +// this structure cannot run successfully if some of the RV64G extensions are missing. +// The struct is padded to avoid false sharing. 
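Editor's note: the new Token.ExpiresIn field above only carries the wire-format "expires_in" seconds; as its comment says, populating Expiry from it is left to the application. A small sketch of that conversion (setExpiry is a name introduced here, not part of the package):

package main

import (
	"time"

	"golang.org/x/oauth2"
)

// setExpiry derives Expiry from the wire-format ExpiresIn seconds, relative
// to a caller-supplied "now", since the library does not do this itself.
func setExpiry(tok *oauth2.Token, now time.Time) {
	if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
		tok.Expiry = now.Add(time.Duration(tok.ExpiresIn) * time.Second)
	}
}

func main() {
	tok := &oauth2.Token{AccessToken: "example", ExpiresIn: 3600}
	setExpiry(tok, time.Now())
	_ = tok
}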
+var RISCV64 struct { + _ CacheLinePad + HasFastMisaligned bool // Fast misaligned accesses + HasC bool // Compressed instruction-set extension + HasV bool // Vector extension compatible with RVV 1.0 + HasZba bool // Address generation instructions extension + HasZbb bool // Basic bit-manipulation extension + HasZbs bool // Single-bit instructions extension + _ CacheLinePad +} + func init() { archInit() initOptions() diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index cd63e733..7d902b68 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go new file mode 100644 index 00000000..cb4a0c57 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go @@ -0,0 +1,137 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe +// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available. +// +// A note on detection of the Vector extension using HWCAP. +// +// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5. +// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe +// syscall is not available then neither is the Vector extension (which needs kernel support). +// The riscv_hwprobe syscall should then be all we need to detect the Vector extension. +// However, some RISC-V board manufacturers ship boards with an older kernel on top of which +// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe +// patches. These kernels advertise support for the Vector extension using HWCAP. Falling +// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not +// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option. +// +// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by +// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board +// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified +// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use +// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector +// extension are binary incompatible. HWCAP can then not be used in isolation to populate the +// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0. +// +// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector +// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype +// register. 
This check would allow us to safely detect version 1.0 of the Vector extension +// with HWCAP, if riscv_hwprobe were not available. However, the check cannot +// be added until the assembler supports the Vector instructions. +// +// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the +// extensions it advertises support for are explicitly versioned. It's also worth noting that +// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba. +// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority +// of RISC-V extensions. +// +// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information. + +// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must +// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall +// here. + +const ( + // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. + riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4 + riscv_HWPROBE_IMA_C = 0x2 + riscv_HWPROBE_IMA_V = 0x4 + riscv_HWPROBE_EXT_ZBA = 0x8 + riscv_HWPROBE_EXT_ZBB = 0x10 + riscv_HWPROBE_EXT_ZBS = 0x20 + riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 + riscv_HWPROBE_MISALIGNED_FAST = 0x3 + riscv_HWPROBE_MISALIGNED_MASK = 0x7 +) + +const ( + // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go. + sys_RISCV_HWPROBE = 258 +) + +// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. +type riscvHWProbePairs struct { + key int64 + value uint64 +} + +const ( + // CPU features + hwcap_RISCV_ISA_C = 1 << ('C' - 'A') +) + +func doinit() { + // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key + // field should be initialised with one of the key constants defined above, e.g., + // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value. + // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0. + + pairs := []riscvHWProbePairs{ + {riscv_HWPROBE_KEY_IMA_EXT_0, 0}, + {riscv_HWPROBE_KEY_CPUPERF_0, 0}, + } + + // This call only indicates that extensions are supported if they are implemented on all cores. + if riscvHWProbe(pairs, 0) { + if pairs[0].key != -1 { + v := uint(pairs[0].value) + RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C) + RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V) + RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) + RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) + RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) + } + if pairs[1].key != -1 { + v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK + RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST + } + } + + // Let's double check with HWCAP if the C extension does not appear to be supported. + // This may happen if we're running on a kernel older than 6.4. + + if !RISCV64.HasC { + RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C) + } +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +// riscvHWProbe is a simplified version of the generated wrapper function found in +// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the +// cpuCount and cpus parameters which we do not need. We always want to pass 0 for +// these parameters here so the kernel only reports the extensions that are present +// on all cores. 
+func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { + var _zero uintptr + var p0 unsafe.Pointer + if len(pairs) > 0 { + p0 = unsafe.Pointer(&pairs[0]) + } else { + p0 = unsafe.Pointer(&_zero) + } + + _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0) + return e1 == 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index 7f0c79c0..aca3199c 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -8,4 +8,13 @@ package cpu const cacheLineSize = 64 -func initOptions() {} +func initOptions() { + options = []option{ + {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned}, + {Name: "c", Feature: &RISCV64.HasC}, + {Name: "v", Feature: &RISCV64.HasV}, + {Name: "zba", Feature: &RISCV64.HasZba}, + {Name: "zbb", Feature: &RISCV64.HasZbb}, + {Name: "zbs", Feature: &RISCV64.HasZbs}, + } +} diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 7d3c060e..6e08a76a 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these into a common file for each OS. The merge is performed in the following steps: -1. Construct the set of common code that is idential in all architecture-specific files. +1. Construct the set of common code that is identical in all architecture-specific files. 2. Write this common code to the merged file. 3. Remove the common code from all architecture-specific files. diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index d07dd09e..ac54ecab 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -552,6 +552,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -655,7 +656,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort ) @@ -665,7 +666,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef..6f15ba1e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return with ERESTART errno, while the process is still // active. 
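Editor's note: the new cpu_linux_riscv64.go above fills in the RISCV64 feature flags via the riscv_hwprobe syscall (falling back to HWCAP only for the C extension), and initOptions exposes them under the package's usual feature-option names. Callers simply branch on the exported booleans; a minimal sketch:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// Populated by doinit() on linux/riscv64; the fields read as false on
	// other platforms, so this compiles and runs everywhere.
	fmt.Println("V (RVV 1.0):", cpu.RISCV64.HasV)
	fmt.Println("Zba/Zbb/Zbs:", cpu.RISCV64.HasZba, cpu.RISCV64.HasZbb, cpu.RISCV64.HasZbs)
	if cpu.RISCV64.HasFastMisaligned {
		fmt.Println("fast misaligned accesses supported")
	}
}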
for err == ERESTART { r, err = wait4(Pid_t(pid), &status, options, rusage) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 2d15200a..099867de 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -566,6 +566,43 @@ func PthreadFchdir(fd int) (err error) { return pthread_fchdir_np(fd) } +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. +// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. +func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) { + endpoints := SaEndpoints{ + Srcif: srcIf, + } + + if srcAddr != nil { + addrp, addrlen, err := srcAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Srcaddr = (*RawSockaddr)(addrp) + endpoints.Srcaddrlen = uint32(addrlen) + } + + if dstAddr != nil { + addrp, addrlen, err := dstAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Dstaddr = (*RawSockaddr)(addrp) + endpoints.Dstaddrlen = uint32(addrlen) + } + + err = connectx(fd, &endpoints, associd, flags, iov, &n, connid) + return +} + +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index ba46651f..a6a2d2fc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -11,6 +11,7 @@ package unix int ioctl(int, unsigned long int, uintptr_t); */ import "C" +import "unsafe" func ioctl(fd int, req uint, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 3f1d3d4c..f08abd43 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { return &value, err } +// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPVegasInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctp" +// algorithm. 
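Editor's note: Connectx above is a thin wrapper that packs the optional source and required destination addresses into a SaEndpoints struct before calling connectx(2). A darwin-only sketch using it for a plain TCP connect; the destination address is a placeholder and SAE_ASSOCID_ANY comes from the constants added further below in this diff:

//go:build darwin

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// Placeholder destination; no source interface/address, no data enqueued.
	dst := &unix.SockaddrInet4{Port: 80, Addr: [4]byte{192, 0, 2, 1}}
	n, err := unix.Connectx(fd, 0, nil, dst, unix.SAE_ASSOCID_ANY, 0, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("connectx enqueued %d bytes", n)
}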
+// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPBBRInfo)(unsafe.Pointer(&value[0])) + return out, err +} + // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. func GetsockoptString(fd, level, opt int) (string, error) { @@ -1959,7 +2001,26 @@ func Getpgrp() (pid int) { //sysnb Getpid() (pid int) //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) -//sys Getrandom(buf []byte, flags int) (n int, err error) + +func Getrandom(buf []byte, flags int) (n int, err error) { + vdsoRet, supported := vgetrandom(buf, uint32(flags)) + if supported { + if vdsoRet < 0 { + return 0, errnoErr(syscall.Errno(-vdsoRet)) + } + return vdsoRet, nil + } + var p *byte + if len(buf) > 0 { + p = &buf[0] + } + r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags)) + if e != 0 { + return 0, errnoErr(e) + } + return int(r), nil +} + //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettid() (tid int) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cf2ee6c7..745e5c7e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 3d0e9845..dd2262a4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6f5a2889..8cf3670b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error } return 
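Editor's note: the three getters above all read the same fixed-size TCP_CC_INFO payload and reinterpret it for one algorithm, so the intended pattern (per their doc comments) is to query TCP_CONGESTION first and then call the getter matching the algorithm in use. A Linux-only sketch; using unix.TCP_CC_INFO as the option constant is an assumption here, the diff itself does not name it:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

// inspectCC reports algorithm-specific congestion-control state for a
// connected TCP socket, following the pattern in the doc comments above.
func inspectCC(fd int) error {
	algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
	if err != nil {
		return err
	}
	switch algo {
	case "bbr":
		info, err := unix.GetsockoptTCPCCBBRInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO)
		if err != nil {
			return err
		}
		fmt.Printf("bbr: min_rtt=%dus pacing_gain=%d\n", info.Min_rtt, info.Pacing_gain)
	case "dctcp":
		info, err := unix.GetsockoptTCPCCDCTCPInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO)
		if err != nil {
			return err
		}
		fmt.Printf("dctcp: alpha=%d\n", info.Alpha)
	case "vegas":
		info, err := unix.GetsockoptTCPCCVegasInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO)
		if err != nil {
			return err
		}
		fmt.Printf("vegas: rtt=%d minrtt=%d\n", info.Rtt, info.Minrtt)
	default:
		fmt.Println("congestion control:", algo)
	}
	return nil
}

func main() {
	// fd must be a connected TCP socket; 0 is only a placeholder here.
	if err := inspectCC(0); err != nil {
		log.Println(err)
	}
}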
riscvHWProbe(pairs, setSize, set, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go new file mode 100644 index 00000000..07ac8e09 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && go1.24 + +package unix + +import _ "unsafe" + +//go:linkname vgetrandom runtime.vgetrandom +//go:noescape +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go new file mode 100644 index 00000000..297e97bc --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux || !go1.24 + +package unix + +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) { + return -1, false +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 4308ac17..d73c4652 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index c8068a7a..4a55a400 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 01a70b24..de3b4624 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -495,6 +495,7 @@ const ( BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 + BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_XDP_DEV_BOUND_ONLY = 0x40 @@ -1922,6 +1923,7 @@ const ( MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 + MNT_ID_REQ_SIZE_VER1 = 0x20 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2187,7 +2189,7 @@ const ( NFT_REG_SIZE = 0x10 
NFT_REJECT_ICMPX_MAX = 0x3 NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 NFT_TABLE_F_MASK = 0x7 @@ -2356,9 +2358,11 @@ const ( PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L2_MHB = 0x5 PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_MSC = 0x6 PERF_MEM_LVLNUM_NA = 0xf PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd @@ -2431,6 +2435,7 @@ const ( PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROCFS_IOCTL_MAGIC = 'f' PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 @@ -2933,11 +2938,12 @@ const ( RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 + RWF_ATOMIC = 0x40 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x3f + RWF_SUPPORTED = 0x7f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3210,6 +3216,7 @@ const ( STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 + STATX_ATTR_WRITE_ATOMIC = 0x400000 STATX_BASIC_STATS = 0x7ff STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 @@ -3226,6 +3233,7 @@ const ( STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 + STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 @@ -3624,6 +3632,7 @@ const ( XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_METADATA_LEN = 0x4 XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 684a5168..8aa6d77c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -153,9 +153,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 61d74b59..da428f42 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -153,9 +153,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a28c9e3e..bf45bfec 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index ab5d1fe8..71c67162 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -154,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index c523090e..9476628f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -154,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 01e6ea78..b9e85f3c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 7aa610b1..a48b68a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 92af771b..ea00e852 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index b27ef5e6..91c64687 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 
0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 237a2cef..8cbf38d6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 4a5c555a..a2df7341 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index a02fb49a..24791379 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index e26a7c61..d265f146 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index c48f7c21..3f2d6443 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC 
= 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index ad4b9aac..5d8b727a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -155,9 +155,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab..1ec2b140 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index b622533e..24b346e1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index cfe6646b..ebd21310 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 13f624f6..824b9c2d 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index fe222b75..4f178a22 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1bc1a5ad..af30da55 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -971,23 +971,6 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index d3e38f68..f485dbf4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,6 +341,7 @@ const ( SYS_STATX = 332 SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_URETPROBE = 335 SYS_PIDFD_SEND_SIGNAL = 424 SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 diff --git 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 6c778c23..1893e2fe 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -85,7 +85,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 37281cf5..16a4017d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -84,6 +84,8 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 + SYS_NEWFSTATAT = 79 + SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 SYS_FDATASYNC = 83 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9889f6a5..a5459e76 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -84,7 +84,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f..d003c3d4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef7..0d45a941 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a..51e13eb0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee..d002d8ef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee..3f863d89 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND 
= 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12ac..61c72931 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c79..b5d17414 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 7f1961b9..3a69e454 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -87,31 +87,35 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - Dio_mem_align uint32 - Dio_offset_align uint32 - Subvol uint64 - _ [11]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + Subvol uint64 + Atomic_write_unit_min uint32 + Atomic_write_unit_max uint32 + Atomic_write_segments_max uint32 + _ [1]uint32 + _ [9]uint64 } type Fsid struct { @@ -516,6 +520,29 @@ type TCPInfo struct { Total_rto_time uint32 } +type TCPVegasInfo struct { + Enabled uint32 + Rttcnt uint32 + Rtt uint32 + Minrtt uint32 +} + +type TCPDCTCPInfo struct { + Enabled uint16 + Ce_state uint16 + Alpha uint32 + Ab_ecn uint32 + Ab_tot uint32 +} + +type TCPBBRInfo struct { + Bw_lo uint32 + Bw_hi uint32 + Min_rtt uint32 + Pacing_gain uint32 + Cwnd_gain uint32 +} + type CanFilter struct { Id uint32 Mask uint32 @@ -557,6 +584,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0xf8 + SizeofTCPCCInfo = 0x14 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -2486,7 +2514,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 @@ -3766,7 +3794,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2b + ETHTOOL_MSG_USER_MAX = 0x2c ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3806,7 +3834,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_MSG_KERNEL_MAX = 0x2c ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 
ETHTOOL_FLAG_STATS = 0x4 @@ -3951,7 +3979,7 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x1c + ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -4609,7 +4637,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14a + NL80211_ATTR_MAX = 0x14c NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5213,7 +5241,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x20 + NL80211_FREQUENCY_ATTR_MAX = 0x21 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc041..ad05b51a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -727,6 +727,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +765,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 115341fb..4e613cf6 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load operation failes. +// MustLoadDLL is like LoadDLL but panics if load operation fails. 
func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 1fa34fd1..5cee9a31 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -313,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 3f03b3d5..7b97a154 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1060,6 +1060,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. 
http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9bb979a3..4c2e1bdc 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -247,7 +247,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -347,8 +349,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -2162,6 +2166,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { @@ -2170,6 +2183,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { @@ -3038,6 +3060,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { return } +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func setConsoleCursorPosition(console Handle, position uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) if r1 == 0 { @@ -3054,6 +3084,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { return } +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCurrentDirectory(path *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { diff 
--git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f6c7f49..93a798ab 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } t, tokens := lim.advance(t) diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index f3426bd3..fd2e72c1 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.195.0" +const Version = "0.203.0" diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index d2a4f766..ff3539d8 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -247,6 +247,7 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna DefaultScopes: ds.DefaultScopes, SkipValidation: skipValidation, }, + UniverseDomain: ds.UniverseDomain, }) return pool, err } diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 2e2b15c6..d5b213e0 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -130,6 +130,7 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. DefaultScopes: ds.DefaultScopes, SkipValidation: skipValidation, }, + UniverseDomain: ds.UniverseDomain, }) if err != nil { return nil, err diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index fe19e8f9..aa69fb4d 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -719,6 +719,8 @@ type PythonSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Experimental features to be included during client library generation. + ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"` } func (x *PythonSettings) Reset() { @@ -760,6 +762,13 @@ func (x *PythonSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures { + if x != nil { + return x.ExperimentalFeatures + } + return nil +} + // Settings for Node client libraries. type NodeSettings struct { state protoimpl.MessageState @@ -1114,6 +1123,60 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +// Experimental features to be included during client library generation. 
+// These fields will be deprecated once the feature graduates and is enabled +// by default. +type PythonSettings_ExperimentalFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables generation of asynchronous REST clients if `rest` transport is + // enabled. By default, asynchronous REST clients will not be generated. + // This feature will be enabled by default 1 month after launching the + // feature in preview packages. + RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` +} + +func (x *PythonSettings_ExperimentalFeatures) Reset() { + *x = PythonSettings_ExperimentalFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings_ExperimentalFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} + +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead. +func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { + if x != nil { + return x.RestAsyncIoEnabled + } + return false +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. 
// All default values below are from those used in the client library @@ -1142,7 +1205,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1155,7 +1218,7 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1460,132 +1523,143 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, - 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, - 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01, + 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 
0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, + 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, + 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, + 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 
0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, - 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, - 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, - 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x45, 0x6e, 0x74, 
0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, - 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, - 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, - 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, - 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, - 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, - 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, - 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 
0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, + 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, + 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, + 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, + 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, + 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, - 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, - 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, - 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, - 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 
0x6f, 0x75, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, - 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, - 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, - 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, - 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, - 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, - 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, - 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, - 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, - 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, - 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, - 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, - 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69, - 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, + 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, + 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, + 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, + 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, + 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, + 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, + 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, + 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x48, 0x6f, 0x73, 
0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -1601,34 +1675,35 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_google_api_client_proto_goTypes = []interface{}{ - (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization - (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination - (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings - (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings - (*Publishing)(nil), // 4: google.api.Publishing - (*JavaSettings)(nil), // 5: google.api.JavaSettings - (*CppSettings)(nil), // 6: google.api.CppSettings - (*PhpSettings)(nil), // 7: google.api.PhpSettings - (*PythonSettings)(nil), // 8: google.api.PythonSettings - (*NodeSettings)(nil), // 9: google.api.NodeSettings - (*DotnetSettings)(nil), // 10: google.api.DotnetSettings - (*RubySettings)(nil), // 11: google.api.RubySettings - (*GoSettings)(nil), // 12: google.api.GoSettings - (*MethodSettings)(nil), // 13: google.api.MethodSettings - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - nil, // 15: google.api.DotnetSettings.RenamedServicesEntry - nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 18: google.api.LaunchStage - (*durationpb.Duration)(nil), // 19: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + 
(*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures + nil, // 16: google.api.DotnetSettings.RenamedServicesEntry + nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry + (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 19: google.api.LaunchStage + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings @@ -1645,25 +1720,26 @@ var file_google_api_client_proto_depIdxs = []int32{ 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 21, // 31: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 
32, // [32:32] is the sub-list for method output_type - 32, // [32:32] is the sub-list for method input_type - 32, // [32:32] is the sub-list for extension type_name - 28, // [28:32] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 18, // 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 29, // [29:33] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1816,7 +1892,19 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings_ExperimentalFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1835,7 +1923,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 16, + NumMessages: 17, NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 6a8a0778..5d4096d4 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,28 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [aranjans](https://github.com/aranjans), Google LLC +- [arjan-bal](https://github.com/arjan-bal), Google LLC +- [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. 
-- [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [menghanl](https://github.com/menghanl), Google LLC -- [srini100](https://github.com/srini100), Google LLC +- [erm-g](https://github.com/erm-g), Google LLC +- [gtcooke94](https://github.com/gtcooke94), Google LLC +- [purnesh42h](https://github.com/purnesh42h), Google LLC +- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) -- [adelez](https://github.com/adelez), Google LLC -- [canguler](https://github.com/canguler), Google LLC -- [iamqizhao](https://github.com/iamqizhao), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC -- [jtattermusch](https://github.com/jtattermusch), Google LLC -- [lyuxuan](https://github.com/lyuxuan), Google LLC -- [makmukhi](https://github.com/makmukhi), Google LLC -- [matt-kwong](https://github.com/matt-kwong), Google LLC -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC -- [yongni](https://github.com/yongni), Google LLC +- [adelez](https://github.com/adelez) +- [canguler](https://github.com/canguler) +- [cesarghali](https://github.com/cesarghali) +- [iamqizhao](https://github.com/iamqizhao) +- [jeanbza](https://github.com/jeanbza) +- [jtattermusch](https://github.com/jtattermusch) +- [lyuxuan](https://github.com/lyuxuan) +- [makmukhi](https://github.com/makmukhi) +- [matt-kwong](https://github.com/matt-kwong) +- [menghanl](https://github.com/menghanl) +- [nicolasnoble](https://github.com/nicolasnoble) +- [srini100](https://github.com/srini100) +- [yongni](https://github.com/yongni) diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md index be6e1087..abab2793 100644 --- a/vendor/google.golang.org/grpc/SECURITY.md +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -1,3 +1,3 @@ # Security Policy -For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). +For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go index 0787d0b5..d7b40b7c 100644 --- a/vendor/google.golang.org/grpc/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -39,7 +39,7 @@ type Config struct { MaxDelay time.Duration } -// DefaultConfig is a backoff configuration with the default values specfied +// DefaultConfig is a backoff configuration with the default values specified // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. 
// // This should be useful for callers who want to configure backoff with diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index f391744f..b181f386 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" @@ -72,8 +73,21 @@ func unregisterForTesting(name string) { delete(m, name) } +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + func init() { internal.BalancerUnregister = unregisterForTesting + internal.ConnectedAddress = connectedAddress + internal.SetConnectedAddress = setConnectedAddress } // Get returns the resolver builder registered with the given name. @@ -243,6 +257,10 @@ type BuildOptions struct { // same resolver.Target as passed to the resolver. See the documentation for // the resolver.Target type for details about what it contains. Target resolver.Target + // MetricsRecorder is the metrics recorder that balancers can use to record + // metrics. Balancer implementations which do not register metrics on + // metrics registry and record on them can ignore this field. + MetricsRecorder estats.MetricsRecorder } // Builder creates a balancer. @@ -410,6 +428,9 @@ type SubConnState struct { // ConnectionError is set if the ConnectivityState is TransientFailure, // describing the reason the SubConn failed. Otherwise, it is nil. ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address } // ClientConnState describes the state of a ClientConn relevant to the diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a7f1eeec..2b87bd79 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -36,7 +36,7 @@ type baseBuilder struct { config Config } -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, @@ -259,6 +259,6 @@ type errPicker struct { err error // Pick() always returns this err. 
} -func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index 0adc9886..52f54e6a 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 @@ -780,7 +780,7 @@ func file_grpc_lb_v1_load_balancer_proto_rawDescGZIP() []byte { } var file_grpc_lb_v1_load_balancer_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_grpc_lb_v1_load_balancer_proto_goTypes = []interface{}{ +var file_grpc_lb_v1_load_balancer_proto_goTypes = []any{ (*LoadBalanceRequest)(nil), // 0: grpc.lb.v1.LoadBalanceRequest (*InitialLoadBalanceRequest)(nil), // 1: grpc.lb.v1.InitialLoadBalanceRequest (*ClientStatsPerToken)(nil), // 2: grpc.lb.v1.ClientStatsPerToken @@ -818,7 +818,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*LoadBalanceRequest); i { case 0: return &v.state @@ -830,7 +830,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*InitialLoadBalanceRequest); i { case 0: return &v.state @@ -842,7 +842,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ClientStatsPerToken); i { case 0: return &v.state @@ -854,7 +854,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ClientStats); i { case 0: return &v.state @@ -866,7 +866,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*LoadBalanceResponse); i { case 0: return &v.state @@ -878,7 +878,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*FallbackResponse); i { case 0: return &v.state @@ -890,7 +890,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + 
file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*InitialLoadBalanceResponse); i { case 0: return &v.state @@ -902,7 +902,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ServerList); i { case 0: return &v.state @@ -914,7 +914,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*Server); i { case 0: return &v.state @@ -927,11 +927,11 @@ func file_grpc_lb_v1_load_balancer_proto_init() { } } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []any{ (*LoadBalanceRequest_InitialRequest)(nil), (*LoadBalanceRequest_ClientStats)(nil), } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []any{ (*LoadBalanceResponse_InitialResponse)(nil), (*LoadBalanceResponse_ServerList)(nil), (*LoadBalanceResponse_FallbackResponse)(nil), diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index 57a792a7..84e6a250 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 @@ -34,8 +34,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad" @@ -46,7 +46,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type LoadBalancerClient interface { // Bidirectional rpc to get a list of servers. - BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) + BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) } type loadBalancerClient struct { @@ -57,53 +57,38 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { return &loadBalancerClient{cc} } -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &loadBalancerBalanceLoadClient{ClientStream: stream} + x := &grpc.GenericClientStream[LoadBalanceRequest, LoadBalanceResponse]{ClientStream: stream} return x, nil } -type LoadBalancer_BalanceLoadClient interface { - Send(*LoadBalanceRequest) error - Recv() (*LoadBalanceResponse, error) - grpc.ClientStream -} - -type loadBalancerBalanceLoadClient struct { - grpc.ClientStream -} - -func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { - m := new(LoadBalanceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LoadBalancer_BalanceLoadClient = grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse] // LoadBalancerServer is the server API for LoadBalancer service. // All implementations should embed UnimplementedLoadBalancerServer -// for forward compatibility +// for forward compatibility. type LoadBalancerServer interface { // Bidirectional rpc to get a list of servers. - BalanceLoad(LoadBalancer_BalanceLoadServer) error + BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error } -// UnimplementedLoadBalancerServer should be embedded to have forward compatible implementations. -type UnimplementedLoadBalancerServer struct { -} +// UnimplementedLoadBalancerServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedLoadBalancerServer struct{} -func (UnimplementedLoadBalancerServer) BalanceLoad(LoadBalancer_BalanceLoadServer) error { +func (UnimplementedLoadBalancerServer) BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error { return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") } +func (UnimplementedLoadBalancerServer) testEmbeddedByValue() {} // UnsafeLoadBalancerServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to LoadBalancerServer will @@ -113,34 +98,22 @@ type UnsafeLoadBalancerServer interface { } func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) { + // If the following call panics, it indicates UnimplementedLoadBalancerServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&LoadBalancer_ServiceDesc, srv) } func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{ServerStream: stream}) -} - -type LoadBalancer_BalanceLoadServer interface { - Send(*LoadBalanceResponse) error - Recv() (*LoadBalanceRequest, error) - grpc.ServerStream -} - -type loadBalancerBalanceLoadServer struct { - grpc.ServerStream + return srv.(LoadBalancerServer).BalanceLoad(&grpc.GenericServerStream[LoadBalanceRequest, LoadBalanceResponse]{ServerStream: stream}) } -func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { - m := new(LoadBalanceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LoadBalancer_BalanceLoadServer = grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse] // LoadBalancer_ServiceDesc is the grpc.ServiceDesc for LoadBalancer service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index 47a3e938..c0987627 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -219,7 +219,7 @@ type lbBalancer struct { // All backends addresses, with metadata set to nil. This list contains all // backend addresses in the same order and with the same duplicates as in // serverlist. When generating picker, a SubConn slice with the same order - // but with only READY SCs will be gerenated. + // but with only READY SCs will be generated. backendAddrsWithoutMetadata []resolver.Address // Roundrobin functionalities. state connectivity.State diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 07527603..4d69b405 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -50,7 +50,7 @@ const ( type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -155,7 +155,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Endpoints not set, process addresses until we migrate resolver // emissions fully to Endpoints. The top channel does wrap emitted // addresses with endpoints, however some balancers such as weighted - // target do not forwarrd the corresponding correct endpoints down/split + // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. 
addrs = state.ResolverState.Addresses diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 4161fdf4..8ad6ce2f 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -25,12 +25,15 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) +var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the @@ -79,6 +82,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParent: cc.channelz, Target: cc.parsedTarget, + MetricsRecorder: cc.metricsRecorderList, }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, @@ -92,7 +96,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { // it is safe to call into the balancer here. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { errCh := make(chan error) - ok := ccb.serializer.Schedule(func(ctx context.Context) { + uccs := func(ctx context.Context) { defer close(errCh) if ctx.Err() != nil || ccb.balancer == nil { return @@ -107,17 +111,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat logger.Infof("error from balancer.UpdateClientConnState: %v", err) } errCh <- err - }) - if !ok { - return nil } + onFailure := func() { close(errCh) } + + // UpdateClientConnState can race with Close, and when the latter wins, the + // serializer is closed, and the attempt to schedule the callback will fail. + // It is acceptable to ignore this failure. But since we want to handle the + // state update in a blocking fashion (when we successfully schedule the + // callback), we have to use the ScheduleOr method and not the MaybeSchedule + // method on the serializer. + ccb.serializer.ScheduleOr(uccs, onFailure) return <-errCh } // resolverError is invoked by grpc to push a resolver error to the underlying // balancer. The call to the balancer is executed from the serializer. func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -133,7 +143,7 @@ func (ccb *ccBalancerWrapper) close() { ccb.closed = true ccb.mu.Unlock() channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") - ccb.serializer.Schedule(func(context.Context) { + ccb.serializer.TrySchedule(func(context.Context) { if ccb.balancer == nil { return } @@ -145,7 +155,7 @@ func (ccb *ccBalancerWrapper) close() { // exitIdle invokes the balancer's exitIdle method in the serializer. 
func (ccb *ccBalancerWrapper) exitIdle() { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -182,7 +192,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return acbw, nil } -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) { // The graceful switch balancer will never call this. logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } @@ -252,15 +262,29 @@ type acBalancerWrapper struct { // updateState is invoked by grpc to push a subConn state update to the // underlying balancer. -func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { - acbw.ccb.serializer.Schedule(func(ctx context.Context) { +func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return } // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. - acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err} + if s == connectivity.Ready { + setConnectedAddress(&scs, curAddr) + } + acbw.stateListener(scs) + acbw.ac.mu.Lock() + defer acbw.ac.mu.Unlock() + if s == connectivity.Ready { + // When changing states to READY, reset stateReadyChan. Wait until + // after we notify the LB policy's listener(s) in order to prevent + // ac.getTransport() from unblocking before the LB policy starts + // tracking the subchannel as READY. + close(acbw.ac.stateReadyChan) + acbw.ac.stateReadyChan = make(chan struct{}) + } }) } @@ -318,8 +342,8 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( pData := acbw.producers[pb] if pData == nil { // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} acbw.producers[pb] = pData } // Account for this new reference. diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 63c639e4..55bffaa7 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 @@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type @@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GrpcLogEntry); i { case 0: return &v.state @@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientHeader); i { case 0: return &v.state @@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerHeader); i { case 0: return &v.state @@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Trailer); i { case 0: return &v.state @@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Message); i { case 0: return &v.state @@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*Metadata); i { case 0: return &v.state @@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*MetadataEntry); i { case 0: return &v.state @@ -1142,7 +1142,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*Address); i { case 0: return &v.state @@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { } } } - 
file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 423be7b4..9c8850e3 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,6 +24,7 @@ import ( "fmt" "math" "net/url" + "slices" "strings" "sync" "sync/atomic" @@ -39,6 +40,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -194,8 +196,11 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil } @@ -590,13 +595,14 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). - authority string // See initAuthority(). - dopts dialOptions // Default and user specified dial options. - channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). - idlenessMgr *idle.Manager + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager + metricsRecorderList *stats.MetricsRecorderList // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -626,11 +632,6 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { @@ -645,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec } // GetState returns the connectivity.State of ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later -// release. 
func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } @@ -812,17 +808,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { cc.csMgr.updateState(connectivity.TransientFailure) } -// Makes a copy of the input addresses slice and clears out the balancer -// attributes field. Addresses are passed during subconn creation and address -// update operations. In both cases, we will clear the balancer attributes by -// calling this function, and therefore we will be able to use the Equal method -// provided by the resolver.Address type for comparison. -func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { +// Makes a copy of the input addresses slice. Addresses are passed during +// subconn creation and address update operations. +func copyAddresses(in []resolver.Address) []resolver.Address { out := make([]resolver.Address, len(in)) - for i := range in { - out[i] = in[i] - out[i].BalancerAttributes = nil - } + copy(out, in) return out } @@ -835,14 +825,14 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. } ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddressesWithoutBalancerAttributes(addrs), - scopts: opts, - dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz, ""), - resetBackoff: make(chan struct{}), - stateChan: make(chan struct{}), + state: connectivity.Idle, + cc: cc, + addrs: copyAddresses(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), + stateReadyChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if @@ -918,28 +908,29 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } - ac.mu.Unlock() - ac.resetTransport() + ac.resetTransportAndUnlock() return nil } -func equalAddresses(a, b []resolver.Address) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if !v.Equal(b[i]) { - return false - } - } - return true +// equalAddressIgnoringBalAttributes returns true is a and b are considered equal. +// This is different from the Equal method on the resolver.Address type which +// considers all fields to determine equality. Here, we only consider fields +// that are meaningful to the subConn. +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} + +func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool { + return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) }) } // updateAddrs updates ac.addrs with the new addresses list and handles active // connections or connection attempts. 
func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - addrs = copyAddressesWithoutBalancerAttributes(addrs) + addrs = copyAddresses(addrs) limit := len(addrs) if limit > 5 { limit = 5 @@ -947,7 +938,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) ac.mu.Lock() - if equalAddresses(ac.addrs, addrs) { + if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) { ac.mu.Unlock() return } @@ -966,7 +957,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { // Try to find the connected address. for _, a := range addrs { a.ServerName = ac.cc.getServerName(a) - if a.Equal(ac.curAddr) { + if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) { // We are connected to a valid address, so do nothing but // update the addresses. ac.mu.Unlock() @@ -992,11 +983,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.updateConnectivityState(connectivity.Idle, nil) } - ac.mu.Unlock() - // Since we were connecting/connected, we should start a new connection // attempt. - go ac.resetTransport() + go ac.resetTransportAndUnlock() } // getServerName determines the serverName to be used in the connection @@ -1190,8 +1179,8 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateChan chan struct{} // closed and recreated on every state change. + state connectivity.State + stateReadyChan chan struct{} // closed and recreated on every READY state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1204,9 +1193,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - // When changing states, reset the state change channel. - close(ac.stateChan) - ac.stateChan = make(chan struct{}) ac.state = s ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { @@ -1214,7 +1200,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.acbw.updateState(s, lastErr) + ac.acbw.updateState(s, ac.curAddr, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1231,8 +1217,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } } -func (ac *addrConn) resetTransport() { - ac.mu.Lock() +// resetTransportAndUnlock unconditionally connects the addrConn. +// +// ac.mu must be held by the caller, and this function will guarantee it is released. +func (ac *addrConn) resetTransportAndUnlock() { acCtx := ac.ctx if acCtx.Err() != nil { ac.mu.Unlock() @@ -1522,7 +1510,7 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { for ctx.Err() == nil { ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateChan + t, state, sc := ac.transport, ac.state, ac.stateReadyChan ac.mu.Unlock() if state == connectivity.Ready { return t, nil @@ -1585,7 +1573,7 @@ func (ac *addrConn) tearDown(err error) { } else { // Hard close the transport when the channel is entering idle or is // being shutdown. In the case where the channel is being shutdown, - // closing of transports is also taken care of by cancelation of cc.ctx. 
+ // closing of transports is also taken care of by cancellation of cc.ctx. // But in the case where the channel is entering idle, we need to // explicitly close the transports here. Instead of distinguishing // between these two cases, it is simpler to close the transport diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 411e3dfd..e840858b 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -21,18 +21,73 @@ package grpc import ( "google.golang.org/grpc/encoding" _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" + "google.golang.org/grpc/mem" ) -// baseCodec contains the functionality of both Codec and encoding.Codec, but -// omits the name/string, which vary between the two and are not needed for -// anything besides the registry in the encoding package. +// baseCodec captures the new encoding.CodecV2 interface without the Name +// function, allowing it to be implemented by older Codec and encoding.Codec +// implementations. The omitted Name function is only needed for the register in +// the encoding package and is not part of the core functionality. type baseCodec interface { - Marshal(v any) ([]byte, error) - Unmarshal(data []byte, v any) error + Marshal(v any) (mem.BufferSlice, error) + Unmarshal(data mem.BufferSlice, v any) error +} + +// getCodec returns an encoding.CodecV2 for the codec of the given name (if +// registered). Initially checks the V2 registry with encoding.GetCodecV2 and +// returns the V2 codec if it is registered. Otherwise, it checks the V1 registry +// with encoding.GetCodec and if it is registered wraps it with newCodecV1Bridge +// to turn it into an encoding.CodecV2. Returns nil otherwise. +func getCodec(name string) encoding.CodecV2 { + if codecV1 := encoding.GetCodec(name); codecV1 != nil { + return newCodecV1Bridge(codecV1) + } + + return encoding.GetCodecV2(name) +} + +func newCodecV0Bridge(c Codec) baseCodec { + return codecV0Bridge{codec: c} +} + +func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 { + return codecV1Bridge{ + codecV0Bridge: codecV0Bridge{codec: c}, + name: c.Name(), + } +} + +var _ baseCodec = codecV0Bridge{} + +type codecV0Bridge struct { + codec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error + } +} + +func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { + data, err := c.codec.Marshal(v) + if err != nil { + return nil, err + } + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil +} + +func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { + return c.codec.Unmarshal(data.Materialize(), v) } -var _ baseCodec = Codec(nil) -var _ baseCodec = encoding.Codec(nil) +var _ encoding.CodecV2 = codecV1Bridge{} + +type codecV1Bridge struct { + codecV0Bridge + name string +} + +func (c codecV1Bridge) Name() string { + return c.name +} // Codec defines the interface gRPC uses to encode and decode messages. // Note that implementations of this interface must be thread safe; diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go index 43726e87..7e4bfee8 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go @@ -49,7 +49,7 @@ func (k KeySizeError) Error() string { // newRekeyAEAD creates a new instance of aes128gcm with rekeying. 
// The key argument should be 44 bytes, the first 32 bytes are used as a key -// for HKDF-expand and the remainining 12 bytes are used as a random mask for +// for HKDF-expand and the remaining 12 bytes are used as a random mask for // the counter. func newRekeyAEAD(key []byte) (*rekeyAEAD, error) { k := len(key) diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go index 6a9035ea..b5bbb549 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go @@ -51,7 +51,7 @@ type aes128gcmRekey struct { // NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying // for ALTS record. The key argument should be 44 bytes, the first 32 bytes -// are used as a key for HKDF-expand and the remainining 12 bytes are used +// are used as a key for HKDF-expand and the remaining 12 bytes are used // as a random mask for the counter. func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) { inCounter := NewInCounter(side, overflowLenAES128GCMRekey) diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go index 0d64fb37..f1ea7bb2 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go @@ -266,10 +266,3 @@ func (p *conn) Write(b []byte) (n int, err error) { } return n, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go index 6c867dd8..50721f69 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -128,7 +128,7 @@ type altsHandshaker struct { // NewClientHandshaker creates a core.Handshaker that performs a client-side // ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. -func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { +func NewClientHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { return &altsHandshaker{ stream: nil, conn: c, @@ -141,7 +141,7 @@ func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, // NewServerHandshaker creates a core.Handshaker that performs a server-side // ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. 
-func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { +func NewServerHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { return &altsHandshaker{ stream: nil, conn: c, diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go index e1cdafb9..b3af0359 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -34,8 +34,6 @@ var ( // to a corresponding connection to a hypervisor handshaker service // instance. hsConnMap = make(map[string]*grpc.ClientConn) - // hsDialer will be reassigned in tests. - hsDialer = grpc.Dial ) // Dial dials the handshake service in the hypervisor. If a connection has @@ -50,7 +48,7 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { // Create a new connection to the handshaker service. Note that // this connection stays open until the application is closed. var err error - hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) + hsConn, err = grpc.Dial(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 38cb5cf0..b7de8f05 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/gcp/altscontext.proto package grpc_gcp @@ -201,7 +201,7 @@ func file_grpc_gcp_altscontext_proto_rawDescGZIP() []byte { } var file_grpc_gcp_altscontext_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_gcp_altscontext_proto_goTypes = []interface{}{ +var file_grpc_gcp_altscontext_proto_goTypes = []any{ (*AltsContext)(nil), // 0: grpc.gcp.AltsContext nil, // 1: grpc.gcp.AltsContext.PeerAttributesEntry (SecurityLevel)(0), // 2: grpc.gcp.SecurityLevel @@ -225,7 +225,7 @@ func file_grpc_gcp_altscontext_proto_init() { } file_grpc_gcp_transport_security_common_proto_init() if !protoimpl.UnsafeEnabled { - file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AltsContext); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 55fc7f65..79b5dad4 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/gcp/handshaker.proto package grpc_gcp @@ -533,7 +533,7 @@ type StartServerHandshakeReq struct { // to handshake_parameters is the integer value of HandshakeProtocol enum. HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Bytes in out_frames returned from the peer's HandshakerResp. It is possible - // that the peer's out_frames are split into multiple HandshakReq messages. + // that the peer's out_frames are split into multiple HandshakeReq messages. InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` // (Optional) Local endpoint information of the connection to the client, // such as local IP address, port number, and network protocol. @@ -1071,7 +1071,7 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, - 0x22, 0xf6, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x22, 0xfb, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x5b, 0x0a, 0x1b, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, @@ -1108,139 +1108,140 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaa, 0x01, 0x0a, 0x19, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, - 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, - 0x73, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 
0x6e, 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, + 0x01, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaf, + 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, 0x01, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, + 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, - 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, - 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 
0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, - 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, - 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, + 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, + 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 
0x64, 0x73, 0x68, + 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, + 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, + 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, + 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, + 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, + 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, + 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, + 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 
0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, + 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, + 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, - 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, - 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, - 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 
0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, - 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, - 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, - 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, - 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, - 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, - 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, - 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, - 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, - 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, - 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, - 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, - 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, - 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, 0x72, 0x61, 0x6d, 
0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, - 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, - 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, - 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, - 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, - 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, - 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, - 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, - 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 
0x61, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, + 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, + 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, + 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, + 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, + 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, + 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, + 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, + 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1257,7 +1258,7 @@ func file_grpc_gcp_handshaker_proto_rawDescGZIP() []byte { var file_grpc_gcp_handshaker_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_grpc_gcp_handshaker_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var 
file_grpc_gcp_handshaker_proto_goTypes = []interface{}{ +var file_grpc_gcp_handshaker_proto_goTypes = []any{ (HandshakeProtocol)(0), // 0: grpc.gcp.HandshakeProtocol (NetworkProtocol)(0), // 1: grpc.gcp.NetworkProtocol (*Endpoint)(nil), // 2: grpc.gcp.Endpoint @@ -1313,7 +1314,7 @@ func file_grpc_gcp_handshaker_proto_init() { } file_grpc_gcp_transport_security_common_proto_init() if !protoimpl.UnsafeEnabled { - file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Endpoint); i { case 0: return &v.state @@ -1325,7 +1326,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Identity); i { case 0: return &v.state @@ -1337,7 +1338,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*StartClientHandshakeReq); i { case 0: return &v.state @@ -1349,7 +1350,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ServerHandshakeParameters); i { case 0: return &v.state @@ -1361,7 +1362,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*StartServerHandshakeReq); i { case 0: return &v.state @@ -1373,7 +1374,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*NextHandshakeMessageReq); i { case 0: return &v.state @@ -1385,7 +1386,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*HandshakerReq); i { case 0: return &v.state @@ -1397,7 +1398,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*HandshakerResult); i { case 0: return &v.state @@ -1409,7 +1410,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*HandshakerStatus); i { case 0: return &v.state @@ -1421,7 +1422,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*HandshakerResp); i { case 0: 
return &v.state @@ -1434,12 +1435,12 @@ func file_grpc_gcp_handshaker_proto_init() { } } } - file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []any{ (*Identity_ServiceAccount)(nil), (*Identity_Hostname)(nil), } - file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []interface{}{} - file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []interface{}{ + file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []any{} + file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []any{ (*HandshakerReq_ClientStart)(nil), (*HandshakerReq_ServerStart)(nil), (*HandshakerReq_Next)(nil), diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index 358074b6..34443b1d 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/gcp/handshaker.proto package grpc_gcp @@ -75,7 +75,7 @@ type HandshakerService_DoHandshakeClient = grpc.BidiStreamingClient[HandshakerRe // HandshakerServiceServer is the server API for HandshakerService service. // All implementations must embed UnimplementedHandshakerServiceServer -// for forward compatibility +// for forward compatibility. type HandshakerServiceServer interface { // Handshaker service accepts a stream of handshaker request, returning a // stream of handshaker response. Client is expected to send exactly one @@ -87,14 +87,18 @@ type HandshakerServiceServer interface { mustEmbedUnimplementedHandshakerServiceServer() } -// UnimplementedHandshakerServiceServer must be embedded to have forward compatible implementations. -type UnimplementedHandshakerServiceServer struct { -} +// UnimplementedHandshakerServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHandshakerServiceServer struct{} func (UnimplementedHandshakerServiceServer) DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error { return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") } func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {} +func (UnimplementedHandshakerServiceServer) testEmbeddedByValue() {} // UnsafeHandshakerServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to HandshakerServiceServer will @@ -104,6 +108,13 @@ type UnsafeHandshakerServiceServer interface { } func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServiceServer) { + // If the following call panics, it indicates UnimplementedHandshakerServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
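A minimal sketch of what the embed-by-value requirement described in the generated comment above means on the implementer's side; examplepb is a hypothetical protoc-gen-go-grpc output package used only for illustration:

// Sketch only: examplepb and EchoService are hypothetical, not part of this update.
package handshakedemo

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	examplepb "example.com/internal/gen/examplepb" // hypothetical generated package
)

// Embedding the Unimplemented struct by value gives the type safe stub methods.
// Embedding *examplepb.UnimplementedEchoServiceServer as a nil pointer is what
// the init-time testEmbeddedByValue check above guards against.
type echoServer struct {
	examplepb.UnimplementedEchoServiceServer
}

func (s *echoServer) Echo(ctx context.Context, req *examplepb.EchoRequest) (*examplepb.EchoResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "sketch only")
}
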
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&HandshakerService_ServiceDesc, srv) } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index 18cc9cfb..6956c14f 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/gcp/transport_security_common.proto package grpc_gcp @@ -253,7 +253,7 @@ func file_grpc_gcp_transport_security_common_proto_rawDescGZIP() []byte { var file_grpc_gcp_transport_security_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_gcp_transport_security_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_gcp_transport_security_common_proto_goTypes = []interface{}{ +var file_grpc_gcp_transport_security_common_proto_goTypes = []any{ (SecurityLevel)(0), // 0: grpc.gcp.SecurityLevel (*RpcProtocolVersions)(nil), // 1: grpc.gcp.RpcProtocolVersions (*RpcProtocolVersions_Version)(nil), // 2: grpc.gcp.RpcProtocolVersions.Version @@ -274,7 +274,7 @@ func file_grpc_gcp_transport_security_common_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*RpcProtocolVersions); i { case 0: return &v.state @@ -286,7 +286,7 @@ func file_grpc_gcp_transport_security_common_proto_init() { return nil } } - file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*RpcProtocolVersions_Version); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 82bee144..4c805c64 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials { // NoSecurity. type insecureTC struct{} -func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil } diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go index d475cbc0..328b838e 100644 --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -38,7 +38,7 @@ type TokenSource struct { } // GetRequestMetadata gets the request metadata as a map from a TokenSource. 
-func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (ts TokenSource) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { token, err := ts.Token() if err != nil { return nil, err @@ -127,7 +127,7 @@ func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { return oauthAccess{token: *token} } -func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { ri, _ := credentials.RequestInfoFromContext(ctx) if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err) @@ -156,7 +156,7 @@ type serviceAccount struct { t *oauth2.Token } -func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { s.mu.Lock() defer s.mu.Unlock() if !s.t.Valid() { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index f5453d48..2b285bee 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) @@ -60,7 +61,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions - internal.WithRecvBufferPool = withRecvBufferPool + internal.WithBufferPool = withBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -92,7 +93,6 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration - recvBufferPool SharedBufferPool defaultScheme string maxCallAttempts int } @@ -518,6 +518,8 @@ func WithUserAgent(s string) DialOption { // WithKeepaliveParams returns a DialOption that specifies keepalive parameters // for the client transport. +// +// Keepalive is disabled by default. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) @@ -677,11 +679,11 @@ func defaultDialOptions() dialOptions { WriteBufferSize: defaultWriteBufSize, UseProxy: true, UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, - recvBufferPool: nopBufferPool{}, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, } @@ -758,25 +760,8 @@ func WithMaxCallAttempts(n int) DialOption { }) } -// WithRecvBufferPool returns a DialOption that configures the ClientConn -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. 
-// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: WithStatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. -func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return withRecvBufferPool(bufferPool) -} - -func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { +func withBufferPool(bufferPool mem.BufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.recvBufferPool = bufferPool + o.copts.BufferPool = bufferPool }) } diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 0022859a..e7b532b6 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,7 +16,7 @@ * */ -//go:generate ./regenerate.sh +//go:generate ./scripts/regenerate.sh /* Package grpc implements an RPC system called gRPC. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 5ebf88d7..11d0ae14 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -94,7 +94,7 @@ type Codec interface { Name() string } -var registeredCodecs = make(map[string]Codec) +var registeredCodecs = make(map[string]any) // RegisterCodec registers the provided Codec for use with all gRPC clients and // servers. @@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) { // // The content-subtype is expected to be lowercase. func GetCodec(contentSubtype string) Codec { - return registeredCodecs[contentSubtype] + c, _ := registeredCodecs[contentSubtype].(Codec) + return c } diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go new file mode 100644 index 00000000..074c5e23 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "strings" + + "google.golang.org/grpc/mem" +) + +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a CodecV2's +// methods can be called from concurrent goroutines. +type CodecV2 interface { + // Marshal returns the wire format of v. The buffers in the returned + // [mem.BufferSlice] must have at least one reference each, which will be freed + // by gRPC when they are no longer needed. + Marshal(v any) (out mem.BufferSlice, err error) + // Unmarshal parses the wire format into v. Note that data will be freed as soon + // as this function returns. If the codec wishes to guarantee access to the data + // after this function, it must take its own reference that it frees when it is + // no longer needed. 
+ Unmarshal(data mem.BufferSlice, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and +// servers. +// +// The CodecV2 will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the CodecV2. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodecV2 will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If both a Codec and CodecV2 are registered with the same name, the CodecV2 +// will be used. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodecV2(codec CodecV2) { + if codec == nil { + panic("cannot register a nil CodecV2") + } + if codec.Name() == "" { + panic("cannot register CodecV2 with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodecV2(contentSubtype string) CodecV2 { + c, _ := registeredCodecs[contentSubtype].(CodecV2) + return c +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 66d5cdf0..ceec319d 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -1,6 +1,6 @@ /* * - * Copyright 2018 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,6 +24,7 @@ import ( "fmt" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/protoadapt" ) @@ -32,28 +33,51 @@ import ( const Name = "proto" func init() { - encoding.RegisterCodec(codec{}) + encoding.RegisterCodecV2(&codecV2{}) } -// codec is a Codec implementation with protobuf. It is the default codec for gRPC. -type codec struct{} +// codec is a CodecV2 implementation with protobuf. It is the default codec for +// gRPC. 
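A rough usage sketch of the CodecV2 registration contract described above; the JSON codec and its name are hypothetical and not something this update registers:

package jsoncodec // hypothetical example package

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/mem"
)

type jsonCodec struct{}

func (jsonCodec) Marshal(v any) (mem.BufferSlice, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	// SliceBuffer wraps a plain []byte without pooling, which is fine for an
	// illustration; pooled buffers would use mem.NewBuffer as in proto.go below.
	return mem.BufferSlice{mem.SliceBuffer(b)}, nil
}

func (jsonCodec) Unmarshal(data mem.BufferSlice, v any) error {
	// Materialize flattens the fragments into one contiguous []byte; the
	// BufferSlice itself is freed by gRPC once Unmarshal returns.
	return json.Unmarshal(data.Materialize(), v)
}

func (jsonCodec) Name() string { return "json" } // looked up by content-subtype

func init() {
	// Registration must happen at init time; RegisterCodecV2 is not thread-safe.
	encoding.RegisterCodecV2(jsonCodec{})
}
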
+type codecV2 struct{} -func (codec) Marshal(v any) ([]byte, error) { +func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { vv := messageV2Of(v) if vv == nil { - return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) } - return proto.Marshal(vv) + size := proto.Size(vv) + if mem.IsBelowBufferPoolingThreshold(size) { + buf, err := proto.Marshal(vv) + if err != nil { + return nil, err + } + data = append(data, mem.SliceBuffer(buf)) + } else { + pool := mem.DefaultBufferPool() + buf := pool.Get(size) + if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + pool.Put(buf) + return nil, err + } + data = append(data, mem.NewBuffer(buf, pool)) + } + + return data, nil } -func (codec) Unmarshal(data []byte, v any) error { +func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) { vv := messageV2Of(v) if vv == nil { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } - return proto.Unmarshal(data, vv) + buf := data.MaterializeToBuffer(mem.DefaultBufferPool()) + defer buf.Free() + // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not + // really possible without a major overhaul of the proto package, but the + // vtprotobuf library may be able to support this. + return proto.Unmarshal(buf.ReadOnlyData(), vv) } func messageV2Of(v any) proto.Message { @@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message { return nil } -func (codec) Name() string { +func (c *codecV2) Name() string { return Name } diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go new file mode 100644 index 00000000..1d827dd5 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -0,0 +1,269 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "maps" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +func init() { + internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting +} + +var logger = grpclog.Component("metrics-registry") + +// DefaultMetrics are the default metrics registered through global metrics +// registry. This is written to at initialization time only, and is read only +// after initialization. +var DefaultMetrics = NewMetrics() + +// MetricDescriptor is the data for a registered metric. +type MetricDescriptor struct { + // The name of this metric. This name must be unique across the whole binary + // (including any per call metrics). See + // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions + // for metric naming conventions. + Name Metric + // The description of this metric. + Description string + // The unit (e.g. entries, seconds) of this metric. 
+ Unit string + // The required label keys for this metric. These are intended to + // metrics emitted from a stats handler. + Labels []string + // The optional label keys for this metric. These are intended to attached + // to metrics emitted from a stats handler if configured. + OptionalLabels []string + // Whether this metric is on by default. + Default bool + // The type of metric. This is set by the metric registry, and not intended + // to be set by a component registering a metric. + Type MetricType + // Bounds are the bounds of this metric. This only applies to histogram + // metrics. If unset or set with length 0, stats handlers will fall back to + // default bounds. + Bounds []float64 +} + +// MetricType is the type of metric. +type MetricType int + +// Type of metric supported by this instrument registry. +const ( + MetricTypeIntCount MetricType = iota + MetricTypeFloatCount + MetricTypeIntHisto + MetricTypeFloatHisto + MetricTypeIntGauge +) + +// Int64CountHandle is a typed handle for a int count metric. This handle +// is passed at the recording point in order to know which metric to record +// on. +type Int64CountHandle MetricDescriptor + +// Descriptor returns the int64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 count value on the metrics recorder provided. +func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Count(h, incr, labels...) +} + +// Float64CountHandle is a typed handle for a float count metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Float64CountHandle MetricDescriptor + +// Descriptor returns the float64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 count value on the metrics recorder provided. +func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Count(h, incr, labels...) +} + +// Int64HistoHandle is a typed handle for an int histogram metric. This handle +// is passed at the recording point in order to know which metric to record on. +type Int64HistoHandle MetricDescriptor + +// Descriptor returns the int64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. +func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Histo(h, incr, labels...) +} + +// Float64HistoHandle is a typed handle for a float histogram metric. This +// handle is passed at the recording point in order to know which metric to +// record on. +type Float64HistoHandle MetricDescriptor + +// Descriptor returns the float64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 histo value on the metrics recorder provided. +func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Histo(h, incr, labels...) +} + +// Int64GaugeHandle is a typed handle for an int gauge metric. 
This handle is +// passed at the recording point in order to know which metric to record on. +type Int64GaugeHandle MetricDescriptor + +// Descriptor returns the int64 gauge handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. +func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Gauge(h, incr, labels...) +} + +// registeredMetrics are the registered metric descriptor names. +var registeredMetrics = make(map[Metric]bool) + +// metricsRegistry contains all of the registered metrics. +// +// This is written to only at init time, and read only after that. +var metricsRegistry = make(map[Metric]*MetricDescriptor) + +// DescriptorForMetric returns the MetricDescriptor from the global registry. +// +// Returns nil if MetricDescriptor not present. +func DescriptorForMetric(metric Metric) *MetricDescriptor { + return metricsRegistry[metric] +} + +func registerMetric(name Metric, def bool) { + if registeredMetrics[name] { + logger.Fatalf("metric %v already registered", name) + } + registeredMetrics[name] = true + if def { + DefaultMetrics = DefaultMetrics.Add(name) + } +} + +// RegisterInt64Count registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64CountHandle)(descPtr) +} + +// RegisterFloat64Count registers the metric description onto the global +// registry. It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64CountHandle)(descPtr) +} + +// RegisterInt64Histo registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64HistoHandle)(descPtr) +} + +// RegisterFloat64Histo registers the metric description onto the global +// registry. It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. 
If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64HistoHandle)(descPtr) +} + +// RegisterInt64Gauge registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntGauge + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64GaugeHandle)(descPtr) +} + +// snapshotMetricsRegistryForTesting snapshots the global data of the metrics +// registry. Returns a cleanup function that sets the metrics registry to its +// original state. +func snapshotMetricsRegistryForTesting() func() { + oldDefaultMetrics := DefaultMetrics + oldRegisteredMetrics := registeredMetrics + oldMetricsRegistry := metricsRegistry + + registeredMetrics = make(map[Metric]bool) + metricsRegistry = make(map[Metric]*MetricDescriptor) + maps.Copy(registeredMetrics, registeredMetrics) + maps.Copy(metricsRegistry, metricsRegistry) + + return func() { + DefaultMetrics = oldDefaultMetrics + registeredMetrics = oldRegisteredMetrics + metricsRegistry = oldMetricsRegistry + } +} diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go new file mode 100644 index 00000000..3221f7a6 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -0,0 +1,114 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats contains experimental metrics/stats API's. +package stats + +import "maps" + +// MetricsRecorder records on metrics derived from metric registry. +type MetricsRecorder interface { + // RecordInt64Count records the measurement alongside labels on the int + // count associated with the provided handle. + RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string) + // RecordFloat64Count records the measurement alongside labels on the float + // count associated with the provided handle. + RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string) + // RecordInt64Histo records the measurement alongside labels on the int + // histo associated with the provided handle. + RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string) + // RecordFloat64Histo records the measurement alongside labels on the float + // histo associated with the provided handle. 
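A rough sketch of how the registry and recorder pieces above fit together; the package and metric name are hypothetical:

package widgetstats // hypothetical component

import estats "google.golang.org/grpc/experimental/stats"

// Registered once at init time as a package-level handle, per the NOTE above.
var exampleCalls = estats.RegisterInt64Count(estats.MetricDescriptor{
	Name:        "grpc.example.calls", // hypothetical metric name
	Description: "Number of example calls issued.",
	Unit:        "call",
	Labels:      []string{"grpc.method"},
	Default:     false,
})

// At a recording point, a MetricsRecorder (typically supplied by a stats
// handler) receives the typed handle plus the required label values.
func recordCall(recorder estats.MetricsRecorder, method string) {
	exampleCalls.Record(recorder, 1, method)
}
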
+ RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string) + // RecordInt64Gauge records the measurement alongside labels on the int + // gauge associated with the provided handle. + RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) +} + +// Metric is an identifier for a metric. +type Metric string + +// Metrics is a set of metrics to record. Once created, Metrics is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetrics instead. +type Metrics struct { + // metrics are the set of metrics to initialize. + metrics map[Metric]bool +} + +// NewMetrics returns a Metrics containing Metrics. +func NewMetrics(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. +func (m *Metrics) Metrics() map[Metric]bool { + return m.metrics +} + +// Add adds the metrics to the metrics set and returns a new copy with the +// additional metrics. +func (m *Metrics) Add(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *Metrics) Join(metrics *Metrics) *Metrics { + newMetrics := make(map[Metric]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &Metrics{ + metrics: newMetrics, + } +} + +// Remove removes the metrics from the metrics set and returns a new copy with +// the metrics removed. +func (m *Metrics) Remove(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + delete(newMetrics, metric) + } + return &Metrics{ + metrics: newMetrics, + } +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index ac73c9ce..f1ae080d 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -20,8 +20,6 @@ package grpclog import ( "fmt" - - "google.golang.org/grpc/internal/grpclog" ) // componentData records the settings for a component. @@ -33,22 +31,22 @@ var cache = map[string]*componentData{} func (c *componentData) InfoDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.InfoDepth(depth+1, args...) + InfoDepth(depth+1, args...) } func (c *componentData) WarningDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.WarningDepth(depth+1, args...) + WarningDepth(depth+1, args...) } func (c *componentData) ErrorDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.ErrorDepth(depth+1, args...) + ErrorDepth(depth+1, args...) } func (c *componentData) FatalDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.FatalDepth(depth+1, args...) + FatalDepth(depth+1, args...) 
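The immutable Metrics set introduced in metrics.go above composes by copy, roughly as in this sketch (metric names hypothetical):

package main

import estats "google.golang.org/grpc/experimental/stats"

func main() {
	// Each Add/Remove call returns a fresh copy; DefaultMetrics itself is
	// never mutated after initialization.
	m := estats.DefaultMetrics.
		Add("grpc.example.calls").     // hypothetical metric name
		Remove("grpc.example.retries") // hypothetical metric name
	_ = m.Metrics() // read-only view of the resulting set
}
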
} func (c *componentData) Info(args ...any) { diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 16928c9c..db320105 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -18,18 +18,15 @@ // Package grpclog defines logging for grpc. // -// All logs in transport and grpclb packages only go to verbose level 2. -// All logs in other packages in grpc are logged in spite of the verbosity level. -// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog // import "google.golang.org/grpc/grpclog" +// In the default logger, severity level can be set by environment variable +// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by +// GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog import ( "os" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) func init() { @@ -38,58 +35,58 @@ func init() { // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return grpclog.Logger.V(l) + return internal.LoggerV2Impl.V(l) } // Info logs to the INFO log. func Info(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...any) { - grpclog.Logger.Warning(args...) + internal.LoggerV2Impl.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...any) { - grpclog.Logger.Warningf(format, args...) + internal.LoggerV2Impl.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...any) { - grpclog.Logger.Warningln(args...) + internal.LoggerV2Impl.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...any) { - grpclog.Logger.Error(args...) + internal.LoggerV2Impl.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...any) { - grpclog.Logger.Errorf(format, args...) + internal.LoggerV2Impl.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...any) { - grpclog.Logger.Errorln(args...) + internal.LoggerV2Impl.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...any) { - grpclog.Logger.Fatal(args...) + internal.LoggerV2Impl.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -97,15 +94,15 @@ func Fatal(args ...any) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...any) { - grpclog.Logger.Fatalf(format, args...) + internal.LoggerV2Impl.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalln logs to the FATAL log. 
Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalln(args ...any) { - grpclog.Logger.Fatalln(args...) + internal.LoggerV2Impl.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -114,19 +111,76 @@ func Fatalln(args ...any) { // // Deprecated: use Info. func Print(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) +} + +// InfoDepth logs to the INFO log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InfoDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.InfoDepth(depth, args...) + } else { + internal.LoggerV2Impl.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WarningDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.WarningDepth(depth, args...) + } else { + internal.LoggerV2Impl.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ErrorDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) + } else { + internal.LoggerV2Impl.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FatalDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.FatalDepth(depth, args...) + } else { + internal.LoggerV2Impl.Fatalln(args...) + } + os.Exit(1) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go new file mode 100644 index 00000000..59c03bc1 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the grpclog package. +package internal + +// LoggerV2Impl is the logger used for the non-depth log functions. 
+var LoggerV2Impl LoggerV2 + +// DepthLoggerV2Impl is the logger used for the depth log functions. +var DepthLoggerV2Impl DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go new file mode 100644 index 00000000..e524fdd4 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) +} + +// LoggerWrapper wraps Logger into a LoggerV2. +type LoggerWrapper struct { + Logger +} + +// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Info(args ...any) { + l.Logger.Print(args...) +} + +// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Infoln(args ...any) { + l.Logger.Println(args...) +} + +// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Infof(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Warning(args ...any) { + l.Logger.Print(args...) +} + +// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Warningln(args ...any) { + l.Logger.Println(args...) +} + +// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Warningf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Error(args ...any) { + l.Logger.Print(args...) +} + +// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Errorln(args ...any) { + l.Logger.Println(args...) +} + +// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Errorf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (*LoggerWrapper) V(int) bool { + // Returns true for all verbose level. 
+ return true +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go similarity index 52% rename from vendor/google.golang.org/grpc/internal/grpclog/grpclog.go rename to vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index bfc45102..07df71e9 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,59 +16,17 @@ * */ -// Package grpclog (internal) defines depth logging for grpc. -package grpclog +package internal import ( + "encoding/json" + "fmt" + "io" + "log" "os" ) -// Logger is the logger used for the non-depth log functions. -var Logger LoggerV2 - -// DepthLogger is the logger used for the depth log functions. -var DepthLogger DepthLoggerV2 - -// InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.InfoDepth(depth, args...) - } else { - Logger.Infoln(args...) - } -} - -// WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.WarningDepth(depth, args...) - } else { - Logger.Warningln(args...) - } -} - -// ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.ErrorDepth(depth, args...) - } else { - Logger.Errorln(args...) - } -} - -// FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.FatalDepth(depth, args...) - } else { - Logger.Fatalln(args...) - } - os.Exit(1) -} - // LoggerV2 does underlying logging work for grpclog. -// This is a copy of the LoggerV2 defined in the external grpclog package. It -// is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. Info(args ...any) @@ -107,14 +65,13 @@ type LoggerV2 interface { // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. -// This is a copy of the DepthLoggerV2 defined in the external grpclog package. -// It is defined here to avoid a circular dependency. // // # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { + LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. @@ -124,3 +81,124 @@ type DepthLoggerV2 interface { // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...any) } + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. 
+var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// LoggerV2Config configures the LoggerV2 implementation. +type LoggerV2Config struct { + // Verbosity sets the verbosity level of the logger. + Verbosity int + // FormatJSON controls whether the logger should output logs in JSON format. + FormatJSON bool +} + +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. +// The infoW, warningW, and errorW writers are used to write log messages of +// different severity levels. +func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.FormatJSON { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index b1674d82..4b203585 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,70 +18,17 @@ package grpclog -import "google.golang.org/grpc/internal/grpclog" +import "google.golang.org/grpc/grpclog/internal" // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. 
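Annotation: the default LoggerV2 implementation (severity constants, `loggerT`, the writer fan-out) now lives in the new `grpclog/internal` package, and the public `grpclog` constructors in the following hunks simply delegate to it. Application-level configuration is unchanged; a minimal sketch of the public API usage, assuming stdout/stderr as the three writers:

```go
package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Info logs go to stdout; warning and error logs also include the info
	// writer, so with this split they show up on both stdout and stderr.
	// Verbosity 2 makes logger.V(2) checks pass.
	l := grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stderr, os.Stderr, 2)

	// Not mutex-protected: call before any other gRPC function.
	grpclog.SetLoggerV2(l)
}

func main() {}
```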
-type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) -} +type Logger internal.Logger // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - grpclog.Logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Infof(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true + internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index ecfd36d7..892dc13d 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,52 +19,16 @@ package grpclog import ( - "encoding/json" - "fmt" "io" - "log" "os" "strconv" "strings" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) // LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. 
- // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} +type LoggerV2 internal.LoggerV2 // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. @@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) { if _, ok := l.(*componentData); ok { panic("cannot use component logger as grpclog logger") } - grpclog.Logger = l - grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int - jsonFormat bool + internal.LoggerV2Impl = l + internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -108,32 +46,13 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) -} - -type loggerV2Config struct { - verbose int - jsonFormat bool -} - -func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { - var m []*log.Logger - flag := log.LstdFlags - if c.jsonFormat { - flag = 0 - } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) - return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 { jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ - verbose: v, - jsonFormat: jsonFormat, - }) -} - -func (g *loggerT) output(severity int, s string) { - sevStr := severityName[severity] - if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) - return - } - // TODO: we can also include the logging component, but that needs more - // (API) changes. 
- b, _ := json.Marshal(map[string]string{ - "severity": sevStr, - "message": s, + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ + Verbosity: v, + FormatJSON: jsonFormat, }) - g.m[severity].Output(2, string(b)) -} - -func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) -} - -func (g *loggerT) V(l int) bool { - return l <= g.v } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements @@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool { // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. -type DepthLoggerV2 interface { - LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...any) -} +type DepthLoggerV2 internal.DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index aa4505a8..96693289 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. -func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go index dfe18b08..64c79195 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -46,7 +46,7 @@ type entry interface { // channelMap is the storage data structure for channelz. 
// -// Methods of channelMap can be divided in two two categories with respect to +// Methods of channelMap can be divided into two categories with respect to // locking. // // 1. Methods acquire the global lock. @@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string { return n } -func min(a, b int) int { - if a < b { - return a - } - return b -} - func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { if maxResults <= 0 { maxResults = EntriesPerPage diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 03e24e15..078bb812 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -33,7 +33,7 @@ var ( // outside this package except by tests. IDGen IDGenerator - db *channelMap = newChannelMap() + db = newChannelMap() // EntriesPerPage defines the number of channelz entries to be shown on a web page. EntriesPerPage = 50 curState int32 diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index d1ed8df6..0e6e18e1 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -35,13 +35,13 @@ type SocketOptionData struct { // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). // Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) { +func (s *SocketOptionData) Getsockopt(uintptr) { once.Do(func() { logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { +func GetSocketOption(any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index d9064871..452985f8 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -45,7 +45,11 @@ var ( // option is present for backward compatibility. This option may be overridden // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". - EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false) + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) + // XDSFallbackSupport is the env variable that controls whether support for + // xDS fallback is turned on. If this is unset or is false, only the first + // xDS server in the list of server configs will be used. + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go index 7f7044e1..7617be21 100644 --- a/vendor/google.golang.org/grpc/internal/experimental.go +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -18,11 +18,11 @@ package internal var ( - // WithRecvBufferPool is implemented by the grpc package and returns a dial + // WithBufferPool is implemented by the grpc package and returns a dial // option to configure a shared buffer pool for a grpc.ClientConn. 
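Annotation: the envconfig hunk above flips the default of `EnforceALPNEnabled` to true and adds the `GRPC_EXPERIMENTAL_XDS_FALLBACK` knob; both are read through `boolFromEnv`, whose body is not part of this hunk. A rough sketch of how such a helper typically behaves (illustrative only, not the vendored implementation):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// boolFromEnvSketch returns def unless the variable is explicitly set to the
// opposite value ("true"/"false", case-insensitive).
func boolFromEnvSketch(envVar string, def bool) bool {
	v := strings.ToLower(os.Getenv(envVar))
	if def {
		// Default true: only an explicit "false" disables the feature.
		return v != "false"
	}
	// Default false: only an explicit "true" enables the feature.
	return v == "true"
}

func main() {
	fmt.Println(boolFromEnvSketch("GRPC_ENFORCE_ALPN_ENABLED", true))
	fmt.Println(boolFromEnvSketch("GRPC_EXPERIMENTAL_XDS_FALLBACK", false))
}
```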
- WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption - // RecvBufferPool is implemented by the grpc package and returns a server + // BufferPool is implemented by the grpc package and returns a server // option to configure a shared buffer pool for a grpc.Server. - RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption + BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption ) diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go index 6717b757..43423d8a 100644 --- a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go +++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go @@ -62,9 +62,9 @@ func isRunningOnGCE(manufacturer []byte, goos string) bool { name = strings.TrimSpace(name) return name == "Google" || name == "Google Compute Engine" case "windows": - name = strings.Replace(name, " ", "", -1) - name = strings.Replace(name, "\n", "", -1) - name = strings.Replace(name, "\r", "", -1) + name = strings.ReplaceAll(name, " ", "") + name = strings.ReplaceAll(name, "\n", "") + name = strings.ReplaceAll(name, "\r", "") return name == "Google" default: return false diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go similarity index 63% rename from vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go rename to vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go index faa998de..092ad187 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go @@ -16,17 +16,21 @@ * */ +// Package grpclog provides logging functionality for internal gRPC packages, +// outside of the functionality provided by the external `grpclog` package. package grpclog import ( "fmt" + + "google.golang.org/grpc/grpclog" ) // PrefixLogger does logging with a prefix. // // Logging method on a nil logs without any prefix. type PrefixLogger struct { - logger DepthLoggerV2 + logger grpclog.DepthLoggerV2 prefix string } @@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) { pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) return } - InfoDepth(1, fmt.Sprintf(format, args...)) + grpclog.InfoDepth(1, fmt.Sprintf(format, args...)) } // Warningf does warning logging. @@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) { pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) return } - WarningDepth(1, fmt.Sprintf(format, args...)) + grpclog.WarningDepth(1, fmt.Sprintf(format, args...)) } // Errorf does error logging. @@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) { pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) return } - ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -// Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...any) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - if !Logger.V(2) { - return - } - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. 
- format = pl.prefix + format - pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) - return - } - InfoDepth(1, fmt.Sprintf(format, args...)) - + grpclog.ErrorDepth(1, fmt.Sprintf(format, args...)) } // V reports whether verbosity level l is at least the requested verbose level. func (pl *PrefixLogger) V(l int) bool { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - return Logger.V(l) + if pl != nil { + return pl.logger.V(l) + } + return true } // NewPrefixLogger creates a prefix logger with the given prefix. -func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { +func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger { return &PrefixLogger{logger: logger, prefix: prefix} } diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index f7f40a16..19b9d639 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { return cs } -// Schedule adds a callback to be scheduled after existing callbacks are run. +// TrySchedule tries to schedules the provided callback function f to be +// executed in the order it was added. This is a best-effort operation. If the +// context passed to NewCallbackSerializer was canceled before this method is +// called, the callback will not be scheduled. // // Callbacks are expected to honor the context when performing any blocking // operations, and should return early when the context is canceled. +func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) { + cs.callbacks.Put(f) +} + +// ScheduleOr schedules the provided callback function f to be executed in the +// order it was added. If the context passed to NewCallbackSerializer has been +// canceled before this method is called, the onFailure callback will be +// executed inline instead. // -// Return value indicates if the callback was successfully added to the list of -// callbacks to be executed by the serializer. It is not possible to add -// callbacks once the context passed to NewCallbackSerializer is cancelled. -func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - return cs.callbacks.Put(f) == nil +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. 
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) { + if cs.callbacks.Put(f) != nil { + onFailure() + } } func (cs *CallbackSerializer) run(ctx context.Context) { diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go index aef8cec1..6d8c2f51 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { if ps.msg != nil { msg := ps.msg - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[sub] { @@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) { ps.msg = msg for sub := range ps.subscribers { s := sub - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[s] { diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 5d665398..7aae9240 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -183,7 +183,7 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. - GRPCResolverSchemeExtraMetadata string = "xds" + GRPCResolverSchemeExtraMetadata = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. EnterIdleModeForTesting any // func(*grpc.ClientConn) @@ -203,11 +203,31 @@ var ( // UserSetDefaultScheme is set to true if the user has overridden the // default resolver scheme. - UserSetDefaultScheme bool = false + UserSetDefaultScheme = false // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n // is the number of elements. swap swaps the elements with indexes i and j. ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + + // ConnectedAddress returns the connected address for a SubConnState. The + // address is only valid if the state is READY. + ConnectedAddress any // func (scs SubConnState) resolver.Address + + // SetConnectedAddress sets the connected address for a SubConnState. + SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address) + + // SnapshotMetricRegistryForTesting snapshots the global data of the metric + // registry. Returns a cleanup function that sets the metric registry to its + // original state. Only called in testing functions. + SnapshotMetricRegistryForTesting func() func() + + // SetDefaultBufferPoolForTesting updates the default buffer pool, for + // testing purposes. + SetDefaultBufferPoolForTesting any // func(mem.BufferPool) + + // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for + // testing purposes. 
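Annotation: the CallbackSerializer change above splits the old boolean-returning Schedule into TrySchedule (best effort) and ScheduleOr (explicit failure path), and PubSub switches to TrySchedule accordingly. A small usage sketch against this API; the package name and callback bodies are illustrative, and internal/grpcsync is only importable from within grpc-go itself:

```go
package transportutil // illustrative; internal/grpcsync is not importable outside grpc-go

import (
	"context"
	"log"

	"google.golang.org/grpc/internal/grpcsync"
)

func run(ctx context.Context) {
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Best effort: if ctx was already canceled, the callback is silently dropped.
	cs.TrySchedule(func(ctx context.Context) {
		log.Println("runs in FIFO order on the serializer goroutine")
	})

	// Explicit failure path: onFailure runs inline when scheduling is no longer possible.
	cs.ScheduleOr(func(ctx context.Context) {
		log.Println("scheduled work")
	}, func() {
		log.Println("serializer closed; falling back inline")
	})
}
```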
+ SetBufferPoolingThresholdForTesting any // func(int) ) // HealthChecker defines the signature of the client-side LB channel health diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index afac5657..b901c7ba 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -55,7 +55,7 @@ func (r *passthroughResolver) start() { r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} +func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {} func (*passthroughResolver) Close() {} diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go new file mode 100644 index 00000000..fd33af51 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/labels.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats provides internal stats related functionality. +package stats + +import "context" + +// Labels are the labels for metrics. +type Labels struct { + // TelemetryLabels are the telemetry labels to record. + TelemetryLabels map[string]string +} + +type labelsKey struct{} + +// GetLabels returns the Labels stored in the context, or nil if there is one. +func GetLabels(ctx context.Context) *Labels { + labels, _ := ctx.Value(labelsKey{}).(*Labels) + return labels +} + +// SetLabels sets the Labels in the context. +func SetLabels(ctx context.Context, labels *Labels) context.Context { + // could also append + return context.WithValue(ctx, labelsKey{}, labels) +} diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go new file mode 100644 index 00000000..be110d41 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -0,0 +1,95 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package stats + +import ( + "fmt" + + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/stats" +) + +// MetricsRecorderList forwards Record calls to all of its metricsRecorders. 
+// +// It eats any record calls where the label values provided do not match the +// number of label keys. +type MetricsRecorderList struct { + // metricsRecorders are the metrics recorders this list will forward to. + metricsRecorders []estats.MetricsRecorder +} + +// NewMetricsRecorderList creates a new metric recorder list with all the stats +// handlers provided which implement the MetricsRecorder interface. +// If no stats handlers provided implement the MetricsRecorder interface, +// the MetricsRecorder list returned is a no-op. +func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList { + var mrs []estats.MetricsRecorder + for _, sh := range shs { + if mr, ok := sh.(estats.MetricsRecorder); ok { + mrs = append(mrs, mr) + } + } + return &MetricsRecorderList{ + metricsRecorders: mrs, + } +} + +func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { + if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want { + panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want)) + } +} + +func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Count(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Count(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Gauge(handle, incr, labels...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index c7dbc820..75792538 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -138,11 +138,11 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // s.Code() != OK implies that s.Proto() != nil. 
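Annotation: the new internal/stats package above carries per-RPC telemetry labels through a context and fans metric records out to every stats handler that also implements estats.MetricsRecorder. A minimal sketch of the Labels helpers; the label key/value and function names are illustrative, and the package is only importable from within grpc-go:

```go
package statsutil // illustrative name

import (
	"context"

	istats "google.golang.org/grpc/internal/stats"
)

// attachLabels stores telemetry labels that later metric recordings can read back.
func attachLabels(ctx context.Context) context.Context {
	return istats.SetLabels(ctx, &istats.Labels{
		TelemetryLabels: map[string]string{"grpc.lb.locality": "us-east1-b"}, // illustrative key/value
	})
}

// readLabels returns the stored labels, or nil when none were attached.
func readLabels(ctx context.Context) map[string]string {
	if l := istats.GetLabels(ctx); l != nil {
		return l.TelemetryLabels
	}
	return nil
}
```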
p := s.Proto() for _, detail := range details { - any, err := anypb.New(protoadapt.MessageV2Of(detail)) + m, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } - p.Details = append(p.Details, any) + p.Details = append(p.Details, m) } return &Status{s: p}, nil } diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index 999f52cd..54c24c2f 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -58,20 +58,20 @@ func GetRusage() *Rusage { // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. It a no-op function for non-linux environments. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { +func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) { log() return 0, 0 } // SetTCPUserTimeout is a no-op function under non-linux environments. -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { +func SetTCPUserTimeout(net.Conn, time.Duration) error { log() return nil } // GetTCPUserTimeout is a no-op function under non-linux environments. // A negative return value indicates the operation is not supported -func GetTCPUserTimeout(conn net.Conn) (int, error) { +func GetTCPUserTimeout(net.Conn) (int, error) { log() return -1, nil } diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go index 078137b7..7e7aaa54 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go index fd7d43a8..d5c1085e 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. 
Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 3deadfb4..ef72fbb3 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -32,6 +32,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -148,9 +149,9 @@ type dataFrame struct { streamID uint32 endStream bool h []byte - d []byte + reader mem.Reader // onEachWrite is called every time - // a part of d is written out. + // a part of data is written out. onEachWrite func() } @@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream { } // controlBuffer is a way to pass information to loopy. -// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +// +// Information is passed as specific struct types called control frames. A +// control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. It +// shouldn't be confused with an HTTP2 frame, although some of the control +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { - ch chan struct{} - done <-chan struct{} + wakeupCh chan struct{} // Unblocks readers waiting for something to read. + done <-chan struct{} // Closed when the transport is done. + + // Mutex guards all the fields below, except trfChan which can be read + // atomically without holding mu. mu sync.Mutex - consumerWaiting bool - list *itemList - err error + consumerWaiting bool // True when readers are blocked waiting for new data. + closed bool // True when the controlbuf is finished. + list *itemList // List of queued control frames. // transportResponseFrames counts the number of queued items that represent // the response of an action initiated by the peer. trfChan is created @@ -308,47 +313,59 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // chan struct{} + trfChan atomic.Pointer[chan struct{}] } func newControlBuffer(done <-chan struct{}) *controlBuffer { return &controlBuffer{ - ch: make(chan struct{}, 1), - list: &itemList{}, - done: done, + wakeupCh: make(chan struct{}, 1), + list: &itemList{}, + done: done, } } -// throttle blocks if there are too many incomingSettings/cleanupStreams in the -// controlbuf. +// throttle blocks if there are too many frames in the control buf that +// represent the response of an action initiated by the peer, like +// incomingSettings cleanupStreams etc. 
func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(chan struct{}) - if ch != nil { + if ch := c.trfChan.Load(); ch != nil { select { - case <-ch: + case <-(*ch): case <-c.done: } } } +// put adds an item to the controlbuf. func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } +// executeAndPut runs f, and if the return value is true, adds the given item to +// the controlbuf. The item could be nil, in which case, this method simply +// executes f and does not add the item to the controlbuf. +// +// The first return value indicates whether the item was successfully added to +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned +// if the control buffer is already closed. func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { - var wakeUp bool c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err + defer c.mu.Unlock() + + if c.closed { + return false, ErrConnClosing } if f != nil { if !f() { // f wasn't successful - c.mu.Unlock() return false, nil } } + if it == nil { + return true, nil + } + + var wakeUp bool if c.consumerWaiting { wakeUp = true c.consumerWaiting = false @@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - c.trfChan.Store(make(chan struct{})) + ch := make(chan struct{}) + c.trfChan.Store(&ch) } } - c.mu.Unlock() if wakeUp { select { - case c.ch <- struct{}{}: + case c.wakeupCh <- struct{}{}: default: } } return true, nil } -// Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - c.mu.Unlock() - return true, nil -} - +// get returns the next control frame from the control buffer. If block is true +// **and** there are no control frames in the control buffer, the call blocks +// until one of the conditions is met: there is a frame to return or the +// transport is closed. func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() - if c.err != nil { + frame, err := c.getOnceLocked() + if frame != nil || err != nil || !block { + // If we read a frame or an error, we can return to the caller. The + // call to getOnceLocked() returns a nil frame and a nil error if + // there is nothing to read, and in that case, if the caller asked + // us not to block, we can return now as well. c.mu.Unlock() - return nil, c.err - } - if !c.list.isEmpty() { - h := c.list.dequeue().(cbItem) - if h.isTransportResponseFrame() { - if c.transportResponseFrames == maxQueuedTransportResponseFrames { - // We are removing the frame that put us over the - // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(chan struct{}) - close(ch) - c.trfChan.Store((chan struct{})(nil)) - } - c.transportResponseFrames-- - } - c.mu.Unlock() - return h, nil - } - if !block { - c.mu.Unlock() - return nil, nil + return frame, err } c.consumerWaiting = true c.mu.Unlock() + + // Release the lock above and wait to be woken up. select { - case <-c.ch: + case <-c.wakeupCh: case <-c.done: return nil, errors.New("transport closed by client") } } } +// Callers must not use this method, but should instead use get(). +// +// Caller must hold c.mu. 
+func (c *controlBuffer) getOnceLocked() (any, error) { + if c.closed { + return false, ErrConnClosing + } + if c.list.isEmpty() { + return nil, nil + } + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Swap(nil) + close(*ch) + } + c.transportResponseFrames-- + } + return h, nil +} + +// finish closes the control buffer, cleaning up any streams that have queued +// header frames. Once this method returns, no more frames can be added to the +// control buffer, and attempts to do so will return ErrConnClosing. func (c *controlBuffer) finish() { c.mu.Lock() - if c.err != nil { - c.mu.Unlock() + defer c.mu.Unlock() + + if c.closed { return } - c.err = ErrConnClosing + c.closed = true // There may be headers for streams in the control buffer. // These streams need to be cleaned out since the transport // is still not aware of these yet. for head := c.list.dequeueAll(); head != nil; head = head.next { - hdr, ok := head.it.(*headerFrame) - if !ok { - continue - } - if hdr.onOrphaned != nil { // It will be nil on the server-side. - hdr.onOrphaned(ErrConnClosing) + switch v := head.it.(type) { + case *headerFrame: + if v.onOrphaned != nil { // It will be nil on the server-side. + v.onOrphaned(ErrConnClosing) + } + case *dataFrame: + _ = v.reader.Close() } } + // In case throttle() is currently in flight, it needs to be unblocked. // Otherwise, the transport may not close, since the transport is closed by // the reader encountering the connection error. - ch, _ := c.trfChan.Load().(chan struct{}) + ch := c.trfChan.Swap(nil) if ch != nil { - close(ch) + close(*ch) } - c.trfChan.Store((chan struct{})(nil)) - c.mu.Unlock() } type side int @@ -466,7 +487,7 @@ const ( // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While +// thereby closely resembling a round-robin scheduling over all streams. While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control. type loopyWriter struct { @@ -490,12 +511,13 @@ type loopyWriter struct { draining bool conn net.Conn logger *grpclog.PrefixLogger + bufferPool mem.BufferPool // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato conn: conn, logger: logger, ssGoAwayHandler: goAwayHandler, + bufferPool: bufferPool, } return l } @@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // not be established yet. 
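Annotation: controlBuffer's throttling channel moves from atomic.Value to atomic.Pointer[chan struct{}], so finish and getOnceLocked can atomically take ownership of the channel (Swap(nil)) before closing it exactly once, while throttle reads it without holding the mutex. A self-contained sketch of that swap-and-close pattern, with illustrative names rather than the vendored code:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type throttler struct {
	ch atomic.Pointer[chan struct{}]
}

// engage installs a channel that waiters will block on.
func (t *throttler) engage() {
	ch := make(chan struct{})
	t.ch.Store(&ch)
}

// release atomically takes the channel (if any) and closes it exactly once.
func (t *throttler) release() {
	if ch := t.ch.Swap(nil); ch != nil {
		close(*ch)
	}
}

// wait blocks while a throttling channel is installed.
func (t *throttler) wait() {
	if ch := t.ch.Load(); ch != nil {
		<-(*ch)
	}
}

func main() {
	var t throttler
	t.engage()
	go t.release()
	t.wait()
	fmt.Println("released")
}
```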
delete(l.estdStreams, c.streamID) str.deleteSelf() + for head := str.itl.dequeueAll(); head != nil; head = head.next { + if df, ok := head.it.(*dataFrame); ok { + _ = df.reader.Close() + } + } } if c.rst { // If RST_STREAM needs to be sent. if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { @@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. - // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possible HTTP2 frame size. + // Every dataFrame has two buffers; h that keeps grpc-message header and data + // that is the actual message. As an optimization to keep wire traffic low, data + // from data is copied to h to make as big as the maximum possible HTTP2 frame + // size. - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream + _ = dataItem.reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - var ( - buf []byte - ) + // Figure out the maximum size we can send maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. @@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, len(dataItem.d)) - if hSize != 0 { - if dSize == 0 { - buf = dataItem.h - } else { - // We can add some data to grpc message header to distribute bytes more equally across frames. - // Copy on the stack to avoid generating garbage - var localBuf [http2MaxFrameLen]byte - copy(localBuf[:hSize], dataItem.h) - copy(localBuf[hSize:], dataItem.d[:dSize]) - buf = localBuf[:hSize+dSize] - } + dSize := min(maxSize-hSize, dataItem.reader.Remaining()) + remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + size := hSize + dSize + + var buf *[]byte + + if hSize != 0 && dSize == 0 { + buf = &dataItem.h } else { - buf = dataItem.d - } + // Note: this is only necessary because the http2.Framer does not support + // partially writing a frame, so the sequence must be materialized into a buffer. + // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. + pool := l.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. 
+ pool = mem.DefaultBufferPool() + } + buf = pool.Get(size) + defer pool.Put(buf) - size := hSize + dSize + copy((*buf)[:hSize], dataItem.h) + _, _ = dataItem.reader.Read((*buf)[hSize:]) + } // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + if dataItem.endStream && remainingBytes == 0 { endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { return false, err } str.bytesOutStanding += size l.sendQuota -= uint32(size) dataItem.h = dataItem.h[hSize:] - dataItem.d = dataItem.d[dSize:] - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + if remainingBytes == 0 { // All the data from that message was written out. + _ = dataItem.reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { @@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 4a3ddce2..ce878693 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -24,7 +24,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -40,6 +39,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. 
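Annotation: processData above now materializes the header-plus-payload frame into a buffer obtained from a mem.BufferPool instead of a fixed stack array. A sketch of the Get/Put discipline the new code relies on, using the pool calls as they appear in the hunk; the surrounding function and its write callback are illustrative:

```go
package framing // illustrative name

import "google.golang.org/grpc/mem"

// buildFrame copies the gRPC message header and a chunk of payload into one
// pooled buffer so they can be written as a single HTTP/2 DATA frame.
func buildFrame(h, d []byte, write func([]byte) error) error {
	pool := mem.DefaultBufferPool()
	buf := pool.Get(len(h) + len(d)) // *[]byte with at least the requested length
	defer pool.Put(buf)              // return it to the pool once the write completes

	copy((*buf)[:len(h)], h)
	copy((*buf)[len(h):], d)
	return write((*buf)[:len(h)+len(d)])
}
```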
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentType: contentType, contentSubtype: contentSubtype, stats: stats, + bufferPool: bufferPool, } st.logger = prefixLoggerForServerHandlerTransport(st) @@ -171,6 +172,8 @@ type serverHandlerTransport struct { stats []stats.Handler logger *grpclog.PrefixLogger + + bufferPool mem.BufferPool } func (ht *serverHandlerTransport) Close(err error) { @@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } s.hdrMu.Lock() + defer s.hdrMu.Unlock() if p := st.Proto(); p != nil && len(p.Details) > 0 { delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) @@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } - s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -330,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + // Always take a reference because otherwise there is no guarantee the data will + // be available after this function returns. This is what callers to Write + // expect. + data.Ref() headersWritten := s.updateHeaderSent() - return ht.do(func() { + err := ht.do(func() { + defer data.Free() if !headersWritten { ht.writePendingHeaders(s) } ht.rw.Write(hdr) - ht.rw.Write(data) + for _, b := range data { + _, _ = ht.rw.Write(b.ReadOnlyData()) + } ht.rw.(http.Flusher).Flush() }) + if err != nil { + data.Free() + return err + } + return nil } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { @@ -406,7 +421,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, windowHandler: func(int) {}, } @@ -415,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream go func() { defer close(readerDone) - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) + for { + buf := ht.bufferPool.Get(http2MaxFrameLen) + n, err := req.Body.Read(*buf) if n > 0 { - s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) - buf = buf[n:] + *buf = (*buf)[:n] + s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) + } else { + ht.bufferPool.Put(buf) } if err != nil { s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } - if len(buf) == 0 { - buf = make([]byte, readSize) - } } }() @@ -462,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain(debugData string) { +func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 3c63c706..c769deab 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -47,6 +47,7 @@ import ( isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -59,6 +60,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var goAwayLoopyWriterTimeout = 5 * time.Second + var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) // http2Client implements the ClientTransport interface with HTTP2. @@ -144,7 +147,7 @@ type http2Client struct { onClose func(GoAwayReason) - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 logger *grpclog.PrefixLogger @@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }(conn) - // The following defer and goroutine monitor the connectCtx for cancelation + // The following defer and goroutine monitor the connectCtx for cancellation // and deadline. On context expiration, the connection is hard closed and // this function will naturally fail as a result. 
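Annotation: the handler-based server transport above stops allocating fresh fixed-size read buffers for the request body and instead reads straight into pooled buffers, wrapping each filled buffer in a mem.Buffer that returns itself to the pool when freed. A sketch of that read-loop shape; the function, buffer size, and consume callback are illustrative:

```go
package bodyreader // illustrative name

import (
	"io"

	"google.golang.org/grpc/mem"
)

// readAll pulls body into pool-backed buffers and hands each one to consume,
// which takes ownership of the mem.Buffer (and must eventually Free it).
func readAll(body io.Reader, pool mem.BufferPool, consume func(mem.Buffer)) error {
	for {
		buf := pool.Get(16 * 1024) // size is illustrative
		n, err := body.Read(*buf)
		if n > 0 {
			*buf = (*buf)[:n]
			consume(mem.NewBuffer(buf, pool))
		} else {
			pool.Put(buf)
		}
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
	}
}
```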
Otherwise, the defer // waits for the goroutine to exit to prevent the context from being @@ -346,7 +349,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), keepaliveEnabled: keepaliveEnabled, - bufferPool: newBufferPool(), + bufferPool: opts.BufferPool, onClose: onClose, } var czSecurity credentials.ChannelzSecurityValue @@ -463,7 +466,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -504,7 +507,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { closeStream: func(err error) { t.CloseStream(s, err) }, - freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -770,7 +772,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) error { + initStream: func(uint32) error { t.mu.Lock() // TODO: handle transport closure in loopy instead and remove this // initStream is never called when transport is draining. @@ -983,6 +985,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // only once on a transport. Once it is called, the transport should not be // accessed anymore. func (t *http2Client) Close(err error) { + t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1006,10 +1009,20 @@ func (t *http2Client) Close(err error) { t.kpDormancyCond.Signal() } t.mu.Unlock() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the - // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It + // also waits for loopyWriter to be closed with a timer to avoid the + // long blocking in case the connection is blackholed, i.e. TCP is + // just stuck. t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) - <-t.writerDone + timer := time.NewTimer(goAwayLoopyWriterTimeout) + defer timer.Stop() + select { + case <-t.writerDone: // success + case <-timer.C: + t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) + } t.cancel() t.conn.Close() channelz.RemoveEntry(t.channelz.ID) @@ -1065,27 +1078,36 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + reader := data.Reader() + if opts.Last { // If it's the last message, update stream state. 
if !s.compareAndSwapState(streamActive, streamWriteDone) { + _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { + _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - d: data, + reader: reader, } - if hdr != nil || data != nil { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return err } } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } func (t *http2Client) getStream(f http2.Frame) *Stream { @@ -1190,10 +1212,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } // The server has closed the stream without sending trailers. Record that @@ -1222,7 +1247,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause - // of this cancelation. Alter the status code accordingly. + // of this cancellation. Alter the status code accordingly. statusCode = codes.DeadlineExceeded } } @@ -1307,7 +1332,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see @@ -1642,11 +1667,10 @@ func (t *http2Client) reader(errCh chan<- error) { t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue - } else { - // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) - return } + // Transport error. + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: @@ -1671,13 +1695,6 @@ func (t *http2Client) reader(errCh chan<- error) { } } -func minTime(a, b time.Duration) time.Duration { - if a < b { - return a - } - return b -} - // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} @@ -1745,7 +1762,7 @@ func (t *http2Client) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). 
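// A minimal, illustrative sketch (not part of the vendored patch): the change
// just below swaps the package-local minTime helper for Go 1.21's built-in
// generic min, which works directly on time.Duration values.
package main

import (
	"fmt"
	"time"
)

func main() {
	kpTime := 30 * time.Second
	timeoutLeft := 12 * time.Second
	// Equivalent to the removed minTime(kpTime, timeoutLeft) helper.
	sleepDuration := min(kpTime, timeoutLeft)
	fmt.Println(sleepDuration) // prints 12s
}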
- sleepDuration := minTime(t.kp.Time, timeoutLeft) + sleepDuration := min(t.kp.Time, timeoutLeft) timeoutLeft -= sleepDuration timer.Reset(sleepDuration) case <-t.ctx.Done(): diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index b7091165..584b50fe 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -39,6 +39,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" @@ -119,7 +120,7 @@ type http2Server struct { // Fields below are for channelz metric collection. channelz *channelz.Socket - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 @@ -261,7 +262,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - bufferPool: newBufferPool(), + bufferPool: config.BufferPool, } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { @@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -613,10 +614,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, - freeBuffer: t.bufferPool.put, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -813,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } if f.StreamEnded() { @@ -1089,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { onWrite: t.setResetPingStrikes, } - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + success, err := t.controlBuf.executeAndPut(func() bool { + return t.checkForHeaderListSize(trailingHeader) + }, nil) if !success { if err != nil { return err @@ -1112,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). 
-func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + reader := data.Reader() + if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { + _ = reader.Close() return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { + _ = reader.Close() return t.streamContextErr(s) } } + df := &dataFrame{ streamID: s.id, h: hdr, - d: data, + reader: reader, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return t.streamContextErr(s) } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } // keepalive running in a separate goroutine does the following: @@ -1223,7 +1238,7 @@ func (t *http2Server) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + sleepDuration := min(t.kp.Time, kpTimeoutLeft) kpTimeoutLeft -= sleepDuration kpTimer.Reset(sleepDuration) case <-t.done: diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 39cef3bd..3613d7b6 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { return w } -func (w *bufWriter) Write(b []byte) (n int, err error) { +func (w *bufWriter) Write(b []byte) (int, error) { if w.err != nil { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - n, err = w.conn.Write(b) + n, err := w.conn.Write(b) return n, toIOError(err) } if w.buf == nil { b := w.pool.Get().(*[]byte) w.buf = *b } + written := 0 for len(b) > 0 { - nn := copy(w.buf[w.offset:], b) - b = b[nn:] - w.offset += nn - n += nn - if w.offset >= w.batchSize { - err = w.flushKeepBuffer() + copied := copy(w.buf[w.offset:], b) + b = b[copied:] + written += copied + w.offset += copied + if w.offset < w.batchSize { + continue + } + if err := w.flushKeepBuffer(); err != nil { + return written, err } } - return n, err + return written, nil } func (w *bufWriter) Flush() error { @@ -389,7 +393,7 @@ type framer struct { fr *http2.Framer } -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 24fa1032..54b22443 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri } return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) } - - return &bufConn{Conn: conn, r: r}, nil + // The buffer could contain extra bytes from the target server, so we can't + // discard it. 
However, in many cases where the server waits for the client + // to send the first message (e.g. when TLS is being used), the buffer will + // be empty, so we can avoid the overhead of reading through this buffer. + if r.Buffered() != 0 { + return &bufConn{Conn: conn, r: r}, nil + } + return conn, nil } // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 4b39c0ad..924ba4f3 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -22,7 +22,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -47,32 +47,10 @@ import ( const logLevel = 2 -type bufferPool struct { - pool sync.Pool -} - -func newBufferPool() *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() any { - return new(bytes.Buffer) - }, - }, - } -} - -func (p *bufferPool) get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -func (p *bufferPool) put(b *bytes.Buffer) { - p.pool.Put(b) -} - // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { - buffer *bytes.Buffer + buffer mem.Buffer // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. @@ -102,6 +80,9 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() if b.err != nil { + // drop the buffer on the floor. Since b.err is not nil, any subsequent reads + // will always return an error, making this buffer inaccessible. + r.buffer.Free() b.mu.Unlock() // An error had occurred earlier, don't accept more // data or errors. @@ -148,45 +129,97 @@ type recvBufferReader struct { ctx context.Context ctxDone <-chan struct{} // cache of ctx.Done() (for performance). recv *recvBuffer - last *bytes.Buffer // Stores the remaining data in the previous calls. + last mem.Buffer // Stores the remaining data in the previous calls. err error - freeBuffer func(*bytes.Buffer) } -// Read reads the next len(p) bytes from last. If last is drained, it tries to -// read additional data from recv. It blocks if there no additional data available -// in recv. If Read returns any non-nil error, it will continue to return that error. -func (r *recvBufferReader) Read(p []byte) (n int, err error) { +func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } if r.last != nil { - // Read remaining data left in last call. - copied, _ := r.last.Read(p) - if r.last.Len() == 0 { - r.freeBuffer(r.last) + n, r.last = mem.ReadUnsafe(header, r.last) + return n, nil + } + if r.closeStream != nil { + n, r.err = r.readHeaderClient(header) + } else { + n, r.err = r.readHeader(header) + } + return n, r.err +} + +// Read reads the next n bytes from last. If last is drained, it tries to read +// additional data from recv. It blocks if there no additional data available in +// recv. If Read returns any non-nil error, it will continue to return that +// error. 
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { + if r.err != nil { + return nil, r.err + } + if r.last != nil { + buf = r.last + if r.last.Len() > n { + buf, r.last = mem.SplitUnsafe(buf, n) + } else { r.last = nil } - return copied, nil + return buf, nil } if r.closeStream != nil { - n, r.err = r.readClient(p) + buf, r.err = r.readClient(n) } else { - n, r.err = r.read(p) + buf, r.err = r.read(n) } - return n, r.err + return buf, r.err } -func (r *recvBufferReader) read(p []byte) (n int, err error) { +func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readHeaderAdditional(m, header) + } +} + +func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { + select { + case <-r.ctxDone: + return nil, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, n) + } +} + +func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. + r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readHeaderAdditional(m, header) + case m := <-r.recv.get(): + return r.readHeaderAdditional(m, header) } } -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { +func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -207,25 +240,40 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // faster. 
r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readAdditional(m, p) + return r.readAdditional(m, n) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readAdditional(m, n) } } -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { +func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } return 0, m.err } - copied, _ := m.buffer.Read(p) - if m.buffer.Len() == 0 { - r.freeBuffer(m.buffer) - r.last = nil - } else { - r.last = m.buffer + + n, r.last = mem.ReadUnsafe(header, m.buffer) + + return n, nil +} + +func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) { + r.recv.load() + if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } + return nil, m.err + } + + if m.buffer.Len() > n { + m.buffer, r.last = mem.SplitUnsafe(m.buffer, n) } - return copied, nil + + return m.buffer, nil } type streamState uint32 @@ -241,7 +289,7 @@ const ( type Stream struct { id uint32 st ServerTransport // nil for client side Stream - ct *http2Client // nil for server side Stream + ct ClientTransport // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. @@ -251,7 +299,7 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - trReader io.Reader + trReader *transportReader fc *inFlow wq *writeQuota @@ -408,7 +456,7 @@ func (s *Stream) TrailersOnly() bool { return s.noHeaders } -// Trailer returns the cached trailer metedata. Note that if it is not called +// Trailer returns the cached trailer metadata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. // It can be safely read only after stream has ended that is either read @@ -499,36 +547,87 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// Read reads all p bytes from the wire for this stream. -func (s *Stream) Read(p []byte) (n int, err error) { +func (s *Stream) ReadHeader(header []byte) (err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.er; er != nil { + return er + } + s.requestRead(len(header)) + for len(header) != 0 { + n, err := s.trReader.ReadHeader(header) + header = header[n:] + if len(header) == 0 { + err = nil + } + if err != nil { + if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + return nil +} + +// Read reads n bytes from the wire for this stream. 
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier - if er := s.trReader.(*transportReader).er; er != nil { - return 0, er + if er := s.trReader.er; er != nil { + return nil, er } - s.requestRead(len(p)) - return io.ReadFull(s.trReader, p) + s.requestRead(n) + for n != 0 { + buf, err := s.trReader.Read(n) + var bufLen int + if buf != nil { + bufLen = buf.Len() + } + n -= bufLen + if n == 0 { + err = nil + } + if err != nil { + if bufLen > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + data.Free() + return nil, err + } + data = append(data, buf) + } + return data, nil } -// tranportReader reads all the data available for this Stream from the transport and +// transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader io.Reader + reader *recvBufferReader // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) er error } -func (t *transportReader) Read(p []byte) (n int, err error) { - n, err = t.reader.Read(p) +func (t *transportReader) ReadHeader(header []byte) (int, error) { + n, err := t.reader.ReadHeader(header) if err != nil { t.er = err - return + return 0, err } t.windowHandler(n) - return + return n, nil +} + +func (t *transportReader) Read(n int) (mem.Buffer, error) { + buf, err := t.reader.Read(n) + if err != nil { + t.er = err + return buf, err + } + t.windowHandler(buf.Len()) + return buf, nil } // BytesReceived indicates whether any bytes have been received on this stream. @@ -574,6 +673,7 @@ type ServerConfig struct { ChannelzParent *channelz.Server MaxHeaderListSize *uint32 HeaderTableSize *uint32 + BufferPool mem.BufferPool } // ConnectOptions covers all relevant options for communicating with the server. @@ -612,6 +712,8 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. UseProxy bool + // The mem.BufferPool to use when reading/writing to the wire. + BufferPool mem.BufferPool } // NewClientTransport establishes the transport with the required ConnectOptions @@ -673,7 +775,7 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -725,7 +827,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -798,7 +900,7 @@ var ( // connection is draining. This could be caused by goaway or balancer // removing the address. 
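// A minimal, illustrative sketch (not part of the vendored patch): Stream now
// exposes a ReadHeader/Read pair instead of acting as an io.Reader. The
// frameReader type below is hypothetical; it mimics that pair over an
// in-memory, gRPC-style frame (1-byte compression flag, 4-byte big-endian
// length, payload) and hands the payload back as a BufferSlice from the mem
// package introduced further down in this diff.
package main

import (
	"encoding/binary"
	"fmt"
	"io"

	"google.golang.org/grpc/mem"
)

type frameReader struct {
	buf []byte
}

// ReadHeader fills the fixed-size header, mirroring Stream.ReadHeader.
func (f *frameReader) ReadHeader(header []byte) error {
	if len(f.buf) < len(header) {
		return io.ErrUnexpectedEOF
	}
	copy(header, f.buf[:len(header)])
	f.buf = f.buf[len(header):]
	return nil
}

// Read returns the next n bytes as a BufferSlice, mirroring Stream.Read.
func (f *frameReader) Read(n int) (mem.BufferSlice, error) {
	if len(f.buf) < n {
		return nil, io.ErrUnexpectedEOF
	}
	out := mem.BufferSlice{mem.SliceBuffer(f.buf[:n])}
	f.buf = f.buf[n:]
	return out, nil
}

func main() {
	// Build one gRPC-style frame: 1-byte compression flag, 4-byte big-endian
	// length, then the payload itself.
	payload := []byte("hello")
	frame := make([]byte, 5+len(payload))
	frame[0] = 0 // uncompressed
	binary.BigEndian.PutUint32(frame[1:5], uint32(len(payload)))
	copy(frame[5:], payload)

	r := &frameReader{buf: frame}
	var hdr [5]byte
	if err := r.ReadHeader(hdr[:]); err != nil {
		panic(err)
	}
	length := binary.BigEndian.Uint32(hdr[1:])
	data, err := r.Read(int(length))
	if err != nil {
		panic(err)
	}
	fmt.Printf("compressed=%v payload=%q\n", hdr[0] == 1, data.Materialize())
	data.Free()
}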
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indiacte application + // errStreamDone is returned from write at the client side to indicate application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e..eb42b19f 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -34,15 +34,29 @@ type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. // If set below 10s, a minimum value of 10s will be used instead. - Time time.Duration // The current default value is infinity. + // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strong requirement for Time to be less than + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. - Timeout time.Duration // The current default value is 20 seconds. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration // If true, client sends keepalive pings even with no active RPCs. If false, // when there are no active RPCs, Time and Timeout will be ignored and no // keepalive pings will be sent. - PermitWithoutStream bool // false by default. + PermitWithoutStream bool } // ServerParameters is used to set keepalive and max-age parameters on the diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go new file mode 100644 index 00000000..c37c58c0 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "sort" + "sync" + + "google.golang.org/grpc/internal" +) + +// BufferPool is a pool of buffers that can be shared and reused, resulting in +// decreased memory allocation. +type BufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool. 
+ Put(*[]byte) +} + +var defaultBufferPoolSizes = []int{ + 256, + 4 << 10, // 4KB (go page size) + 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) + 32 << 10, // 32KB (default buffer size for io.Copy) + 1 << 20, // 1MB +} + +var defaultBufferPool BufferPool + +func init() { + defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) + + internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) { + defaultBufferPool = pool + } + + internal.SetBufferPoolingThresholdForTesting = func(threshold int) { + bufferPoolingThreshold = threshold + } +} + +// DefaultBufferPool returns the current default buffer pool. It is a BufferPool +// created with NewBufferPool that uses a set of default sizes optimized for +// expected workflows. +func DefaultBufferPool() BufferPool { + return defaultBufferPool +} + +// NewTieredBufferPool returns a BufferPool implementation that uses multiple +// underlying pools of the given pool sizes. +func NewTieredBufferPool(poolSizes ...int) BufferPool { + sort.Ints(poolSizes) + pools := make([]*sizedBufferPool, len(poolSizes)) + for i, s := range poolSizes { + pools[i] = newSizedBufferPool(s) + } + return &tieredBufferPool{ + sizedPools: pools, + } +} + +// tieredBufferPool implements the BufferPool interface with multiple tiers of +// buffer pools for different sizes of buffers. +type tieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +func (p *tieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +func (p *tieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *tieredBufferPool) getPool(size int) BufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size +// of 16kb and a sizedBufferPool can be configured to only return buffers with a +// capacity of 16kb. Note that however it does not support returning larger +// buffers and in fact panics if such a buffer is requested. Because of this, +// this BufferPool implementation is not meant to be used on its own and rather +// is intended to be embedded in a tieredBufferPool such that Get is only +// invoked when the required size is smaller than or equal to defaultSize. +type sizedBufferPool struct { + pool sync.Pool + defaultSize int +} + +func (p *sizedBufferPool) Get(size int) *[]byte { + buf := p.pool.Get().(*[]byte) + b := *buf + clear(b[:cap(b)]) + *buf = b[:size] + return buf +} + +func (p *sizedBufferPool) Put(buf *[]byte) { + if cap(*buf) < p.defaultSize { + // Ignore buffers that are too small to fit in the pool. Otherwise, when + // Get is called it will panic as it tries to index outside the bounds + // of the buffer. + return + } + p.pool.Put(buf) +} + +func newSizedBufferPool(size int) *sizedBufferPool { + return &sizedBufferPool{ + pool: sync.Pool{ + New: func() any { + buf := make([]byte, size) + return &buf + }, + }, + defaultSize: size, + } +} + +var _ BufferPool = (*simpleBufferPool)(nil) + +// simpleBufferPool is an implementation of the BufferPool interface that +// attempts to pool buffers with a sync.Pool. 
When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. + if ok { + p.pool.Put(bs) + } + + b := make([]byte, size) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} + +var _ BufferPool = NopBufferPool{} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. +func (NopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go new file mode 100644 index 00000000..228e9c2f --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -0,0 +1,226 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "io" +) + +// BufferSlice offers a means to represent data that spans one or more Buffer +// instances. A BufferSlice is meant to be immutable after creation, and methods +// like Ref create and return copies of the slice. This is why all methods have +// value receivers rather than pointer receivers. +// +// Note that any of the methods that read the underlying buffers such as Ref, +// Len or CopyTo etc., will panic if any underlying buffers have already been +// freed. It is recommended to not directly interact with any of the underlying +// buffers directly, rather such interactions should be mediated through the +// various methods on this type. +// +// By convention, any APIs that return (mem.BufferSlice, error) should reduce +// the burden on the caller by never returning a mem.BufferSlice that needs to +// be freed if the error is non-nil, unless explicitly stated. +type BufferSlice []Buffer + +// Len returns the sum of the length of all the Buffers in this slice. +// +// # Warning +// +// Invoking the built-in len on a BufferSlice will return the number of buffers +// in the slice, and *not* the value returned by this function. +func (s BufferSlice) Len() int { + var length int + for _, b := range s { + length += b.Len() + } + return length +} + +// Ref invokes Ref on each buffer in the slice. +func (s BufferSlice) Ref() { + for _, b := range s { + b.Ref() + } +} + +// Free invokes Buffer.Free() on each Buffer in the slice. +func (s BufferSlice) Free() { + for _, b := range s { + b.Free() + } +} + +// CopyTo copies each of the underlying Buffer's data into the given buffer, +// returning the number of bytes copied. 
Has the same semantics as the copy +// builtin in that it will copy as many bytes as it can, stopping when either dst +// is full or s runs out of data, returning the minimum of s.Len() and len(dst). +func (s BufferSlice) CopyTo(dst []byte) int { + off := 0 + for _, b := range s { + off += copy(dst[off:], b.ReadOnlyData()) + } + return off +} + +// Materialize concatenates all the underlying Buffer's data into a single +// contiguous buffer using CopyTo. +func (s BufferSlice) Materialize() []byte { + l := s.Len() + if l == 0 { + return nil + } + out := make([]byte, l) + s.CopyTo(out) + return out +} + +// MaterializeToBuffer functions like Materialize except that it writes the data +// to a single Buffer pulled from the given BufferPool. +// +// As a special case, if the input BufferSlice only actually has one Buffer, this +// function simply increases the refcount before returning said Buffer. Freeing this +// buffer won't release it until the BufferSlice is itself released. +func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { + if len(s) == 1 { + s[0].Ref() + return s[0] + } + sLen := s.Len() + if sLen == 0 { + return emptyBuffer{} + } + buf := pool.Get(sLen) + s.CopyTo(*buf) + return NewBuffer(buf, pool) +} + +// Reader returns a new Reader for the input slice after taking references to +// each underlying buffer. +func (s BufferSlice) Reader() Reader { + s.Ref() + return &sliceReader{ + data: s, + len: s.Len(), + } +} + +// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface +// with other parts systems. It also provides an additional convenience method +// Remaining(), which returns the number of unread bytes remaining in the slice. +// Buffers will be freed as they are read. +type Reader interface { + io.Reader + io.ByteReader + // Close frees the underlying BufferSlice and never returns an error. Subsequent + // calls to Read will return (0, io.EOF). + Close() error + // Remaining returns the number of unread bytes remaining in the slice. + Remaining() int +} + +type sliceReader struct { + data BufferSlice + len int + // The index into data[0].ReadOnlyData(). + bufferIdx int +} + +func (r *sliceReader) Remaining() int { + return r.len +} + +func (r *sliceReader) Close() error { + r.data.Free() + r.data = nil + r.len = 0 + return nil +} + +func (r *sliceReader) freeFirstBufferIfEmpty() bool { + if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { + return false + } + + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + return true +} + +func (r *sliceReader) Read(buf []byte) (n int, _ error) { + if r.len == 0 { + return 0, io.EOF + } + + for len(buf) != 0 && r.len != 0 { + // Copy as much as possible from the first Buffer in the slice into the + // given byte slice. + data := r.data[0].ReadOnlyData() + copied := copy(buf, data[r.bufferIdx:]) + r.len -= copied // Reduce len by the number of bytes copied. + r.bufferIdx += copied // Increment the buffer index. + n += copied // Increment the total number of bytes read. + buf = buf[copied:] // Shrink the given byte slice. + + // If we have copied all the data from the first Buffer, free it and advance to + // the next in the slice. + r.freeFirstBufferIfEmpty() + } + + return n, nil +} + +func (r *sliceReader) ReadByte() (byte, error) { + if r.len == 0 { + return 0, io.EOF + } + + // There may be any number of empty buffers in the slice, clear them all until a + // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0. 
+ for r.freeFirstBufferIfEmpty() { + } + + b := r.data[0].ReadOnlyData()[r.bufferIdx] + r.len-- + r.bufferIdx++ + // Free the first buffer in the slice if the last byte was read + r.freeFirstBufferIfEmpty() + return b, nil +} + +var _ io.Writer = (*writer)(nil) + +type writer struct { + buffers *BufferSlice + pool BufferPool +} + +func (w *writer) Write(p []byte) (n int, err error) { + b := Copy(p, w.pool) + *w.buffers = append(*w.buffers, b) + return b.Len(), nil +} + +// NewWriter wraps the given BufferSlice and BufferPool to implement the +// io.Writer interface. Every call to Write copies the contents of the given +// buffer into a new Buffer pulled from the given pool and the Buffer is added to +// the given BufferSlice. +func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { + return &writer{buffers: buffers, pool: pool} +} diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go new file mode 100644 index 00000000..4d66b2cc --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -0,0 +1,252 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package mem provides utilities that facilitate memory reuse in byte slices +// that are used as buffers. +// +// # Experimental +// +// Notice: All APIs in this package are EXPERIMENTAL and may be changed or +// removed in a later release. +package mem + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// A Buffer represents a reference counted piece of data (in bytes) that can be +// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be +// released by calling Free(), which invokes the free function given at creation +// only after all references are released. +// +// Note that a Buffer is not safe for concurrent access and instead each +// goroutine should use its own reference to the data, which can be acquired via +// a call to Ref(). +// +// Attempts to access the underlying data after releasing the reference to the +// Buffer will panic. +type Buffer interface { + // ReadOnlyData returns the underlying byte slice. Note that it is undefined + // behavior to modify the contents of this slice in any way. + ReadOnlyData() []byte + // Ref increases the reference counter for this Buffer. + Ref() + // Free decrements this Buffer's reference counter and frees the underlying + // byte slice if the counter reaches 0 as a result of this call. + Free() + // Len returns the Buffer's size. 
+ Len() int + + split(n int) (left, right Buffer) + read(buf []byte) (int, Buffer) +} + +var ( + bufferPoolingThreshold = 1 << 10 + + bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }} + refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} +) + +func IsBelowBufferPoolingThreshold(size int) bool { + return size <= bufferPoolingThreshold +} + +type buffer struct { + origData *[]byte + data []byte + refs *atomic.Int32 + pool BufferPool +} + +func newBuffer() *buffer { + return bufferObjectPool.Get().(*buffer) +} + +// NewBuffer creates a new Buffer from the given data, initializing the reference +// counter to 1. The data will then be returned to the given pool when all +// references to the returned Buffer are released. As a special case to avoid +// additional allocations, if the given buffer pool is nil, the returned buffer +// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the +// underlying data is never freed. +// +// Note that the backing array of the given data is not copied. +func NewBuffer(data *[]byte, pool BufferPool) Buffer { + if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + return (SliceBuffer)(*data) + } + b := newBuffer() + b.origData = data + b.data = *data + b.pool = pool + b.refs = refObjectPool.Get().(*atomic.Int32) + b.refs.Add(1) + return b +} + +// Copy creates a new Buffer from the given data, initializing the reference +// counter to 1. +// +// It acquires a []byte from the given pool and copies over the backing array +// of the given data. The []byte acquired from the pool is returned to the +// pool when all references to the returned Buffer are released. +func Copy(data []byte, pool BufferPool) Buffer { + if IsBelowBufferPoolingThreshold(len(data)) { + buf := make(SliceBuffer, len(data)) + copy(buf, data) + return buf + } + + buf := pool.Get(len(data)) + copy(*buf, data) + return NewBuffer(buf, pool) +} + +func (b *buffer) ReadOnlyData() []byte { + if b.refs == nil { + panic("Cannot read freed buffer") + } + return b.data +} + +func (b *buffer) Ref() { + if b.refs == nil { + panic("Cannot ref freed buffer") + } + b.refs.Add(1) +} + +func (b *buffer) Free() { + if b.refs == nil { + panic("Cannot free freed buffer") + } + + refs := b.refs.Add(-1) + switch { + case refs > 0: + return + case refs == 0: + if b.pool != nil { + b.pool.Put(b.origData) + } + + refObjectPool.Put(b.refs) + b.origData = nil + b.data = nil + b.refs = nil + b.pool = nil + bufferObjectPool.Put(b) + default: + panic("Cannot free freed buffer") + } +} + +func (b *buffer) Len() int { + return len(b.ReadOnlyData()) +} + +func (b *buffer) split(n int) (Buffer, Buffer) { + if b.refs == nil { + panic("Cannot split freed buffer") + } + + b.refs.Add(1) + split := newBuffer() + split.origData = b.origData + split.data = b.data[n:] + split.refs = b.refs + split.pool = b.pool + + b.data = b.data[:n] + + return b, split +} + +func (b *buffer) read(buf []byte) (int, Buffer) { + if b.refs == nil { + panic("Cannot read freed buffer") + } + + n := copy(buf, b.data) + if n == len(b.data) { + b.Free() + return n, nil + } + + b.data = b.data[n:] + return n, b +} + +// String returns a string representation of the buffer. May be used for +// debugging purposes. 
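// A minimal, illustrative sketch (not part of the vendored patch) of the
// reference-counted Buffer defined above. The 4 KiB payload size is chosen
// only so the data exceeds the 1 KiB pooling threshold and is actually pooled
// instead of being returned as a plain SliceBuffer.
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// Copy acquires a []byte from the pool, copies the data in, and returns a
	// Buffer whose reference count starts at 1.
	data := make([]byte, 4096)
	buf := mem.Copy(data, pool)

	buf.Ref() // take a second reference, e.g. for another consumer
	fmt.Println(buf.Len())

	buf.Free() // drop the extra reference; the data is still readable
	fmt.Println(len(buf.ReadOnlyData()))

	buf.Free() // last reference released: the backing slice returns to the pool
}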
+func (b *buffer) String() string { + return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) +} + +func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { + return buf.read(dst) +} + +// SplitUnsafe modifies the receiver to point to the first n bytes while it +// returns a new reference to the remaining bytes. The returned Buffer functions +// just like a normal reference acquired using Ref(). +func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { + return buf.split(n) +} + +type emptyBuffer struct{} + +func (e emptyBuffer) ReadOnlyData() []byte { + return nil +} + +func (e emptyBuffer) Ref() {} +func (e emptyBuffer) Free() {} + +func (e emptyBuffer) Len() int { + return 0 +} + +func (e emptyBuffer) split(int) (left, right Buffer) { + return e, e +} + +func (e emptyBuffer) read([]byte) (int, Buffer) { + return 0, e +} + +type SliceBuffer []byte + +func (s SliceBuffer) ReadOnlyData() []byte { return s } +func (s SliceBuffer) Ref() {} +func (s SliceBuffer) Free() {} +func (s SliceBuffer) Len() int { return len(s) } + +func (s SliceBuffer) split(n int) (left, right Buffer) { + return s[:n], s[n:] +} + +func (s SliceBuffer) read(buf []byte) (int, Buffer) { + n := copy(buf, s) + if n == len(s) { + return n, nil + } + return n, s[n:] +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 1e9485fd..d2e15253 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // ValueFromIncomingContext returns the metadata value corresponding to the metadata // key from the incoming metadata if it exists. Keys are matched in a case insensitive // manner. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func ValueFromIncomingContext(ctx context.Context, key string) []string { md, ok := ctx.Value(mdIncomingKey{}).(MD) if !ok { @@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // Case insenitive comparison: MD is a map, and there's no guarantee + // Case insensitive comparison: MD is a map, and there's no guarantee // that the MD attached to the context is created using our helper // functions. if strings.EqualFold(k, key) { diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 73bd6336..e87a17f3 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -20,6 +20,7 @@ package grpc import ( "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -31,9 +32,10 @@ import ( // later release. type PreparedMsg struct { // Struct for preparing msg before sending them - encodedData []byte + encodedData mem.BufferSlice hdr []byte - payload []byte + payload mem.BufferSlice + pf payloadFormat } // Encode marshalls and compresses the message using the codec and compressor for the stream. 
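// A minimal, illustrative sketch (not part of the vendored patch): building a
// BufferSlice through the io.Writer adapter from buffer_slice.go above, then
// consuming it either all at once with Materialize or incrementally through a
// Reader.
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// NewWriter appends every Write as a new Buffer on the slice.
	var slice mem.BufferSlice
	w := mem.NewWriter(&slice, pool)
	fmt.Fprint(w, "hello, ")
	fmt.Fprint(w, "world")

	fmt.Println(len(slice), slice.Len()) // 2 buffers, 12 bytes in total

	// Materialize flattens the slice into one contiguous []byte copy.
	fmt.Printf("%s\n", slice.Materialize())

	// Reader takes its own references, so the original slice can be freed
	// independently; Close releases the reader's references.
	r := slice.Reader()
	slice.Free()

	chunk := make([]byte, 5)
	n, _ := r.Read(chunk)
	fmt.Printf("read %d bytes: %q, %d remaining\n", n, chunk[:n], r.Remaining())
	_ = r.Close()
}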
@@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if err != nil { return err } - p.encodedData = data - compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + + materializedData := data.Materialize() + data.Free() + p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + + // TODO: it should be possible to grab the bufferPool from the underlying + // stream implementation with a type cast to its actual type (such as + // addrConnStream) and accessing the buffer pool directly. + var compData mem.BufferSlice + compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool()) if err != nil { return err } - p.hdr, p.payload = msgHeader(data, compData) + + if p.pf.isCompressed() { + materializedCompData := compData.Materialize() + compData.Free() + compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + } + + p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) + return nil } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh deleted file mode 100644 index 3edca296..00000000 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Copyright 2020 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu -o pipefail - -WORKDIR=$(mktemp -d) - -function finish { - rm -rf "$WORKDIR" -} -trap finish EXIT - -export GOBIN=${WORKDIR}/bin -export PATH=${GOBIN}:${PATH} -mkdir -p ${GOBIN} - -echo "remove existing generated files" -# grpc_testing_not_regenerate/*.pb.go is not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') - -echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" -(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) - -echo "go install cmd/protoc-gen-go-grpc" -(cd cmd/protoc-gen-go-grpc && go install .) 
- -echo "git clone https://github.com/grpc/grpc-proto" -git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto - -echo "git clone https://github.com/protocolbuffers/protobuf" -git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf - -# Pull in code.proto as a proto dependency -mkdir -p ${WORKDIR}/googleapis/google/rpc -echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" -curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto - -mkdir -p ${WORKDIR}/out - -# Generates sources without the embed requirement -LEGACY_SOURCES=( - ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto - ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto - ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto - ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto - profiling/proto/service.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto -) - -# Generates only the new gRPC Service symbols -SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') - ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto - ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto - ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/testing/*.proto - ${WORKDIR}/grpc-proto/grpc/core/*.proto -) - -# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an -# import path of 'bar' in the generated code when 'foo.proto' is imported in -# one of the sources. -# -# Note that the protos listed here are all for testing purposes. All protos to -# be used externally should have a go_package option (and they don't need to be -# listed here). -OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ -Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing - -for src in ${SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -for src in ${LEGACY_SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -# The go_package option in grpc/lookup/v1/rls.proto doesn't match the -# current location. Move it into the right place. 
-mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 - -# grpc_testing_not_regenerate/*.pb.go are not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go - -cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index f2efa2a2..09e864a8 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -76,9 +76,11 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - r.BuildCallback(target, cc, opts) r.mu.Lock() defer r.mu.Unlock() + // Call BuildCallback after locking to avoid a race when UpdateState + // or ReportError is called before Build returns. + r.BuildCallback(target, cc, opts) r.CC = cc if r.lastSeenState != nil { err := r.CC.UpdateState(*r.lastSeenState) diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index c5fb4523..23bb3fb2 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { // any newly created ccResolverWrapper, except that close may be called instead. func (ccr *ccResolverWrapper) start() error { errCh := make(chan error) - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil { return } @@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error { } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccr.resolver == nil { return } @@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() { ccr.closed = true ccr.mu.Unlock() - ccr.serializer.Schedule(func(context.Context) { + ccr.serializer.TrySchedule(func(context.Context) { if ccr.resolver == nil { return } @@ -177,6 +177,9 @@ func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.P // addChannelzTraceEvent adds a channelz trace event containing the new // state received from resolver implementations. 
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + if !logger.V(0) && !channelz.IsOn() { + return + } var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index fdd49e6e..2d96f140 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -19,7 +19,6 @@ package grpc import ( - "bytes" "compress/gzip" "context" "encoding/binary" @@ -35,6 +34,7 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -220,8 +220,8 @@ type HeaderCallOption struct { HeaderAddr *metadata.MD } -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { +func (o HeaderCallOption) before(*callInfo) error { return nil } +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { *o.HeaderAddr, _ = attempt.s.Header() } @@ -242,8 +242,8 @@ type TrailerCallOption struct { TrailerAddr *metadata.MD } -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o TrailerCallOption) before(*callInfo) error { return nil } +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { *o.TrailerAddr = attempt.s.Trailer() } @@ -264,24 +264,20 @@ type PeerCallOption struct { PeerAddr *peer.Peer } -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o PeerCallOption) before(*callInfo) error { return nil } +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { if x, ok := peer.FromContext(attempt.s.Context()); ok { *o.PeerAddr = *x } } -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false and the -// connection is in the TRANSIENT_FAILURE state, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will -// retry the call if it fails due to a transient error. gRPC will not retry if -// data was written to the wire unless the server indicates it did not process -// the data. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// WaitForReady configures the RPC's behavior when the client is in +// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If +// waitForReady is false, the RPC will fail immediately. Otherwise, the client +// will wait until a connection becomes available or the RPC's deadline is +// reached. // -// By default, RPCs don't "wait for ready". +// By default, RPCs do not "wait for ready". func WaitForReady(waitForReady bool) CallOption { return FailFastCallOption{FailFast: !waitForReady} } @@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o FailFastCallOption) after(*callInfo, *csAttempt) {} // OnFinish returns a CallOption that configures a callback to be called when // the call completes. 
The error passed to the callback is the status of the @@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error { return nil } -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default @@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default @@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has @@ -442,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} // ForceCodec returns a CallOption that will set codec to be used for all // request and response messages for a call. The result of calling Name() will @@ -515,10 +511,50 @@ type ForceCodecCallOption struct { } func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV1Bridge(o.Codec) return nil } -func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodecV2 returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. 
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodecV2(codec encoding.CodecV2) CallOption { + return ForceCodecV2CallOption{CodecV2: codec} +} + +// ForceCodecV2CallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecV2CallOption struct { + CodecV2 encoding.CodecV2 +} + +func (o ForceCodecV2CallOption) before(c *callInfo) error { + c.codec = o.CodecV2 + return nil +} + +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -540,10 +576,10 @@ type CustomCodecCallOption struct { } func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV0Bridge(o.Codec) return nil } -func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -581,19 +617,28 @@ const ( compressionMade payloadFormat = 1 // compressed ) +func (pf payloadFormat) isCompressed() bool { + return pf == compressionMade +} + +type streamReader interface { + ReadHeader(header []byte) error + Read(n int) (mem.BufferSlice, error) +} + // parser reads complete gRPC messages from the underlying reader. type parser struct { // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. - r io.Reader + r streamReader // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte - // recvBufferPool is the pool of shared receive buffers. - recvBufferPool SharedBufferPool + // bufferPool is the pool of shared receive buffers. + bufferPool mem.BufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -608,14 +653,15 @@ type parser struct { // - an error from the status package // // No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible +// that the underlying streamReader must not return an incompatible // error. -func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := p.r.Read(p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { + err := p.r.ReadHeader(p.header[:]) + if err != nil { return 0, nil, err } - pf = payloadFormat(p.header[0]) + pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { @@ -627,20 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", length, maxReceiveMessageSize) } - msg = p.recvBufferPool.Get(int(length)) - if _, err := p.r.Read(msg); err != nil { + + data, err := p.r.Read(int(length)) + if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } - return pf, msg, nil + return pf, data, nil } // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg any) ([]byte, error) { +func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -648,7 +695,8 @@ func encode(c baseCodec, msg any) ([]byte, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(len(b)) > math.MaxUint32 { + if uint(b.Len()) > math.MaxUint32 { + b.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) } return b, nil @@ -659,34 +707,41 @@ func encode(c baseCodec, msg any) ([]byte, error) { // indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. -func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { - if compressor == nil && cp == nil { - return nil, nil - } - if len(in) == 0 { - return nil, nil +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { + if (compressor == nil && cp == nil) || in.Len() == 0 { + return nil, compressionNone, nil } + var out mem.BufferSlice + w := mem.NewWriter(&out, pool) wrapErr := func(err error) error { + out.Free() return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } - cbuf := &bytes.Buffer{} if compressor != nil { - z, err := compressor.Compress(cbuf) + z, err := compressor.Compress(w) if err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } - if _, err := z.Write(in); err != nil { - return nil, wrapErr(err) + for _, b := range in { + if _, err := z.Write(b.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } } if err := z.Close(); err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } } else { - if err := cp.Do(cbuf, in); err != nil { - return nil, wrapErr(err) + // This is obviously really inefficient since it fully materializes the data, but + // there is no way around this with the old Compressor API. At least it attempts + // to return the buffer to the provider, in the hopes it can be reused (maybe + // even by a subsequent call to this very function). + buf := in.MaterializeToBuffer(pool) + defer buf.Free() + if err := cp.Do(w, buf.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) } } - return cbuf.Bytes(), nil + return out, compressionMade, nil } const ( @@ -697,33 +752,36 @@ const ( // msgHeader returns a 5-byte header for the message being transmitted and the // payload, which is compData if non-nil or data otherwise. 
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { +func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) { hdr = make([]byte, headerLen) - if compData != nil { - hdr[0] = byte(compressionMade) - data = compData + hdr[0] = byte(pf) + + var length uint32 + if pf.isCompressed() { + length = uint32(compData.Len()) + payload = compData } else { - hdr[0] = byte(compressionNone) + length = uint32(data.Len()) + payload = data } // Write length of payload into buf - binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) - return hdr, data + binary.BigEndian.PutUint32(hdr[payloadLen:], length) + return hdr, payload } -func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - CompressedLength: len(payload), + Length: dataLength, + WireLength: payloadLength + headerLen, + CompressedLength: payloadLength, SentTime: t, } } -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status { switch pf { case compressionNone: case compressionMade: @@ -731,7 +789,11 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") } if !haveCompressor { - return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + if isServer { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } else { + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -741,104 +803,129 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool type payloadInfo struct { compressedLength int // The compressed length got from wire. - uncompressedBytes []byte + uncompressedBytes mem.BufferSlice +} + +func (p *payloadInfo) free() { + if p != nil && p.uncompressedBytes != nil { + p.uncompressedBytes.Free() + } } // recvAndDecompress reads a message from the stream, decompressing it if necessary. // // Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as // the buffer is no longer needed. -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, -) (uncompressedBuf []byte, cancel func(), err error) { - pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) +// TODO: Refactor this function to reduce the number of arguments. 
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +) (out mem.BufferSlice, err error) { + pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, nil, err + return nil, err } - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, nil, st.Err() + compressedLength := compressed.Len() + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { + compressed.Free() + return nil, st.Err() } var size int - if pf == compressionMade { + if pf.isCompressed() { + defer compressed.Free() + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + var uncompressedBuf []byte + uncompressedBuf, err = dc.Do(compressed.Reader()) + if err == nil { + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + } size = len(uncompressedBuf) } else { - uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) + out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) } if err != nil { - return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { + out.Free() // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } else { - uncompressedBuf = compressedBuf + out = compressed } if payInfo != nil { - payInfo.compressedLength = len(compressedBuf) - payInfo.uncompressedBytes = uncompressedBuf - - cancel = func() {} - } else { - cancel = func() { - p.recvBufferPool.Put(&compressedBuf) - } + payInfo.compressedLength = compressedLength + out.Ref() + payInfo.uncompressedBytes = out } - return uncompressedBuf, cancel, nil + return out, nil } // Using compressor, decompress d, returning data and size. // Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { + dcReader, err := compressor.Decompress(d.Reader()) if err != nil { return nil, 0, err } - if sizer, ok := compressor.(interface { - DecompressedSize(compressedBytes []byte) int - }); ok { - if size := sizer.DecompressedSize(d); size >= 0 { - if size > maxReceiveMessageSize { - return nil, size, nil - } - // size is used as an estimate to size the buffer, but we - // will read more data if available. - // +MinRead so ReadFrom will not reallocate if size is correct. 
- // - // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // we can also utilize the recv buffer pool here. - buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return buf.Bytes(), int(bytesRead), err - } + + // TODO: Can/should this still be preserved with the new BufferSlice API? Are + // there any actual benefits to allocating a single large buffer instead of + // multiple smaller ones? + //if sizer, ok := compressor.(interface { + // DecompressedSize(compressedBytes []byte) int + //}); ok { + // if size := sizer.DecompressedSize(d); size >= 0 { + // if size > maxReceiveMessageSize { + // return nil, size, nil + // } + // // size is used as an estimate to size the buffer, but we + // // will read more data if available. + // // +MinRead so ReadFrom will not reallocate if size is correct. + // // + // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // // we can also utilize the recv buffer pool here. + // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + // return buf.Bytes(), int(bytesRead), err + // } + //} + + var out mem.BufferSlice + _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + if err != nil { + out.Free() + return nil, 0, err } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return d, len(d), err + return out, out.Len(), nil } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { + data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err } - defer cancel() - if err := c.Unmarshal(buf, m); err != nil { + // If the codec wants its own reference to the data, it can get it. Otherwise, always + // free the buffers. + defer data.Free() + + if err := c.Unmarshal(data, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } + return nil } @@ -941,7 +1028,7 @@ func setCallInfoCodec(c *callInfo) error { // encoding.Codec (Name vs. String method name). We only support // setting content subtype from encoding.Codec to avoid a behavior // change with the deprecated version. - if ec, ok := c.codec.(encoding.Codec); ok { + if ec, ok := c.codec.(encoding.CodecV2); ok { c.contentSubtype = strings.ToLower(ec.Name()) } } @@ -950,12 +1037,12 @@ func setCallInfoCodec(c *callInfo) error { if c.contentSubtype == "" { // No codec specified in CallOptions; use proto by default. 
- c.codec = encoding.GetCodec(proto.Name) + c.codec = getCodec(proto.Name) return nil } // c.contentSubtype is already lowercased in CallContentSubtype - c.codec = encoding.GetCodec(c.contentSubtype) + c.codec = getCodec(c.contentSubtype) if c.codec == nil { return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 89f8e479..d1e1415a 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -45,6 +45,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -80,7 +81,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption - internal.RecvBufferPool = recvBufferPool + internal.BufferPool = bufferPool } var statusOK = status.New(codes.OK, "") @@ -170,7 +171,7 @@ type serverOptions struct { maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 - recvBufferPool SharedBufferPool + bufferPool mem.BufferPool waitForHandlers bool } @@ -181,7 +182,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, - recvBufferPool: nopBufferPool{}, + bufferPool: mem.DefaultBufferPool(), } var globalServerOptions []ServerOption @@ -313,7 +314,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV0Bridge(codec) }) } @@ -342,7 +343,22 @@ func CustomCodec(codec Codec) ServerOption { // later release. func ForceServerCodec(codec encoding.Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV1Bridge(codec) + }) +} + +// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new +// CodecV2 interface. +// +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codecV2 }) } @@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption { }) } -// RecvBufferPool returns a ServerOption that configures the server -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: StatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. 
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { - return recvBufferPool(bufferPool) -} - -func recvBufferPool(bufferPool SharedBufferPool) ServerOption { +func bufferPool(bufferPool mem.BufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.recvBufferPool = bufferPool + o.bufferPool = bufferPool }) } @@ -622,7 +621,7 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorkers blocks on a *transport.Stream channel forever and waits for +// serverWorker blocks on a *transport.Stream channel forever and waits for // data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). @@ -980,6 +979,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, + BufferPool: s.opts.bufferPool, } st, err := transport.NewServerTransport(c, config) if err != nil { @@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. @@ -1142,20 +1142,35 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } - compData, err := compress(data, cp, comp) + + compData, pf, err := compress(data, cp, comp, s.opts.bufferPool) if err != nil { + data.Free() channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } - hdr, payload := msgHeader(data, compData) + + hdr, payload := msgHeader(data, compData, pf) + + defer func() { + compData.Free() + data.Free() + // payload does not need to be freed here, it is either data or compData, both of + // which are already freed. + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + if payloadLen > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", payloadLen, s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) if err == nil { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + if len(s.opts.statsHandlers) != 0 { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) + } } } return err @@ -1334,37 +1349,37 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor var payInfo *payloadInfo if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } + defer d.Free() if channelz.IsOn() { t.IncrMsgRecv() } df := func(v any) error { - defer cancel() - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } + for _, sh := range shs { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, - Length: len(d), + Length: d.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Data: d, }) } if len(binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: d, + Message: d.Materialize(), } for _, binlog := range binlogs { binlog.Log(ctx, cm) @@ -1548,7 +1563,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx: ctx, t: t, s: stream, - p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1963,12 +1978,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return s.opts.codec } if contentSubtype == "" { - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } - codec := encoding.GetCodec(contentSubtype) + codec := getCodec(contentSubtype) if codec == nil { logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } return codec } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go deleted file mode 100644 index 48a64cfe..00000000 --- a/vendor/google.golang.org/grpc/shared_buffer_pool.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import "sync" - -// SharedBufferPool is a pool of buffers that can be shared, resulting in -// decreased memory allocation. Currently, in gRPC-go, it is only utilized -// for parsing incoming messages. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -type SharedBufferPool interface { - // Get returns a buffer with specified length from the pool. - // - // The returned byte slice may be not zero initialized. - Get(length int) []byte - - // Put returns a buffer to the pool. - Put(*[]byte) -} - -// NewSharedBufferPool creates a simple SharedBufferPool with buckets -// of different sizes to optimize memory usage. This prevents the pool from -// wasting large amounts of memory, even when handling messages of varying sizes. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewSharedBufferPool() SharedBufferPool { - return &simpleSharedBufferPool{ - pools: [poolArraySize]simpleSharedBufferChildPool{ - newBytesPool(level0PoolMaxSize), - newBytesPool(level1PoolMaxSize), - newBytesPool(level2PoolMaxSize), - newBytesPool(level3PoolMaxSize), - newBytesPool(level4PoolMaxSize), - newBytesPool(0), - }, - } -} - -// simpleSharedBufferPool is a simple implementation of SharedBufferPool. -type simpleSharedBufferPool struct { - pools [poolArraySize]simpleSharedBufferChildPool -} - -func (p *simpleSharedBufferPool) Get(size int) []byte { - return p.pools[p.poolIdx(size)].Get(size) -} - -func (p *simpleSharedBufferPool) Put(bs *[]byte) { - p.pools[p.poolIdx(cap(*bs))].Put(bs) -} - -func (p *simpleSharedBufferPool) poolIdx(size int) int { - switch { - case size <= level0PoolMaxSize: - return level0PoolIdx - case size <= level1PoolMaxSize: - return level1PoolIdx - case size <= level2PoolMaxSize: - return level2PoolIdx - case size <= level3PoolMaxSize: - return level3PoolIdx - case size <= level4PoolMaxSize: - return level4PoolIdx - default: - return levelMaxPoolIdx - } -} - -const ( - level0PoolMaxSize = 16 // 16 B - level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B - level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB - level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB - level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB -) - -const ( - level0PoolIdx = iota - level1PoolIdx - level2PoolIdx - level3PoolIdx - level4PoolIdx - levelMaxPoolIdx - poolArraySize -) - -type simpleSharedBufferChildPool interface { - Get(size int) []byte - Put(any) -} - -type bufferPool struct { - sync.Pool - - defaultSize int -} - -func (p *bufferPool) Get(size int) []byte { - bs := p.Pool.Get().(*[]byte) - - if cap(*bs) < size { - p.Pool.Put(bs) - - return make([]byte, size) - } - - return (*bs)[:size] -} - -func newBytesPool(size int) simpleSharedBufferChildPool { - return &bufferPool{ - Pool: sync.Pool{ - New: func() any { - bs := make([]byte, size) - return &bs - }, - }, - defaultSize: size, - } -} - -// nopBufferPool is a buffer pool just makes new buffer without pooling. 
-type nopBufferPool struct { -} - -func (nopBufferPool) Get(length int) []byte { - return make([]byte, length) -} - -func (nopBufferPool) Put(*[]byte) { -} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index fdb0bd65..71195c49 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -77,9 +77,6 @@ type InPayload struct { // the call to HandleRPC which provides the InPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). @@ -150,9 +147,6 @@ type OutPayload struct { // the call to HandleRPC which provides the OutPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). Length int diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 8051ef5b..bb2b2a21 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -41,6 +41,7 @@ import ( "google.golang.org/grpc/internal/serviceconfig" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -359,7 +360,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cs.attempt = a return nil } - if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil { return nil, err } @@ -517,7 +518,7 @@ func (a *csAttempt) newStream() error { } a.s = s a.ctx = s.Context() - a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} + a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -566,10 +567,15 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? - onCommit func() - buffer []func(a *csAttempt) error // operations to replay on retry - bufferSize int // current size of buffer + committed bool // active attempt committed for retry? + onCommit func() + replayBuffer []replayOp // operations to replay on retry + replayBufferSize int // current size of replayBuffer +} + +type replayOp struct { + op func(a *csAttempt) error + cleanup func() } // csAttempt implements a single transport stream attempt within a @@ -607,7 +613,12 @@ func (cs *clientStream) commitAttemptLocked() { cs.onCommit() } cs.committed = true - cs.buffer = nil + for _, op := range cs.replayBuffer { + if op.cleanup != nil { + op.cleanup() + } + } + cs.replayBuffer = nil } func (cs *clientStream) commitAttempt() { @@ -732,7 +743,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { // the stream is canceled. 
return err } - // Note that the first op in the replay buffer always sets cs.attempt + // Note that the first op in replayBuffer always sets cs.attempt // if it is able to pick a transport and create a stream. if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil @@ -761,7 +772,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. return toRPCErr(op(cs.attempt)) } - if len(cs.buffer) == 0 { + if len(cs.replayBuffer) == 0 { // For the first op, which controls creation of the stream and // assigns cs.attempt, we need to create a new attempt inline // before executing the first op. On subsequent ops, the attempt @@ -851,25 +862,26 @@ func (cs *clientStream) Trailer() metadata.MD { } func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { - for _, f := range cs.buffer { - if err := f(attempt); err != nil { + for _, f := range cs.replayBuffer { + if err := f.op(attempt); err != nil { return err } } return nil } -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) { // Note: we still will buffer if retry is disabled (for transparent retries). if cs.committed { return } - cs.bufferSize += sz - if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.replayBufferSize += sz + if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize { cs.commitAttemptLocked() + cleanup() return } - cs.buffer = append(cs.buffer, op) + cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup}) } func (cs *clientStream) SendMsg(m any) (err error) { @@ -891,23 +903,50 @@ func (cs *clientStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > *cs.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + if payloadLen > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize) } + + // always take an extra ref in case data == payload (i.e. when the data isn't + // compressed). The original ref will always be freed by the deferred free above. + payload.Ref() op := func(a *csAttempt) error { - return a.sendMsg(m, hdr, payload, data) + return a.sendMsg(m, hdr, payload, dataLen, payloadLen) + } + + // onSuccess is invoked when the op is captured for a subsequent retry. If the + // stream was established by a previous message and therefore retries are + // disabled, onSuccess will not be invoked, and payloadRef can be freed + // immediately. 
+ onSuccessCalled := false + err = cs.withRetry(op, func() { + cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free) + onSuccessCalled = true + }) + if !onSuccessCalled { + payload.Free() } - err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if len(cs.binlogs) != 0 && err == nil { cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: data, + Message: data.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, cm) @@ -924,6 +963,7 @@ func (cs *clientStream) RecvMsg(m any) error { var recvInfo *payloadInfo if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} + defer recvInfo.free() } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) @@ -931,7 +971,7 @@ func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && err == nil { sm := &binarylog.ServerMessage{ OnClientSide: true, - Message: recvInfo.uncompressedBytes, + Message: recvInfo.uncompressedBytes.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, sm) @@ -958,7 +998,7 @@ func (cs *clientStream) CloseSend() error { // RecvMsg. This also matches historical behavior. return nil } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }) if len(cs.binlogs) != 0 { chc := &binarylog.ClientHalfClose{ OnClientSide: true, @@ -1034,7 +1074,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1052,8 +1092,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { } return io.EOF } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + if len(a.statsHandlers) != 0 { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) + } } if channelz.IsOn() { a.t.IncrMsgSent() @@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} + defer payInfo.free() } if !a.decompSet { @@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompSet = true } - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) - if err != nil { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { if err == io.EOF { if statusErr := a.s.Status().Err(); statusErr != nil { return statusErr @@ -1103,14 +1145,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } for _, sh := range a.statsHandlers { sh.HandleRPC(a.ctx, &stats.InPayload{ - Client: true, - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, + Client: true, + RecvTime: time.Now(), + Payload: m, WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Length: len(payInfo.uncompressedBytes), + Length: payInfo.uncompressedBytes.Len(), }) } if channelz.IsOn() { @@ -1122,14 +1162,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } // Special handling for non-server-stream rpcs. 
// This recv expects EOF or errors, so we don't collect inPayload. - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF { return a.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (a *csAttempt) finish(err error) { @@ -1185,12 +1223,12 @@ func (a *csAttempt) finish(err error) { a.mu.Unlock() } -// newClientStream creates a ClientStream with the specified transport, on the +// newNonRetryClientStream creates a ClientStream with the specified transport, on the // given addrConn. // // It's expected that the given transport is either the same one in addrConn, or // is already closed. To avoid race, transport is specified separately, instead -// of using ac.transpot. +// of using ac.transport. // // Main difference between this and ClientConn.NewStream: // - no retry @@ -1276,7 +1314,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} + as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1373,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + // TODO(dfawley): should we be checking len(data) instead? - if len(payld) > *as.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + if payload.Len() > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1423,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Only initialize this state once per stream. 
as.decompSet = true } - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err != nil { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { if err == io.EOF { if statusErr := as.s.Status().Err(); statusErr != nil { return statusErr @@ -1444,14 +1490,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF { return as.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (as *addrConnStream) finish(err error) { @@ -1645,18 +1689,31 @@ func (ss *serverStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() + // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > ss.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + if payloadLen > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payloadLen, ss.maxSendMessageSize) } if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() @@ -1669,7 +1726,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } } sm := &binarylog.ServerMessage{ - Message: data, + Message: data.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, sm) @@ -1677,7 +1734,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) } } return nil @@ -1714,8 +1771,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1733,11 +1791,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - Length: len(payInfo.uncompressedBytes), + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, }) @@ -1745,7 +1801,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if len(ss.binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: payInfo.uncompressedBytes, + Message: payInfo.uncompressedBytes.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, cm) @@ -1760,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { return Method(stream.Context()) } -// prepareMsg returns the hdr, payload and data -// using the compressors passed or using the -// passed preparedmsg -func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +// prepareMsg returns the hdr, payload and data using the compressors passed or +// using the passed preparedmsg. The returned boolean indicates whether +// compression was made and therefore whether the payload needs to be freed in +// addition to the returned data. Freeing the payload if the returned boolean is +// false can lead to undefined behavior. +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { - return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil } // The input interface is not a prepared msg. 
// Marshal and Compress the data at this point data, err = encode(codec, m) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } - compData, err := compress(data, cp, comp) + compData, pf, err := compress(data, cp, comp, pool) if err != nil { - return nil, nil, nil, err + data.Free() + return nil, nil, nil, 0, err } - hdr, payload = msgHeader(data, compData) - return hdr, payload, data, nil + hdr, payload = msgHeader(data, compData, pf) + return hdr, data, payload, pf, nil } diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go index 8b813529..0037fee0 100644 --- a/vendor/google.golang.org/grpc/stream_interfaces.go +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -22,15 +22,35 @@ package grpc // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. type ServerStreamingClient[Res any] interface { + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. ClientStream } // ServerStreamingServer represents the server side of a server-streaming (one // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. +// +// To terminate the response stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type ServerStreamingServer[Res any] interface { + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface { // message stream and the type of the unary response message. It is used in // generated code. type ClientStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using CloseAndRecv(). Send(*Req) error + + // CloseAndRecv closes the request stream and waits for the server's + // response. This method must be called once and only once after sending + // all request messages. Any error returned is implemented by the status + // package. CloseAndRecv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. ClientStream } @@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface { // requests, one response) RPC. 
It is generic over both the type of the request // message stream and the type of the unary response message. It is used in // generated code. +// +// To terminate the RPC, call SendAndClose and return nil from the method +// handler or do not call SendAndClose and return an error from the status +// package. type ClientStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseAndRecv on its + // ClientStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // SendAndClose sends a single response message to the client and closes + // the stream. This method must be called once and only once after all + // request messages have been processed. Recv should not be called after + // calling SendAndClose. SendAndClose(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface { // request message stream and the type of the response message stream. It is // used in generated code. type BidiStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using Recv(). Send(*Req) error + + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, Trailer, and + // CloseSend functionality. No other methods in the ClientStream should be + // called directly. ClientStream } @@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface { // (many requests, many responses) RPC. It is generic over both the type of the // request message stream and the type of the response message stream. It is // used in generated code. +// +// To terminate the stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type BidiStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseSend on its + // BidiStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. 
An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index bafaef99..a96b6a6b 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.65.0" +const Version = "1.67.1" diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index bb2966e3..8f9e592f 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -351,7 +351,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) } - return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString()) } func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 29846df2..0e72d853 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto } v := m.Get(fd) - isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil - if isProto2Scalar || isSingularMessage { + if fd.HasPresence() { if m.skipNull { continue } diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go index 8401be8c..024ffebd 100644 --- a/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -9,7 +9,7 @@ // dependency on the descriptor proto package). package descopts -import pref "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // These variables are set by the init function in descriptor.pb.go via logic // in internal/filetype. In other words, so long as the descriptor proto package @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect" // // Each variable is populated with a nil pointer to the options struct. 
var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage + File protoreflect.ProtoMessage + Enum protoreflect.ProtoMessage + EnumValue protoreflect.ProtoMessage + Message protoreflect.ProtoMessage + Field protoreflect.ProtoMessage + Oneof protoreflect.ProtoMessage + ExtensionRange protoreflect.ProtoMessage + Service protoreflect.ProtoMessage + Method protoreflect.ProtoMessage ) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index df53ff40..fa790e0f 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -258,6 +258,7 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions + IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -351,6 +352,7 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } func (fd *Field) MapKey() protoreflect.FieldDescriptor { @@ -425,6 +427,7 @@ type ( Extendee protoreflect.MessageDescriptor Cardinality protoreflect.Cardinality Kind protoreflect.Kind + IsLazy bool EditionFeatures EditionFeatures } ExtensionL2 struct { @@ -465,6 +468,7 @@ func (xd *Extension) IsPacked() bool { } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } func (xd *Extension) IsMap() bool { return false } func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 8a57d60b..d2f54949 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + xd.L1.IsLazy = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e56c91a8..67a51b32 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) + 
case genid.FieldOptions_Lazy_field_number: + fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 11f5f356..fd4d0c83 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -68,7 +68,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + case genid.FeatureSet_Go_ext_number: parent = unmarshalGoFeature(v, parent) } } diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go index 45ccd012..d9b9d916 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -6,6 +6,6 @@ // and the well-known types. package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 9a652a2b..7f67cbb6 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -12,20 +12,25 @@ import ( const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" -// Names for google.protobuf.GoFeatures. +// Names for pb.GoFeatures. const ( GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures" ) -// Field names for google.protobuf.GoFeatures. +// Field names for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" ) -// Field numbers for google.protobuf.GoFeatures. +// Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 ) + +// Extension numbers +const ( + FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go index 8f9ea02f..bef5a25f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field names and numbers for synthetic map entry messages. 
const ( diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go index 429384b8..9404270d 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field name and number for messages in wrappers.proto. const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 4bb0a7a2..0d5b546e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -67,7 +67,6 @@ type lazyExtensionValue struct { xi *extensionFieldInfo value protoreflect.Value b []byte - fn func() protoreflect.Value } type ExtensionField struct { @@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() { } f.lazy.value = val } else { - f.lazy.value = f.lazy.fn() + panic("No support for lazy fns for ExtensionField") } f.lazy.xi = nil - f.lazy.fn = nil f.lazy.b = nil atomic.StoreUint32(&f.lazy.atomicOnce, 1) } @@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) f.lazy = nil } -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. -func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - // Value returns the value of the extension field. // This may be called concurrently. func (f *ExtensionField) Value() protoreflect.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 78ee47e4..7c1f66c8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si if err != nil { return out, err } + if cf.funcs.isInit == nil { + out.initialized = true + } vi.Set(vw) return out, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 6b2fdbb7..78be9df3 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { if mi.methods.Merge == nil { mi.methods.Merge = mi.merge } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } } // getUnknownBytes returns a *[]byte for the unknown fields. diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577b..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, opts) -} - -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n 
< 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index 757642e2..077712c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl // When using unsafe pointers, we can just treat enum values as int32s. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index e06ece55..f72ddd88 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { return protoreflect.ValueOfString(v.Convert(stringType).String()) } func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { - // pref.Value.String never panics, so we go through an interface + // protoreflect.Value.String never panics, so we go through an interface // conversion here to check the type. 
s := v.Interface().(string) if c.goType.Kind() == reflect.Slice && s == "" { diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index febd2122..6254f5de 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,7 +10,7 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go new file mode 100644 index 00000000..9f6c32a7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func equal(in protoiface.EqualInput) protoiface.EqualOutput { + return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)} +} + +// equalMessage is a fast-path variant of protoreflect.equalMessage. +// It takes advantage of the internal messageState type to avoid +// unnecessary allocations, type assertions. +func equalMessage(mx, my protoreflect.Message) bool { + if mx == nil || my == nil { + return mx == my + } + if mx.Descriptor() != my.Descriptor() { + return false + } + + msx, ok := mx.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + msy, ok := my.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + + mi := msx.messageInfo() + miy := msy.messageInfo() + if mi != miy { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + mi.init() + // Compares regular fields + // Modified Message.Range code that compares two messages of the same type + // while going over the fields. + for _, ri := range mi.rangeInfos { + var fd protoreflect.FieldDescriptor + var vx, vy protoreflect.Value + + switch ri := ri.(type) { + case *fieldInfo: + hx := ri.has(msx.pointer()) + hy := ri.has(msy.pointer()) + if hx != hy { + return false + } + if !hx { + continue + } + fd = ri.fieldDesc + vx = ri.get(msx.pointer()) + vy = ri.get(msy.pointer()) + case *oneofInfo: + fnx := ri.which(msx.pointer()) + fny := ri.which(msy.pointer()) + if fnx != fny { + return false + } + if fnx <= 0 { + continue + } + fi := mi.fields[fnx] + fd = fi.fieldDesc + vx = fi.get(msx.pointer()) + vy = fi.get(msy.pointer()) + } + + if !equalValue(fd, vx, vy) { + return false + } + } + + // Compare extensions. + // This is more complicated because mx or my could have empty/nil extension maps, + // however some populated extension map values are equal to nil extension maps. 
+ emx := mi.extensionMap(msx.pointer()) + emy := mi.extensionMap(msy.pointer()) + if emx != nil { + for k, x := range *emx { + xd := x.Type().TypeDescriptor() + xv := x.Value() + var y ExtensionField + ok := false + if emy != nil { + y, ok = (*emy)[k] + } + // We need to treat empty lists as equal to nil values + if emy == nil || !ok { + if xd.IsList() && xv.List().Len() == 0 { + continue + } + return false + } + + if !equalValue(xd, xv, y.Value()) { + return false + } + } + } + if emy != nil { + // emy may have extensions emx does not have, need to check them as well + for k, y := range *emy { + if emx != nil { + // emx has the field, so we already checked it + if _, ok := (*emx)[k]; ok { + continue + } + } + // Empty lists are equal to nil + if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 { + continue + } + + // Cant be equal if the extension is populated + return false + } + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool { + // slow path + if fd.Kind() != protoreflect.MessageKind { + return vx.Equal(vy) + } + + // fast path special cases + if fd.IsMap() { + if fd.MapValue().Kind() == protoreflect.MessageKind { + return equalMessageMap(vx.Map(), vy.Map()) + } + return vx.Equal(vy) + } + + if fd.IsList() { + return equalMessageList(vx.List(), vy.List()) + } + + return equalMessage(vx.Message(), vy.Message()) +} + +// Mostly copied from protoreflect.equalMap. +// This variant only works for messages as map types. +// All other map types should be handled via Value.Equal. +func equalMessageMap(mx, my protoreflect.Map) bool { + if mx.Len() != my.Len() { + return false + } + equal := true + mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + if !my.Has(k) { + equal = false + return false + } + vy := my.Get(k) + equal = equalMessage(vx.Message(), vy.Message()) + return equal + }) + return equal +} + +// Mostly copied from protoreflect.equalList. +// The only change is the usage of equalImpl instead of protoreflect.equalValue. +func equalMessageList(lx, ly protoreflect.List) bool { + if lx.Len() != ly.Len() { + return false + } + for i := 0; i < lx.Len(); i++ { + // We only operate on messages here since equalImpl will not call us in any other case. + if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) { + return false + } + } + return true +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +// Copied from protoreflect.equalUnknown. +func equalUnknown(x, y protoreflect.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) 
+ y = y[n:] + } + if len(mx) != len(my) { + return false + } + + for k, v1 := range mx { + if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) { + return false + } + } + + return true +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 6e8677ee..b6849d66 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsLazy() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } func (x placeholderExtension) IsList() bool { return false } func (x placeholderExtension) IsMap() bool { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 019399d4..741b5ed2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -30,8 +30,8 @@ type MessageInfo struct { // Desc is the underlying message descriptor type and must be populated. Desc protoreflect.MessageDescriptor - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. + // Deprecated: Exporter will be removed the next time we bump + // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640 Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go deleted file mode 100644 index da685e8a..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "fmt" - "reflect" - "sync" -) - -const UnsafeEnabled = false - -// Pointer is an opaque pointer type. -type Pointer any - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the field index into a struct. -type offset struct { - index int - export exporter -} - -// offsetOf returns a field offset for the struct field. -func offsetOf(f reflect.StructField, x exporter) offset { - if len(f.Index) != 1 { - panic("embedded structs are not supported") - } - if f.PkgPath == "" { - return offset{index: f.Index[0]} // field is already exported - } - if x == nil { - panic("exporter must be provided for unexported field") - } - return offset{index: f.Index[0], export: x} -} - -// IsValid reports whether the offset is valid. -func (f offset) IsValid() bool { return f.index >= 0 } - -// invalidOffset is an invalid field offset. -var invalidOffset = offset{index: -1} - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset{index: 0} - -// pointer is an abstract representation of a pointer to a struct or field. -type pointer struct{ v reflect.Value } - -// pointerOf returns p as a pointer. 
-func pointerOf(p Pointer) pointer { - return pointerOfIface(p) -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{v: v} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v any) pointer { - return pointer{v: reflect.ValueOf(v)} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.v.IsNil() -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The current pointer must be pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if f.export != nil { - if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { - return pointer{v: v} - } - } - return pointer{v: p.v.Elem().Field(f.index).Addr()} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - if got := p.v.Type().Elem(); got != t { - panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) - } - return p.v -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) any { - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } -func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } -func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } -func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } -func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } -func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } -func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } -func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } -func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } -func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } -func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } -func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } -func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } -func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } -func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } -func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } -func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } -func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } -func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } -func (p pointer) String() *string { return p.v.Interface().(*string) } -func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } -func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } -func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } -func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } -func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } -func (p pointer) Extensions() 
*map[int32]ExtensionField { - return p.v.Interface().(*map[int32]ExtensionField) -} - -func (p pointer) Elem() pointer { - return pointer{v: p.v.Elem()} -} - -// PointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) PointerSlice() []pointer { - // TODO: reconsider this - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - sp := p.v.Elem() - sp.Set(reflect.Append(sp, v.v)) -} - -// SetPointer sets *p to v. -func (p pointer) SetPointer(v pointer) { - p.v.Elem().Set(v.v) -} - -func growSlice(p pointer, addCap int) { - // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. - in := p.v.Elem() - out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) - reflect.Copy(out, in) - p.v.Elem().Set(out) -} - -func (p pointer) growBoolSlice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - growSlice(p, addCap) -} - -func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } -func (ms *messageState) pointer() pointer { panic("not supported") } -func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } - -type atomicNilMessage struct { - once sync.Once - m messageReflectWrapper -} - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - m.once.Do(func() { - m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) - m.m.mi = mi - }) - return &m.m -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 5f20ca5d..79e18666 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl import ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f333..00000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index a008acd0..832a7988 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go index 60166f2b..1ffddf68 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index dbbf1f68..fb8e15e8 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 34 - Patch = 2 + Minor = 35 + Patch = 1 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 1a0be1b0..c36d4a9c 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -8,6 +8,7 @@ import ( "reflect" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Equal reports whether two messages are equal, @@ -51,6 +52,14 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } + + // Only one of the messages needs to implement the fast-path for it to work. + pmx := protoMethods(mx) + pmy := protoMethods(my) + if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil { + return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal + } + vx := protoreflect.ValueOfMessage(mx) vy := protoreflect.ValueOfMessage(my) return vx.Equal(vy) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index d248f292..78445d11 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. 
// It panics if xt does not extend m. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// GetExtension, then the call should be followed immediately by a +// type assertion to the expected output value. For example: +// +// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage) +// +// This pattern enables static analysis tools to verify that the asserted type +// matches the Go type associated with the extension field and +// also enables a possible future migration to a type-safe extension API. +// +// Since singular messages are the most common extension type, the pattern of +// calling HasExtension followed by GetExtension may be simplified to: +// +// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil { +// ... // make use of mm +// } +// +// The mm variable is non-nil if and only if HasExtension reports true. func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { @@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a +// concrete type that matches the expected Go type for the extension descriptor +// so that static analysis tools can verify type correctness. +// This also enables a possible future migration to a type-safe extension API. 
func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index d5d5af6e..742cb518 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -23,6 +23,7 @@ type ( Unmarshal func(unmarshalInput) (unmarshalOutput, error) Merge func(mergeInput) mergeOutput CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + Equal func(equalInput) equalOutput } supportFlags = uint64 sizeInput = struct { @@ -75,4 +76,13 @@ type ( checkInitializedOutput = struct { pragma.NoUnkeyedLiterals } + equalInput = struct { + pragma.NoUnkeyedLiterals + MessageA Message + MessageB Message + } + equalOutput = struct { + pragma.NoUnkeyedLiterals + Equal bool + } ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 75f83a2a..00000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. -// This uses a distinct field for each type. This is type safe in Go, but -// occupies more memory than necessary (72B). -type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface any // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v any) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() any { - return v.iface -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 7f3583ea..0015fcb3 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index f7d38699..479527b5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 44cf467d..24615656 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -39,6 +39,9 @@ type Methods = struct { // CheckInitialized returns an error if any required fields in the message are not set. CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) + + // Equal compares two messages and returns EqualOutput.Equal == true if they are equal. + Equal func(EqualInput) EqualOutput } // SupportFlags indicate support for optional features. @@ -166,3 +169,18 @@ type CheckInitializedInput = struct { type CheckInitializedOutput = struct { pragma.NoUnkeyedLiterals } + +// EqualInput is input to the Equal method. +type EqualInput = struct { + pragma.NoUnkeyedLiterals + + MessageA protoreflect.Message + MessageB protoreflect.Message +} + +// EqualOutput is output from the Equal method. 
+type EqualOutput = struct { + pragma.NoUnkeyedLiterals + + Equal bool +} diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 9403eb07..6dea75cd 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -1217,11 +1217,9 @@ type FileDescriptorSet struct { func (x *FileDescriptorSet) Reset() { *x = FileDescriptorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorSet) String() string { @@ -1232,7 +1230,7 @@ func (*FileDescriptorSet) ProtoMessage() {} func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1291,11 +1289,9 @@ type FileDescriptorProto struct { func (x *FileDescriptorProto) Reset() { *x = FileDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorProto) String() string { @@ -1306,7 +1302,7 @@ func (*FileDescriptorProto) ProtoMessage() {} func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1434,11 +1430,9 @@ type DescriptorProto struct { func (x *DescriptorProto) Reset() { *x = DescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto) String() string { @@ -1449,7 +1443,7 @@ func (*DescriptorProto) ProtoMessage() {} func (x *DescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1561,11 +1555,9 @@ const ( func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions) String() string { @@ -1576,7 +1568,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {} func (x *ExtensionRangeOptions) ProtoReflect() 
protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1680,11 +1672,9 @@ type FieldDescriptorProto struct { func (x *FieldDescriptorProto) Reset() { *x = FieldDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldDescriptorProto) String() string { @@ -1695,7 +1685,7 @@ func (*FieldDescriptorProto) ProtoMessage() {} func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1799,11 +1789,9 @@ type OneofDescriptorProto struct { func (x *OneofDescriptorProto) Reset() { *x = OneofDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofDescriptorProto) String() string { @@ -1814,7 +1802,7 @@ func (*OneofDescriptorProto) ProtoMessage() {} func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1863,11 +1851,9 @@ type EnumDescriptorProto struct { func (x *EnumDescriptorProto) Reset() { *x = EnumDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto) String() string { @@ -1878,7 +1864,7 @@ func (*EnumDescriptorProto) ProtoMessage() {} func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1941,11 +1927,9 @@ type EnumValueDescriptorProto struct { func (x *EnumValueDescriptorProto) Reset() { *x = EnumValueDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueDescriptorProto) String() string { @@ -1956,7 +1940,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {} func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2005,11 +1989,9 @@ type ServiceDescriptorProto struct { func (x *ServiceDescriptorProto) Reset() { *x = ServiceDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceDescriptorProto) String() string { @@ -2020,7 +2002,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {} func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2082,11 +2064,9 @@ const ( func (x *MethodDescriptorProto) Reset() { *x = MethodDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodDescriptorProto) String() string { @@ -2097,7 +2077,7 @@ func (*MethodDescriptorProto) ProtoMessage() {} func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2267,11 +2247,9 @@ const ( func (x *FileOptions) Reset() { *x = FileOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileOptions) String() string { @@ -2282,7 +2260,7 @@ func (*FileOptions) ProtoMessage() {} func (x *FileOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2534,11 +2512,9 @@ const ( func (x *MessageOptions) Reset() { *x = MessageOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MessageOptions) String() string { @@ -2549,7 +2525,7 @@ func (*MessageOptions) ProtoMessage() {} func (x *MessageOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2707,11 +2683,9 @@ const ( func (x *FieldOptions) Reset() { *x = 
FieldOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions) String() string { @@ -2722,7 +2696,7 @@ func (*FieldOptions) ProtoMessage() {} func (x *FieldOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2849,11 +2823,9 @@ type OneofOptions struct { func (x *OneofOptions) Reset() { *x = OneofOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofOptions) String() string { @@ -2864,7 +2836,7 @@ func (*OneofOptions) ProtoMessage() {} func (x *OneofOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2929,11 +2901,9 @@ const ( func (x *EnumOptions) Reset() { *x = EnumOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumOptions) String() string { @@ -2944,7 +2914,7 @@ func (*EnumOptions) ProtoMessage() {} func (x *EnumOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3026,11 +2996,9 @@ const ( func (x *EnumValueOptions) Reset() { *x = EnumValueOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueOptions) String() string { @@ -3041,7 +3009,7 @@ func (*EnumValueOptions) ProtoMessage() {} func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3115,11 +3083,9 @@ const ( func (x *ServiceOptions) Reset() { *x = ServiceOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *ServiceOptions) String() string { @@ -3130,7 +3096,7 @@ func (*ServiceOptions) ProtoMessage() {} func (x *ServiceOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3192,11 +3158,9 @@ const ( func (x *MethodOptions) Reset() { *x = MethodOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodOptions) String() string { @@ -3207,7 +3171,7 @@ func (*MethodOptions) ProtoMessage() {} func (x *MethodOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3274,11 +3238,9 @@ type UninterpretedOption struct { func (x *UninterpretedOption) Reset() { *x = UninterpretedOption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption) String() string { @@ -3289,7 +3251,7 @@ func (*UninterpretedOption) ProtoMessage() {} func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3375,11 +3337,9 @@ type FeatureSet struct { func (x *FeatureSet) Reset() { *x = FeatureSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSet) String() string { @@ -3390,7 +3350,7 @@ func (*FeatureSet) ProtoMessage() {} func (x *FeatureSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3467,11 +3427,9 @@ type FeatureSetDefaults struct { func (x *FeatureSetDefaults) Reset() { *x = FeatureSetDefaults{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults) String() string { @@ -3482,7 +3440,7 @@ func (*FeatureSetDefaults) ProtoMessage() {} func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_descriptor_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3578,11 +3536,9 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo) String() string { @@ -3593,7 +3549,7 @@ func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3630,11 +3586,9 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo) String() string { @@ -3645,7 +3599,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3679,11 +3633,9 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ExtensionRange) String() string { @@ -3694,7 +3646,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3744,11 +3696,9 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ReservedRange) String() string { @@ -3759,7 +3709,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_descriptor_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3813,11 +3763,9 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions_Declaration) String() string { @@ -3828,7 +3776,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3895,11 +3843,9 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto_EnumReservedRange) String() string { @@ -3910,7 +3856,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3950,11 +3896,9 @@ type FieldOptions_EditionDefault struct { func (x *FieldOptions_EditionDefault) Reset() { *x = FieldOptions_EditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_EditionDefault) String() string { @@ -3965,7 +3909,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {} func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4018,11 +3962,9 @@ type FieldOptions_FeatureSupport struct { func (x *FieldOptions_FeatureSupport) Reset() { *x = FieldOptions_FeatureSupport{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_FeatureSupport) 
String() string { @@ -4033,7 +3975,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {} func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4092,11 +4034,9 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption_NamePart) String() string { @@ -4107,7 +4047,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4154,11 +4094,9 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { @@ -4169,7 +4107,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4305,11 +4243,9 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo_Location) String() string { @@ -4320,7 +4256,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4392,11 +4328,9 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_google_protobuf_descriptor_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo_Annotation) String() string { @@ -4407,7 +4341,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5385,424 +5319,6 @@ func file_google_protobuf_descriptor_proto_init() { if File_google_protobuf_descriptor_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*FieldDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*OneofDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*ServiceDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*MethodDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*FileOptions); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*MessageOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*OneofOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*EnumOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*ServiceOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*MethodOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ExtensionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions_Declaration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_EditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_FeatureSupport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption_NamePart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo_Location); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo_Annotation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 7172b43d..87da199a 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() 
string { @@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_any_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,20 +459,6 @@ func file_google_protobuf_any_proto_init() { if File_google_protobuf_any_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 1b71bcd9..b99d4d24 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -245,11 +245,9 @@ func (x *Duration) check() uint { func (x *Duration) Reset() { *x = Duration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Duration) String() string { @@ -260,7 +258,7 @@ func (*Duration) ProtoMessage() {} func (x *Duration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_duration_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -339,20 +337,6 @@ func file_google_protobuf_duration_proto_init() { if File_google_protobuf_duration_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Duration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index d87b4fb8..1761bc9c 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -55,11 +55,9 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_empty_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Empty) String() string { @@ -70,7 +68,7 @@ func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_empty_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -131,20 +129,6 @@ func 
file_google_protobuf_empty_proto_init() { if File_google_protobuf_empty_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index ac1e91bb..19de8d37 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -467,11 +467,9 @@ func rangeFields(path string, f func(field string) bool) bool { func (x *FieldMask) Reset() { *x = FieldMask{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_field_mask_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldMask) String() string { @@ -482,7 +480,7 @@ func (*FieldMask) ProtoMessage() {} func (x *FieldMask) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -553,20 +551,6 @@ func file_google_protobuf_field_mask_proto_init() { if File_google_protobuf_field_mask_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FieldMask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 83a5a645..0d20722d 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -254,11 +254,9 @@ func (x *Timestamp) check() uint { func (x *Timestamp) Reset() { *x = Timestamp{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Timestamp) String() string { @@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {} func (x *Timestamp) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -348,20 +346,6 @@ func file_google_protobuf_timestamp_proto_init() { if File_google_protobuf_timestamp_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := 
v.(*Timestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httptrace/config.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httptrace/config.go index b545a447..23ee0113 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httptrace/config.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httptrace/config.go @@ -8,6 +8,8 @@ package httptrace import ( "os" "regexp" + "strconv" + "strings" "gopkg.in/DataDog/dd-trace-go.v1/internal" "gopkg.in/DataDog/dd-trace-go.v1/internal/log" @@ -22,6 +24,8 @@ const ( envQueryStringRegexp = "DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP" // envTraceClientIPEnabled is the name of the env var used to specify whether or not to collect client ip in span tags envTraceClientIPEnabled = "DD_TRACE_CLIENT_IP_ENABLED" + // envServerErrorStatuses is the name of the env var used to specify error status codes on http server spans + envServerErrorStatuses = "DD_TRACE_HTTP_SERVER_ERROR_STATUSES" ) // defaultQueryStringRegexp is the regexp used for query string obfuscation if `envQueryStringRegexp` is empty. @@ -31,6 +35,7 @@ type config struct { queryStringRegexp *regexp.Regexp // specifies the regexp to use for query string obfuscation. queryString bool // reports whether the query string should be included in the URL span tag. traceClientIP bool + isStatusError func(statusCode int) bool } func newConfig() config { @@ -38,6 +43,11 @@ func newConfig() config { queryString: !internal.BoolEnv(envQueryStringDisabled, false), queryStringRegexp: defaultQueryStringRegexp, traceClientIP: internal.BoolEnv(envTraceClientIPEnabled, false), + isStatusError: isServerError, + } + v := os.Getenv(envServerErrorStatuses) + if fn := GetErrorCodesFromInput(v); fn != nil { + c.isStatusError = fn } if s, ok := os.LookupEnv(envQueryStringRegexp); !ok { return c @@ -47,7 +57,66 @@ func newConfig() config { } else if r, err := regexp.Compile(s); err == nil { c.queryStringRegexp = r } else { - log.Debug("Could not compile regexp from %s. Using default regexp instead.", envQueryStringRegexp) + log.Error("Could not compile regexp from %s. 
Using default regexp instead.", envQueryStringRegexp) } return c } + +func isServerError(statusCode int) bool { + return statusCode >= 500 && statusCode < 600 +} + +// GetErrorCodesFromInput parses a comma-separated string s to determine which codes are to be considered errors +// Its purpose is to support the DD_TRACE_HTTP_SERVER_ERROR_STATUSES env var +// If error condition cannot be determined from s, `nil` is returned +// e.g, input of "100,200,300-400" returns a function that returns true on 100, 200, and all values between 300-400, inclusive +// any input that cannot be translated to integer values returns nil +func GetErrorCodesFromInput(s string) func(statusCode int) bool { + if s == "" { + return nil + } + var codes []int + var ranges [][]int + vals := strings.Split(s, ",") + for _, val := range vals { + // "-" indicates a range of values + if strings.Contains(val, "-") { + bounds := strings.Split(val, "-") + if len(bounds) != 2 { + log.Debug("Trouble parsing %v due to entry %v, using default error status determination logic", s, val) + return nil + } + before, err := strconv.Atoi(bounds[0]) + if err != nil { + log.Debug("Trouble parsing %v due to entry %v, using default error status determination logic", s, val) + return nil + } + after, err := strconv.Atoi(bounds[1]) + if err != nil { + log.Debug("Trouble parsing %v due to entry %v, using default error status determination logic", s, val) + return nil + } + ranges = append(ranges, []int{before, after}) + } else { + intVal, err := strconv.Atoi(val) + if err != nil { + log.Debug("Trouble parsing %v due to entry %v, using default error status determination logic", s, val) + return nil + } + codes = append(codes, intVal) + } + } + return func(statusCode int) bool { + for _, c := range codes { + if c == statusCode { + return true + } + } + for _, bounds := range ranges { + if statusCode >= bounds[0] && statusCode <= bounds[1] { + return true + } + } + return false + } +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httptrace/httptrace.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httptrace/httptrace.go index 6c92fb80..6fa5a432 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httptrace/httptrace.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httptrace/httptrace.go @@ -18,7 +18,7 @@ import ( "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" "gopkg.in/DataDog/dd-trace-go.v1/internal" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/httptrace" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec" "gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema" ) @@ -34,7 +34,7 @@ func StartRequestSpan(r *http.Request, opts ...ddtrace.StartSpanOption) (tracer. var ipTags map[string]string if cfg.traceClientIP { - ipTags, _ = httptrace.ClientIPTags(r.Header, true, r.RemoteAddr) + ipTags, _ = httpsec.ClientIPTags(r.Header, true, r.RemoteAddr) } nopts := make([]ddtrace.StartSpanOption, 0, len(opts)+1+len(ipTags)) nopts = append(nopts, @@ -65,15 +65,21 @@ func StartRequestSpan(r *http.Request, opts ...ddtrace.StartSpanOption) (tracer. // code. Any further span finish option can be added with opts. 
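Editor's note on the hunks above: the config.go change introduces DD_TRACE_HTTP_SERVER_ERROR_STATUSES and GetErrorCodesFromInput, whose doc comment describes a comma-separated list of single codes and inclusive "low-high" ranges, and the httptrace.go hunk that follows uses the resulting predicate in FinishRequestSpan (including for a status of 0). The standalone sketch below is only an editor's illustration of those documented parsing semantics, not the vendored code; parseErrorStatuses is a hypothetical mirror of the behaviour described in the comment.

// Editor's illustration (not part of the vendored diff): minimal standalone
// sketch of the "100,200,300-400" parsing semantics documented above.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseErrorStatuses mirrors the documented behaviour: single codes and
// inclusive "low-high" ranges, comma separated; any parse error yields nil.
func parseErrorStatuses(s string) func(int) bool {
	if s == "" {
		return nil
	}
	var codes []int
	var ranges [][2]int
	for _, val := range strings.Split(s, ",") {
		if strings.Contains(val, "-") {
			bounds := strings.Split(val, "-")
			if len(bounds) != 2 {
				return nil
			}
			lo, err1 := strconv.Atoi(bounds[0])
			hi, err2 := strconv.Atoi(bounds[1])
			if err1 != nil || err2 != nil {
				return nil
			}
			ranges = append(ranges, [2]int{lo, hi})
			continue
		}
		c, err := strconv.Atoi(val)
		if err != nil {
			return nil
		}
		codes = append(codes, c)
	}
	return func(status int) bool {
		for _, c := range codes {
			if c == status {
				return true
			}
		}
		for _, r := range ranges {
			if status >= r[0] && status <= r[1] {
				return true
			}
		}
		return false
	}
}

func main() {
	isErr := parseErrorStatuses("100,200,300-400")
	fmt.Println(isErr(200), isErr(350), isErr(500)) // true true false
}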
func FinishRequestSpan(s tracer.Span, status int, opts ...tracer.FinishOption) { var statusStr string + // if status is 0, treat it like 200 unless 0 was called out in DD_TRACE_HTTP_SERVER_ERROR_STATUSES if status == 0 { - statusStr = "200" + if cfg.isStatusError(status) { + statusStr = "0" + s.SetTag(ext.Error, fmt.Errorf("%s: %s", statusStr, http.StatusText(status))) + } else { + statusStr = "200" + } } else { statusStr = strconv.Itoa(status) + if cfg.isStatusError(status) { + s.SetTag(ext.Error, fmt.Errorf("%s: %s", statusStr, http.StatusText(status))) + } } s.SetTag(ext.HTTPCode, statusStr) - if status >= 500 && status < 600 { - s.SetTag(ext.Error, fmt.Errorf("%s: %s", statusStr, http.StatusText(status))) - } s.Finish(opts...) } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/option.go index cb658b45..22e63ee3 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/option.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/option.go @@ -8,7 +8,9 @@ package http import ( "math" "net/http" + "os" + "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httptrace" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" @@ -18,7 +20,13 @@ import ( "gopkg.in/DataDog/dd-trace-go.v1/internal/normalizer" ) -const defaultServiceName = "http.router" +const ( + defaultServiceName = "http.router" + // envClientQueryStringEnabled is the name of the env var used to specify whether query string collection is enabled for http client spans. + envClientQueryStringEnabled = "DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING" + // envClientErrorStatuses is the name of the env var that specifies error status codes on http client spans + envClientErrorStatuses = "DD_TRACE_HTTP_CLIENT_ERROR_STATUSES" +) type config struct { serviceName string @@ -146,6 +154,8 @@ type roundTripperConfig struct { spanOpts []ddtrace.StartSpanOption propagation bool errCheck func(err error) bool + queryString bool // reports whether the query string is included in the URL tag for http client spans + isStatusError func(statusCode int) bool } func newRoundTripperConfig() *roundTripperConfig { @@ -156,14 +166,22 @@ func newRoundTripperConfig() *roundTripperConfig { defaultSpanNamer := func(_ *http.Request) string { return spanName } - return &roundTripperConfig{ + + c := &roundTripperConfig{ serviceName: namingschema.ServiceNameOverrideV0("", ""), analyticsRate: globalconfig.AnalyticsRate(), resourceNamer: defaultResourceNamer, propagation: true, spanNamer: defaultSpanNamer, ignoreRequest: func(_ *http.Request) bool { return false }, + queryString: internal.BoolEnv(envClientQueryStringEnabled, true), + isStatusError: isClientError, + } + v := os.Getenv(envClientErrorStatuses) + if fn := httptrace.GetErrorCodesFromInput(v); fn != nil { + c.isStatusError = fn } + return c } // A RoundTripperOption represents an option that can be passed to @@ -264,3 +282,7 @@ func RTWithErrorCheck(fn func(err error) bool) RoundTripperOption { cfg.errCheck = fn } } + +func isClientError(statusCode int) bool { + return statusCode >= 400 && statusCode < 500 +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/roundtripper.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/roundtripper.go index bd71daa8..7e47c530 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/roundtripper.go +++ 
b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/roundtripper.go @@ -7,11 +7,13 @@ package http import ( "fmt" - "gopkg.in/DataDog/dd-trace-go.v1/appsec/events" "math" "net/http" "os" "strconv" + "strings" + + "gopkg.in/DataDog/dd-trace-go.v1/appsec/events" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" @@ -38,7 +40,7 @@ func (rt *roundTripper) RoundTrip(req *http.Request) (res *http.Response, err er tracer.SpanType(ext.SpanTypeHTTP), tracer.ResourceName(resourceName), tracer.Tag(ext.HTTPMethod, req.Method), - tracer.Tag(ext.HTTPURL, url.String()), + tracer.Tag(ext.HTTPURL, urlFromRequest(req, rt.cfg.queryString)), tracer.Tag(ext.Component, componentName), tracer.Tag(ext.SpanKind, ext.SpanKindClient), tracer.Tag(ext.NetworkDestinationName, url.Hostname()), @@ -86,7 +88,6 @@ func (rt *roundTripper) RoundTrip(req *http.Request) (res *http.Response, err er } res, err = rt.base.RoundTrip(r2) - if err != nil { span.SetTag("http.errors", err.Error()) if rt.cfg.errCheck == nil || rt.cfg.errCheck(err) { @@ -94,8 +95,7 @@ func (rt *roundTripper) RoundTrip(req *http.Request) (res *http.Response, err er } } else { span.SetTag(ext.HTTPCode, strconv.Itoa(res.StatusCode)) - // treat 5XX as errors - if res.StatusCode/100 == 5 { + if rt.cfg.isStatusError(res.StatusCode) { span.SetTag("http.errors", res.Status) span.SetTag(ext.Error, fmt.Errorf("%d: %s", res.StatusCode, http.StatusText(res.StatusCode))) } @@ -135,3 +135,32 @@ func WrapClient(c *http.Client, opts ...RoundTripperOption) *http.Client { c.Transport = WrapRoundTripper(c.Transport, opts...) return c } + +// urlFromRequest returns the URL from the HTTP request. The URL query string is included in the return object iff queryString is true +// See https://docs.datadoghq.com/tracing/configure_data_security#redacting-the-query-in-the-url for more information. +func urlFromRequest(r *http.Request, queryString bool) string { + // Quoting net/http comments about net.Request.URL on server requests: + // "For most requests, fields other than Path and RawQuery will be + // empty. (See RFC 7230, Section 5.3)" + // This is why we don't rely on url.URL.String(), url.URL.Host, url.URL.Scheme, etc... 
+ var url string + path := r.URL.EscapedPath() + scheme := r.URL.Scheme + if r.TLS != nil { + scheme = "https" + } + if r.Host != "" { + url = strings.Join([]string{scheme, "://", r.Host, path}, "") + } else { + url = path + } + // Collect the query string if we are allowed to report it and obfuscate it if possible/allowed + if queryString && r.URL.RawQuery != "" { + query := r.URL.RawQuery + url = strings.Join([]string{url, query}, "?") + } + if frag := r.URL.EscapedFragment(); frag != "" { + url = strings.Join([]string{url, frag}, "#") + } + return url +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_payload.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_payload.go index df8ffc04..ce8cc0c2 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_payload.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_payload.go @@ -10,7 +10,10 @@ import ( "sync/atomic" "github.com/tinylib/msgp/msgp" + "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants" + "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils" "gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig" + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" "gopkg.in/DataDog/dd-trace-go.v1/internal/version" ) @@ -46,6 +49,7 @@ func (p *ciVisibilityPayload) push(event *ciVisibilityEvent) error { // // A pointer to a newly initialized civisibilitypayload instance. func newCiVisibilityPayload() *ciVisibilityPayload { + log.Debug("ciVisibilityPayload: creating payload instance") return &ciVisibilityPayload{newPayload()} } @@ -61,6 +65,27 @@ func newCiVisibilityPayload() *ciVisibilityPayload { // A pointer to a bytes.Buffer containing the encoded CI Visibility payload. // An error if reading from the buffer or encoding the payload fails. 
func (p *ciVisibilityPayload) getBuffer(config *config) (*bytes.Buffer, error) { + log.Debug("ciVisibilityPayload: .getBuffer (count: %v)", p.itemCount()) + + // Create a buffer to read the current payload + payloadBuf := new(bytes.Buffer) + if _, err := payloadBuf.ReadFrom(p.payload); err != nil { + return nil, err + } + + // Create the visibility payload + visibilityPayload := p.writeEnvelope(config.env, payloadBuf.Bytes()) + + // Create a new buffer to encode the visibility payload in MessagePack format + encodedBuf := new(bytes.Buffer) + if err := msgp.Encode(encodedBuf, visibilityPayload); err != nil { + return nil, err + } + + return encodedBuf, nil +} + +func (p *ciVisibilityPayload) writeEnvelope(env string, events []byte) *ciTestCyclePayload { /* The Payload format in the CI Visibility protocol is like this: @@ -82,36 +107,35 @@ func (p *ciVisibilityPayload) getBuffer(config *config) (*bytes.Buffer, error) { The event format can be found in the `civisibility_tslv.go` file in the ciVisibilityEvent documentation */ - // Create a buffer to read the current payload - payloadBuf := new(bytes.Buffer) - if _, err := payloadBuf.ReadFrom(p.payload); err != nil { - return nil, err - } - // Create the metadata map allMetadata := map[string]string{ "language": "go", "runtime-id": globalconfig.RuntimeID(), "library_version": version.Tag, } - if config.env != "" { - allMetadata["env"] = config.env + if env != "" { + allMetadata["env"] = env } // Create the visibility payload - visibilityPayload := ciTestCyclePayload{ + visibilityPayload := &ciTestCyclePayload{ Version: 1, Metadata: map[string]map[string]string{ "*": allMetadata, }, - Events: payloadBuf.Bytes(), + Events: events, } - // Create a new buffer to encode the visibility payload in MessagePack format - encodedBuf := new(bytes.Buffer) - if err := msgp.Encode(encodedBuf, &visibilityPayload); err != nil { - return nil, err + // Check for the test session name and append the tag at the metadata level + if testSessionName, ok := utils.GetCITags()[constants.TestSessionName]; ok { + testSessionMap := map[string]string{ + constants.TestSessionName: testSessionName, + } + visibilityPayload.Metadata["test_session_end"] = testSessionMap + visibilityPayload.Metadata["test_module_end"] = testSessionMap + visibilityPayload.Metadata["test_suite_end"] = testSessionMap + visibilityPayload.Metadata["test"] = testSessionMap } - return encodedBuf, nil + return visibilityPayload } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_transport.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_transport.go index db64b5d7..07313320 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_transport.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_transport.go @@ -105,6 +105,8 @@ func newCiVisibilityTransport(config *config) *ciVisibilityTransport { testCycleURL = fmt.Sprintf("%s/%s/%s", config.agentURL.String(), EvpProxyPath, TestCyclePath) } + log.Debug("ciVisibilityTransport: creating transport instance [agentless: %v, testcycleurl: %v]", agentlessEnabled, testCycleURL) + return &ciVisibilityTransport{ config: config, testCycleURLPath: testCycleURL, @@ -157,6 +159,7 @@ func (t *ciVisibilityTransport) send(p *payload) (body io.ReadCloser, err error) req.Header.Set("Content-Encoding", "gzip") } + log.Debug("ciVisibilityTransport: sending transport request: %v bytes", buffer.Len()) response, err := t.config.httpClient.Do(req) if err != nil { return nil, err diff 
--git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_tslv.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_tslv.go index 377f6d56..ef6614d4 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_tslv.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_tslv.go @@ -273,6 +273,7 @@ func getCiVisibilityEvent(span *span) *ciVisibilityEvent { // A pointer to the created ciVisibilityEvent. func createTestEventFromSpan(span *span) *ciVisibilityEvent { tSpan := createTslvSpan(span) + tSpan.ParentID = 0 tSpan.SessionID = getAndRemoveMetaToUInt64(span, constants.TestSessionIDTag) tSpan.ModuleID = getAndRemoveMetaToUInt64(span, constants.TestModuleIDTag) tSpan.SuiteID = getAndRemoveMetaToUInt64(span, constants.TestSuiteIDTag) @@ -298,6 +299,7 @@ func createTestEventFromSpan(span *span) *ciVisibilityEvent { // A pointer to the created ciVisibilityEvent. func createTestSuiteEventFromSpan(span *span) *ciVisibilityEvent { tSpan := createTslvSpan(span) + tSpan.ParentID = 0 tSpan.SessionID = getAndRemoveMetaToUInt64(span, constants.TestSessionIDTag) tSpan.ModuleID = getAndRemoveMetaToUInt64(span, constants.TestModuleIDTag) tSpan.SuiteID = getAndRemoveMetaToUInt64(span, constants.TestSuiteIDTag) @@ -320,6 +322,7 @@ func createTestSuiteEventFromSpan(span *span) *ciVisibilityEvent { // A pointer to the created ciVisibilityEvent. func createTestModuleEventFromSpan(span *span) *ciVisibilityEvent { tSpan := createTslvSpan(span) + tSpan.ParentID = 0 tSpan.SessionID = getAndRemoveMetaToUInt64(span, constants.TestSessionIDTag) tSpan.ModuleID = getAndRemoveMetaToUInt64(span, constants.TestModuleIDTag) return &ciVisibilityEvent{ @@ -341,6 +344,7 @@ func createTestModuleEventFromSpan(span *span) *ciVisibilityEvent { // A pointer to the created ciVisibilityEvent. func createTestSessionEventFromSpan(span *span) *ciVisibilityEvent { tSpan := createTslvSpan(span) + tSpan.ParentID = 0 tSpan.SessionID = getAndRemoveMetaToUInt64(span, constants.TestSessionIDTag) return &ciVisibilityEvent{ span: span, diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_writer.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_writer.go index 1582b200..969b5ede 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_writer.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/civisibility_writer.go @@ -45,6 +45,7 @@ type ciVisibilityTraceWriter struct { // // A pointer to an initialized ciVisibilityTraceWriter. 
func newCiVisibilityTraceWriter(c *config) *ciVisibilityTraceWriter { + log.Debug("ciVisibilityTraceWriter: creating trace writer instance") return &ciVisibilityTraceWriter{ config: c, payload: newCiVisibilityPayload(), @@ -62,7 +63,7 @@ func (w *ciVisibilityTraceWriter) add(trace []*span) { for _, s := range trace { cvEvent := getCiVisibilityEvent(s) if err := w.payload.push(cvEvent); err != nil { - log.Error("Error encoding msgpack: %v", err) + log.Error("ciVisibilityTraceWriter: Error encoding msgpack: %v", err) } if w.payload.size() > agentlessPayloadSizeLimit { w.flush() @@ -104,16 +105,16 @@ func (w *ciVisibilityTraceWriter) flush() { var err error for attempt := 0; attempt <= w.config.sendRetries; attempt++ { size, count = p.size(), p.itemCount() - log.Debug("Sending payload: size: %d events: %d\n", size, count) + log.Debug("ciVisibilityTraceWriter: sending payload: size: %d events: %d\n", size, count) _, err = w.config.transport.send(p.payload) if err == nil { - log.Debug("sent events after %d attempts", attempt+1) + log.Debug("ciVisibilityTraceWriter: sent events after %d attempts", attempt+1) return } - log.Error("failure sending events (attempt %d), will retry: %v", attempt+1, err) + log.Error("ciVisibilityTraceWriter: failure sending events (attempt %d), will retry: %v", attempt+1, err) p.reset() time.Sleep(time.Millisecond) } - log.Error("lost %d events: %v", count, err) + log.Error("ciVisibilityTraceWriter: lost %d events: %v", count, err) }(oldp) } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/option.go index 55041e94..9175c20e 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/option.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/option.go @@ -15,7 +15,6 @@ import ( "net/url" "os" "path/filepath" - "regexp" "runtime" "runtime/debug" "strconv" @@ -23,6 +22,7 @@ import ( "time" "golang.org/x/mod/semver" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" "gopkg.in/DataDog/dd-trace-go.v1/internal" @@ -69,7 +69,6 @@ var contribIntegrations = map[string]struct { "github.com/gomodule/redigo": {"Redigo", false}, "google.golang.org/api": {"Google API", false}, "google.golang.org/grpc": {"gRPC", false}, - "google.golang.org/grpc/v12": {"gRPC v12", false}, "gopkg.in/jinzhu/gorm.v1": {"Gorm (gopkg)", false}, "github.com/gorilla/mux": {"Gorilla Mux", false}, "gorm.io/gorm.v1": {"Gorm v1", false}, @@ -697,23 +696,6 @@ func (c *config) loadContribIntegrations(deps []*debug.Module) { } for _, d := range deps { p := d.Path - // special use case, since gRPC does not update version number - if p == "google.golang.org/grpc" { - re := regexp.MustCompile(`v(\d.\d)\d*`) - match := re.FindStringSubmatch(d.Version) - if match == nil { - log.Warn("Unable to parse version of GRPC %v", d.Version) - continue - } - ver, err := strconv.ParseFloat(match[1], 32) - if err != nil { - log.Warn("Unable to parse version of GRPC %v as a float", d.Version) - continue - } - if ver <= 1.2 { - p = p + "/v12" - } - } s, ok := contribIntegrations[p] if !ok { continue diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rand.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rand.go index 8eb04c0f..192a6725 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rand.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rand.go @@ -3,68 +3,17 @@ // This product includes software developed at Datadog 
(https://www.datadoghq.com/). // Copyright 2016 Datadog, Inc. -//go:build !go1.22 - -// TODO(knusbaum): This file should be deleted once go1.21 falls out of support package tracer import ( - cryptorand "crypto/rand" "math" - "math/big" - "math/rand" - "sync" - "time" - - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "math/rand/v2" ) -// random holds a thread-safe source of random numbers. -var random *rand.Rand - -func init() { - var seed int64 - n, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)) - if err == nil { - seed = n.Int64() - } else { - log.Warn("cannot generate random seed: %v; using current time", err) - seed = time.Now().UnixNano() - } - random = rand.New(&safeSource{ - source: rand.NewSource(seed), - }) -} - -// safeSource holds a thread-safe implementation of rand.Source64. -type safeSource struct { - source rand.Source - sync.Mutex -} - -func (rs *safeSource) Int63() int64 { - rs.Lock() - n := rs.source.Int63() - rs.Unlock() - - return n -} - -func (rs *safeSource) Uint64() uint64 { return uint64(rs.Int63()) } - -func (rs *safeSource) Seed(seed int64) { - rs.Lock() - rs.source.Seed(seed) - rs.Unlock() +func randUint64() uint64 { + return rand.Uint64() } -// generateSpanID returns a random uint64 that has been XORd with the startTime. -// This is done to get around the 32-bit random seed limitation that may create collisions if there is a large number -// of go services all generating spans. func generateSpanID(startTime int64) uint64 { - return random.Uint64() ^ uint64(startTime) -} - -func randUint64() uint64 { - return random.Uint64() + return rand.Uint64() & math.MaxInt64 } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rand_go1_22.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rand_go1_22.go deleted file mode 100644 index 9e7948e4..00000000 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rand_go1_22.go +++ /dev/null @@ -1,21 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016 Datadog, Inc. - -//go:build go1.22 - -package tracer - -import ( - "math" - "math/rand/v2" -) - -func randUint64() uint64 { - return rand.Uint64() -} - -func generateSpanID(startTime int64) uint64 { - return rand.Uint64() & math.MaxInt64 -} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rules_sampler.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rules_sampler.go index 2cd911e3..037e3936 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rules_sampler.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rules_sampler.go @@ -19,6 +19,7 @@ import ( "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" "gopkg.in/DataDog/dd-trace-go.v1/internal/log" "gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames" + "gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry" "golang.org/x/time/rate" ) @@ -533,6 +534,7 @@ const defaultRateLimit = 100.0 // The limit is DD_TRACE_RATE_LIMIT if set, `defaultRateLimit` otherwise. 
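Editor's note: the rand.go rewrite above drops the seeded, mutex-guarded `math/rand` source in favor of `math/rand/v2`, whose top-level functions are auto-seeded and safe for concurrent use. A minimal standalone sketch of the new span-ID scheme follows; the `main` wrapper is ours, the masking logic mirrors the hunk above.

```go
package main

import (
	"fmt"
	"math"
	"math/rand/v2"
)

// generateSpanID mirrors the go1.22+ code path in the hunk above:
// math/rand/v2 needs no explicit seeding or locking, and the mask keeps
// the value within the non-negative int64 range.
func generateSpanID() uint64 {
	return rand.Uint64() & math.MaxInt64
}

func main() {
	fmt.Println(generateSpanID())
}
```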
func newRateLimiter() *rateLimiter { limit := defaultRateLimit + origin := telemetry.OriginDefault v := os.Getenv("DD_TRACE_RATE_LIMIT") if v != "" { l, err := strconv.ParseFloat(v, 64) @@ -542,9 +544,11 @@ func newRateLimiter() *rateLimiter { log.Warn("DD_TRACE_RATE_LIMIT negative, using default value %f", limit) } else { // override the default limit + origin = telemetry.OriginEnvVar limit = l } } + reportTelemetryOnAppStarted(telemetry.Configuration{Name: "trace_rate_limit", Value: limit, Origin: origin}) return &rateLimiter{ limiter: rate.NewLimiter(rate.Limit(limit), int(math.Ceil(limit))), prevTime: time.Now(), diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span.go index 14f816f9..c37af7d5 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span.go @@ -111,6 +111,9 @@ func (s *span) BaggageItem(key string) string { // SetTag adds a set of key/value metadata to the span. func (s *span) SetTag(key string, value interface{}) { + // To avoid dumping the memory address in case value is a pointer, we dereference it. + // Any pointer value that is a pointer to a pointer will be dumped as a string. + value = dereference(value) s.Lock() defer s.Unlock() // We don't lock spans when flushing, so we could have a data race when @@ -497,9 +500,11 @@ func (s *span) Finish(opts ...ddtrace.FinishOption) { s.SetTag("go_execution_traced", "partial") } - if tr, ok := internal.GetGlobalTracer().(*tracer); ok && tr.rulesSampling.traces.enabled() { - if !s.context.trace.isLocked() && s.context.trace.propagatingTag(keyDecisionMaker) != "-4" { - tr.rulesSampling.SampleTrace(s) + if s.root() == s { + if tr, ok := internal.GetGlobalTracer().(*tracer); ok && tr.rulesSampling.traces.enabled() { + if !s.context.trace.isLocked() && s.context.trace.propagatingTag(keyDecisionMaker) != "-4" { + tr.rulesSampling.SampleTrace(s) + } } } @@ -597,13 +602,21 @@ func newAggregableSpan(s *span, obfuscator *obfuscate.Obfuscator) *aggregableSpa statusCode = uint32(c) } } + var isTraceRoot trilean + if s.ParentID == 0 { + isTraceRoot = trileanTrue + } else { + isTraceRoot = trileanFalse + } + key := aggregation{ - Name: s.Name, - Resource: obfuscatedResource(obfuscator, s.Type, s.Resource), - Service: s.Service, - Type: s.Type, - Synthetics: strings.HasPrefix(s.Meta[keyOrigin], "synthetics"), - StatusCode: statusCode, + Name: s.Name, + Resource: obfuscatedResource(obfuscator, s.Type, s.Resource), + Service: s.Service, + Type: s.Type, + Synthetics: strings.HasPrefix(s.Meta[keyOrigin], "synthetics"), + StatusCode: statusCode, + IsTraceRoot: isTraceRoot, } return &aggregableSpan{ key: key, diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats.go index 720a2a02..8ad5da46 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats.go @@ -214,12 +214,13 @@ func (c *concentrator) flushAndSend(timenow time.Time, includeCurrent bool) { // aggregation specifies a uniquely identifiable key under which a certain set // of stats are grouped inside a bucket. 
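Editor's note: the newRateLimiter hunk above now both parses `DD_TRACE_RATE_LIMIT` and remembers whether the effective value came from the environment or the default, so it can be reported through telemetry. Below is a simplified, stdlib-only sketch of that parse-with-origin pattern; `resolveRateLimit` and the origin strings are illustrative names, not the tracer's own types.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

const defaultRateLimit = 100.0

// resolveRateLimit falls back to the default on a missing, malformed, or
// negative value, and records where the final value came from.
func resolveRateLimit() (limit float64, origin string) {
	limit, origin = defaultRateLimit, "default"
	v := os.Getenv("DD_TRACE_RATE_LIMIT")
	if v == "" {
		return limit, origin
	}
	l, err := strconv.ParseFloat(v, 64)
	if err != nil || l < 0 {
		// keep the default, as the real code does after logging a warning
		return limit, origin
	}
	return l, "env_var"
}

func main() {
	limit, origin := resolveRateLimit()
	fmt.Printf("rate limit %.1f (origin: %s)\n", limit, origin)
}
```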
type aggregation struct { - Name string - Type string - Resource string - Service string - StatusCode uint32 - Synthetics bool + Name string + Type string + Resource string + Service string + StatusCode uint32 + Synthetics bool + IsTraceRoot trilean } type rawBucket struct { @@ -278,6 +279,14 @@ func (sb *rawBucket) Export() statsBucket { return csb } +type trilean int32 + +const ( + trileanNotSet trilean = iota + trileanTrue + trileanFalse +) + type rawGroupedStats struct { hits uint64 topLevelHits uint64 @@ -285,6 +294,7 @@ type rawGroupedStats struct { duration uint64 okDistribution *ddsketch.DDSketch errDistribution *ddsketch.DDSketch + IsTraceRoot trilean } func newRawGroupedStats() *rawGroupedStats { @@ -335,6 +345,7 @@ func (s *rawGroupedStats) export(k aggregation) (groupedStats, error) { OkSummary: okSummary, ErrorSummary: errSummary, Synthetics: k.Synthetics, + IsTraceRoot: int32(k.IsTraceRoot), }, nil } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats_payload.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats_payload.go index 35a68b46..3b77128b 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats_payload.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats_payload.go @@ -53,4 +53,5 @@ type groupedStats struct { ErrorSummary []byte `json:"errorSummary,omitempty"` Synthetics bool `json:"synthetics,omitempty"` TopLevelHits uint64 `json:"topLevelHits,omitempty"` + IsTraceRoot int32 `json:"isTraceRoot,omitempty"` } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats_payload_msgp.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats_payload_msgp.go index 7d15d036..70a3a0b4 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats_payload_msgp.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/stats_payload_msgp.go @@ -2,12 +2,9 @@ // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016 Datadog, Inc. - package tracer -// NOTE: THIS FILE WAS PRODUCED BY THE -// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) -// DO NOT EDIT +// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
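Editor's note: the trilean type introduced above lets the stats payload distinguish "not set" from an explicit true/false for the new IsTraceRoot field (the zero value is what older encoders would have produced). A small self-contained sketch of how the flag is derived from a span's parent ID, following the rule used in newAggregableSpan:

```go
package main

import "fmt"

type trilean int32

const (
	trileanNotSet trilean = iota
	trileanTrue
	trileanFalse
)

// isTraceRoot follows the rule in the span.go hunk above: a span with no
// parent is the trace root.
func isTraceRoot(parentID uint64) trilean {
	if parentID == 0 {
		return trileanTrue
	}
	return trileanFalse
}

func main() {
	fmt.Println(int32(isTraceRoot(0)), int32(isTraceRoot(42))) // 1 2
}
```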
import ( "github.com/tinylib/msgp/msgp" @@ -20,83 +17,105 @@ func (z *groupedStats) DecodeMsg(dc *msgp.Reader) (err error) { var zb0001 uint32 zb0001, err = dc.ReadMapHeader() if err != nil { + err = msgp.WrapError(err) return } for zb0001 > 0 { zb0001-- field, err = dc.ReadMapKeyPtr() if err != nil { + err = msgp.WrapError(err) return } switch msgp.UnsafeString(field) { case "Service": z.Service, err = dc.ReadString() if err != nil { + err = msgp.WrapError(err, "Service") return } case "Name": z.Name, err = dc.ReadString() if err != nil { + err = msgp.WrapError(err, "Name") return } case "Resource": z.Resource, err = dc.ReadString() if err != nil { + err = msgp.WrapError(err, "Resource") return } case "HTTPStatusCode": z.HTTPStatusCode, err = dc.ReadUint32() if err != nil { + err = msgp.WrapError(err, "HTTPStatusCode") return } case "Type": z.Type, err = dc.ReadString() if err != nil { + err = msgp.WrapError(err, "Type") return } case "DBType": z.DBType, err = dc.ReadString() if err != nil { + err = msgp.WrapError(err, "DBType") return } case "Hits": z.Hits, err = dc.ReadUint64() if err != nil { + err = msgp.WrapError(err, "Hits") return } case "Errors": z.Errors, err = dc.ReadUint64() if err != nil { + err = msgp.WrapError(err, "Errors") return } case "Duration": z.Duration, err = dc.ReadUint64() if err != nil { + err = msgp.WrapError(err, "Duration") return } case "OkSummary": z.OkSummary, err = dc.ReadBytes(z.OkSummary) if err != nil { + err = msgp.WrapError(err, "OkSummary") return } case "ErrorSummary": z.ErrorSummary, err = dc.ReadBytes(z.ErrorSummary) if err != nil { + err = msgp.WrapError(err, "ErrorSummary") return } case "Synthetics": z.Synthetics, err = dc.ReadBool() if err != nil { + err = msgp.WrapError(err, "Synthetics") return } case "TopLevelHits": z.TopLevelHits, err = dc.ReadUint64() if err != nil { + err = msgp.WrapError(err, "TopLevelHits") + return + } + case "IsTraceRoot": + z.IsTraceRoot, err = dc.ReadInt32() + if err != nil { + err = msgp.WrapError(err, "IsTraceRoot") return } default: err = dc.Skip() if err != nil { + err = msgp.WrapError(err) return } } @@ -106,14 +125,15 @@ func (z *groupedStats) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 13 + // map header, size 14 // write "Service" - err = en.Append(0x8d, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) + err = en.Append(0x8e, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65) if err != nil { return } err = en.WriteString(z.Service) if err != nil { + err = msgp.WrapError(err, "Service") return } // write "Name" @@ -123,6 +143,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteString(z.Name) if err != nil { + err = msgp.WrapError(err, "Name") return } // write "Resource" @@ -132,6 +153,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteString(z.Resource) if err != nil { + err = msgp.WrapError(err, "Resource") return } // write "HTTPStatusCode" @@ -141,6 +163,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteUint32(z.HTTPStatusCode) if err != nil { + err = msgp.WrapError(err, "HTTPStatusCode") return } // write "Type" @@ -150,6 +173,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteString(z.Type) if err != nil { + err = msgp.WrapError(err, "Type") return } // write "DBType" @@ -159,6 +183,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) { } 
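Editor's note: the regenerated msgp codec around this point wraps every encode/decode error with `msgp.WrapError`, so a failure reports which field it occurred on rather than a bare I/O error. The same idea can be sketched with nothing but standard-library error wrapping; this is an analogy with invented names, not the msgp implementation.

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

// decodeHits parses a single field and, on failure, attaches the field name
// while preserving the underlying error for errors.Is / errors.As.
func decodeHits(raw string) (uint64, error) {
	n, err := strconv.ParseUint(raw, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("Hits: %w", err)
	}
	return n, nil
}

func main() {
	_, err := decodeHits("not-a-number")
	fmt.Println(err)                               // Hits: strconv.ParseUint: ...
	fmt.Println(errors.Is(err, strconv.ErrSyntax)) // true
}
```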
err = en.WriteString(z.DBType) if err != nil { + err = msgp.WrapError(err, "DBType") return } // write "Hits" @@ -168,6 +193,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteUint64(z.Hits) if err != nil { + err = msgp.WrapError(err, "Hits") return } // write "Errors" @@ -177,6 +203,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteUint64(z.Errors) if err != nil { + err = msgp.WrapError(err, "Errors") return } // write "Duration" @@ -186,6 +213,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteUint64(z.Duration) if err != nil { + err = msgp.WrapError(err, "Duration") return } // write "OkSummary" @@ -195,6 +223,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteBytes(z.OkSummary) if err != nil { + err = msgp.WrapError(err, "OkSummary") return } // write "ErrorSummary" @@ -204,6 +233,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteBytes(z.ErrorSummary) if err != nil { + err = msgp.WrapError(err, "ErrorSummary") return } // write "Synthetics" @@ -213,6 +243,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteBool(z.Synthetics) if err != nil { + err = msgp.WrapError(err, "Synthetics") return } // write "TopLevelHits" @@ -222,6 +253,17 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteUint64(z.TopLevelHits) if err != nil { + err = msgp.WrapError(err, "TopLevelHits") + return + } + // write "IsTraceRoot" + err = en.Append(0xab, 0x49, 0x73, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x6f, 0x74) + if err != nil { + return + } + err = en.WriteInt32(z.IsTraceRoot) + if err != nil { + err = msgp.WrapError(err, "IsTraceRoot") return } return @@ -229,7 +271,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *groupedStats) Msgsize() (s int) { - s = 1 + 8 + msgp.StringPrefixSize + len(z.Service) + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.StringPrefixSize + len(z.Resource) + 15 + msgp.Uint32Size + 5 + msgp.StringPrefixSize + len(z.Type) + 7 + msgp.StringPrefixSize + len(z.DBType) + 5 + msgp.Uint64Size + 7 + msgp.Uint64Size + 9 + msgp.Uint64Size + 10 + msgp.BytesPrefixSize + len(z.OkSummary) + 13 + msgp.BytesPrefixSize + len(z.ErrorSummary) + 11 + msgp.BoolSize + 13 + msgp.Uint64Size + s = 1 + 8 + msgp.StringPrefixSize + len(z.Service) + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.StringPrefixSize + len(z.Resource) + 15 + msgp.Uint32Size + 5 + msgp.StringPrefixSize + len(z.Type) + 7 + msgp.StringPrefixSize + len(z.DBType) + 5 + msgp.Uint64Size + 7 + msgp.Uint64Size + 9 + msgp.Uint64Size + 10 + msgp.BytesPrefixSize + len(z.OkSummary) + 13 + msgp.BytesPrefixSize + len(z.ErrorSummary) + 11 + msgp.BoolSize + 13 + msgp.Uint64Size + 12 + msgp.Int32Size return } @@ -240,29 +282,34 @@ func (z *statsBucket) DecodeMsg(dc *msgp.Reader) (err error) { var zb0001 uint32 zb0001, err = dc.ReadMapHeader() if err != nil { + err = msgp.WrapError(err) return } for zb0001 > 0 { zb0001-- field, err = dc.ReadMapKeyPtr() if err != nil { + err = msgp.WrapError(err) return } switch msgp.UnsafeString(field) { case "Start": z.Start, err = dc.ReadUint64() if err != nil { + err = msgp.WrapError(err, "Start") return } case "Duration": z.Duration, err = dc.ReadUint64() if err != nil { + err = msgp.WrapError(err, "Duration") return } case "Stats": var zb0002 
uint32 zb0002, err = dc.ReadArrayHeader() if err != nil { + err = msgp.WrapError(err, "Stats") return } if cap(z.Stats) >= int(zb0002) { @@ -273,12 +320,14 @@ func (z *statsBucket) DecodeMsg(dc *msgp.Reader) (err error) { for za0001 := range z.Stats { err = z.Stats[za0001].DecodeMsg(dc) if err != nil { + err = msgp.WrapError(err, "Stats", za0001) return } } default: err = dc.Skip() if err != nil { + err = msgp.WrapError(err) return } } @@ -296,6 +345,7 @@ func (z *statsBucket) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteUint64(z.Start) if err != nil { + err = msgp.WrapError(err, "Start") return } // write "Duration" @@ -305,6 +355,7 @@ func (z *statsBucket) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteUint64(z.Duration) if err != nil { + err = msgp.WrapError(err, "Duration") return } // write "Stats" @@ -314,11 +365,13 @@ func (z *statsBucket) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteArrayHeader(uint32(len(z.Stats))) if err != nil { + err = msgp.WrapError(err, "Stats") return } for za0001 := range z.Stats { err = z.Stats[za0001].EncodeMsg(en) if err != nil { + err = msgp.WrapError(err, "Stats", za0001) return } } @@ -341,34 +394,40 @@ func (z *statsPayload) DecodeMsg(dc *msgp.Reader) (err error) { var zb0001 uint32 zb0001, err = dc.ReadMapHeader() if err != nil { + err = msgp.WrapError(err) return } for zb0001 > 0 { zb0001-- field, err = dc.ReadMapKeyPtr() if err != nil { + err = msgp.WrapError(err) return } switch msgp.UnsafeString(field) { case "Hostname": z.Hostname, err = dc.ReadString() if err != nil { + err = msgp.WrapError(err, "Hostname") return } case "Env": z.Env, err = dc.ReadString() if err != nil { + err = msgp.WrapError(err, "Env") return } case "Version": z.Version, err = dc.ReadString() if err != nil { + err = msgp.WrapError(err, "Version") return } case "Stats": var zb0002 uint32 zb0002, err = dc.ReadArrayHeader() if err != nil { + err = msgp.WrapError(err, "Stats") return } if cap(z.Stats) >= int(zb0002) { @@ -379,12 +438,14 @@ func (z *statsPayload) DecodeMsg(dc *msgp.Reader) (err error) { for za0001 := range z.Stats { err = z.Stats[za0001].DecodeMsg(dc) if err != nil { + err = msgp.WrapError(err, "Stats", za0001) return } } default: err = dc.Skip() if err != nil { + err = msgp.WrapError(err) return } } @@ -402,6 +463,7 @@ func (z *statsPayload) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteString(z.Hostname) if err != nil { + err = msgp.WrapError(err, "Hostname") return } // write "Env" @@ -411,6 +473,7 @@ func (z *statsPayload) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteString(z.Env) if err != nil { + err = msgp.WrapError(err, "Env") return } // write "Version" @@ -420,6 +483,7 @@ func (z *statsPayload) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteString(z.Version) if err != nil { + err = msgp.WrapError(err, "Version") return } // write "Stats" @@ -429,11 +493,13 @@ func (z *statsPayload) EncodeMsg(en *msgp.Writer) (err error) { } err = en.WriteArrayHeader(uint32(len(z.Stats))) if err != nil { + err = msgp.WrapError(err, "Stats") return } for za0001 := range z.Stats { err = z.Stats[za0001].EncodeMsg(en) if err != nil { + err = msgp.WrapError(err, "Stats", za0001) return } } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/telemetry.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/telemetry.go index 3f9a7d2f..3fa70b4e 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/telemetry.go +++ 
b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/telemetry.go @@ -12,6 +12,12 @@ import ( "gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry" ) +var additionalConfigs []telemetry.Configuration + +func reportTelemetryOnAppStarted(c telemetry.Configuration) { + additionalConfigs = append(additionalConfigs, c) +} + // startTelemetry starts the global instrumentation telemetry client with tracer data // unless instrumentation telemetry is disabled via the DD_INSTRUMENTATION_TELEMETRY_ENABLED // env var. @@ -44,7 +50,8 @@ func startTelemetry(c *config) { {Name: "service", Value: c.serviceName}, {Name: "universal_version", Value: c.universalVersion}, {Name: "env", Value: c.env}, - {Name: "agent_url", Value: c.agentURL.String()}, + {Name: "version", Value: c.version}, + {Name: "trace_agent_url", Value: c.agentURL.String()}, {Name: "agent_hostname", Value: c.hostname}, {Name: "runtime_metrics_enabled", Value: c.runtimeMetrics}, {Name: "dogstatsd_addr", Value: c.dogstatsdAddr}, @@ -102,5 +109,6 @@ func startTelemetry(c *config) { telemetryConfigs = append(telemetryConfigs, telemetry.Configuration{Name: "orchestrion_" + k, Value: v}) } } + telemetryConfigs = append(telemetryConfigs, additionalConfigs...) telemetry.GlobalClient.ProductChange(telemetry.NamespaceTracers, true, telemetryConfigs) } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/textmap.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/textmap.go index cd9b7860..1fee0ba6 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/textmap.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/textmap.go @@ -507,7 +507,7 @@ func getDatadogPropagator(cp *chainedPropagator) *propagator { // if the reparenting ID is not set on the context, the span ID from datadog headers is used. func overrideDatadogParentID(ctx, w3cCtx, ddCtx *spanContext) { ctx.spanID = w3cCtx.spanID - if w3cCtx.reparentID != "" && w3cCtx.reparentID != "0000000000000000" { + if w3cCtx.reparentID != "" { ctx.reparentID = w3cCtx.reparentID } else if ddCtx != nil { // NIT: could be done without using fmt.Sprintf? Is it worth it? @@ -967,7 +967,7 @@ func composeTracestate(ctx *spanContext, priority int, oldState string) string { if !ctx.isRemote { b.WriteString(";p:") b.WriteString(spanIDHexEncoded(ctx.SpanID(), 16)) - } else if ctx.reparentID != "" && ctx.reparentID != "0000000000000000" { + } else if ctx.reparentID != "" { b.WriteString(";p:") b.WriteString(ctx.reparentID) } @@ -1202,10 +1202,6 @@ func parseTracestate(ctx *spanContext, header string) { setPropagatingTag(ctx, "_dd.p."+keySuffix, val) } } - // if dd list-member is present and last parent is not set, set it to zeros - if ctx.reparentID == "" { - ctx.reparentID = "0000000000000000" - } } } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer.go index af794c78..a52c65f4 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer.go @@ -81,6 +81,11 @@ type tracer struct { // finished, and dropped spansStarted, spansFinished, tracesDropped uint32 + // Keeps track of the total number of traces dropped for accurate logging. + totalTracesDropped uint32 + + logDroppedTraces *time.Ticker + // Records the number of dropped P0 traces and spans. 
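Editor's note: the tracer.go hunk above adds a `totalTracesDropped` counter and a once-per-second `logDroppedTraces` ticker, which pushChunk (below) uses so a flooded payload queue produces one aggregated error line instead of one per dropped trace. A self-contained sketch of that throttled-logging pattern, with hypothetical names:

```go
package main

import (
	"log"
	"sync/atomic"
	"time"
)

type dropLogger struct {
	dropped uint32
	tick    *time.Ticker
}

// noteDrop counts a dropped item and, at most once per ticker interval,
// flushes the accumulated count as a single log line.
func (d *dropLogger) noteDrop() {
	atomic.AddUint32(&d.dropped, 1)
	select {
	case <-d.tick.C:
		if n := atomic.SwapUint32(&d.dropped, 0); n > 0 {
			log.Printf("%d traces dropped through payload queue", n)
		}
	default:
	}
}

func main() {
	d := &dropLogger{tick: time.NewTicker(time.Second)}
	defer d.tick.Stop()
	for i := 0; i < 5; i++ {
		d.noteDrop() // counted, ticker has not fired yet, nothing logged
	}
	time.Sleep(1100 * time.Millisecond)
	d.noteDrop() // observes the tick and logs "6 traces dropped ..."
}
```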
droppedP0Traces, droppedP0Spans uint32 @@ -276,6 +281,7 @@ func newUnstartedTracer(opts ...StartOption) *tracer { rulesSampling: rulesSampler, prioritySampling: sampler, pid: os.Getpid(), + logDroppedTraces: time.NewTicker(1 * time.Second), stats: newConcentrator(c, defaultStatsBucketSize), obfuscator: obfuscate.NewObfuscator(obfuscate.Config{ SQL: obfuscate.SQLConfig{ @@ -451,7 +457,15 @@ func (t *tracer) pushChunk(trace *chunk) { select { case t.out <- trace: default: - log.Error("payload queue full, dropping %d traces", len(trace.spans)) + log.Debug("payload queue full, trace dropped %d spans", len(trace.spans)) + atomic.AddUint32(&t.totalTracesDropped, 1) + } + select { + case <-t.logDroppedTraces.C: + if t := atomic.SwapUint32(&t.totalTracesDropped, 0); t > 0 { + log.Error("%d traces dropped through payload queue", t) + } + default: } } @@ -735,6 +749,9 @@ func (t *tracer) sample(span *span) { if t.rulesSampling.SampleTraceGlobalRate(span) { return } + if t.rulesSampling.SampleTrace(span) { + return + } t.prioritySampling.apply(span) } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/util.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/util.go index 67ee1614..bd3c03a8 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/util.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/util.go @@ -20,6 +20,8 @@ import ( func toFloat64(value interface{}) (f float64, ok bool) { const max = (int64(1) << 53) - 1 const min = -max + // If any other type is added here, remember to add it to the type switch in + // the `span.SetTag` function to handle pointers to these supported types. switch i := value.(type) { case byte: return float64(i), true @@ -122,3 +124,54 @@ func parsePropagatableTraceTags(s string) (map[string]string, error) { tags[key] = s[start:] return tags, nil } + +func dereference(value any) any { + // Falling into one of the cases will dereference the pointer and return the + // value of the pointer. It adds one allocation due to casting. 
+	switch value.(type) { +	case *bool: +		return dereferenceGeneric(value.(*bool)) +	case *string: +		return dereferenceGeneric(value.(*string)) +	// Supported type by toFloat64 +	case *byte: +		return dereferenceGeneric(value.(*byte)) +	case *float32: +		return dereferenceGeneric(value.(*float32)) +	case *float64: +		return dereferenceGeneric(value.(*float64)) +	case *int: +		return dereferenceGeneric(value.(*int)) +	case *int8: +		return dereferenceGeneric(value.(*int8)) +	case *int16: +		return dereferenceGeneric(value.(*int16)) +	case *int32: +		return dereferenceGeneric(value.(*int32)) +	case *int64: +		return dereferenceGeneric(value.(*int64)) +	case *uint: +		return dereferenceGeneric(value.(*uint)) +	case *uint16: +		return dereferenceGeneric(value.(*uint16)) +	case *uint32: +		return dereferenceGeneric(value.(*uint32)) +	case *uint64: +		return dereferenceGeneric(value.(*uint64)) +	case *samplernames.SamplerName: +		v := value.(*samplernames.SamplerName) +		if v == nil { +			return samplernames.Unknown +		} +		return *v +	} +	return value +} + +func dereferenceGeneric[T any](value *T) T { +	if value == nil { +		var v T +		return v +	} +	return *value +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/README.md b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/README.md new file mode 100644 index 00000000..d693d883 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/README.md @@ -0,0 +1,147 @@ +# Appsec Go Design + +This document describes the design of the `internal/appsec` package and everything under it. This package is responsible +for securing the application by monitoring the operations that are executed by the application and applying actions in +case a security threat is detected. + +Most of the work is to forward information to the module `github.com/DataDog/go-libddwaf` which contains the WAF +(Web Application Firewall) engine. The WAF does most of the decision making about events and actions. Our goal is to +connect the different parts of the application and the WAF engine while keeping up to date the various sources of +configuration that the WAF engine uses. + +### Instrumentation Gateway: Dyngo + +Having the customer (or orchestrion) instrument their code is the hardest part of the job. That's why we want to provide +the simplest API possible for them to use. This means losing the flexibility of enabling and disabling multiple +products and features at runtime. We still want to offer that flexibility to the customer, which is why behind every +API entrypoint present in `dd-trace-go/contrib` that supports appsec there is a call to the `internal/appsec/dyngo` package. + +```mermaid +flowchart LR + +UserCode[User Code] --> Instrumentation --> IG{Instrumentation Gateway} -----> Listener +``` + +Dyngo is a context-scoped event listener system that provides a way to listen dynamically to events that are happening in +the customer code and to react to configuration changes and hot-swap event listeners at runtime. + +```mermaid +flowchart LR + +UserCode[User Code] --> appsec/emitter --> IG{dyngo} -----> appsec/listener +``` + +### Operation definition requirements + +* Each operation must have a `Start*` and a `Finish` method covering calls to dyngo. +* The content of the arguments and results should not require any external package, at most the standard library.
+ +Example operation: + +```go +package main + +import ( + "context" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" +) + +type ( + ExampleOperation struct { + dyngo.Operation + } + + ExampleOperationArgs struct { + Type string + } + + ExampleOperationResult struct { + Code int + } +) + +func (ExampleOperationArgs) IsArgOf(*ExampleOperation) {} +func (ExampleOperationResult) IsResultOf(*ExampleOperation) {} + +func StartExampleOperation(ctx context.Context, args ExampleOperationArgs) *ExampleOperation { + parent, ok := dyngo.FromContext(ctx) + if !ok { + log.Error("No parent operation found") + return nil + } + op := &ExampleOperation{ + Operation: dyngo.NewOperation(parent), + } + return dyngo.StartOperation(op, args) +} + +func (op *ExampleOperation) Finish(result ExampleOperationResult) { + dyngo.FinishOperation(op, result) +} +``` + +> [!CAUTION] +> Importing external packages in the operation definition will probably cause circular dependencies. This is because +> the operation definition can be used in the package is will instrument, and the package that will instrument it will +> probably import the operation definition. + +### Operation Stack + +Current state of the possible operation stacks + +```mermaid +flowchart TD + + subgraph Top Level Operation + SES[trace.ServiceEntrySpanOperation] + + Context[waf.ContextOperation] + + HTTPH[httpsec.HandlerOperation] + GRPCH[grpcsec.HandlerOperation] + GQL[graphqlsec.RequestOperation] + end + + subgraph HTTP + RequestBody([httpsec.MonitorRequestBody]) + Roundtripper[httpsec.RoundTripOperation] + end + + subgraph GRPC + RequestMessage([grpcsec.MonitorRequestMessage]) + ResponseMessage([grpcsec.MonitorResponseMessage]) + end + + subgraph GraphQL + Exec[graphqlsec.ExecutionOperation] + Resolve[graphqlsec.ResolveOperation] + end + + Code{User Code} + + SES --> Context + Context --> HTTPH --> Code + Context --> GRPCH --> Code + Context --> GQL + + GQL --> Exec --> Resolve --> Code + + Code --> RequestBody + + Code --> RequestMessage + Code --> ResponseMessage + + Code --> Span[trace.SpanOperation] + + Span --> Roundtripper + Span --> OS[ossec.OpenOperation] + Span --> SQL[sqlsec.SQLOperation] + Span --> User[usersec.UserOperation] +``` + +> [!IMPORTANT] +> Please note that this is how the operation SHOULD be stacked. If the user code does not have a Top Level Operation +> then nothing will be monitored. In this case an error log should be produced to explain thouroughly the issue to +> the user. diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/appsec.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/appsec.go index ae68d0d6..0dc042ca 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/appsec.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/appsec.go @@ -9,13 +9,13 @@ import ( "fmt" "sync" - "github.com/DataDog/appsec-internal-go/limiter" - appsecLog "github.com/DataDog/appsec-internal-go/log" - waf "github.com/DataDog/go-libddwaf/v3" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config" "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener" "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + + appsecLog "github.com/DataDog/appsec-internal-go/log" + waf "github.com/DataDog/go-libddwaf/v3" ) // Enabled returns true when AppSec is up and running. 
Meaning that the appsec build tag is enabled, the env var @@ -134,10 +134,10 @@ func setActiveAppSec(a *appsec) { } type appsec struct { - cfg *config.Config - limiter *limiter.TokenTicker - wafHandle *waf.Handle - started bool + cfg *config.Config + features []listener.Feature + featuresMu sync.Mutex + started bool } func newAppSec(cfg *config.Config) *appsec { @@ -160,11 +160,8 @@ func (a *appsec) start(telemetry *appsecTelemetry) error { log.Error("appsec: non-critical error while loading libddwaf: %v", err) } - a.limiter = limiter.NewTokenTicker(a.cfg.TraceRateLimit, a.cfg.TraceRateLimit) - a.limiter.Start() - - // Register the WAF operation event listener - if err := a.swapWAF(a.cfg.RulesManager.Latest); err != nil { + // Register dyngo listeners + if err := a.SwapRootOperation(); err != nil { return err } @@ -193,15 +190,23 @@ func (a *appsec) stop() { // Disable RC blocking first so that the following is guaranteed not to be concurrent anymore. a.disableRCBlocking() + a.featuresMu.Lock() + defer a.featuresMu.Unlock() + // Disable the currently applied instrumentation dyngo.SwapRootOperation(nil) - if a.wafHandle != nil { - a.wafHandle.Close() - a.wafHandle = nil - } + + // Reset rules edits received from the remote configuration + // We skip the error because we can't do anything about and it was already logged in config.NewRulesManager + a.cfg.RulesManager, _ = config.NewRulesManager(nil) + // TODO: block until no more requests are using dyngo operations - a.limiter.Stop() + for _, feature := range a.features { + feature.Stop() + } + + a.features = nil } func init() { diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config/config.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config/config.go index e2a0b773..6ffcbafc 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config/config.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config/config.go @@ -68,6 +68,30 @@ type Config struct { // RC is the remote configuration client used to receive product configuration updates. Nil if RC is disabled (default) RC *remoteconfig.ClientConfig RASP bool + // SupportedAddresses are the addresses that the AppSec listener will bind to. + SupportedAddresses AddressSet +} + +// AddressSet is a set of WAF addresses. +type AddressSet map[string]struct{} + +func NewAddressSet(addrs []string) AddressSet { + set := make(AddressSet, len(addrs)) + for _, addr := range addrs { + set[addr] = struct{}{} + } + return set +} + +// AnyOf returns true if any of the addresses in the set are in the given list. 
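Editor's note: the appsec.go changes above replace the struct's dedicated limiter and WAF handle with a generic slice of features, each owning its own lifecycle; stop() now just walks that slice under a mutex and drops it. A minimal sketch of that registry pattern follows; the `Feature` interface here is a stand-in for `internal/appsec/listener.Feature`, not its actual definition, and the other names are invented.

```go
package main

import (
	"fmt"
	"sync"
)

// Feature is a stand-in for a self-contained product feature that can be
// torn down independently.
type Feature interface {
	Stop()
}

type registry struct {
	mu       sync.Mutex
	features []Feature
}

func (r *registry) add(f Feature) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.features = append(r.features, f)
}

// stopAll mirrors the shape of appsec.stop() above: stop every registered
// feature, then drop the slice so a later start begins from a clean state.
func (r *registry) stopAll() {
	r.mu.Lock()
	defer r.mu.Unlock()
	for _, f := range r.features {
		f.Stop()
	}
	r.features = nil
}

type noisyFeature string

func (n noisyFeature) Stop() { fmt.Println("stopped", string(n)) }

func main() {
	r := &registry{}
	r.add(noisyFeature("waf"))
	r.add(noisyFeature("rate-limiter"))
	r.stopAll()
}
```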
+func (set AddressSet) AnyOf(anyOf ...string) bool { + for _, addr := range anyOf { + if _, ok := set[addr]; ok { + return true + } + } + + return false } // WithRCConfig sets the AppSec remote config client configuration to the specified cfg @@ -105,7 +129,7 @@ func NewConfig() (*Config, error) { return nil, err } - r, err := NewRulesManeger(rules) + r, err := NewRulesManager(rules) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config/rules_manager.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config/rules_manager.go index a61f68e8..e4e003ed 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config/rules_manager.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config/rules_manager.go @@ -8,6 +8,7 @@ package config import ( "encoding/json" "fmt" + "slices" "gopkg.in/DataDog/dd-trace-go.v1/internal/log" @@ -28,24 +29,21 @@ type ( } // RulesFragment can represent a full ruleset or a fragment of it. RulesFragment struct { - Version string `json:"version,omitempty"` - Metadata any `json:"metadata,omitempty"` - Rules []any `json:"rules,omitempty"` - Overrides []any `json:"rules_override,omitempty"` - Exclusions []any `json:"exclusions,omitempty"` - RulesData []RuleDataEntry `json:"rules_data,omitempty"` - Actions []any `json:"actions,omitempty"` - CustomRules []any `json:"custom_rules,omitempty"` - Processors []any `json:"processors,omitempty"` - Scanners []any `json:"scanners,omitempty"` + Version string `json:"version,omitempty"` + Metadata any `json:"metadata,omitempty"` + Rules []any `json:"rules,omitempty"` + Overrides []any `json:"rules_override,omitempty"` + Exclusions []any `json:"exclusions,omitempty"` + ExclusionData []DataEntry `json:"exclusion_data,omitempty"` + RulesData []DataEntry `json:"rules_data,omitempty"` + Actions []any `json:"actions,omitempty"` + CustomRules []any `json:"custom_rules,omitempty"` + Processors []any `json:"processors,omitempty"` + Scanners []any `json:"scanners,omitempty"` } - // RuleDataEntry represents an entry in the "rules_data" top level field of a rules file - RuleDataEntry rc.ASMDataRuleData - // RulesData is a slice of RulesDataEntry - RulesData struct { - RulesData []RuleDataEntry `json:"rules_data"` - } + // DataEntry represents an entry in the "rules_data" top level field of a rules file + DataEntry rc.ASMDataRuleData ) // DefaultRulesFragment returns a RulesFragment created using the default static recommended rules @@ -60,27 +58,20 @@ func DefaultRulesFragment() RulesFragment { func (f *RulesFragment) clone() (clone RulesFragment) { clone.Version = f.Version clone.Metadata = f.Metadata - clone.Overrides = cloneSlice(f.Overrides) - clone.Exclusions = cloneSlice(f.Exclusions) - clone.RulesData = cloneSlice(f.RulesData) - clone.CustomRules = cloneSlice(f.CustomRules) - clone.Processors = cloneSlice(f.Processors) - clone.Scanners = cloneSlice(f.Scanners) - // TODO (Francois Mazeau): copy more fields once we handle them + clone.Overrides = slices.Clone(f.Overrides) + clone.Exclusions = slices.Clone(f.Exclusions) + clone.ExclusionData = slices.Clone(f.ExclusionData) + clone.RulesData = slices.Clone(f.RulesData) + clone.CustomRules = slices.Clone(f.CustomRules) + clone.Processors = slices.Clone(f.Processors) + clone.Scanners = slices.Clone(f.Scanners) return } -func cloneSlice[T any](slice []T) []T { - // TODO: use slices.Clone once go1.21 is the min supported go runtime. 
- clone := make([]T, len(slice), cap(slice)) - copy(clone, slice) - return clone -} - -// NewRulesManeger initializes and returns a new RulesManager using the provided rules. +// NewRulesManager initializes and returns a new RulesManager using the provided rules. // If no rules are provided (nil), the default rules are used instead. // If the provided rules are invalid, an error is returned -func NewRulesManeger(rules []byte) (*RulesManager, error) { +func NewRulesManager(rules []byte) (*RulesManager, error) { var f RulesFragment if rules == nil { f = DefaultRulesFragment() @@ -135,6 +126,7 @@ func (r *RulesManager) Compile() { for _, v := range r.Edits { r.Latest.Overrides = append(r.Latest.Overrides, v.Overrides...) r.Latest.Exclusions = append(r.Latest.Exclusions, v.Exclusions...) + r.Latest.ExclusionData = append(r.Latest.ExclusionData, v.ExclusionData...) r.Latest.Actions = append(r.Latest.Actions, v.Actions...) r.Latest.RulesData = append(r.Latest.RulesData, v.RulesData...) r.Latest.CustomRules = append(r.Latest.CustomRules, v.CustomRules...) diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/README.md b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/README.md new file mode 100644 index 00000000..c350c3c3 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/README.md @@ -0,0 +1,25 @@ +## GraphQL Threat Monitoring + +This package provides `dyngo` support for GraphQL operations, which are listened +to according to the following sequence diagram: + +```mermaid +sequenceDiagram + participant Root + participant Request + participant Execution + participant Field + + Root ->>+ Request: graphqlsec.StartRequest(...) + + Request ->>+ Execution: grapgqlsec.StartExecution(...) + + par for each field + Execution ->>+ Field: graphqlsec.StartField(...) + Field -->>- Execution: field.Finish(...) + end + + Execution -->>- Request: execution.Finish(...) + + Request -->>- Root: request.Finish(...) +``` diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/execution.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/execution.go new file mode 100644 index 00000000..0599d5e1 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/execution.go @@ -0,0 +1,66 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +// Package graphql is the GraphQL instrumentation API and contract for AppSec +// defining an abstract run-time representation of AppSec middleware. GraphQL +// integrations must use this package to enable AppSec features for GraphQL, +// which listens to this package's operation events. +package graphqlsec + +import ( + "context" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" +) + +type ( + ExecutionOperation struct { + dyngo.Operation + } + + // ExecutionOperationArgs describes arguments passed to a GraphQL query operation. + ExecutionOperationArgs struct { + // Variables is the user-provided variables object for the query. + Variables map[string]any + // Query is the query that is being executed. + Query string + // OperationName is the user-provided operation name for the query. 
+ OperationName string + } + + ExecutionOperationRes struct { + // Data is the data returned from processing the GraphQL operation. + Data any + // Error is the error returned by processing the GraphQL Operation, if any. + Error error + } +) + +// Finish the GraphQL query operation, along with the given results, and emit a finish event up in +// the operation stack. +func (q *ExecutionOperation) Finish(res ExecutionOperationRes) { + dyngo.FinishOperation(q, res) +} + +func (ExecutionOperationArgs) IsArgOf(*ExecutionOperation) {} +func (ExecutionOperationRes) IsResultOf(*ExecutionOperation) {} + +// StartExecutionOperation starts a new GraphQL query operation, along with the given arguments, and +// emits a start event up in the operation stack. The operation is tracked on the returned context, +// and can be extracted later on using FromContext. +func StartExecutionOperation(ctx context.Context, args ExecutionOperationArgs) (context.Context, *ExecutionOperation) { + parent, ok := dyngo.FromContext(ctx) + if !ok { + log.Debug("appsec: StartExecutionOperation: no parent operation found in context") + } + + op := &ExecutionOperation{ + Operation: dyngo.NewOperation(parent), + } + + return dyngo.StartAndRegisterOperation(ctx, op, args), op +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/request.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/request.go new file mode 100644 index 00000000..70e1f6df --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/request.go @@ -0,0 +1,69 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +// Package graphql is the GraphQL instrumentation API and contract for AppSec +// defining an abstract run-time representation of AppSec middleware. GraphQL +// integrations must use this package to enable AppSec features for GraphQL, +// which listens to this package's operation events. +package graphqlsec + +import ( + "context" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf" +) + +type ( + RequestOperation struct { + dyngo.Operation + // used in case we don't have a parent operation + *waf.ContextOperation + } + + // RequestOperationArgs describes arguments passed to a GraphQL request. + RequestOperationArgs struct { + RawQuery string // The raw, not-yet-parsed GraphQL query + OperationName string // The user-provided operation name for the query + Variables map[string]any // The user-provided variables object for this request + } + + RequestOperationRes struct { + // Data is the data returned from processing the GraphQL operation. + Data any + // Error is the error returned by processing the GraphQL Operation, if any. + Error error + } +) + +// Finish the GraphQL query operation, along with the given results, and emit a finish event up in +// the operation stack. 
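Editor's note: the request and execution operations added in these files share one call shape: an integration starts the operation with the request's context, does its work, and finishes it with the results, which propagate up the dyngo stack. Since these packages live under `internal/` and cannot be imported by outside code, the sketch below only illustrates that start/defer-finish pairing with invented types; it is not the emitter API itself.

```go
package main

import (
	"context"
	"fmt"
)

// operation, operationArgs and operationRes are invented stand-ins; only the
// call shape (start with context, defer finish with results) matters here.
type operation struct{ name string }

type operationArgs struct{ Query string }

type operationRes struct{ Err error }

type opKey struct{}

// startOperation registers the operation on the context so that child
// operations could later find their parent, as dyngo does.
func startOperation(ctx context.Context, args operationArgs) (context.Context, *operation) {
	op := &operation{name: args.Query}
	return context.WithValue(ctx, opKey{}, op), op
}

func (op *operation) finish(res operationRes) {
	fmt.Printf("finished %q (err=%v)\n", op.name, res.Err)
}

func main() {
	ctx, op := startOperation(context.Background(), operationArgs{Query: "{ user { id } }"})
	defer op.finish(operationRes{})
	_ = ctx // children would be started from this context
}
```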
+func (op *RequestOperation) Finish(span trace.TagSetter, res RequestOperationRes) { + dyngo.FinishOperation(op, res) + if op.ContextOperation != nil { + op.ContextOperation.Finish(span) + } +} + +func (RequestOperationArgs) IsArgOf(*RequestOperation) {} +func (RequestOperationRes) IsResultOf(*RequestOperation) {} + +// StartRequestOperation starts a new GraphQL request operation, along with the given arguments, and +// emits a start event up in the operation stack. The operation is usually linked to tge global root +// operation. The operation is tracked on the returned context, and can be extracted later on using +// FromContext. +func StartRequestOperation(ctx context.Context, args RequestOperationArgs) (context.Context, *RequestOperation) { + parent, ok := dyngo.FromContext(ctx) + op := &RequestOperation{} + if !ok { // Usually we can find the HTTP Handler Operation as the parent but it's technically optional + op.ContextOperation, ctx = waf.StartContextOperation(ctx) + op.Operation = dyngo.NewOperation(op.ContextOperation) + } else { + op.Operation = dyngo.NewOperation(parent) + } + + return dyngo.StartAndRegisterOperation(ctx, op, args), op +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/resolve.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/resolve.go new file mode 100644 index 00000000..b7f5fe06 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/resolve.go @@ -0,0 +1,63 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package graphqlsec + +import ( + "context" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" +) + +type ( + ResolveOperation struct { + dyngo.Operation + } + + // ResolveOperationArgs describes arguments passed to a GraphQL field operation. + ResolveOperationArgs struct { + // TypeName is the name of the field's type + TypeName string + // FieldName is the name of the field + FieldName string + // Arguments is the arguments provided to the field resolver + Arguments map[string]any + // Trivial determines whether the resolution is trivial or not. Leave as false if undetermined. + Trivial bool + } + + ResolveOperationRes struct { + // Data is the data returned from processing the GraphQL operation. + Data any + // Error is the error returned by processing the GraphQL Operation, if any. + Error error + } +) + +// Finish the GraphQL Field operation, along with the given results, and emit a finish event up in +// the operation stack. +func (q *ResolveOperation) Finish(res ResolveOperationRes) { + dyngo.FinishOperation(q, res) +} + +func (ResolveOperationArgs) IsArgOf(*ResolveOperation) {} +func (ResolveOperationRes) IsResultOf(*ResolveOperation) {} + +// StartResolveOperation starts a new GraphQL Resolve operation, along with the given arguments, and +// emits a start event up in the operation stack. The operation is tracked on the returned context, +// and can be extracted later on using FromContext. 
+func StartResolveOperation(ctx context.Context, args ResolveOperationArgs) (context.Context, *ResolveOperation) { + parent, ok := dyngo.FromContext(ctx) + if !ok { + log.Debug("appsec: StartResolveOperation: no parent operation found in context") + } + + op := &ResolveOperation{ + Operation: dyngo.NewOperation(parent), + } + return dyngo.StartAndRegisterOperation(ctx, op, args), op +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec/grpc.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec/grpc.go new file mode 100644 index 00000000..e6b28e31 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec/grpc.go @@ -0,0 +1,116 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +// Package grpcsec is the gRPC instrumentation API and contract for AppSec +// defining an abstract run-time representation of gRPC handlers. +// gRPC integrations must use this package to enable AppSec features for gRPC, +// which listens to this package's operation events. +// +// Abstract gRPC server handler operation definitions. It is based on two +// operations allowing to describe every type of RPC: the HandlerOperation type +// which represents the RPC handler, and the ReceiveOperation type which +// represents the messages the RPC handler receives during its lifetime. +// This means that the ReceiveOperation(s) will happen within the +// HandlerOperation. +// Every type of RPC, unary, client streaming, server streaming, and +// bidirectional streaming RPCs, can be all represented with a HandlerOperation +// having one or several ReceiveOperation. +// The send operation is not required for now and therefore not defined, which +// means that server and bidirectional streaming RPCs currently have the same +// run-time representation as unary and client streaming RPCs. +package grpcsec + +import ( + "context" + "sync/atomic" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses" +) + +type ( + // HandlerOperation represents a gRPC server handler operation. + // It must be created with StartHandlerOperation() and finished with its + // Finish() method. + // Security events observed during the operation lifetime should be added + // to the operation using its AddSecurityEvent() method. + HandlerOperation struct { + dyngo.Operation + *waf.ContextOperation + } + + // HandlerOperationArgs is the grpc handler arguments. + HandlerOperationArgs struct { + // Method is the gRPC method name. + // Corresponds to the address `grpc.server.method`. + Method string + + // RPC metadata received by the gRPC handler. + // Corresponds to the address `grpc.server.request.metadata`. + Metadata map[string][]string + + // RemoteAddr is the IP address of the client that initiated the gRPC request. + // May be used as the address `http.client_ip`. + RemoteAddr string + } + + // HandlerOperationRes is the grpc handler results. Empty as of today. + HandlerOperationRes struct { + // Raw gRPC status code. + // Corresponds to the address `grpc.server.response.status`. 
+ StatusCode int + } +) + +func (HandlerOperationArgs) IsArgOf(*HandlerOperation) {} +func (HandlerOperationRes) IsResultOf(*HandlerOperation) {} + +// StartHandlerOperation starts an gRPC server handler operation, along with the +// given arguments and parent operation, and emits a start event up in the +// operation stack. When parent is nil, the operation is linked to the global +// root operation. +func StartHandlerOperation(ctx context.Context, args HandlerOperationArgs) (context.Context, *HandlerOperation, *atomic.Pointer[actions.BlockGRPC]) { + wafOp, ctx := waf.StartContextOperation(ctx) + op := &HandlerOperation{ + Operation: dyngo.NewOperation(wafOp), + ContextOperation: wafOp, + } + + var block atomic.Pointer[actions.BlockGRPC] + dyngo.OnData(op, func(err *actions.BlockGRPC) { + block.Store(err) + }) + + return dyngo.StartAndRegisterOperation(ctx, op, args), op, &block +} + +// MonitorRequestMessage monitors the gRPC request message body as the WAF address `grpc.server.request.message`. +func MonitorRequestMessage(ctx context.Context, msg any) error { + return waf.RunSimple(ctx, + addresses.NewAddressesBuilder(). + WithGRPCRequestMessage(msg). + Build(), + "appsec: failed to monitor gRPC request message body") +} + +// MonitorResponseMessage monitors the gRPC response message body as the WAF address `grpc.server.response.message`. +func MonitorResponseMessage(ctx context.Context, msg any) error { + return waf.RunSimple(ctx, + addresses.NewAddressesBuilder(). + WithGRPCResponseMessage(msg). + Build(), + "appsec: failed to monitor gRPC response message body") + +} + +// Finish the gRPC handler operation, along with the given results, and emit a +// finish event up in the operation stack. +func (op *HandlerOperation) Finish(span trace.TagSetter, res HandlerOperationRes) { + dyngo.FinishOperation(op, res) + op.ContextOperation.Finish(span) +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/http.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/http.go index 37c56ef2..8552a713 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/http.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/http.go @@ -12,49 +12,100 @@ package httpsec import ( "context" - // Blank import needed to use embed for the default blocked response payloads _ "embed" "net/http" - "strings" + "sync" + "sync/atomic" - "gopkg.in/DataDog/dd-trace-go.v1/appsec/events" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace" "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/types" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/httptrace" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" - "gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace" - - "github.com/DataDog/appsec-internal-go/netip" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses" ) +// HandlerOperation type representing an HTTP operation. It must be created with +// StartOperation() and finished with its Finish(). +type ( + HandlerOperation struct { + dyngo.Operation + *waf.ContextOperation + mu sync.RWMutex + } + + // HandlerOperationArgs is the HTTP handler operation arguments. 
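Editor's note: both the gRPC and HTTP handler operations above hand a possible blocking action back to the instrumentation through an `atomic.Pointer`, because the listener side may publish the action at any point during the request while the handler goroutine only checks it at a few well-defined spots. A small stdlib-only sketch of that hand-off; `blockAction` and `publish` are invented stand-ins for the real `actions.Block*` payloads and the `dyngo.OnData` callback.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// blockAction is an invented stand-in for the blocking payload stored by the
// real listeners.
type blockAction struct{ statusCode int }

func main() {
	var block atomic.Pointer[blockAction]

	// Listener side: may publish an action from another goroutine at any time.
	publish := func(a *blockAction) { block.Store(a) }
	publish(&blockAction{statusCode: 403})

	// Handler side: checks the pointer at defined points, e.g. before and
	// after calling the wrapped handler.
	if a := block.Load(); a != nil {
		fmt.Println("blocking request with status", a.statusCode)
	}
}
```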
+ HandlerOperationArgs struct { + Method string + RequestURI string + Host string + RemoteAddr string + Headers map[string][]string + Cookies map[string][]string + QueryParams map[string][]string + PathParams map[string]string + } + + // HandlerOperationRes is the HTTP handler operation results. + HandlerOperationRes struct { + Headers map[string][]string + StatusCode int + } +) + +func (HandlerOperationArgs) IsArgOf(*HandlerOperation) {} +func (HandlerOperationRes) IsResultOf(*HandlerOperation) {} + +func StartOperation(ctx context.Context, args HandlerOperationArgs) (*HandlerOperation, *atomic.Pointer[actions.BlockHTTP], context.Context) { + wafOp, ctx := waf.StartContextOperation(ctx) + op := &HandlerOperation{ + Operation: dyngo.NewOperation(wafOp), + ContextOperation: wafOp, + } + + // We need to use an atomic pointer to store the action because the action may be created asynchronously in the future + var action atomic.Pointer[actions.BlockHTTP] + dyngo.OnData(op, func(a *actions.BlockHTTP) { + action.Store(a) + }) + + return op, &action, dyngo.StartAndRegisterOperation(ctx, op, args) +} + +// Finish the HTTP handler operation and its children operations and write everything to the service entry span. +func (op *HandlerOperation) Finish(res HandlerOperationRes, span ddtrace.Span) { + dyngo.FinishOperation(op, res) + op.ContextOperation.Finish(span) +} + +const monitorBodyErrorLog = ` +"appsec: parsed http body monitoring ignored: could not find the http handler instrumentation metadata in the request context: + the request handler is not being monitored by a middleware function or the provided context is not the expected request context +` + // MonitorParsedBody starts and finishes the SDK body operation. // This function should not be called when AppSec is disabled in order to // get preciser error logs. func MonitorParsedBody(ctx context.Context, body any) error { - parent, _ := dyngo.FromContext(ctx) - if parent == nil { - log.Error("appsec: parsed http body monitoring ignored: could not find the http handler instrumentation metadata in the request context: the request handler is not being monitored by a middleware function or the provided context is not the expected request context") - return nil - } - - return ExecuteSDKBodyOperation(parent, types.SDKBodyOperationArgs{Body: body}) + return waf.RunSimple(ctx, + addresses.NewAddressesBuilder(). + WithRequestBody(body). + Build(), + monitorBodyErrorLog, + ) } -// ExecuteSDKBodyOperation starts and finishes the SDK Body operation by emitting a dyngo start and finish events -// An error is returned if the body associated to that operation must be blocked -func ExecuteSDKBodyOperation(parent dyngo.Operation, args types.SDKBodyOperationArgs) error { - var err error - op := &types.SDKBodyOperation{Operation: dyngo.NewOperation(parent)} - dyngo.OnData(op, func(e *events.BlockingSecurityEvent) { - err = e - }) - dyngo.StartOperation(op, args) - dyngo.FinishOperation(op, types.SDKBodyOperationRes{}) - return err +// Return the map of parsed cookies if any and following the specification of +// the rule address `server.request.cookies`. 
+func makeCookies(parsed []*http.Cookie) map[string][]string { + if len(parsed) == 0 { + return nil + } + cookies := make(map[string][]string, len(parsed)) + for _, c := range parsed { + cookies[c.Name] = append(cookies[c.Name], c.Value) + } + return cookies } // WrapHandler wraps the given HTTP handler with the abstract HTTP operation defined by HandlerOperationArgs and @@ -71,132 +122,47 @@ func WrapHandler(handler http.Handler, span ddtrace.Span, pathParams map[string] opts.ResponseHeaderCopier = defaultWrapHandlerConfig.ResponseHeaderCopier } - trace.SetAppSecEnabledTags(span) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ipTags, clientIP := httptrace.ClientIPTags(r.Header, true, r.RemoteAddr) - log.Debug("appsec: http client ip detection returned `%s` given the http headers `%v`", clientIP, r.Header) - trace.SetTags(span, ipTags) - - var bypassHandler http.Handler - var blocking bool - var stackTrace *stacktrace.Event - args := MakeHandlerOperationArgs(r, clientIP, pathParams) - ctx, op := StartOperation(r.Context(), args, func(op *types.Operation) { - dyngo.OnData(op, func(a *sharedsec.HTTPAction) { - blocking = true - bypassHandler = a.Handler - }) - dyngo.OnData(op, func(a *sharedsec.StackTraceAction) { - stackTrace = &a.Event - }) + op, blockAtomic, ctx := StartOperation(r.Context(), HandlerOperationArgs{ + Method: r.Method, + RequestURI: r.RequestURI, + Host: r.Host, + RemoteAddr: r.RemoteAddr, + Headers: r.Header, + Cookies: makeCookies(r.Cookies()), + QueryParams: r.URL.Query(), + PathParams: pathParams, }) r = r.WithContext(ctx) defer func() { - events := op.Finish(MakeHandlerOperationRes(w)) + var statusCode int + if res, ok := w.(interface{ Status() int }); ok { + statusCode = res.Status() + } + op.Finish(HandlerOperationRes{ + Headers: opts.ResponseHeaderCopier(w), + StatusCode: statusCode, + }, span) // Execute the onBlock functions to make sure blocking works properly // in case we are instrumenting the Gin framework - if blocking { - op.SetTag(trace.BlockedRequestTag, true) + if blockPtr := blockAtomic.Load(); blockPtr != nil { for _, f := range opts.OnBlock { f() } - } - // Add stacktraces to the span, if any - if stackTrace != nil { - stacktrace.AddToSpan(span, stackTrace) - } - - if bypassHandler != nil { - bypassHandler.ServeHTTP(w, r) - } - - // Add the request headers span tags out of args.Headers instead of r.Header as it was normalized and some - // extra headers have been added such as the Host header which is removed from the original Go request headers - // map - setRequestHeadersTags(span, args.Headers) - setResponseHeadersTags(span, opts.ResponseHeaderCopier(w)) - trace.SetTags(span, op.Tags()) - if len(events) > 0 { - httptrace.SetSecurityEventsTags(span, events) + if blockPtr.Handler != nil { + blockPtr.Handler.ServeHTTP(w, r) + } } }() - if bypassHandler != nil { - handler = bypassHandler - bypassHandler = nil + if blockPtr := blockAtomic.Load(); blockPtr != nil && blockPtr.Handler != nil { + handler = blockPtr.Handler + blockPtr.Handler = nil } + handler.ServeHTTP(w, r) }) } - -// MakeHandlerOperationArgs creates the HandlerOperationArgs value. 
-func MakeHandlerOperationArgs(r *http.Request, clientIP netip.Addr, pathParams map[string]string) types.HandlerOperationArgs { - cookies := makeCookies(r) // TODO(Julio-Guerra): avoid actively parsing the cookies thanks to dynamic instrumentation - headers := headersRemoveCookies(r.Header) - headers["host"] = []string{r.Host} - return types.HandlerOperationArgs{ - Method: r.Method, - RequestURI: r.RequestURI, - Headers: headers, - Cookies: cookies, - Query: r.URL.Query(), // TODO(Julio-Guerra): avoid actively parsing the query values thanks to dynamic instrumentation - PathParams: pathParams, - ClientIP: clientIP, - } -} - -// MakeHandlerOperationRes creates the HandlerOperationRes value. -func MakeHandlerOperationRes(w http.ResponseWriter) types.HandlerOperationRes { - var status int - if mw, ok := w.(interface{ Status() int }); ok { - status = mw.Status() - } - return types.HandlerOperationRes{Status: status, Headers: headersRemoveCookies(w.Header())} -} - -// Remove cookies from the request headers and return the map of headers -// Used from `server.request.headers.no_cookies` and server.response.headers.no_cookies` addresses for the WAF -func headersRemoveCookies(headers http.Header) map[string][]string { - headersNoCookies := make(http.Header, len(headers)) - for k, v := range headers { - k := strings.ToLower(k) - if k == "cookie" { - continue - } - headersNoCookies[k] = v - } - return headersNoCookies -} - -// Return the map of parsed cookies if any and following the specification of -// the rule address `server.request.cookies`. -func makeCookies(r *http.Request) map[string][]string { - parsed := r.Cookies() - if len(parsed) == 0 { - return nil - } - cookies := make(map[string][]string, len(parsed)) - for _, c := range parsed { - cookies[c.Name] = append(cookies[c.Name], c.Value) - } - return cookies -} - -// StartOperation starts an HTTP handler operation, along with the given -// context and arguments and emits a start event up in the operation stack. -// The operation is linked to the global root operation since an HTTP operation -// is always expected to be first in the operation stack. -func StartOperation(ctx context.Context, args types.HandlerOperationArgs, setup ...func(*types.Operation)) (context.Context, *types.Operation) { - op := &types.Operation{ - Operation: dyngo.NewOperation(nil), - TagsHolder: trace.NewTagsHolder(), - } - for _, cb := range setup { - cb(op) - } - - return dyngo.StartAndRegisterOperation(ctx, op, args), op -} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/init.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/init.go deleted file mode 100644 index 9f4db28f..00000000 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/init.go +++ /dev/null @@ -1,15 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016 Datadog, Inc. 
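// Illustrative usage sketch (not part of the vendored files above): a handler
// running behind the new WrapHandler middleware can feed its parsed body to the
// WAF via MonitorParsedBody. The handler name and payload shape are hypothetical,
// and the snippet assumes it lives inside the dd-trace-go module, since the
// emitter packages are internal.
package sketches

import (
	"encoding/json"
	"net/http"

	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec"
)

func exampleHandler(w http.ResponseWriter, r *http.Request) {
	var payload map[string]any
	if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
		http.Error(w, "bad request", http.StatusBadRequest)
		return
	}
	// Runs the WAF on the `server.request.body` address; a non-nil error means
	// a blocking rule matched, so the handler returns early and the deferred
	// op.Finish in WrapHandler serves the blocking response.
	if err := httpsec.MonitorParsedBody(r.Context(), payload); err != nil {
		return
	}
	w.WriteHeader(http.StatusOK)
}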
- -package httpsec - -import ( - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec" -) - -func init() { - appsec.AddWAFEventListener(httpsec.Install) -} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/roundtripper.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/roundtripper.go index 9df86576..8a7f1f24 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/roundtripper.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/roundtripper.go @@ -11,14 +11,31 @@ import ( "gopkg.in/DataDog/dd-trace-go.v1/appsec/events" "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/types" "gopkg.in/DataDog/dd-trace-go.v1/internal/log" ) var badInputContextOnce sync.Once +type ( + RoundTripOperation struct { + dyngo.Operation + } + + // RoundTripOperationArgs is the round trip operation arguments. + RoundTripOperationArgs struct { + // URL corresponds to the address `server.io.net.url`. + URL string + } + + // RoundTripOperationRes is the round trip operation results. + RoundTripOperationRes struct{} +) + +func (RoundTripOperationArgs) IsArgOf(*RoundTripOperation) {} +func (RoundTripOperationRes) IsResultOf(*RoundTripOperation) {} + func ProtectRoundTrip(ctx context.Context, url string) error { - opArgs := types.RoundTripOperationArgs{ + opArgs := RoundTripOperationArgs{ URL: url, } @@ -32,7 +49,7 @@ func ProtectRoundTrip(ctx context.Context, url string) error { return nil } - op := &types.RoundTripOperation{ + op := &RoundTripOperation{ Operation: dyngo.NewOperation(parent), } @@ -43,7 +60,7 @@ func ProtectRoundTrip(ctx context.Context, url string) error { }) dyngo.StartOperation(op, opArgs) - dyngo.FinishOperation(op, types.RoundTripOperationRes{}) + dyngo.FinishOperation(op, RoundTripOperationRes{}) if err != nil { log.Debug("appsec: outgoing http request blocked by the WAF on URL: %s", url) diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/tags.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/tags.go deleted file mode 100644 index 0fc4357a..00000000 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/tags.go +++ /dev/null @@ -1,28 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016 Datadog, Inc. - -package httpsec - -import ( - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/httptrace" -) - -// setRequestHeadersTags sets the AppSec-specific request headers span tags. -func setRequestHeadersTags(span trace.TagSetter, headers map[string][]string) { - setHeadersTags(span, "http.request.headers.", headers) -} - -// setResponseHeadersTags sets the AppSec-specific response headers span tags. -func setResponseHeadersTags(span trace.TagSetter, headers map[string][]string) { - setHeadersTags(span, "http.response.headers.", headers) -} - -// setHeadersTags sets the AppSec-specific headers span tags. 
-func setHeadersTags(span trace.TagSetter, tagPrefix string, headers map[string][]string) { - for h, v := range httptrace.NormalizeHTTPHeaders(headers) { - span.SetTag(tagPrefix+h, v) - } -} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/types/types.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/types/types.go deleted file mode 100644 index 2ea8648b..00000000 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/types/types.go +++ /dev/null @@ -1,101 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016 Datadog, Inc. - -package types - -import ( - "net/netip" - "sync" - - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace" -) - -// Operation type representing an HTTP operation. It must be created with -// StartOperation() and finished with its Finish(). -type ( - Operation struct { - dyngo.Operation - trace.TagsHolder - trace.SecurityEventsHolder - mu sync.RWMutex - } - - // SDKBodyOperation type representing an SDK body - SDKBodyOperation struct { - dyngo.Operation - } - - RoundTripOperation struct { - dyngo.Operation - } -) - -// Finish the HTTP handler operation, along with the given results and emits a -// finish event up in the operation stack. -func (op *Operation) Finish(res HandlerOperationRes) []any { - dyngo.FinishOperation(op, res) - return op.Events() -} - -// Abstract HTTP handler operation definition. -type ( - // HandlerOperationArgs is the HTTP handler operation arguments. - HandlerOperationArgs struct { - // ClientIP corresponds to the address `http.client_ip` - ClientIP netip.Addr - // Headers corresponds to the address `server.request.headers.no_cookies` - Headers map[string][]string - // Cookies corresponds to the address `server.request.cookies` - Cookies map[string][]string - // Query corresponds to the address `server.request.query` - Query map[string][]string - // PathParams corresponds to the address `server.request.path_params` - PathParams map[string]string - // Method is the http method verb of the request, address is `server.request.method` - Method string - // RequestURI corresponds to the address `server.request.uri.raw` - RequestURI string - } - - // HandlerOperationRes is the HTTP handler operation results. - HandlerOperationRes struct { - Headers map[string][]string - // Status corresponds to the address `server.response.status`. - Status int - } - - // SDKBodyOperationArgs is the SDK body operation arguments. - SDKBodyOperationArgs struct { - // Body corresponds to the address `server.request.body`. - Body any - } - - // SDKBodyOperationRes is the SDK body operation results. - SDKBodyOperationRes struct{} - - // RoundTripOperationArgs is the round trip operation arguments. - RoundTripOperationArgs struct { - // URL corresponds to the address `server.io.net.url`. - URL string - } - - // RoundTripOperationRes is the round trip operation results. 
- RoundTripOperationRes struct{} -) - -// Finish finishes the SDKBody operation and emits a finish event -func (op *SDKBodyOperation) Finish() { - dyngo.FinishOperation(op, SDKBodyOperationRes{}) -} - -func (SDKBodyOperationArgs) IsArgOf(*SDKBodyOperation) {} -func (SDKBodyOperationRes) IsResultOf(*SDKBodyOperation) {} - -func (HandlerOperationArgs) IsArgOf(*Operation) {} -func (HandlerOperationRes) IsResultOf(*Operation) {} - -func (RoundTripOperationArgs) IsArgOf(*RoundTripOperation) {} -func (RoundTripOperationRes) IsResultOf(*RoundTripOperation) {} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/shared.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/shared.go deleted file mode 100644 index ec5b4a75..00000000 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/shared.go +++ /dev/null @@ -1,71 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2023 Datadog, Inc. - -package sharedsec - -import ( - "context" - "reflect" - - "gopkg.in/DataDog/dd-trace-go.v1/appsec/events" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" -) - -type ( - // UserIDOperation type representing a call to appsec.SetUser(). It gets both created and destroyed in a single - // call to ExecuteUserIDOperation - UserIDOperation struct { - dyngo.Operation - } - // UserIDOperationArgs is the user ID operation arguments. - UserIDOperationArgs struct { - UserID string - } - // UserIDOperationRes is the user ID operation results. - UserIDOperationRes struct{} - - // OnUserIDOperationStart function type, called when a user ID - // operation starts. - OnUserIDOperationStart func(operation *UserIDOperation, args UserIDOperationArgs) -) - -var userIDOperationArgsType = reflect.TypeOf((*UserIDOperationArgs)(nil)).Elem() - -// ExecuteUserIDOperation starts and finishes the UserID operation by emitting a dyngo start and finish events -// An error is returned if the user associated to that operation must be blocked -func ExecuteUserIDOperation(parent dyngo.Operation, args UserIDOperationArgs) error { - var err error - op := &UserIDOperation{Operation: dyngo.NewOperation(parent)} - dyngo.OnData(op, func(e *events.BlockingSecurityEvent) { err = e }) - dyngo.StartOperation(op, args) - dyngo.FinishOperation(op, UserIDOperationRes{}) - return err -} - -// ListenedType returns the type a OnUserIDOperationStart event listener -// listens to, which is the UserIDOperationStartArgs type. -func (OnUserIDOperationStart) ListenedType() reflect.Type { return userIDOperationArgsType } - -// Call the underlying event listener function by performing the type-assertion -// on v whose type is the one returned by ListenedType(). -func (f OnUserIDOperationStart) Call(op dyngo.Operation, v interface{}) { - f(op.(*UserIDOperation), v.(UserIDOperationArgs)) -} - -// MonitorUser starts and finishes a UserID operation. -// A call to the WAF is made to check the user ID and an error is returned if the -// user should be blocked. The return value is nil otherwise. 
-func MonitorUser(ctx context.Context, userID string) error { - if parent, ok := dyngo.FromContext(ctx); ok { - return ExecuteUserIDOperation(parent, UserIDOperationArgs{UserID: userID}) - } - log.Error("appsec: user ID monitoring ignored: could not find the http handler instrumentation metadata in the request context: the request handler is not being monitored by a middleware function or the provided context is not the expected request context") - return nil - -} - -func (UserIDOperationArgs) IsArgOf(*UserIDOperation) {} -func (UserIDOperationRes) IsResultOf(*UserIDOperation) {} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec/sql.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec/sql.go new file mode 100644 index 00000000..1c888d9f --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec/sql.go @@ -0,0 +1,71 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016 Datadog, Inc. + +package sqlsec + +import ( + "context" + "sync" + + "gopkg.in/DataDog/dd-trace-go.v1/appsec/events" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" +) + +var badInputContextOnce sync.Once + +type ( + SQLOperation struct { + dyngo.Operation + } + + SQLOperationArgs struct { + // Query corresponds to the addres `server.db.statement` + Query string + // Driver corresponds to the addres `server.db.system` + Driver string + } + SQLOperationRes struct{} +) + +func (SQLOperationArgs) IsArgOf(*SQLOperation) {} +func (SQLOperationRes) IsResultOf(*SQLOperation) {} + +func ProtectSQLOperation(ctx context.Context, query, driver string) error { + opArgs := SQLOperationArgs{ + Query: query, + Driver: driver, + } + + parent, _ := dyngo.FromContext(ctx) + if parent == nil { // No parent operation => we can't monitor the request + badInputContextOnce.Do(func() { + log.Debug("appsec: outgoing SQL operation monitoring ignored: could not find the handler " + + "instrumentation metadata in the request context: the request handler is not being monitored by a " + + "middleware function or the incoming request context has not be forwarded correctly to the SQL connection") + }) + return nil + } + + op := &SQLOperation{ + Operation: dyngo.NewOperation(parent), + } + + var err *events.BlockingSecurityEvent + // TODO: move the data listener as a setup function of SQLsec.StartSQLOperation(ars, ) + dyngo.OnData(op, func(e *events.BlockingSecurityEvent) { + err = e + }) + + dyngo.StartOperation(op, opArgs) + dyngo.FinishOperation(op, SQLOperationRes{}) + + if err != nil { + log.Debug("appsec: outgoing SQL operation blocked by the WAF") + return err + } + + return nil +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec/types/sql.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec/types/sql.go deleted file mode 100644 index 379eb7f7..00000000 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec/types/sql.go +++ /dev/null @@ -1,27 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016 Datadog, Inc. 
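// Illustrative usage sketch (not part of the vendored files above): a SQL
// integration can guard a statement with the new sqlsec.ProtectSQLOperation
// before it reaches the database. The wrapper name and the "postgres" driver
// string are hypothetical; the context must carry the dyngo operation set up by
// an AppSec HTTP or gRPC middleware, otherwise the call is a no-op.
package sketches

import (
	"context"
	"database/sql"

	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec"
)

func guardedExec(ctx context.Context, db *sql.DB, query string) (sql.Result, error) {
	// Runs the WAF on `server.db.statement` and `server.db.system` (RASP scope).
	if err := sqlsec.ProtectSQLOperation(ctx, query, "postgres"); err != nil {
		return nil, err // blocked by the WAF: do not execute the query
	}
	return db.ExecContext(ctx, query)
}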
- -package types - -import ( - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" -) - -type ( - SQLOperation struct { - dyngo.Operation - } - - SQLOperationArgs struct { - // Query corresponds to the addres `server.db.statement` - Query string - // Driver corresponds to the addres `server.db.system` - Driver string - } - SQLOperationRes struct{} -) - -func (SQLOperationArgs) IsArgOf(*SQLOperation) {} -func (SQLOperationRes) IsResultOf(*SQLOperation) {} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace/service_entry_span.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace/service_entry_span.go new file mode 100644 index 00000000..98e14b09 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace/service_entry_span.go @@ -0,0 +1,158 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package trace + +import ( + "context" + "encoding/json" + "sync" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" +) + +type ( + // ServiceEntrySpanOperation is a dyngo.Operation that holds a the first span of a service. Usually a http or grpc span. + ServiceEntrySpanOperation struct { + dyngo.Operation + tags map[string]any + jsonTags map[string]any + mu sync.Mutex + } + + // ServiceEntrySpanArgs is the arguments for a ServiceEntrySpanOperation + ServiceEntrySpanArgs struct{} + + // ServiceEntrySpanTag is a key value pair event that is used to tag a service entry span + ServiceEntrySpanTag struct { + Key string + Value any + } + + // JSONServiceEntrySpanTag is a key value pair event that is used to tag a service entry span + // It will be serialized as JSON when added to the span + JSONServiceEntrySpanTag struct { + Key string + Value any + } + + // ServiceEntrySpanTagsBulk is a bulk event that is used to send tags to a service entry span + ServiceEntrySpanTagsBulk struct { + Tags []JSONServiceEntrySpanTag + SerializableTags []JSONServiceEntrySpanTag + } +) + +func (ServiceEntrySpanArgs) IsArgOf(*ServiceEntrySpanOperation) {} + +// SetTag adds the key/value pair to the tags to add to the service entry span +func (op *ServiceEntrySpanOperation) SetTag(key string, value any) { + op.mu.Lock() + defer op.mu.Unlock() + op.tags[key] = value +} + +// SetSerializableTag adds the key/value pair to the tags to add to the service entry span. +// The value MAY be serialized as JSON if necessary but simple types will not be serialized. +func (op *ServiceEntrySpanOperation) SetSerializableTag(key string, value any) { + op.mu.Lock() + defer op.mu.Unlock() + op.setSerializableTag(key, value) +} + +// SetSerializableTags adds the key/value pairs to the tags to add to the service entry span. +// Values MAY be serialized as JSON if necessary but simple types will not be serialized. 
+func (op *ServiceEntrySpanOperation) SetSerializableTags(tags map[string]any) { + op.mu.Lock() + defer op.mu.Unlock() + for key, value := range tags { + op.setSerializableTag(key, value) + } +} + +func (op *ServiceEntrySpanOperation) setSerializableTag(key string, value any) { + switch value.(type) { + case string, int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64, bool: + op.tags[key] = value + default: + op.jsonTags[key] = value + } +} + +// SetTags fills the span tags using the key/value pairs found in `tags` +func (op *ServiceEntrySpanOperation) SetTags(tags map[string]any) { + op.mu.Lock() + defer op.mu.Unlock() + for k, v := range tags { + op.tags[k] = v + } +} + +// SetStringTags fills the span tags using the key/value pairs found in `tags` +func (op *ServiceEntrySpanOperation) SetStringTags(tags map[string]string) { + op.mu.Lock() + defer op.mu.Unlock() + for k, v := range tags { + op.tags[k] = v + } +} + +// OnServiceEntrySpanTagEvent is a callback that is called when a dyngo.OnData is triggered with a ServiceEntrySpanTag event +func (op *ServiceEntrySpanOperation) OnServiceEntrySpanTagEvent(tag ServiceEntrySpanTag) { + op.SetTag(tag.Key, tag.Value) +} + +// OnJSONServiceEntrySpanTagEvent is a callback that is called when a dyngo.OnData is triggered with a JSONServiceEntrySpanTag event +func (op *ServiceEntrySpanOperation) OnJSONServiceEntrySpanTagEvent(tag JSONServiceEntrySpanTag) { + op.SetSerializableTag(tag.Key, tag.Value) +} + +// OnServiceEntrySpanTagsBulkEvent is a callback that is called when a dyngo.OnData is triggered with a ServiceEntrySpanTagsBulk event +func (op *ServiceEntrySpanOperation) OnServiceEntrySpanTagsBulkEvent(bulk ServiceEntrySpanTagsBulk) { + for _, v := range bulk.Tags { + op.SetTag(v.Key, v.Value) + } + + for _, v := range bulk.SerializableTags { + op.SetSerializableTag(v.Key, v.Value) + } +} + +// OnSpanTagEvent is a listener for SpanTag events. +func (op *ServiceEntrySpanOperation) OnSpanTagEvent(tag SpanTag) { + op.SetTag(tag.Key, tag.Value) +} + +func StartServiceEntrySpanOperation(ctx context.Context) (*ServiceEntrySpanOperation, context.Context) { + parent, _ := dyngo.FromContext(ctx) + op := &ServiceEntrySpanOperation{ + Operation: dyngo.NewOperation(parent), + tags: make(map[string]any), + jsonTags: make(map[string]any), + } + return op, dyngo.StartAndRegisterOperation(ctx, op, ServiceEntrySpanArgs{}) +} + +func (op *ServiceEntrySpanOperation) Finish(span TagSetter) { + if _, ok := span.(*NoopTagSetter); ok { // If the span is a NoopTagSetter or is nil, we don't need to set any tags + return + } + + op.mu.Lock() + defer op.mu.Unlock() + + for k, v := range op.tags { + span.SetTag(k, v) + } + + for k, v := range op.jsonTags { + strValue, err := json.Marshal(v) + if err != nil { + log.Debug("appsec: failed to marshal tag %s: %v", k, err) + } + span.SetTag(k, string(strValue)) + } +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace/span.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace/span.go new file mode 100644 index 00000000..d6614f90 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace/span.go @@ -0,0 +1,67 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
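// Illustrative usage sketch (not part of the vendored files above):
// ServiceEntrySpanOperation buffers tags until Finish is handed a concrete
// TagSetter. Scalar values passed to SetSerializableTag are kept as-is, while
// composite values are JSON-encoded on flush. The tag keys below are made up,
// and TestTagSetter stands in for the real service entry span.
package sketches

import (
	"context"

	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace"
)

func entrySpanTagsSketch() map[string]any {
	op, _ := trace.StartServiceEntrySpanOperation(context.Background())
	op.SetTag("appsec.event", true)                          // stored verbatim
	op.SetSerializableTag("appsec.example.schema", []any{8}) // marshalled to "[8]" on Finish

	span := trace.TestTagSetter{}
	op.Finish(span) // flushes both buffered tag maps onto the span
	return span.Tags()
}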
+ +package trace + +import ( + "context" + "sync" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" +) + +type ( + // SpanOperation is a dyngo.Operation that holds a ddtrace.Span. + // It used as a middleware for appsec code and the tracer code + // hopefully some day this operation will create spans instead of simply using them + SpanOperation struct { + dyngo.Operation + tags map[string]any + mu sync.Mutex + } + + // SpanArgs is the arguments for a SpanOperation + SpanArgs struct{} + + // SpanTag is a key value pair event that is used to tag the current span + SpanTag struct { + Key string + Value any + } +) + +func (SpanArgs) IsArgOf(*SpanOperation) {} + +// SetTag adds the key/value pair to the tags to add to the span +func (op *SpanOperation) SetTag(key string, value any) { + op.mu.Lock() + defer op.mu.Unlock() + op.tags[key] = value +} + +// OnSpanTagEvent is a listener for SpanTag events. +func (op *SpanOperation) OnSpanTagEvent(tag SpanTag) { + op.SetTag(tag.Key, tag.Value) +} + +func StartSpanOperation(ctx context.Context) (*SpanOperation, context.Context) { + op := &SpanOperation{ + tags: make(map[string]any), + } + return op, dyngo.StartAndRegisterOperation(ctx, op, SpanArgs{}) +} + +func (op *SpanOperation) Finish(span TagSetter) { + if _, ok := span.(*NoopTagSetter); ok { // If the span is a NoopTagSetter or is nil, we don't need to set any tags + return + } + + op.mu.Lock() + defer op.mu.Unlock() + + for k, v := range op.tags { + span.SetTag(k, v) + } +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace/tag_setter.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace/tag_setter.go new file mode 100644 index 00000000..a7f5bc19 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace/tag_setter.go @@ -0,0 +1,29 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package trace + +// TagSetter is the interface needed to set a span tag. +type TagSetter interface { + SetTag(string, any) +} + +// NoopTagSetter is a TagSetter that does nothing. Useful when no tracer +// Span is available, but a TagSetter is assumed. +type NoopTagSetter struct{} + +func (NoopTagSetter) SetTag(string, any) { + // Do nothing +} + +type TestTagSetter map[string]any + +func (t TestTagSetter) SetTag(key string, value any) { + t[key] = value +} + +func (t TestTagSetter) Tags() map[string]any { + return t +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/usersec/user.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/usersec/user.go new file mode 100644 index 00000000..50a4352e --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/usersec/user.go @@ -0,0 +1,51 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
+ +package usersec + +import ( + "context" + + "gopkg.in/DataDog/dd-trace-go.v1/appsec/events" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" +) + +const errorLog = ` +appsec: user login monitoring ignored: could not find the http handler instrumentation metadata in the request context: + the request handler is not being monitored by a middleware function or the provided context is not the expected request context +` + +type ( + // UserLoginOperation type representing a call to appsec.SetUser(). It gets both created and destroyed in a single + // call to ExecuteUserIDOperation + UserLoginOperation struct { + dyngo.Operation + } + // UserLoginOperationArgs is the user ID operation arguments. + UserLoginOperationArgs struct{} + + // UserLoginOperationRes is the user ID operation results. + UserLoginOperationRes struct { + UserID string + SessionID string + Success bool + } +) + +func StartUserLoginOperation(ctx context.Context, args UserLoginOperationArgs) (*UserLoginOperation, *error) { + parent, _ := dyngo.FromContext(ctx) + op := &UserLoginOperation{Operation: dyngo.NewOperation(parent)} + var err error + dyngo.OnData(op, func(e *events.BlockingSecurityEvent) { err = e }) + dyngo.StartOperation(op, args) + return op, &err +} + +func (op *UserLoginOperation) Finish(args UserLoginOperationRes) { + dyngo.FinishOperation(op, args) +} + +func (UserLoginOperationArgs) IsArgOf(*UserLoginOperation) {} +func (UserLoginOperationRes) IsResultOf(*UserLoginOperation) {} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/actions.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/actions.go new file mode 100644 index 00000000..4eabcfaf --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/actions.go @@ -0,0 +1,56 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package actions + +import ( + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" +) + +type ( + // Action is a generic interface that represents any WAF action + Action interface { + EmitData(op dyngo.Operation) + } +) + +type actionHandler func(map[string]any) []Action + +// actionHandlers is a map of action types to their respective handler functions +// It is populated by the init functions of the actions packages +var actionHandlers = map[string]actionHandler{} + +func registerActionHandler(aType string, handler actionHandler) { + if _, ok := actionHandlers[aType]; ok { + log.Warn("appsec: action type `%s` already registered", aType) + return + } + actionHandlers[aType] = handler +} + +// SendActionEvents sends the relevant actions to the operation's data listener. 
+// It returns true if at least one of those actions require interrupting the request handler +// When SDKError is not nil, this error is sent to the op with EmitData so that the invoked SDK can return it +func SendActionEvents(op dyngo.Operation, actions map[string]any) { + for aType, params := range actions { + log.Debug("appsec: processing %s action with params %v", aType, params) + params, ok := params.(map[string]any) + if !ok { + log.Debug("appsec: could not cast action params to map[string]any from %T", params) + continue + } + + actionHandler, ok := actionHandlers[aType] + if !ok { + log.Debug("appsec: unknown action type `%s`", aType) + continue + } + + for _, a := range actionHandler(params) { + a.EmitData(op) + } + } +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/actions.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/block.go similarity index 53% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/actions.go rename to vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/block.go index 49cd65a5..ae802b60 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/actions.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/block.go @@ -1,22 +1,21 @@ // Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2022 Datadog, Inc. +// Copyright 2024 Datadog, Inc. -package sharedsec +package actions import ( - _ "embed" // Blank import + _ "embed" // embed is used to embed the blocked-template.json and blocked-template.html files "net/http" "os" "strings" + "github.com/mitchellh/mapstructure" + "gopkg.in/DataDog/dd-trace-go.v1/appsec/events" "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" "gopkg.in/DataDog/dd-trace-go.v1/internal/log" - "gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace" - - "github.com/mitchellh/mapstructure" ) // blockedTemplateJSON is the default JSON template used to write responses for blocked requests @@ -43,38 +42,12 @@ func init() { *template = t } } - } + + registerActionHandler("block_request", NewBlockAction) } type ( - // Action is a generic interface that represents any WAF action - Action interface { - Blocking() bool - EmitData(op dyngo.Operation) - } - - // HTTPAction are actions that interact with an HTTP request flow (block, redirect...) - HTTPAction struct { - http.Handler - } - // GRPCAction are actions that interact with a GRPC request flow - GRPCAction struct { - GRPCWrapper - } - // StackTraceAction are actions that generate a stacktrace - StackTraceAction struct { - Event stacktrace.Event - } - - // GRPCWrapper is an opaque prototype abstraction for a gRPC handler (to avoid importing grpc) - // that returns a status code and an error - // TODO: rely on strongly typed actions (with the actual grpc types) by introducing WAF constructors - // living in the contrib packages, along with their dependencies - something like `appsec.RegisterWAFConstructor(newGRPCWAF)` - // Such constructors would receive the full appsec config and rules, so that they would be able to build - // specific blocking actions. 
- GRPCWrapper func() (uint32, error) - // blockActionParams are the dynamic parameters to be provided to a "block_request" // action type upon invocation blockActionParams struct { @@ -84,40 +57,58 @@ type ( StatusCode int `mapstructure:"status_code"` Type string `mapstructure:"type,omitempty"` } - // redirectActionParams are the dynamic parameters to be provided to a "redirect_request" - // action type upon invocation - redirectActionParams struct { - Location string `mapstructure:"location,omitempty"` - StatusCode int `mapstructure:"status_code"` + // GRPCWrapper is an opaque prototype abstraction for a gRPC handler (to avoid importing grpc) + // that returns a status code and an error + GRPCWrapper func() (uint32, error) + + // BlockGRPC are actions that interact with a GRPC request flow + BlockGRPC struct { + GRPCWrapper + } + + // BlockHTTP are actions that interact with an HTTP request flow + BlockHTTP struct { + http.Handler } ) -func (a *HTTPAction) Blocking() bool { return true } -func (a *HTTPAction) EmitData(op dyngo.Operation) { dyngo.EmitData(op, a) } +func (a *BlockGRPC) EmitData(op dyngo.Operation) { + dyngo.EmitData(op, a) + dyngo.EmitData(op, &events.BlockingSecurityEvent{}) +} -func (a *GRPCAction) Blocking() bool { return true } -func (a *GRPCAction) EmitData(op dyngo.Operation) { dyngo.EmitData(op, a) } +func (a *BlockHTTP) EmitData(op dyngo.Operation) { + dyngo.EmitData(op, a) + dyngo.EmitData(op, &events.BlockingSecurityEvent{}) +} -func (a *StackTraceAction) Blocking() bool { return false } -func (a *StackTraceAction) EmitData(op dyngo.Operation) { dyngo.EmitData(op, a) } +func newGRPCBlockRequestAction(status int) *BlockGRPC { + return &BlockGRPC{GRPCWrapper: newGRPCBlockHandler(status)} +} -// NewStackTraceAction creates an action for the "stacktrace" action type -func NewStackTraceAction(params map[string]any) Action { - id, ok := params["stack_id"] - if !ok { - log.Debug("appsec: could not read stack_id parameter for generate_stack action") - return nil +func newGRPCBlockHandler(status int) GRPCWrapper { + return func() (uint32, error) { + return uint32(status), &events.BlockingSecurityEvent{} } +} - strID, ok := id.(string) - if !ok { - log.Debug("appsec: could not cast stacktrace ID to string") - return nil +func blockParamsFromMap(params map[string]any) (blockActionParams, error) { + grpcCode := 10 + p := blockActionParams{ + Type: "auto", + StatusCode: 403, + GRPCStatusCode: &grpcCode, } - event := stacktrace.NewEvent(stacktrace.ExploitEvent, stacktrace.WithID(strID)) + if err := mapstructure.WeakDecode(params, &p); err != nil { + return p, err + } - return &StackTraceAction{Event: *event} + if p.GRPCStatusCode == nil { + p.GRPCStatusCode = &grpcCode + } + + return p, nil } // NewBlockAction creates an action for the "block_request" action type @@ -133,36 +124,8 @@ func NewBlockAction(params map[string]any) []Action { } } -// NewRedirectAction creates an action for the "redirect_request" action type -func NewRedirectAction(params map[string]any) *HTTPAction { - p, err := redirectParamsFromMap(params) - if err != nil { - log.Debug("appsec: couldn't decode redirect action parameters") - return nil - } - return newRedirectRequestAction(p.StatusCode, p.Location) -} - -func newHTTPBlockRequestAction(status int, template string) *HTTPAction { - return &HTTPAction{Handler: newBlockHandler(status, template)} -} - -func newGRPCBlockRequestAction(status int) *GRPCAction { - return &GRPCAction{GRPCWrapper: newGRPCBlockHandler(status)} - -} - -func 
newRedirectRequestAction(status int, loc string) *HTTPAction { - // Default to 303 if status is out of redirection codes bounds - if status < 300 || status >= 400 { - status = 303 - } - - // If location is not set we fall back on a default block action - if loc == "" { - return &HTTPAction{Handler: newBlockHandler(403, string(blockedTemplateJSON))} - } - return &HTTPAction{Handler: http.RedirectHandler(loc, status)} +func newHTTPBlockRequestAction(status int, template string) *BlockHTTP { + return &BlockHTTP{Handler: newBlockHandler(status, template)} } // newBlockHandler creates, initializes and returns a new BlockRequestAction @@ -190,40 +153,9 @@ func newBlockHandler(status int, template string) http.Handler { } func newBlockRequestHandler(status int, ct string, payload []byte) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", ct) w.WriteHeader(status) w.Write(payload) }) } - -func newGRPCBlockHandler(status int) GRPCWrapper { - return func() (uint32, error) { - return uint32(status), &events.BlockingSecurityEvent{} - } -} - -func blockParamsFromMap(params map[string]any) (blockActionParams, error) { - grpcCode := 10 - p := blockActionParams{ - Type: "auto", - StatusCode: 403, - GRPCStatusCode: &grpcCode, - } - - if err := mapstructure.WeakDecode(params, &p); err != nil { - return p, err - } - - if p.GRPCStatusCode == nil { - p.GRPCStatusCode = &grpcCode - } - return p, nil - -} - -func redirectParamsFromMap(params map[string]any) (redirectActionParams, error) { - var p redirectActionParams - err := mapstructure.WeakDecode(params, &p) - return p, err -} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/blocked-template.html b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/blocked-template.html similarity index 100% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/blocked-template.html rename to vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/blocked-template.html diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/blocked-template.json b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/blocked-template.json similarity index 78% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/blocked-template.json rename to vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/blocked-template.json index 885d766c..12ae2969 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec/blocked-template.json +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/blocked-template.json @@ -1 +1 @@ -{"errors":[{"title":"You've been blocked","detail":"Sorry, you cannot access this page. Please contact the customer service team. Security provided by Datadog."}]} +{"errors":[{"title":"You've been blocked","detail":"Sorry, you cannot access this page. Please contact the customer service team. 
Security provided by Datadog."}]} \ No newline at end of file diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/http_redirect.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/http_redirect.go new file mode 100644 index 00000000..3cdca4c8 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/http_redirect.go @@ -0,0 +1,54 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package actions + +import ( + "net/http" + + "github.com/mitchellh/mapstructure" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" +) + +// redirectActionParams are the dynamic parameters to be provided to a "redirect_request" +// action type upon invocation +type redirectActionParams struct { + Location string `mapstructure:"location,omitempty"` + StatusCode int `mapstructure:"status_code"` +} + +func init() { + registerActionHandler("redirect_request", NewRedirectAction) +} + +func redirectParamsFromMap(params map[string]any) (redirectActionParams, error) { + var p redirectActionParams + err := mapstructure.WeakDecode(params, &p) + return p, err +} + +func newRedirectRequestAction(status int, loc string) *BlockHTTP { + // Default to 303 if status is out of redirection codes bounds + if status < http.StatusMultipleChoices || status >= http.StatusBadRequest { + status = http.StatusSeeOther + } + + // If location is not set we fall back on a default block action + if loc == "" { + return &BlockHTTP{Handler: newBlockHandler(http.StatusForbidden, string(blockedTemplateJSON))} + } + return &BlockHTTP{Handler: http.RedirectHandler(loc, status)} +} + +// NewRedirectAction creates an action for the "redirect_request" action type +func NewRedirectAction(params map[string]any) []Action { + p, err := redirectParamsFromMap(params) + if err != nil { + log.Debug("appsec: couldn't decode redirect action parameters") + return nil + } + return []Action{newRedirectRequestAction(p.StatusCode, p.Location)} +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/stacktrace.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/stacktrace.go new file mode 100644 index 00000000..47b4dd60 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions/stacktrace.go @@ -0,0 +1,44 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
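// Illustrative sketch (not part of the vendored files above) of the defaulting
// behaviour of the "redirect_request" action handler registered in
// http_redirect.go: a status code outside the 3xx range falls back to 303, and
// an empty location falls back to the default 403 JSON block page. The
// parameter values below are made up.
package sketches

import (
	"net/http/httptest"

	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions"
)

func redirectActionSketch() int {
	acts := actions.NewRedirectAction(map[string]any{
		"status_code": 999, // outside 300-399, so the handler downgrades it to 303
		"location":    "/login",
	})

	rec := httptest.NewRecorder()
	req := httptest.NewRequest("GET", "/blocked", nil)
	acts[0].(*actions.BlockHTTP).Handler.ServeHTTP(rec, req)
	return rec.Code // 303, with a "Location: /login" response header
}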
+ +package actions + +import ( + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace" +) + +func init() { + registerActionHandler("generate_stack", NewStackTraceAction) +} + +// StackTraceAction are actions that generate a stacktrace +type StackTraceAction struct { + Event *stacktrace.Event +} + +func (a *StackTraceAction) EmitData(op dyngo.Operation) { dyngo.EmitData(op, a) } + +// NewStackTraceAction creates an action for the "stacktrace" action type +func NewStackTraceAction(params map[string]any) []Action { + id, ok := params["stack_id"] + if !ok { + log.Debug("appsec: could not read stack_id parameter for generate_stack action") + return nil + } + + strID, ok := id.(string) + if !ok { + log.Debug("appsec: could not cast stacktrace ID to string") + return nil + } + + return []Action{ + &StackTraceAction{ + stacktrace.NewEvent(stacktrace.ExploitEvent, stacktrace.WithID(strID)), + }, + } +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses/addresses.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses/addresses.go new file mode 100644 index 00000000..03163df2 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses/addresses.go @@ -0,0 +1,40 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package addresses + +const ( + ServerRequestMethodAddr = "server.request.method" + ServerRequestRawURIAddr = "server.request.uri.raw" + ServerRequestHeadersNoCookiesAddr = "server.request.headers.no_cookies" + ServerRequestCookiesAddr = "server.request.cookies" + ServerRequestQueryAddr = "server.request.query" + ServerRequestPathParamsAddr = "server.request.path_params" + ServerRequestBodyAddr = "server.request.body" + ServerResponseStatusAddr = "server.response.status" + ServerResponseHeadersNoCookiesAddr = "server.response.headers.no_cookies" + + ClientIPAddr = "http.client_ip" + + UserIDAddr = "usr.id" + UserSessionIDAddr = "usr.session_id" + UserLoginSuccessAddr = "server.business_logic.users.login.success" + UserLoginFailureAddr = "server.business_logic.users.login.failure" + + ServerIoNetURLAddr = "server.io.net.url" + ServerIOFSFileAddr = "server.io.fs.file" + ServerDBStatementAddr = "server.db.statement" + ServerDBTypeAddr = "server.db.system" + + GRPCServerMethodAddr = "grpc.server.method" + GRPCServerRequestMetadataAddr = "grpc.server.request.metadata" + GRPCServerRequestMessageAddr = "grpc.server.request.message" + GRPCServerResponseMessageAddr = "grpc.server.response.message" + GRPCServerResponseMetadataHeadersAddr = "grpc.server.response.metadata.headers" + GRPCServerResponseMetadataTrailersAddr = "grpc.server.response.metadata.trailers" + GRPCServerResponseStatusCodeAddr = "grpc.server.response.status" + + GraphQLServerResolverAddr = "graphql.server.resolver" +) diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses/builder.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses/builder.go new file mode 100644 index 00000000..946a62bc --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses/builder.go @@ -0,0 +1,243 @@ +// Unless explicitly stated otherwise all files in this 
repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package addresses + +import ( + "net/netip" + "strconv" + + waf "github.com/DataDog/go-libddwaf/v3" +) + +const contextProcessKey = "waf.context.processor" + +type RunAddressDataBuilder struct { + waf.RunAddressData +} + +func NewAddressesBuilder() *RunAddressDataBuilder { + return &RunAddressDataBuilder{ + RunAddressData: waf.RunAddressData{ + Persistent: make(map[string]any, 1), + Ephemeral: make(map[string]any, 1), + }, + } +} + +func (b *RunAddressDataBuilder) WithMethod(method string) *RunAddressDataBuilder { + b.Persistent[ServerRequestMethodAddr] = method + return b +} + +func (b *RunAddressDataBuilder) WithRawURI(uri string) *RunAddressDataBuilder { + b.Persistent[ServerRequestRawURIAddr] = uri + return b +} + +func (b *RunAddressDataBuilder) WithHeadersNoCookies(headers map[string][]string) *RunAddressDataBuilder { + if len(headers) == 0 { + headers = nil + } + b.Persistent[ServerRequestHeadersNoCookiesAddr] = headers + return b +} + +func (b *RunAddressDataBuilder) WithCookies(cookies map[string][]string) *RunAddressDataBuilder { + if len(cookies) == 0 { + cookies = nil + } + b.Persistent[ServerRequestCookiesAddr] = cookies + return b +} + +func (b *RunAddressDataBuilder) WithQuery(query map[string][]string) *RunAddressDataBuilder { + if len(query) == 0 { + query = nil + } + b.Persistent[ServerRequestQueryAddr] = query + return b +} + +func (b *RunAddressDataBuilder) WithPathParams(params map[string]string) *RunAddressDataBuilder { + if len(params) == 0 { + return b + } + b.Persistent[ServerRequestPathParamsAddr] = params + return b +} + +func (b *RunAddressDataBuilder) WithRequestBody(body any) *RunAddressDataBuilder { + if body == nil { + return b + } + b.Persistent[ServerRequestBodyAddr] = body + return b +} + +func (b *RunAddressDataBuilder) WithResponseStatus(status int) *RunAddressDataBuilder { + if status == 0 { + return b + } + b.Persistent[ServerResponseStatusAddr] = strconv.Itoa(status) + return b +} + +func (b *RunAddressDataBuilder) WithResponseHeadersNoCookies(headers map[string][]string) *RunAddressDataBuilder { + if len(headers) == 0 { + return b + } + b.Persistent[ServerResponseHeadersNoCookiesAddr] = headers + return b +} + +func (b *RunAddressDataBuilder) WithClientIP(ip netip.Addr) *RunAddressDataBuilder { + if !ip.IsValid() { + return b + } + b.Persistent[ClientIPAddr] = ip.String() + return b +} + +func (b *RunAddressDataBuilder) WithUserID(id string) *RunAddressDataBuilder { + if id == "" { + return b + } + b.Persistent[UserIDAddr] = id + return b +} + +func (b *RunAddressDataBuilder) WithUserSessionID(id string) *RunAddressDataBuilder { + if id == "" { + return b + } + b.Persistent[UserSessionIDAddr] = id + return b + +} + +func (b *RunAddressDataBuilder) WithUserLoginSuccess() *RunAddressDataBuilder { + b.Persistent[UserLoginSuccessAddr] = nil + return b +} + +func (b *RunAddressDataBuilder) WithUserLoginFailure() *RunAddressDataBuilder { + b.Persistent[UserLoginFailureAddr] = nil + return b +} + +func (b *RunAddressDataBuilder) WithFilePath(file string) *RunAddressDataBuilder { + if file == "" { + return b + } + b.Ephemeral[ServerIOFSFileAddr] = file + b.Scope = waf.RASPScope + return b +} + +func (b *RunAddressDataBuilder) WithURL(url string) *RunAddressDataBuilder { + if url == "" { + return b + } + b.Ephemeral[ServerIoNetURLAddr] = url + b.Scope = waf.RASPScope + 
return b +} + +func (b *RunAddressDataBuilder) WithDBStatement(statement string) *RunAddressDataBuilder { + if statement == "" { + return b + } + b.Ephemeral[ServerDBStatementAddr] = statement + b.Scope = waf.RASPScope + return b +} + +func (b *RunAddressDataBuilder) WithDBType(driver string) *RunAddressDataBuilder { + if driver == "" { + return b + } + b.Ephemeral[ServerDBTypeAddr] = driver + b.Scope = waf.RASPScope + return b +} + +func (b *RunAddressDataBuilder) WithGRPCMethod(method string) *RunAddressDataBuilder { + if method == "" { + return b + } + b.Persistent[GRPCServerMethodAddr] = method + return b +} + +func (b *RunAddressDataBuilder) WithGRPCRequestMessage(message any) *RunAddressDataBuilder { + if message == nil { + return b + } + b.Ephemeral[GRPCServerRequestMessageAddr] = message + return b +} + +func (b *RunAddressDataBuilder) WithGRPCRequestMetadata(metadata map[string][]string) *RunAddressDataBuilder { + if len(metadata) == 0 { + return b + } + b.Persistent[GRPCServerRequestMetadataAddr] = metadata + return b +} + +func (b *RunAddressDataBuilder) WithGRPCResponseMessage(message any) *RunAddressDataBuilder { + if message == nil { + return b + } + b.Ephemeral[GRPCServerResponseMessageAddr] = message + return b +} + +func (b *RunAddressDataBuilder) WithGRPCResponseMetadataHeaders(headers map[string][]string) *RunAddressDataBuilder { + if len(headers) == 0 { + return b + } + b.Persistent[GRPCServerResponseMetadataHeadersAddr] = headers + return b +} + +func (b *RunAddressDataBuilder) WithGRPCResponseMetadataTrailers(trailers map[string][]string) *RunAddressDataBuilder { + if len(trailers) == 0 { + return b + } + b.Persistent[GRPCServerResponseMetadataTrailersAddr] = trailers + return b +} + +func (b *RunAddressDataBuilder) WithGRPCResponseStatusCode(status int) *RunAddressDataBuilder { + if status == 0 { + return b + } + b.Persistent[GRPCServerResponseStatusCodeAddr] = strconv.Itoa(status) + return b +} + +func (b *RunAddressDataBuilder) WithGraphQLResolver(fieldName string, args map[string]any) *RunAddressDataBuilder { + if _, ok := b.Ephemeral[GraphQLServerResolverAddr]; !ok { + b.Ephemeral[GraphQLServerResolverAddr] = map[string]any{} + } + + b.Ephemeral[GraphQLServerResolverAddr].(map[string]any)[fieldName] = args + return b +} + +func (b *RunAddressDataBuilder) ExtractSchema() *RunAddressDataBuilder { + if _, ok := b.Persistent[contextProcessKey]; !ok { + b.Persistent[contextProcessKey] = map[string]bool{} + } + + b.Persistent[contextProcessKey].(map[string]bool)["extract-schema"] = true + return b +} + +func (b *RunAddressDataBuilder) Build() waf.RunAddressData { + return b.RunAddressData +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/context.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/context.go new file mode 100644 index 00000000..698e7218 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/context.go @@ -0,0 +1,160 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
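// Illustrative usage sketch (not part of the vendored files above): the
// RunAddressDataBuilder collects WAF addresses fluently and splits them into
// persistent and ephemeral maps. Request-scoped helpers fill Persistent, while
// RASP helpers such as WithURL, WithDBStatement or WithFilePath fill Ephemeral
// and switch the scope to waf.RASPScope. The URI and IP below are made up.
package sketches

import (
	"net/http"
	"net/netip"

	waf "github.com/DataDog/go-libddwaf/v3"

	"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses"
)

func requestAddressesSketch() waf.RunAddressData {
	return addresses.NewAddressesBuilder().
		WithMethod(http.MethodGet).                      // server.request.method
		WithRawURI("/api/items?id=1").                   // server.request.uri.raw
		WithClientIP(netip.MustParseAddr("192.0.2.10")). // http.client_ip
		Build()
}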
+ +package waf + +import ( + "context" + "maps" + "slices" + "sync" + "sync/atomic" + + "github.com/DataDog/appsec-internal-go/limiter" + waf "github.com/DataDog/go-libddwaf/v3" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config" + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace" +) + +type ( + ContextOperation struct { + dyngo.Operation + *trace.ServiceEntrySpanOperation + + // context is an atomic pointer to the current WAF context. + // Makes sure the calls to context.Run are safe. + context atomic.Pointer[waf.Context] + // limiter comes from the WAF feature and is used to limit the number of events as a whole. + limiter limiter.Limiter + // events is where we store WAF events received from the WAF over the course of the request. + events []any + // stacks is where we store stack traces received from the WAF over the course of the request. + stacks []*stacktrace.Event + // derivatives is where we store any span tags generated by the WAF over the course of the request. + derivatives map[string]any + // supportedAddresses is the set of addresses supported by the WAF. + supportedAddresses config.AddressSet + // mu protects the events, stacks, and derivatives, supportedAddresses slices. + mu sync.Mutex + // logOnce is used to log a warning once when a request has too many WAF events via the built-in limiter or the max value. + logOnce sync.Once + } + + ContextArgs struct{} + + ContextRes struct{} + + // RunEvent is the type of event that should be emitted to child operations to run the WAF + RunEvent struct { + waf.RunAddressData + dyngo.Operation + } +) + +func (ContextArgs) IsArgOf(*ContextOperation) {} +func (ContextRes) IsResultOf(*ContextOperation) {} + +func StartContextOperation(ctx context.Context) (*ContextOperation, context.Context) { + entrySpanOp, ctx := trace.StartServiceEntrySpanOperation(ctx) + op := &ContextOperation{ + Operation: dyngo.NewOperation(entrySpanOp), + ServiceEntrySpanOperation: entrySpanOp, + } + return op, dyngo.StartAndRegisterOperation(ctx, op, ContextArgs{}) +} + +func (op *ContextOperation) Finish(span trace.TagSetter) { + dyngo.FinishOperation(op, ContextRes{}) + op.ServiceEntrySpanOperation.Finish(span) +} + +func (op *ContextOperation) SwapContext(ctx *waf.Context) *waf.Context { + return op.context.Swap(ctx) +} + +func (op *ContextOperation) SetLimiter(limiter limiter.Limiter) { + op.limiter = limiter +} + +func (op *ContextOperation) AddEvents(events ...any) { + if len(events) == 0 { + return + } + + if !op.limiter.Allow() { + log.Warn("appsec: too many WAF events, stopping further reporting") + return + } + + op.mu.Lock() + defer op.mu.Unlock() + + const maxWAFEventsPerRequest = 10 + if len(op.events) >= maxWAFEventsPerRequest { + op.logOnce.Do(func() { + log.Warn("appsec: ignoring new WAF event due to the maximum number of security events per request was reached") + }) + return + } + + op.events = append(op.events, events...) +} + +func (op *ContextOperation) AddStackTraces(stacks ...*stacktrace.Event) { + if len(stacks) == 0 { + return + } + + op.mu.Lock() + defer op.mu.Unlock() + op.stacks = append(op.stacks, stacks...) 
+} + +func (op *ContextOperation) AbsorbDerivatives(derivatives map[string]any) { + if len(derivatives) == 0 { + return + } + + op.mu.Lock() + defer op.mu.Unlock() + if op.derivatives == nil { + op.derivatives = make(map[string]any) + } + + for k, v := range derivatives { + op.derivatives[k] = v + } +} + +func (op *ContextOperation) Derivatives() map[string]any { + op.mu.Lock() + defer op.mu.Unlock() + return maps.Clone(op.derivatives) +} + +func (op *ContextOperation) Events() []any { + op.mu.Lock() + defer op.mu.Unlock() + return slices.Clone(op.events) +} + +func (op *ContextOperation) StackTraces() []*stacktrace.Event { + op.mu.Lock() + defer op.mu.Unlock() + return slices.Clone(op.stacks) +} + +func (op *ContextOperation) OnEvent(event RunEvent) { + op.Run(event.Operation, event.RunAddressData) +} + +func (op *ContextOperation) SetSupportedAddresses(addrs config.AddressSet) { + op.supportedAddresses = addrs +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/run.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/run.go new file mode 100644 index 00000000..a77abd5b --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/run.go @@ -0,0 +1,78 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package waf + +import ( + "context" + "errors" + "maps" + + waf "github.com/DataDog/go-libddwaf/v3" + wafErrors "github.com/DataDog/go-libddwaf/v3/errors" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions" + + "gopkg.in/DataDog/dd-trace-go.v1/appsec/events" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" +) + +// Run runs the WAF with the given address data and sends the results to the event receiver +// the event receiver can be the same os the method receiver but not always +// the event receiver is the one that will receive the actions events generated by the WAF +func (op *ContextOperation) Run(eventReceiver dyngo.Operation, addrs waf.RunAddressData) { + ctx := op.context.Load() + if ctx == nil { // Context was closed concurrently + return + } + + // Remove unsupported addresses in case the listener was registered but some addresses are still unsupported + // Technically the WAF does this step for us but doing this check before calling the WAF makes us skip encoding huge + // values that may be discarded by the WAF afterward. + // e.g. gRPC response body address that is not in the default ruleset but will still be sent to the WAF and may be huge + for _, addrType := range []map[string]any{addrs.Persistent, addrs.Ephemeral} { + maps.DeleteFunc(addrType, func(key string, _ any) bool { + _, ok := op.supportedAddresses[key] + return !ok + }) + } + + result, err := ctx.Run(addrs) + if errors.Is(err, wafErrors.ErrTimeout) { + log.Debug("appsec: WAF timeout value reached: %v", err) + } else if err != nil { + log.Error("appsec: unexpected WAF error: %v", err) + } + + op.AddEvents(result.Events...) 
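+ // Derivatives (e.g. API security schemas extracted by the WAF) are buffered
+ // on the context operation and emitted as serializable span tags when the
+ // request finishes.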
+ op.AbsorbDerivatives(result.Derivatives) + + actions.SendActionEvents(eventReceiver, result.Actions) + + if result.HasEvents() { + log.Debug("appsec: WAF detected a suspicious event") + } +} + +// RunSimple runs the WAF with the given address data and returns an error that should be forwarded to the caller +func RunSimple(ctx context.Context, addrs waf.RunAddressData, errorLog string) error { + parent, _ := dyngo.FromContext(ctx) + if parent == nil { + log.Error(errorLog) + return nil + } + + var err error + op := dyngo.NewOperation(parent) + dyngo.OnData(op, func(e *events.BlockingSecurityEvent) { + err = e + }) + dyngo.EmitData(op, RunEvent{ + Operation: op, + RunAddressData: addrs, + }) + return err +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/features.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/features.go new file mode 100644 index 00000000..ca286de7 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/features.go @@ -0,0 +1,81 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package appsec + +import ( + "errors" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/graphqlsec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/grpcsec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/ossec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sqlsec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/trace" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/usersec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/waf" + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" +) + +var features = []listener.NewFeature{ + trace.NewAppsecSpanTransport, + waf.NewWAFFeature, + httpsec.NewHTTPSecFeature, + grpcsec.NewGRPCSecFeature, + graphqlsec.NewGraphQLSecFeature, + usersec.NewUserSecFeature, + sqlsec.NewSQLSecFeature, + ossec.NewOSSecFeature, + httpsec.NewSSRFProtectionFeature, +} + +func (a *appsec) SwapRootOperation() error { + newRoot := dyngo.NewRootOperation() + newFeatures := make([]listener.Feature, 0, len(features)) + var featureErrors []error + for _, newFeature := range features { + feature, err := newFeature(a.cfg, newRoot) + if err != nil { + featureErrors = append(featureErrors, err) + continue + } + + // If error is nil and feature is nil, it means the feature did not activate itself + if feature == nil { + continue + } + + newFeatures = append(newFeatures, feature) + } + + err := errors.Join(featureErrors...) 
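+ // If any feature constructor failed, stop the features that did start and
+ // abort the swap so the current root operation stays in place.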
+ if err != nil { + for _, feature := range newFeatures { + feature.Stop() + } + return err + } + + a.featuresMu.Lock() + defer a.featuresMu.Unlock() + + oldFeatures := a.features + a.features = newFeatures + + log.Debug("appsec: stopping the following features: %v", oldFeatures) + log.Debug("appsec: starting the following features: %v", newFeatures) + + dyngo.SwapRootOperation(newRoot) + + log.Debug("appsec: swapped root operation") + + for _, oldFeature := range oldFeatures { + oldFeature.Stop() + } + + return nil +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/feature.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/feature.go new file mode 100644 index 00000000..6f07e2cd --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/feature.go @@ -0,0 +1,24 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package listener + +import ( + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" +) + +// Feature is an interface that represents a feature that can be started and stopped. +type Feature interface { + // String should return a user-friendly name for the feature. + String() string + // Stop stops the feature. + Stop() +} + +// NewFeature is a function that creates a new feature. +// The error returned will be fatal for the application if not nil. +// If both the feature and the error are nil, the feature will be considered inactive. +type NewFeature func(*config.Config, dyngo.Operation) (Feature, error) diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/graphqlsec/graphql.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/graphqlsec/graphql.go new file mode 100644 index 00000000..cb97baf3 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/graphqlsec/graphql.go @@ -0,0 +1,43 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package graphqlsec + +import ( + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener" +) + +type Feature struct{} + +func (*Feature) String() string { + return "GraphQL Security" +} + +func (*Feature) Stop() {} + +func (f *Feature) OnResolveField(op *graphqlsec.ResolveOperation, args graphqlsec.ResolveOperationArgs) { + dyngo.EmitData(op, waf.RunEvent{ + Operation: op, + RunAddressData: addresses.NewAddressesBuilder(). + WithGraphQLResolver(args.FieldName, args.Arguments). 
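+ // The resolver field name and arguments are passed as an ephemeral address,
+ // so they only apply to this single WAF run.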
+ Build(), + }) +} + +func NewGraphQLSecFeature(config *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if !config.SupportedAddresses.AnyOf(addresses.GraphQLServerResolverAddr) { + return nil, nil + } + + feature := &Feature{} + dyngo.On(rootOp, feature.OnResolveField) + + return feature, nil +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/grpcsec/grpc.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/grpcsec/grpc.go new file mode 100644 index 00000000..52bb2c0f --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/grpcsec/grpc.go @@ -0,0 +1,75 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package grpcsec + +import ( + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" +) + +type Feature struct{} + +func (*Feature) String() string { + return "gRPC Security" +} + +func (*Feature) Stop() {} + +func NewGRPCSecFeature(config *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if !config.SupportedAddresses.AnyOf( + addresses.ClientIPAddr, + addresses.GRPCServerMethodAddr, + addresses.GRPCServerRequestMessageAddr, + addresses.GRPCServerRequestMetadataAddr, + addresses.GRPCServerResponseMessageAddr, + addresses.GRPCServerResponseMetadataHeadersAddr, + addresses.GRPCServerResponseMetadataTrailersAddr, + addresses.GRPCServerResponseStatusCodeAddr) { + return nil, nil + } + + feature := &Feature{} + dyngo.On(rootOp, feature.OnStart) + dyngo.OnFinish(rootOp, feature.OnFinish) + return feature, nil +} + +func (f *Feature) OnStart(op *grpcsec.HandlerOperation, args grpcsec.HandlerOperationArgs) { + ipTags, clientIP := httpsec.ClientIPTags(args.Metadata, false, args.RemoteAddr) + log.Debug("appsec: http client ip detection returned `%s`", clientIP) + + op.SetStringTags(ipTags) + + SetRequestMetadataTags(op, args.Metadata) + + op.Run(op, + addresses.NewAddressesBuilder(). + WithGRPCMethod(args.Method). + WithGRPCRequestMetadata(args.Metadata). + WithClientIP(clientIP). + Build(), + ) +} + +func (f *Feature) OnFinish(op *grpcsec.HandlerOperation, res grpcsec.HandlerOperationRes) { + op.Run(op, + addresses.NewAddressesBuilder(). + WithGRPCResponseStatusCode(res.StatusCode). 
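+ // The status code is converted to a string (strconv.Itoa) before being sent to the WAF.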
+ Build(), + ) +} + +func SetRequestMetadataTags(span trace.TagSetter, metadata map[string][]string) { + for h, v := range httpsec.NormalizeHTTPHeaders(metadata) { + span.SetTag("grpc.metadata."+h, v) + } +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec/http.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec/http.go index c4589313..08b9e853 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec/http.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec/http.go @@ -1,233 +1,96 @@ // Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016 Datadog, Inc. +// Copyright 2024 Datadog, Inc. package httpsec import ( - "fmt" "math/rand" - "sync" - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" + "github.com/DataDog/appsec-internal-go/appsec" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config" "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/types" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/ossec" - shared "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sqlsec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses" "gopkg.in/DataDog/dd-trace-go.v1/internal/log" - "gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames" - - "github.com/DataDog/appsec-internal-go/limiter" - waf "github.com/DataDog/go-libddwaf/v3" -) - -// HTTP rule addresses currently supported by the WAF -const ( - ServerRequestMethodAddr = "server.request.method" - ServerRequestRawURIAddr = "server.request.uri.raw" - ServerRequestHeadersNoCookiesAddr = "server.request.headers.no_cookies" - ServerRequestCookiesAddr = "server.request.cookies" - ServerRequestQueryAddr = "server.request.query" - ServerRequestPathParamsAddr = "server.request.path_params" - ServerRequestBodyAddr = "server.request.body" - ServerResponseStatusAddr = "server.response.status" - ServerResponseHeadersNoCookiesAddr = "server.response.headers.no_cookies" - HTTPClientIPAddr = "http.client_ip" - UserIDAddr = "usr.id" - ServerIoNetURLAddr = "server.io.net.url" ) -// List of HTTP rule addresses currently supported by the WAF -var supportedAddresses = listener.AddressSet{ - ServerRequestMethodAddr: {}, - ServerRequestRawURIAddr: {}, - ServerRequestHeadersNoCookiesAddr: {}, - ServerRequestCookiesAddr: {}, - ServerRequestQueryAddr: {}, - ServerRequestPathParamsAddr: {}, - ServerRequestBodyAddr: {}, - ServerResponseStatusAddr: {}, - ServerResponseHeadersNoCookiesAddr: {}, - HTTPClientIPAddr: {}, - UserIDAddr: {}, - ServerIoNetURLAddr: {}, - ossec.ServerIOFSFileAddr: {}, - sqlsec.ServerDBStatementAddr: {}, - sqlsec.ServerDBTypeAddr: {}, -} - -// Install registers the HTTP WAF Event Listener on the given root operation. 
-func Install(wafHandle *waf.Handle, cfg *config.Config, lim limiter.Limiter, root dyngo.Operation) { - if listener := newWafEventListener(wafHandle, cfg, lim); listener != nil { - log.Debug("appsec: registering the HTTP WAF Event Listener") - dyngo.On(root, listener.onEvent) - } +type Feature struct { + APISec appsec.APISecConfig } -type wafEventListener struct { - wafHandle *waf.Handle - config *config.Config - addresses listener.AddressSet - limiter limiter.Limiter - wafDiags waf.Diagnostics - once sync.Once +func (*Feature) String() string { + return "HTTP Security" } -// newWAFEventListener returns the WAF event listener to register in order to enable it. -func newWafEventListener(wafHandle *waf.Handle, cfg *config.Config, limiter limiter.Limiter) *wafEventListener { - if wafHandle == nil { - log.Debug("appsec: no WAF Handle available, the HTTP WAF Event Listener will not be registered") - return nil +func (*Feature) Stop() {} + +func NewHTTPSecFeature(config *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if !config.SupportedAddresses.AnyOf(addresses.ServerRequestMethodAddr, + addresses.ServerRequestRawURIAddr, + addresses.ServerRequestHeadersNoCookiesAddr, + addresses.ServerRequestCookiesAddr, + addresses.ServerRequestQueryAddr, + addresses.ServerRequestPathParamsAddr, + addresses.ServerRequestBodyAddr, + addresses.ServerResponseStatusAddr, + addresses.ServerResponseHeadersNoCookiesAddr) { + return nil, nil } - addresses := listener.FilterAddressSet(supportedAddresses, wafHandle) - if len(addresses) == 0 { - log.Debug("appsec: no supported HTTP address is used by currently loaded WAF rules, the HTTP WAF Event Listener will not be registered") - return nil + feature := &Feature{ + APISec: config.APISec, } - return &wafEventListener{ - wafHandle: wafHandle, - config: cfg, - addresses: addresses, - limiter: limiter, - wafDiags: wafHandle.Diagnostics(), - } + dyngo.On(rootOp, feature.OnRequest) + dyngo.OnFinish(rootOp, feature.OnResponse) + return feature, nil } -func (l *wafEventListener) onEvent(op *types.Operation, args types.HandlerOperationArgs) { - wafCtx, err := l.wafHandle.NewContextWithBudget(l.config.WAFTimeout) - if err != nil { - log.Debug("appsec: could not create budgeted WAF context: %v", err) - } - // Early return in the following cases: - // - wafCtx is nil, meaning it was concurrently released - // - err is not nil, meaning context creation failed - if wafCtx == nil || err != nil { - // The WAF event listener got concurrently released - return - } - - if SSRFAddressesPresent(l.addresses) { - dyngo.On(op, shared.MakeWAFRunListener(&op.SecurityEventsHolder, wafCtx, l.limiter, func(args types.RoundTripOperationArgs) waf.RunAddressData { - return waf.RunAddressData{Ephemeral: map[string]any{ServerIoNetURLAddr: args.URL}} - })) - } - - if ossec.OSAddressesPresent(l.addresses) { - ossec.RegisterOpenListener(op, &op.SecurityEventsHolder, wafCtx, l.limiter) - } - - if sqlsec.SQLAddressesPresent(l.addresses) { - sqlsec.RegisterSQLListener(op, &op.SecurityEventsHolder, wafCtx, l.limiter) - } - - if _, ok := l.addresses[UserIDAddr]; ok { - // OnUserIDOperationStart happens when appsec.SetUser() is called. We run the WAF and apply actions to - // see if the associated user should be blocked. Since we don't control the execution flow in this case - // (SetUser is SDK), we delegate the responsibility of interrupting the handler to the user. 
- dyngo.On(op, shared.MakeWAFRunListener(&op.SecurityEventsHolder, wafCtx, l.limiter, func(args sharedsec.UserIDOperationArgs) waf.RunAddressData { - return waf.RunAddressData{Persistent: map[string]any{UserIDAddr: args.UserID}} - })) - } +func (feature *Feature) OnRequest(op *httpsec.HandlerOperation, args httpsec.HandlerOperationArgs) { + tags, ip := ClientIPTags(args.Headers, true, args.RemoteAddr) + log.Debug("appsec: http client ip detection returned `%s` given the http headers `%v`", ip, args.Headers) + + op.SetStringTags(tags) + headers := headersRemoveCookies(args.Headers) + headers["host"] = []string{args.Host} + + setRequestHeadersTags(op, headers) + + op.Run(op, + addresses.NewAddressesBuilder(). + WithMethod(args.Method). + WithRawURI(args.RequestURI). + WithHeadersNoCookies(headers). + WithCookies(args.Cookies). + WithQuery(args.QueryParams). + WithPathParams(args.PathParams). + WithClientIP(ip). + Build(), + ) +} - values := make(map[string]any, 8) - for addr := range l.addresses { - switch addr { - case HTTPClientIPAddr: - if args.ClientIP.IsValid() { - values[HTTPClientIPAddr] = args.ClientIP.String() - } - case ServerRequestMethodAddr: - values[ServerRequestMethodAddr] = args.Method - case ServerRequestRawURIAddr: - values[ServerRequestRawURIAddr] = args.RequestURI - case ServerRequestHeadersNoCookiesAddr: - if headers := args.Headers; headers != nil { - values[ServerRequestHeadersNoCookiesAddr] = headers - } - case ServerRequestCookiesAddr: - if cookies := args.Cookies; cookies != nil { - values[ServerRequestCookiesAddr] = cookies - } - case ServerRequestQueryAddr: - if query := args.Query; query != nil { - values[ServerRequestQueryAddr] = query - } - case ServerRequestPathParamsAddr: - if pathParams := args.PathParams; pathParams != nil { - values[ServerRequestPathParamsAddr] = pathParams - } - } - } +func (feature *Feature) OnResponse(op *httpsec.HandlerOperation, resp httpsec.HandlerOperationRes) { + headers := headersRemoveCookies(resp.Headers) + setResponseHeadersTags(op, headers) - wafResult := shared.RunWAF(wafCtx, waf.RunAddressData{Persistent: values}) - if wafResult.HasActions() || wafResult.HasEvents() { - interrupt := shared.ProcessActions(op, wafResult.Actions) - shared.AddSecurityEvents(&op.SecurityEventsHolder, l.limiter, wafResult.Events) - log.Debug("appsec: WAF detected an attack before executing the request") - if interrupt { - wafCtx.Close() - return - } - } + builder := addresses.NewAddressesBuilder(). + WithResponseHeadersNoCookies(headers). + WithResponseStatus(resp.StatusCode) - if _, ok := l.addresses[ServerRequestBodyAddr]; ok { - dyngo.On(op, shared.MakeWAFRunListener(&op.SecurityEventsHolder, wafCtx, l.limiter, func(args types.SDKBodyOperationArgs) waf.RunAddressData { - return waf.RunAddressData{Persistent: map[string]any{ServerRequestBodyAddr: args.Body}} - })) + if feature.canExtractSchemas() { + builder = builder.ExtractSchema() } - dyngo.OnFinish(op, func(op *types.Operation, res types.HandlerOperationRes) { - defer wafCtx.Close() - - values = make(map[string]any, 3) - if l.canExtractSchemas() { - // This address will be passed as persistent. The WAF will keep it in store and trigger schema extraction - // for each run. - values["waf.context.processor"] = map[string]any{"extract-schema": true} - } - - if _, ok := l.addresses[ServerResponseStatusAddr]; ok { - // serverResponseStatusAddr is a string address, so we must format the status code... 
- values[ServerResponseStatusAddr] = fmt.Sprintf("%d", res.Status) - } - - if _, ok := l.addresses[ServerResponseHeadersNoCookiesAddr]; ok && res.Headers != nil { - values[ServerResponseHeadersNoCookiesAddr] = res.Headers - } - - // Run the WAF, ignoring the returned actions - if any - since blocking after the request handler's - // response is not supported at the moment. - wafResult := shared.RunWAF(wafCtx, waf.RunAddressData{Persistent: values}) - - // Add WAF metrics. - shared.AddWAFMonitoringTags(op, l.wafDiags.Version, wafCtx.Stats().Metrics()) - - // Add the following metrics once per instantiation of a WAF handle - l.once.Do(func() { - shared.AddRulesMonitoringTags(op, &l.wafDiags) - op.SetTag(ext.ManualKeep, samplernames.AppSec) - }) - - // Log the attacks if any - if wafResult.HasEvents() { - log.Debug("appsec: attack detected by the waf") - shared.AddSecurityEvents(&op.SecurityEventsHolder, l.limiter, wafResult.Events) - } - for tag, value := range wafResult.Derivatives { - op.AddSerializableTag(tag, value) - } - }) + op.Run(op, builder.Build()) } // canExtractSchemas checks that API Security is enabled and that sampling rate // allows extracting schemas -func (l *wafEventListener) canExtractSchemas() bool { - return l.config.APISec.Enabled && l.config.APISec.SampleRate >= rand.Float64() +func (feature *Feature) canExtractSchemas() bool { + return feature.APISec.Enabled && feature.APISec.SampleRate >= rand.Float64() } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/httptrace/http.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec/request.go similarity index 71% rename from vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/httptrace/http.go rename to vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec/request.go index 25ed275e..abd39831 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/httptrace/http.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec/request.go @@ -1,19 +1,19 @@ // Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016 Datadog, Inc. +// Copyright 2024 Datadog, Inc. -package httptrace +package httpsec import ( + "net/http" "net/netip" "os" "strings" "github.com/DataDog/appsec-internal-go/httpsec" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace" ) const ( @@ -23,7 +23,7 @@ const ( var ( // defaultIPHeaders is the default list of IP-related headers leveraged to - // retrieve the public client IP address in ClientIP. + // retrieve the public client IP address in RemoteAddr. defaultIPHeaders = []string{ "x-forwarded-for", "x-real-ip", @@ -34,7 +34,7 @@ var ( "x-cluster-client-ip", "fastly-client-ip", "cf-connecting-ip", - "cf-connecting-ip6", + "cf-connecting-ipv6", } // defaultCollectedHeaders is the default list of HTTP headers collected as @@ -67,7 +67,7 @@ var ( collectedHeadersLookupMap map[string]struct{} // monitoredClientIPHeadersCfg is the list of IP-related headers leveraged to - // retrieve the public client IP address in ClientIP. This is defined at init + // retrieve the public client IP address in RemoteAddr. This is defined at init // time in function of the value of the envClientIPHeader environment variable. 
monitoredClientIPHeadersCfg []string ) @@ -75,7 +75,7 @@ var ( // ClientIPTags returns the resulting Datadog span tags `http.client_ip` // containing the client IP and `network.client.ip` containing the remote IP. // The tags are present only if a valid ip address has been returned by -// ClientIP(). +// RemoteAddr(). func ClientIPTags(headers map[string][]string, hasCanonicalHeaders bool, remoteAddr string) (tags map[string]string, clientIP netip.Addr) { remoteIP, clientIP := httpsec.ClientIP(headers, hasCanonicalHeaders, remoteAddr, monitoredClientIPHeadersCfg) tags = httpsec.ClientIPTags(remoteIP, clientIP) @@ -101,6 +101,20 @@ func NormalizeHTTPHeaders(headers map[string][]string) (normalized map[string]st return normalized } +// Remove cookies from the request headers and return the map of headers +// Used from `server.request.headers.no_cookies` and server.response.headers.no_cookies` addresses for the WAF +func headersRemoveCookies(headers http.Header) map[string][]string { + headersNoCookies := make(http.Header, len(headers)) + for k, v := range headers { + k := strings.ToLower(k) + if k == "cookie" { + continue + } + headersNoCookies[k] = v + } + return headersNoCookies +} + func normalizeHTTPHeaderName(name string) string { return strings.ToLower(name) } @@ -109,13 +123,6 @@ func normalizeHTTPHeaderValue(values []string) string { return strings.Join(values, ",") } -// SetSecurityEventsTags sets the AppSec-specific span tags when a security event occurred into the service entry span. -func SetSecurityEventsTags(span trace.TagSetter, events []any) { - if err := trace.SetEventSpanTags(span, events); err != nil { - log.Error("appsec: unexpected error while creating the appsec events tags: %v", err) - } -} - func init() { makeCollectedHTTPHeadersLookupMap() readMonitoredClientIPHeadersConfig() @@ -130,7 +137,7 @@ func makeCollectedHTTPHeadersLookupMap() { func readMonitoredClientIPHeadersConfig() { if header := os.Getenv(envClientIPHeader); header != "" { - // Make this header the only one to consider in ClientIP + // Make this header the only one to consider in RemoteAddr monitoredClientIPHeadersCfg = []string{header} // Add this header to the list of collected headers @@ -141,3 +148,20 @@ func readMonitoredClientIPHeadersConfig() { monitoredClientIPHeadersCfg = defaultIPHeaders } } + +// setRequestHeadersTags sets the AppSec-specific request headers span tags. +func setRequestHeadersTags(span trace.TagSetter, headers map[string][]string) { + setHeadersTags(span, "http.request.headers.", headers) +} + +// setResponseHeadersTags sets the AppSec-specific response headers span tags. +func setResponseHeadersTags(span trace.TagSetter, headers map[string][]string) { + setHeadersTags(span, "http.response.headers.", headers) +} + +// setHeadersTags sets the AppSec-specific headers span tags. 
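+// Header names are lower-cased and multi-value headers joined with commas by
+// NormalizeHTTPHeaders before the tags are set.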
+func setHeadersTags(span trace.TagSetter, tagPrefix string, headers map[string][]string) { + for h, v := range NormalizeHTTPHeaders(headers) { + span.SetTag(tagPrefix+h, v) + } +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec/roundtripper.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec/roundtripper.go index 0d510246..b72e8e83 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec/roundtripper.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec/roundtripper.go @@ -6,24 +6,35 @@ package httpsec import ( + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config" "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/types" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses" "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace" - - "github.com/DataDog/appsec-internal-go/limiter" - "github.com/DataDog/go-libddwaf/v3" ) -// RegisterRoundTripperListener registers a listener on outgoing HTTP client requests to run the WAF. -func RegisterRoundTripperListener(op dyngo.Operation, events *trace.SecurityEventsHolder, wafCtx *waf.Context, limiter limiter.Limiter) { - dyngo.On(op, sharedsec.MakeWAFRunListener(events, wafCtx, limiter, func(args types.RoundTripOperationArgs) waf.RunAddressData { - return waf.RunAddressData{Ephemeral: map[string]any{ServerIoNetURLAddr: args.URL}} - })) +type SSRFProtectionFeature struct{} + +func (*SSRFProtectionFeature) String() string { + return "SSRF Protection" +} + +func (*SSRFProtectionFeature) Stop() {} + +func NewSSRFProtectionFeature(config *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if !config.RASP || !config.SupportedAddresses.AnyOf(addresses.ServerIoNetURLAddr) { + return nil, nil + } + + feature := &SSRFProtectionFeature{} + dyngo.On(rootOp, feature.OnStart) + return feature, nil } -func SSRFAddressesPresent(addresses listener.AddressSet) bool { - _, urlAddr := addresses[ServerIoNetURLAddr] - return urlAddr +func (*SSRFProtectionFeature) OnStart(op *httpsec.RoundTripOperation, args httpsec.RoundTripOperationArgs) { + dyngo.EmitData(op, waf.RunEvent{ + Operation: op, + RunAddressData: addresses.NewAddressesBuilder().WithURL(args.URL).Build(), + }) } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/listener.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/listener.go deleted file mode 100644 index 7435b29e..00000000 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/listener.go +++ /dev/null @@ -1,28 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016 Datadog, Inc. - -// Package listener provides functions and types used to listen to AppSec -// instrumentation events produced by code usintrumented using the functions and -// types found in gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter. -package listener - -import waf "github.com/DataDog/go-libddwaf/v3" - -// AddressSet is a set of WAF addresses. 
-type AddressSet map[string]struct{} - -// FilterAddressSet filters the supplied `supported` address set to only include -// entries referenced by the supplied waf.Handle. -func FilterAddressSet(supported AddressSet, handle *waf.Handle) AddressSet { - result := make(AddressSet, len(supported)) - - for _, addr := range handle.Addresses() { - if _, found := supported[addr]; found { - result[addr] = struct{}{} - } - } - - return result -} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/ossec/lfi.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/ossec/lfi.go index 12b32e1b..31616511 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/ossec/lfi.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/ossec/lfi.go @@ -8,39 +8,46 @@ package ossec import ( "os" - "github.com/DataDog/appsec-internal-go/limiter" - waf "github.com/DataDog/go-libddwaf/v3" - "gopkg.in/DataDog/dd-trace-go.v1/appsec/events" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config" "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/ossec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses" "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace" ) -const ( - ServerIOFSFileAddr = "server.io.fs.file" -) +type Feature struct{} -func RegisterOpenListener(op dyngo.Operation, eventsHolder *trace.SecurityEventsHolder, wafCtx *waf.Context, limiter limiter.Limiter) { - runWAF := sharedsec.MakeWAFRunListener(eventsHolder, wafCtx, limiter, func(args ossec.OpenOperationArgs) waf.RunAddressData { - return waf.RunAddressData{Ephemeral: map[string]any{ServerIOFSFileAddr: args.Path}} - }) +func (*Feature) String() string { + return "LFI Protection" +} + +func (*Feature) Stop() {} - dyngo.On(op, func(op *ossec.OpenOperation, args ossec.OpenOperationArgs) { - dyngo.OnData(op, func(e *events.BlockingSecurityEvent) { - dyngo.OnFinish(op, func(_ *ossec.OpenOperation, res ossec.OpenOperationRes[*os.File]) { - if res.Err != nil { - *res.Err = e - } - }) +func NewOSSecFeature(cfg *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if !cfg.RASP || !cfg.SupportedAddresses.AnyOf(addresses.ServerIOFSFileAddr) { + return nil, nil + } + + feature := &Feature{} + dyngo.On(rootOp, feature.OnStart) + return feature, nil +} + +func (*Feature) OnStart(op *ossec.OpenOperation, args ossec.OpenOperationArgs) { + dyngo.OnData(op, func(err *events.BlockingSecurityEvent) { + dyngo.OnFinish(op, func(_ *ossec.OpenOperation, res ossec.OpenOperationRes[*os.File]) { + if res.Err != nil { + *res.Err = err + } }) - runWAF(op, args) }) -} -func OSAddressesPresent(addresses listener.AddressSet) bool { - _, fileAddr := addresses[ServerIOFSFileAddr] - return fileAddr + dyngo.EmitData(op, waf.RunEvent{ + Operation: op, + RunAddressData: addresses.NewAddressesBuilder(). + WithFilePath(args.Path). 
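+ // The opened file path is sent to the WAF under the server.io.fs.file address.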
+ Build(), + }) } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec/shared.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec/shared.go deleted file mode 100644 index 096a77b8..00000000 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec/shared.go +++ /dev/null @@ -1,143 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016 Datadog, Inc. - -package sharedsec - -import ( - "encoding/json" - "errors" - - "gopkg.in/DataDog/dd-trace-go.v1/appsec/events" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace" - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" - - "github.com/DataDog/appsec-internal-go/limiter" - waf "github.com/DataDog/go-libddwaf/v3" - wafErrors "github.com/DataDog/go-libddwaf/v3/errors" -) - -const ( - eventRulesVersionTag = "_dd.appsec.event_rules.version" - eventRulesErrorsTag = "_dd.appsec.event_rules.errors" - eventRulesLoadedTag = "_dd.appsec.event_rules.loaded" - eventRulesFailedTag = "_dd.appsec.event_rules.error_count" - wafVersionTag = "_dd.appsec.waf.version" -) - -func RunWAF(wafCtx *waf.Context, values waf.RunAddressData) waf.Result { - result, err := wafCtx.Run(values) - if errors.Is(err, wafErrors.ErrTimeout) { - log.Debug("appsec: waf timeout value reached: %v", err) - } else if err != nil { - log.Error("appsec: unexpected waf error: %v", err) - } - return result -} - -func MakeWAFRunListener[O dyngo.Operation, T dyngo.ArgOf[O]]( - events *trace.SecurityEventsHolder, - wafCtx *waf.Context, - limiter limiter.Limiter, - toRunAddressData func(T) waf.RunAddressData, -) func(O, T) { - return func(op O, args T) { - wafResult := RunWAF(wafCtx, toRunAddressData(args)) - if !wafResult.HasEvents() { - return - } - - log.Debug("appsec: WAF detected a suspicious WAF event") - - ProcessActions(op, wafResult.Actions) - AddSecurityEvents(events, limiter, wafResult.Events) - } -} - -// AddSecurityEvents is a helper function to add sec events to an operation taking into account the rate limiter. 
-func AddSecurityEvents(holder *trace.SecurityEventsHolder, limiter limiter.Limiter, matches []any) { - if len(matches) > 0 && limiter.Allow() { - holder.AddSecurityEvents(matches) - } -} - -// AddRulesMonitoringTags adds the tags related to security rules monitoring -func AddRulesMonitoringTags(th trace.TagSetter, wafDiags *waf.Diagnostics) { - rInfo := wafDiags.Rules - if rInfo == nil { - return - } - - if len(rInfo.Errors) == 0 { - rInfo.Errors = nil - } - rulesetErrors, err := json.Marshal(wafDiags.Rules.Errors) - if err != nil { - log.Error("appsec: could not marshal the waf ruleset info errors to json") - } - th.SetTag(eventRulesErrorsTag, string(rulesetErrors)) // avoid the tracer's call to fmt.Sprintf on the value - th.SetTag(eventRulesLoadedTag, len(rInfo.Loaded)) - th.SetTag(eventRulesFailedTag, len(rInfo.Failed)) - th.SetTag(wafVersionTag, waf.Version()) -} - -// AddWAFMonitoringTags adds the tags related to the monitoring of the WAF -func AddWAFMonitoringTags(th trace.TagSetter, rulesVersion string, stats map[string]any) { - // Rules version is set for every request to help the backend associate WAF duration metrics with rule version - th.SetTag(eventRulesVersionTag, rulesVersion) - - // Report the stats sent by the WAF - for k, v := range stats { - th.SetTag(k, v) - } -} - -// ProcessActions sends the relevant actions to the operation's data listener. -// It returns true if at least one of those actions require interrupting the request handler -// When SDKError is not nil, this error is sent to the op with EmitData so that the invoked SDK can return it -func ProcessActions(op dyngo.Operation, actions map[string]any) (interrupt bool) { - for aType, params := range actions { - log.Debug("appsec: processing %s action with params %v", aType, params) - actionArray := ActionsFromEntry(aType, params) - if actionArray == nil { - log.Debug("cannot process %s action with params %v", aType, params) - continue - } - for _, a := range actionArray { - a.EmitData(op) - interrupt = interrupt || a.Blocking() - } - } - - // If any of the actions are supposed to interrupt the request, emit a blocking event for the SDK operations - // to return an error. 
- if interrupt { - dyngo.EmitData(op, &events.BlockingSecurityEvent{}) - } - - return interrupt -} - -// ActionsFromEntry returns one or several actions generated from the WAF returned action entry -// Several actions are returned when the action is of block_request type since we could be blocking HTTP or GRPC -func ActionsFromEntry(actionType string, params any) []sharedsec.Action { - p, ok := params.(map[string]any) - if !ok { - return nil - } - switch actionType { - case "block_request": - return sharedsec.NewBlockAction(p) - case "redirect_request": - return []sharedsec.Action{sharedsec.NewRedirectAction(p)} - case "generate_stack": - return []sharedsec.Action{sharedsec.NewStackTraceAction(p)} - - default: - log.Debug("appsec: unknown action type `%s`", actionType) - return nil - } -} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sqlsec/sql.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sqlsec/sql.go index 6e8f046f..3a6fbc07 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sqlsec/sql.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sqlsec/sql.go @@ -6,31 +6,38 @@ package sqlsec import ( + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config" "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec/types" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses" "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace" - - "github.com/DataDog/appsec-internal-go/limiter" - waf "github.com/DataDog/go-libddwaf/v3" ) -const ( - ServerDBStatementAddr = "server.db.statement" - ServerDBTypeAddr = "server.db.system" -) +type Feature struct{} -func RegisterSQLListener(op dyngo.Operation, events *trace.SecurityEventsHolder, wafCtx *waf.Context, limiter limiter.Limiter) { - dyngo.On(op, sharedsec.MakeWAFRunListener(events, wafCtx, limiter, func(args types.SQLOperationArgs) waf.RunAddressData { - return waf.RunAddressData{Ephemeral: map[string]any{ServerDBStatementAddr: args.Query, ServerDBTypeAddr: args.Driver}} - })) +func (*Feature) String() string { + return "SQLi Protection" } -func SQLAddressesPresent(addresses listener.AddressSet) bool { - _, queryAddr := addresses[ServerDBStatementAddr] - _, driverAddr := addresses[ServerDBTypeAddr] +func (*Feature) Stop() {} - return queryAddr || driverAddr +func NewSQLSecFeature(cfg *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if !cfg.RASP || !cfg.SupportedAddresses.AnyOf(addresses.ServerDBTypeAddr, addresses.ServerDBStatementAddr) { + return nil, nil + } + + feature := &Feature{} + dyngo.On(rootOp, feature.OnStart) + return feature, nil +} +func (*Feature) OnStart(op *sqlsec.SQLOperation, args sqlsec.SQLOperationArgs) { + dyngo.EmitData(op, waf.RunEvent{ + Operation: op, + RunAddressData: addresses.NewAddressesBuilder(). + WithDBStatement(args.Query). + WithDBType(args.Driver). 
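+ // The raw query (server.db.statement) and driver name (server.db.system) are
+ // sent as RASP-scoped addresses.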
+ Build(), + }) } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/trace/trace.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/trace/trace.go new file mode 100644 index 00000000..45fb28e9 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/trace/trace.go @@ -0,0 +1,53 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package trace + +import ( + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener" +) + +// AppSec-specific span tags that are expected to +// be in the web service entry span (span of type `web`) when AppSec is enabled. +var staticAppsecTags = map[string]any{ + "_dd.appsec.enabled": 1, + "_dd.runtime_family": "go", +} + +type AppsecSpanTransport struct{} + +func (*AppsecSpanTransport) String() string { + return "Appsec Span Transport" +} + +func (*AppsecSpanTransport) Stop() {} + +func NewAppsecSpanTransport(_ *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + ast := &AppsecSpanTransport{} + + dyngo.On(rootOp, ast.OnServiceEntryStart) + dyngo.On(rootOp, ast.OnSpanStart) + + return ast, nil +} + +// OnServiceEntryStart is the start listener of the trace.ServiceEntrySpanOperation start event. +// It listens for tags and serializable tags and sets them on the span when finishing the operation. +func (*AppsecSpanTransport) OnServiceEntryStart(op *trace.ServiceEntrySpanOperation, _ trace.ServiceEntrySpanArgs) { + op.SetTags(staticAppsecTags) + dyngo.OnData(op, op.OnSpanTagEvent) + dyngo.OnData(op, op.OnServiceEntrySpanTagEvent) + dyngo.OnData(op, op.OnJSONServiceEntrySpanTagEvent) + dyngo.OnData(op, op.OnServiceEntrySpanTagsBulkEvent) +} + +// OnSpanStart is the start listener of the trace.SpanOperation start event. +// It listens for tags and sets them on the current span when finishing the operation. +func (*AppsecSpanTransport) OnSpanStart(op *trace.SpanOperation, _ trace.SpanArgs) { + dyngo.OnData(op, op.OnSpanTagEvent) +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/usersec/usec.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/usersec/usec.go new file mode 100644 index 00000000..c8a64580 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/usersec/usec.go @@ -0,0 +1,54 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
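+
+// This listener turns user login events from the usersec emitter into WAF runs
+// over the user ID, session ID and login success/failure addresses.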
+ +package usersec + +import ( + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/usersec" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener" +) + +type Feature struct{} + +func (*Feature) String() string { + return "User Security" +} + +func (*Feature) Stop() {} + +func NewUserSecFeature(cfg *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if !cfg.SupportedAddresses.AnyOf( + addresses.UserIDAddr, + addresses.UserSessionIDAddr, + addresses.UserLoginSuccessAddr, + addresses.UserLoginFailureAddr) { + return nil, nil + } + + feature := &Feature{} + dyngo.OnFinish(rootOp, feature.OnFinish) + return feature, nil +} + +func (*Feature) OnFinish(op *usersec.UserLoginOperation, res usersec.UserLoginOperationRes) { + builder := addresses.NewAddressesBuilder(). + WithUserID(res.UserID). + WithUserSessionID(res.SessionID) + + if res.Success { + builder = builder.WithUserLoginSuccess() + } else { + builder = builder.WithUserLoginFailure() + } + + dyngo.EmitData(op, waf.RunEvent{ + Operation: op, + RunAddressData: builder.Build(), + }) +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/waf/tags.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/waf/tags.go new file mode 100644 index 00000000..bac41ce5 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/waf/tags.go @@ -0,0 +1,101 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package waf + +import ( + "encoding/json" + "fmt" + + waf "github.com/DataDog/go-libddwaf/v3" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace" + "gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames" +) + +const ( + wafSpanTagPrefix = "_dd.appsec." 
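+
+ // Tags describing the loaded security rules and the reported WAF version.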
+ eventRulesVersionTag = wafSpanTagPrefix + "event_rules.version" + eventRulesErrorsTag = wafSpanTagPrefix + "event_rules.errors" + eventRulesLoadedTag = wafSpanTagPrefix + "event_rules.loaded" + eventRulesFailedTag = wafSpanTagPrefix + "event_rules.error_count" + wafVersionTag = wafSpanTagPrefix + "waf.version" + + // BlockedRequestTag used to convey whether a request is blocked + BlockedRequestTag = "appsec.blocked" +) + +// AddRulesMonitoringTags adds the tags related to security rules monitoring +func AddRulesMonitoringTags(th trace.TagSetter, wafDiags waf.Diagnostics) { + rInfo := wafDiags.Rules + if rInfo == nil { + return + } + + var rulesetErrors []byte + var err error + rulesetErrors, err = json.Marshal(wafDiags.Rules.Errors) + if err != nil { + log.Error("appsec: could not marshal the waf ruleset info errors to json") + } + th.SetTag(eventRulesErrorsTag, string(rulesetErrors)) + th.SetTag(eventRulesLoadedTag, len(rInfo.Loaded)) + th.SetTag(eventRulesFailedTag, len(rInfo.Failed)) + th.SetTag(wafVersionTag, waf.Version()) + th.SetTag(ext.ManualKeep, samplernames.AppSec) +} + +// AddWAFMonitoringTags adds the tags related to the monitoring of the Feature +func AddWAFMonitoringTags(th trace.TagSetter, rulesVersion string, stats map[string]any) { + // Rules version is set for every request to help the backend associate Feature duration metrics with rule version + th.SetTag(eventRulesVersionTag, rulesVersion) + + // Report the stats sent by the Feature + for k, v := range stats { + th.SetTag(wafSpanTagPrefix+k, v) + } +} + +// SetEventSpanTags sets the security event span tags into the service entry span. +func SetEventSpanTags(span trace.TagSetter, events []any) error { + if len(events) == 0 { + return nil + } + + // Set the appsec event span tag + val, err := makeEventTagValue(events) + if err != nil { + return err + } + span.SetTag("_dd.appsec.json", string(val)) + // Keep this span due to the security event + // + // This is a workaround to tell the tracer that the trace was kept by AppSec. + // Passing any other value than `appsec.SamplerAppSec` has no effect. + // Customers should use `span.SetTag(ext.ManualKeep, true)` pattern + // to keep the trace, manually. + span.SetTag(ext.ManualKeep, samplernames.AppSec) + span.SetTag("_dd.origin", "appsec") + // Set the appsec.event tag needed by the appsec backend + span.SetTag("appsec.event", true) + return nil +} + +// Create the value of the security event tag. +func makeEventTagValue(events []any) (json.RawMessage, error) { + type eventTagValue struct { + Triggers []any `json:"triggers"` + } + + tag, err := json.Marshal(eventTagValue{events}) + if err != nil { + return nil, fmt.Errorf("unexpected error while serializing the appsec event span tag: %v", err) + } + + return tag, nil +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/waf/waf.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/waf/waf.go new file mode 100644 index 00000000..308eaa25 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/waf/waf.go @@ -0,0 +1,128 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
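+
+// This feature owns the libddwaf handle: it opens a time-budgeted WAF context
+// when a request starts, wires in the rate limiter and supported addresses, and
+// reports WAF metrics, security events, derivatives and stack traces on the
+// span when the request finishes.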
+ +package waf + +import ( + "fmt" + "sync" + "time" + + "github.com/DataDog/appsec-internal-go/limiter" + wafv3 "github.com/DataDog/go-libddwaf/v3" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener" + + "gopkg.in/DataDog/dd-trace-go.v1/appsec/events" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf" + "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions" + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace" +) + +type Feature struct { + timeout time.Duration + limiter *limiter.TokenTicker + handle *wafv3.Handle + supportedAddrs config.AddressSet + reportRulesTags sync.Once +} + +func NewWAFFeature(cfg *config.Config, rootOp dyngo.Operation) (listener.Feature, error) { + if ok, err := wafv3.Load(); err != nil { + // 1. If there is an error and the loading is not ok: log as an unexpected error case and quit appsec + // Note that we assume here that the test for the unsupported target has been done before calling + // this method, so it is now considered an error for this method + if !ok { + return nil, fmt.Errorf("error while loading libddwaf: %w", err) + } + // 2. If there is an error and the loading is ok: log as an informative error where appsec can be used + log.Error("appsec: non-critical error while loading libddwaf: %v", err) + } + + newHandle, err := wafv3.NewHandle(cfg.RulesManager.Latest, cfg.Obfuscator.KeyRegex, cfg.Obfuscator.ValueRegex) + if err != nil { + return nil, err + } + + cfg.SupportedAddresses = config.NewAddressSet(newHandle.Addresses()) + + tokenTicker := limiter.NewTokenTicker(cfg.TraceRateLimit, cfg.TraceRateLimit) + tokenTicker.Start() + + feature := &Feature{ + handle: newHandle, + timeout: cfg.WAFTimeout, + limiter: tokenTicker, + supportedAddrs: cfg.SupportedAddresses, + } + + dyngo.On(rootOp, feature.onStart) + dyngo.OnFinish(rootOp, feature.onFinish) + + return feature, nil +} + +func (waf *Feature) onStart(op *waf.ContextOperation, _ waf.ContextArgs) { + waf.reportRulesTags.Do(func() { + AddRulesMonitoringTags(op, waf.handle.Diagnostics()) + }) + + ctx, err := waf.handle.NewContextWithBudget(waf.timeout) + if err != nil { + log.Debug("appsec: failed to create Feature context: %v", err) + } + + op.SwapContext(ctx) + op.SetLimiter(waf.limiter) + op.SetSupportedAddresses(waf.supportedAddrs) + + // Run the WAF with the given address data + dyngo.OnData(op, op.OnEvent) + + waf.SetupActionHandlers(op) +} + +func (waf *Feature) SetupActionHandlers(op *waf.ContextOperation) { + // Set the blocking tag on the operation when a blocking event is received + dyngo.OnData(op, func(_ *events.BlockingSecurityEvent) { + op.SetTag(BlockedRequestTag, true) + }) + + // Register the stacktrace if one is requested by a WAF action + dyngo.OnData(op, func(err *actions.StackTraceAction) { + op.AddStackTraces(err.Event) + }) +} + +func (waf *Feature) onFinish(op *waf.ContextOperation, _ waf.ContextRes) { + ctx := op.SwapContext(nil) + if ctx == nil { + return + } + + ctx.Close() + + AddWAFMonitoringTags(op, waf.handle.Diagnostics().Version, ctx.Stats().Metrics()) + if err := SetEventSpanTags(op, op.Events()); err != nil { + log.Debug("appsec: failed to set event span tags: %v", err) + } + + op.SetSerializableTags(op.Derivatives()) + if stacks := op.StackTraces(); len(stacks) > 0 { + op.SetTag(stacktrace.SpanKey, stacktrace.GetSpanValue(stacks...)) + } +} + +func (*Feature) 
String() string { + return "Web Application Firewall" +} + +func (waf *Feature) Stop() { + waf.limiter.Stop() + waf.handle.Close() +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/remoteconfig.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/remoteconfig.go index 9dddf65a..ccf6fe76 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/remoteconfig.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/remoteconfig.go @@ -9,10 +9,12 @@ import ( "encoding/json" "errors" "fmt" + "maps" "os" "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config" "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion" "gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig" internal "github.com/DataDog/appsec-internal-go/appsec" @@ -41,20 +43,13 @@ func statusesFromUpdate(u remoteconfig.ProductUpdate, ack bool, err error) map[s return statuses } -func mergeMaps[K comparable, V any](m1 map[K]V, m2 map[K]V) map[K]V { - for key, value := range m2 { - m1[key] = value - } - return m1 -} - // combineRCRulesUpdates updates the state of the given RulesManager with the combination of all the provided rules updates func combineRCRulesUpdates(r *config.RulesManager, updates map[string]remoteconfig.ProductUpdate) (statuses map[string]rc.ApplyStatus, err error) { // Spare some re-allocations (but there may still be some because 1 update may contain N configs) statuses = make(map[string]rc.ApplyStatus, len(updates)) // Set the default statuses for all updates to unacknowledged for _, u := range updates { - statuses = mergeMaps(statuses, statusesFromUpdate(u, false, nil)) + maps.Copy(statuses, statusesFromUpdate(u, false, nil)) } updateLoop: @@ -66,9 +61,9 @@ updateLoop: switch p { case rc.ProductASMData: // Merge all rules data entries together and store them as a RulesManager edit entry - rulesData, status := mergeRulesData(u) - statuses = mergeMaps(statuses, status) - r.AddEdit("asmdata", config.RulesFragment{RulesData: rulesData}) + fragment, status := mergeASMDataUpdates(u) + maps.Copy(statuses, status) + r.AddEdit("asmdata", fragment) case rc.ProductASMDD: var ( removalFound = false @@ -83,7 +78,7 @@ updateLoop: } // Already seen a removal or an update, return an error if err != nil { - statuses = mergeMaps(statuses, statusesFromUpdate(u, true, err)) + maps.Copy(statuses, statusesFromUpdate(u, true, err)) break updateLoop } @@ -103,7 +98,7 @@ updateLoop: if removalFound { log.Debug("appsec: Remote config: ASM_DD config removed. 
Switching back to default rules") r.ChangeBase(config.DefaultRulesFragment(), "") - statuses = mergeMaps(statuses, statusesFromUpdate(u, true, nil)) + maps.Copy(statuses, statusesFromUpdate(u, true, nil)) } continue } @@ -145,7 +140,7 @@ updateLoop: // Set all statuses to ack if no error occured if err == nil { for _, u := range updates { - statuses = mergeMaps(statuses, statusesFromUpdate(u, true, nil)) + maps.Copy(statuses, statusesFromUpdate(u, true, nil)) } } @@ -182,17 +177,18 @@ func (a *appsec) onRCRulesUpdate(updates map[string]remoteconfig.ProductUpdate) r.Compile() log.Debug("appsec: Remote config: final compiled rules: %s", r.String()) + // Replace the RulesManager with the new one holding the new state + a.cfg.RulesManager = &r + // If an error occurs while updating the WAF handle, don't swap the RulesManager and propagate the error // to all config statuses since we can't know which config is the faulty one - if err = a.swapWAF(r.Latest); err != nil { + if err = a.SwapRootOperation(); err != nil { log.Error("appsec: Remote config: could not apply the new security rules: %v", err) for k := range statuses { statuses[k] = genApplyStatus(true, err) } return statuses } - // Replace the RulesManager with the new one holding the new state - a.cfg.RulesManager = &r return statuses } @@ -240,12 +236,41 @@ func (a *appsec) handleASMFeatures(u remoteconfig.ProductUpdate) map[string]rc.A return statuses } -func mergeRulesData(u remoteconfig.ProductUpdate) ([]config.RuleDataEntry, map[string]rc.ApplyStatus) { +func mergeASMDataUpdates(u remoteconfig.ProductUpdate) (config.RulesFragment, map[string]rc.ApplyStatus) { // Following the RFC, merging should only happen when two rules data with the same ID and same Type are received - // allRulesData[ID][Type] will return the rules data of said id and type, if it exists - allRulesData := make(map[string]map[string]config.RuleDataEntry) + type mapKey struct { + id string + typ string + } + mergedRulesData := make(map[mapKey]config.DataEntry) + mergedExclusionData := make(map[mapKey]config.DataEntry) statuses := statusesFromUpdate(u, true, nil) + mergeUpdateEntry := func(mergeMap map[mapKey]config.DataEntry, data []config.DataEntry) { + for _, ruleData := range data { + key := mapKey{id: ruleData.ID, typ: ruleData.Type} + if data, ok := mergeMap[key]; ok { + // Merge rules data entries with the same ID and Type + mergeMap[key] = config.DataEntry{ + ID: data.ID, + Type: data.Type, + Data: mergeRulesDataEntries(data.Data, ruleData.Data), + } + continue + } + + mergeMap[key] = ruleData + } + } + + mapValues := func(m map[mapKey]config.DataEntry) []config.DataEntry { + values := make([]config.DataEntry, 0, len(m)) + for _, v := range m { + values = append(values, v) + } + return values + } + for path, raw := range u { log.Debug("appsec: Remote config: processing %s", path) @@ -257,36 +282,30 @@ func mergeRulesData(u remoteconfig.ProductUpdate) ([]config.RuleDataEntry, map[s continue } - var rulesData config.RulesData - if err := json.Unmarshal(raw, &rulesData); err != nil { + var asmdataUpdate struct { + RulesData []config.DataEntry `json:"rules_data,omitempty"` + ExclusionData []config.DataEntry `json:"exclusion_data,omitempty"` + } + if err := json.Unmarshal(raw, &asmdataUpdate); err != nil { log.Debug("appsec: Remote config: error while unmarshalling payload for %s: %v. 
Configuration won't be applied.", path, err) statuses[path] = genApplyStatus(false, err) continue } - // Check each entry against allRulesData to see if merging is necessary - for _, ruleData := range rulesData.RulesData { - if allRulesData[ruleData.ID] == nil { - allRulesData[ruleData.ID] = make(map[string]config.RuleDataEntry) - } - if data, ok := allRulesData[ruleData.ID][ruleData.Type]; ok { - // Merge rules data entries with the same ID and Type - data.Data = mergeRulesDataEntries(data.Data, ruleData.Data) - allRulesData[ruleData.ID][ruleData.Type] = data - } else { - allRulesData[ruleData.ID][ruleData.Type] = ruleData - } - } + mergeUpdateEntry(mergedExclusionData, asmdataUpdate.ExclusionData) + mergeUpdateEntry(mergedRulesData, asmdataUpdate.RulesData) } - // Aggregate all the rules data before passing it over to the WAF - var rulesData []config.RuleDataEntry - for _, m := range allRulesData { - for _, data := range m { - rulesData = append(rulesData, data) - } + var fragment config.RulesFragment + if len(mergedRulesData) > 0 { + fragment.RulesData = mapValues(mergedRulesData) + } + + if len(mergedExclusionData) > 0 { + fragment.ExclusionData = mapValues(mergedExclusionData) } - return rulesData, statuses + + return fragment, statuses } // mergeRulesDataEntries merges two slices of rules data entries together, removing duplicates and @@ -372,6 +391,11 @@ var blockingCapabilities = [...]remoteconfig.Capability{ remoteconfig.ASMCustomRules, remoteconfig.ASMCustomBlockingResponse, remoteconfig.ASMTrustedIPs, + remoteconfig.ASMExclusionData, + remoteconfig.ASMEndpointFingerprinting, + remoteconfig.ASMSessionFingerprinting, + remoteconfig.ASMNetworkFingerprinting, + remoteconfig.ASMHeaderFingerprinting, } func (a *appsec) enableRCBlocking() { @@ -409,7 +433,14 @@ func (a *appsec) enableRASP() { if err := remoteconfig.RegisterCapability(remoteconfig.ASMRASPSSRF); err != nil { log.Debug("appsec: Remote config: couldn't register RASP SSRF: %v", err) } - // TODO: register other RASP capabilities when supported + if err := remoteconfig.RegisterCapability(remoteconfig.ASMRASPSQLI); err != nil { + log.Debug("appsec: Remote config: couldn't register RASP SQLI: %v", err) + } + if orchestrion.Enabled() { + if err := remoteconfig.RegisterCapability(remoteconfig.ASMRASPLFI); err != nil { + log.Debug("appsec: Remote config: couldn't register RASP LFI: %v", err) + } + } } func (a *appsec) disableRCBlocking() { diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/securityholder.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/securityholder.go deleted file mode 100644 index d61ae8f4..00000000 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/securityholder.go +++ /dev/null @@ -1,49 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016 Datadog, Inc. - -package trace - -import ( - "sync" -) - -// SecurityEventsHolder is a wrapper around a thread safe security events slice. -// The purpose of this struct is to be used by composition in an Operation to -// allow said operation to handle security events addition/retrieval. -type SecurityEventsHolder struct { - events []any - mu sync.RWMutex -} - -// AddSecurityEvents adds the security events to the collected events list. -// Thread safe. 
-func (s *SecurityEventsHolder) AddSecurityEvents(events []any) { - if len(events) == 0 { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - s.events = append(s.events, events...) -} - -// Events returns the list of stored events. -func (s *SecurityEventsHolder) Events() []any { - s.mu.RLock() - defer s.mu.RUnlock() - // Return a copy, since the lock is released upon return. - clone := make([]any, len(s.events)) - for i, e := range s.events { - clone[i] = e - } - return clone -} - -// ClearEvents clears the list of stored events -func (s *SecurityEventsHolder) ClearEvents() { - s.mu.Lock() - defer s.mu.Unlock() - s.events = s.events[0:0] -} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/tagsholder.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/tagsholder.go deleted file mode 100644 index 65ead108..00000000 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/tagsholder.go +++ /dev/null @@ -1,70 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016 Datadog, Inc. - -package trace - -import ( - "encoding/json" - "sync" - - "gopkg.in/DataDog/dd-trace-go.v1/internal/log" -) - -type serializableTag struct { - tag any -} - -func (t serializableTag) MarshalJSON() ([]byte, error) { - return json.Marshal(t.tag) -} - -// TagsHolder wraps a map holding tags. The purpose of this struct is to be -// used by composition in an Operation to allow said operation to handle tags -// addition/retrieval. -type TagsHolder struct { - tags map[string]any - mu sync.RWMutex -} - -// NewTagsHolder returns a new instance of a TagsHolder struct. -func NewTagsHolder() TagsHolder { - return TagsHolder{tags: make(map[string]any)} -} - -// SetTag adds the key/value pair to the tags map -func (m *TagsHolder) SetTag(k string, v any) { - m.mu.Lock() - defer m.mu.Unlock() - m.tags[k] = v -} - -// AddSerializableTag adds the key/value pair to the tags map. Value is serialized as JSON. -func (m *TagsHolder) AddSerializableTag(k string, v any) { - m.mu.Lock() - defer m.mu.Unlock() - m.tags[k] = serializableTag{tag: v} -} - -// Tags returns a copy of the aggregated tags map (normal and serialized) -func (m *TagsHolder) Tags() map[string]any { - tags := make(map[string]any, len(m.tags)) - m.mu.RLock() - defer m.mu.RUnlock() - for k, v := range m.tags { - tags[k] = v - marshaler, ok := v.(serializableTag) - if !ok { - continue - } - if marshaled, err := marshaler.MarshalJSON(); err == nil { - tags[k] = string(marshaled) - } else { - log.Debug("appsec: could not marshal serializable tag %s: %v", k, err) - } - } - return tags -} - -var _ TagSetter = (*TagsHolder)(nil) // *TagsHolder must implement TagSetter diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/trace.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/trace.go deleted file mode 100644 index 0700cc85..00000000 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/trace.go +++ /dev/null @@ -1,85 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016 Datadog, Inc. - -// Package trace provides functions to annotate trace spans with AppSec related -// information. 
-package trace - -import ( - "encoding/json" - "fmt" - - "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" - "gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames" -) - -// BlockedRequestTag used to convey whether a request is blocked -const BlockedRequestTag = "appsec.blocked" - -// TagSetter is the interface needed to set a span tag. -type TagSetter interface { - SetTag(string, any) -} - -// NoopTagSetter is a TagSetter that does nothing. Useful when no tracer -// Span is available, but a TagSetter is assumed. -type NoopTagSetter struct{} - -func (NoopTagSetter) SetTag(string, any) { - // Do nothing -} - -// SetAppSecEnabledTags sets the AppSec-specific span tags that are expected to -// be in the web service entry span (span of type `web`) when AppSec is enabled. -func SetAppSecEnabledTags(span TagSetter) { - span.SetTag("_dd.appsec.enabled", 1) - span.SetTag("_dd.runtime_family", "go") -} - -// SetEventSpanTags sets the security event span tags into the service entry span. -func SetEventSpanTags(span TagSetter, events []any) error { - if len(events) == 0 { - return nil - } - - // Set the appsec event span tag - val, err := makeEventTagValue(events) - if err != nil { - return err - } - span.SetTag("_dd.appsec.json", string(val)) - // Keep this span due to the security event - // - // This is a workaround to tell the tracer that the trace was kept by AppSec. - // Passing any other value than `appsec.SamplerAppSec` has no effect. - // Customers should use `span.SetTag(ext.ManualKeep, true)` pattern - // to keep the trace, manually. - span.SetTag(ext.ManualKeep, samplernames.AppSec) - span.SetTag("_dd.origin", "appsec") - // Set the appsec.event tag needed by the appsec backend - span.SetTag("appsec.event", true) - return nil -} - -// SetTags fills the span tags using the key/value pairs found in `tags` -func SetTags[V any](span TagSetter, tags map[string]V) { - for k, v := range tags { - span.SetTag(k, v) - } -} - -// Create the value of the security event tag. -func makeEventTagValue(events []any) (json.RawMessage, error) { - type eventTagValue struct { - Triggers []any `json:"triggers"` - } - - tag, err := json.Marshal(eventTagValue{events}) - if err != nil { - return nil, fmt.Errorf("unexpected error while serializing the appsec event span tag: %v", err) - } - - return tag, nil -} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/waf.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/waf.go deleted file mode 100644 index c1ec4c47..00000000 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/waf.go +++ /dev/null @@ -1,66 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016 Datadog, Inc. 
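// Editorial note (not part of the vendored diff): the file deleted below previously
// rebuilt a WAF handle and hot-swapped dyngo's root operation on every rules update
// (swapWAF). In the updated tracer that responsibility moves to the emitter/waf
// Feature shown earlier in this diff, and remoteconfig.go now calls
// a.SwapRootOperation() instead of a.swapWAF(...). A rough, library-agnostic sketch
// of the hot-swap pattern using only the standard library; every name below is an
// illustrative assumption, not the dd-trace-go API:
package main

import (
	"fmt"
	"sync/atomic"
)

// rootOperation stands in for the root of the listener tree that requests attach to.
type rootOperation struct{ rulesVersion string }

var current atomic.Pointer[rootOperation]

// swapRoot installs a freshly built root; in-flight requests keep using whatever
// pointer they already loaded, which is the property the real swap relies on.
func swapRoot(next *rootOperation) *rootOperation {
	return current.Swap(next)
}

func main() {
	current.Store(&rootOperation{rulesVersion: "base"})
	old := swapRoot(&rootOperation{rulesVersion: "remote-config-update"})
	fmt.Printf("replaced %q with %q\n", old.rulesVersion, current.Load().rulesVersion)
}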
- -package appsec - -import ( - "github.com/DataDog/appsec-internal-go/limiter" - waf "github.com/DataDog/go-libddwaf/v3" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config" - "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo" -) - -func (a *appsec) swapWAF(rules config.RulesFragment) (err error) { - // Instantiate a new WAF handle and verify its state - newHandle, err := waf.NewHandle(rules, a.cfg.Obfuscator.KeyRegex, a.cfg.Obfuscator.ValueRegex) - if err != nil { - return err - } - - // Close the WAF handle in case of an error in what's following - defer func() { - if err != nil { - newHandle.Close() - } - }() - - newRoot := dyngo.NewRootOperation() - for _, fn := range wafEventListeners { - fn(newHandle, a.cfg, a.limiter, newRoot) - } - - // Hot-swap dyngo's root operation - dyngo.SwapRootOperation(newRoot) - - // Close old handle. - // Note that concurrent requests are still using it, and it will be released - // only when no more requests use it. - // TODO: implement in dyngo ref-counting of the root operation so we can - // rely on a Finish event listener on the root operation instead? - // Avoiding saving the current WAF handle would guarantee no one is - // accessing a.wafHandle while we swap - oldHandle := a.wafHandle - a.wafHandle = newHandle - if oldHandle != nil { - oldHandle.Close() - } - - return nil -} - -type wafEventListener func(*waf.Handle, *config.Config, limiter.Limiter, dyngo.Operation) - -// wafEventListeners is the global list of event listeners registered by contribs at init time. This -// is thread-safe assuming all writes (via AddWAFEventListener) are performed within `init` -// functions; so this is written to only during initialization, and is read from concurrently only -// during runtime when no writes are happening anymore. -var wafEventListeners []wafEventListener - -// AddWAFEventListener adds a new WAF event listener to be registered whenever a new root operation -// is created. The normal way to use this is to call it from a `func init() {}` so that it is -// guaranteed to have happened before any listened to event may be emitted. -func AddWAFEventListener(fn wafEventListener) { - wafEventListeners = append(wafEventListeners, fn) -} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/env.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/env.go index ebc00fcb..66e6de76 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/env.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/env.go @@ -24,4 +24,17 @@ const ( // This environment variable should be set to your Datadog API key, allowing the agentless mode to authenticate and // send data directly to the Datadog platform. APIKeyEnvironmentVariable = "DD_API_KEY" + + // CIVisibilityTestSessionNameEnvironmentVariable indicate the test session name to be used on CI Visibility payloads + CIVisibilityTestSessionNameEnvironmentVariable = "DD_TEST_SESSION_NAME" + + // CIVisibilityFlakyRetryEnabledEnvironmentVariable kill-switch that allows to explicitly disable retries even if the remote setting is enabled. + // This environment variable should be set to "0" or "false" to disable the flaky retry feature. + CIVisibilityFlakyRetryEnabledEnvironmentVariable = "DD_CIVISIBILITY_FLAKY_RETRY_ENABLED" + + // CIVisibilityFlakyRetryCountEnvironmentVariable indicates the maximum number of retry attempts for a single test case. 
+ CIVisibilityFlakyRetryCountEnvironmentVariable = "DD_CIVISIBILITY_FLAKY_RETRY_COUNT" + + // CIVisibilityTotalFlakyRetryCountEnvironmentVariable indicates the maximum number of retry attempts for the entire session. + CIVisibilityTotalFlakyRetryCountEnvironmentVariable = "DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT" ) diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/tags.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/tags.go index 4563cb88..0a6d6908 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/tags.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/tags.go @@ -10,6 +10,10 @@ const ( // This tag helps in identifying the source of the trace data. Origin = "_dd.origin" + // LogicalCPUCores is a tag used to indicate the number of logical cpu cores + // This tag is used by the backend to perform calculations + LogicalCPUCores = "_dd.host.vcpu_count" + // CIAppTestOrigin defines the CIApp test origin value. // This constant is used to tag traces that originate from CIApp test executions. CIAppTestOrigin = "ciapp-test" diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/test_tags.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/test_tags.go index 3a621a20..ccd4fa46 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/test_tags.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants/test_tags.go @@ -46,6 +46,10 @@ const ( // This constant is used to tag traces with the line number in the source file where the test starts. TestSourceStartLine = "test.source.start" + // TestSourceEndLine indicates the line of the source file where the test ends. + // This constant is used to tag traces with the line number in the source file where the test ends. + TestSourceEndLine = "test.source.end" + // TestCodeOwners indicates the test code owners. // This constant is used to tag traces with the code owners responsible for the test. TestCodeOwners = "test.codeowners" @@ -61,6 +65,14 @@ const ( // TestCommandWorkingDirectory indicates the test command working directory relative to the source root. // This constant is used to tag traces with the working directory path relative to the source root. TestCommandWorkingDirectory = "test.working_directory" + + // TestSessionName indicates the test session name + // This constant is used to tag traces with the test session name + TestSessionName = "test_session.name" + + // TestIsRetry indicates a retry execution + // This constant is used to tag test events that are part of a retry execution + TestIsRetry = "test.is_retry" ) // Define valid test status types. diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/ci_providers.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/ci_providers.go new file mode 100644 index 00000000..9b714ce5 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/ci_providers.go @@ -0,0 +1,578 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
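// Editorial note (not part of the vendored diff): ci_providers.go, added below, detects
// the CI provider by probing one well-known environment variable per provider and then
// calling that provider's extractor to build the tag map (getProviderTags). A minimal
// standalone sketch of that dispatch pattern follows; the tag keys are illustrative
// assumptions, only BITRISE_BUILD_SLUG is taken from the vendored provider table.
package main

import (
	"fmt"
	"os"
)

func main() {
	// One sentinel environment variable per provider, mapped to an extractor.
	providers := map[string]func() map[string]string{
		"BITRISE_BUILD_SLUG": func() map[string]string {
			return map[string]string{
				"ci.provider.name": "bitrise",
				"ci.pipeline.id":   os.Getenv("BITRISE_BUILD_SLUG"),
			}
		},
	}

	tags := map[string]string{}
	for sentinel, extract := range providers {
		if _, ok := os.LookupEnv(sentinel); ok {
			tags = extract()
		}
	}
	fmt.Println(tags)
}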
+ +package utils + +import ( + "encoding/json" + "fmt" + "os" + "regexp" + "sort" + "strings" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants" + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" +) + +// providerType defines a function type that returns a map of string key-value pairs. +type providerType = func() map[string]string + +// providers maps environment variable names to their corresponding CI provider extraction functions. +var providers = map[string]providerType{ + "APPVEYOR": extractAppveyor, + "TF_BUILD": extractAzurePipelines, + "BITBUCKET_COMMIT": extractBitbucket, + "BUDDY": extractBuddy, + "BUILDKITE": extractBuildkite, + "CIRCLECI": extractCircleCI, + "GITHUB_SHA": extractGithubActions, + "GITLAB_CI": extractGitlab, + "JENKINS_URL": extractJenkins, + "TEAMCITY_VERSION": extractTeamcity, + "TRAVIS": extractTravis, + "BITRISE_BUILD_SLUG": extractBitrise, + "CF_BUILD_ID": extractCodefresh, + "CODEBUILD_INITIATOR": extractAwsCodePipeline, +} + +// getEnvVarsJSON returns a JSON representation of the specified environment variables. +func getEnvVarsJSON(envVars ...string) ([]byte, error) { + envVarsMap := make(map[string]string) + for _, envVar := range envVars { + value := os.Getenv(envVar) + if value != "" { + envVarsMap[envVar] = value + } + } + return json.Marshal(envVarsMap) +} + +// getProviderTags extracts CI information from environment variables. +func getProviderTags() map[string]string { + tags := map[string]string{} + for key, provider := range providers { + if _, ok := os.LookupEnv(key); !ok { + continue + } + tags = provider() + } + + // replace with user specific tags + replaceWithUserSpecificTags(tags) + + // Normalize tags + normalizeTags(tags) + + // Expand ~ + if tag, ok := tags[constants.CIWorkspacePath]; ok && tag != "" { + tags[constants.CIWorkspacePath] = ExpandPath(tag) + } + + // remove empty values + for tag, value := range tags { + if value == "" { + delete(tags, tag) + } + } + + if log.DebugEnabled() { + if providerName, ok := tags[constants.CIProviderName]; ok { + log.Debug("civisibility: detected ci provider: %v", providerName) + } else { + log.Debug("civisibility: no ci provider was detected.") + } + } + + return tags +} + +// normalizeTags normalizes specific tags to remove prefixes and sensitive information. +func normalizeTags(tags map[string]string) { + if tag, ok := tags[constants.GitBranch]; ok && tag != "" { + if strings.Contains(tag, "refs/tags") || strings.Contains(tag, "origin/tags") || strings.Contains(tag, "refs/heads/tags") { + tags[constants.GitTag] = normalizeRef(tag) + } + tags[constants.GitBranch] = normalizeRef(tag) + } + if tag, ok := tags[constants.GitTag]; ok && tag != "" { + tags[constants.GitTag] = normalizeRef(tag) + } + if tag, ok := tags[constants.GitRepositoryURL]; ok && tag != "" { + tags[constants.GitRepositoryURL] = filterSensitiveInfo(tag) + } + if tag, ok := tags[constants.CIPipelineURL]; ok && tag != "" { + tags[constants.CIPipelineURL] = filterSensitiveInfo(tag) + } + if tag, ok := tags[constants.CIJobURL]; ok && tag != "" { + tags[constants.CIJobURL] = filterSensitiveInfo(tag) + } + if tag, ok := tags[constants.CIEnvVars]; ok && tag != "" { + tags[constants.CIEnvVars] = filterSensitiveInfo(tag) + } +} + +// replaceWithUserSpecificTags replaces certain tags with user-specific environment variable values. 
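// Editorial note (not part of the vendored diff): normalizeRef, defined above, strips
// the common Git ref prefixes ("refs/heads/", "refs/", "origin/", "tags/") in that
// order, so branch refs and tag refs both reduce to their short names. A self-contained
// sketch with worked examples; trimRef is an illustrative restatement, not the vendored
// function:
package main

import (
	"fmt"
	"strings"
)

func trimRef(name string) string {
	for _, prefix := range []string{"refs/heads/", "refs/", "origin/", "tags/"} {
		name = strings.TrimPrefix(name, prefix)
	}
	return name
}

func main() {
	fmt.Println(trimRef("refs/heads/feature/webhooks")) // feature/webhooks
	fmt.Println(trimRef("origin/tags/v1.2.3"))          // v1.2.3
}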
+func replaceWithUserSpecificTags(tags map[string]string) { + replace := func(tagName, envName string) { + tags[tagName] = getEnvironmentVariableIfIsNotEmpty(envName, tags[tagName]) + } + + replace(constants.GitBranch, "DD_GIT_BRANCH") + replace(constants.GitTag, "DD_GIT_TAG") + replace(constants.GitRepositoryURL, "DD_GIT_REPOSITORY_URL") + replace(constants.GitCommitSHA, "DD_GIT_COMMIT_SHA") + replace(constants.GitCommitMessage, "DD_GIT_COMMIT_MESSAGE") + replace(constants.GitCommitAuthorName, "DD_GIT_COMMIT_AUTHOR_NAME") + replace(constants.GitCommitAuthorEmail, "DD_GIT_COMMIT_AUTHOR_EMAIL") + replace(constants.GitCommitAuthorDate, "DD_GIT_COMMIT_AUTHOR_DATE") + replace(constants.GitCommitCommitterName, "DD_GIT_COMMIT_COMMITTER_NAME") + replace(constants.GitCommitCommitterEmail, "DD_GIT_COMMIT_COMMITTER_EMAIL") + replace(constants.GitCommitCommitterDate, "DD_GIT_COMMIT_COMMITTER_DATE") +} + +// getEnvironmentVariableIfIsNotEmpty returns the environment variable value if it is not empty, otherwise returns the default value. +func getEnvironmentVariableIfIsNotEmpty(key string, defaultValue string) string { + if value, ok := os.LookupEnv(key); ok && value != "" { + return value + } + return defaultValue +} + +// normalizeRef normalizes a Git reference name by removing common prefixes. +func normalizeRef(name string) string { + // Define the prefixes to remove + prefixes := []string{"refs/heads/", "refs/", "origin/", "tags/"} + + // Iterate over prefixes and remove them if present + for _, prefix := range prefixes { + if strings.HasPrefix(name, prefix) { + name = strings.TrimPrefix(name, prefix) + } + } + return name +} + +// firstEnv returns the value of the first non-empty environment variable from the provided list. +func firstEnv(keys ...string) string { + for _, key := range keys { + if value, ok := os.LookupEnv(key); ok { + if value != "" { + return value + } + } + } + return "" +} + +// extractAppveyor extracts CI information specific to Appveyor. +func extractAppveyor() map[string]string { + tags := map[string]string{} + url := fmt.Sprintf("https://ci.appveyor.com/project/%s/builds/%s", os.Getenv("APPVEYOR_REPO_NAME"), os.Getenv("APPVEYOR_BUILD_ID")) + tags[constants.CIProviderName] = "appveyor" + if os.Getenv("APPVEYOR_REPO_PROVIDER") == "github" { + tags[constants.GitRepositoryURL] = fmt.Sprintf("https://github.com/%s.git", os.Getenv("APPVEYOR_REPO_NAME")) + } else { + tags[constants.GitRepositoryURL] = os.Getenv("APPVEYOR_REPO_NAME") + } + + tags[constants.GitCommitSHA] = os.Getenv("APPVEYOR_REPO_COMMIT") + tags[constants.GitBranch] = firstEnv("APPVEYOR_PULL_REQUEST_HEAD_REPO_BRANCH", "APPVEYOR_REPO_BRANCH") + tags[constants.GitTag] = os.Getenv("APPVEYOR_REPO_TAG_NAME") + + tags[constants.CIWorkspacePath] = os.Getenv("APPVEYOR_BUILD_FOLDER") + tags[constants.CIPipelineID] = os.Getenv("APPVEYOR_BUILD_ID") + tags[constants.CIPipelineName] = os.Getenv("APPVEYOR_REPO_NAME") + tags[constants.CIPipelineNumber] = os.Getenv("APPVEYOR_BUILD_NUMBER") + tags[constants.CIPipelineURL] = url + tags[constants.CIJobURL] = url + tags[constants.GitCommitMessage] = fmt.Sprintf("%s\n%s", os.Getenv("APPVEYOR_REPO_COMMIT_MESSAGE"), os.Getenv("APPVEYOR_REPO_COMMIT_MESSAGE_EXTENDED")) + tags[constants.GitCommitAuthorName] = os.Getenv("APPVEYOR_REPO_COMMIT_AUTHOR") + tags[constants.GitCommitAuthorEmail] = os.Getenv("APPVEYOR_REPO_COMMIT_AUTHOR_EMAIL") + return tags +} + +// extractAzurePipelines extracts CI information specific to Azure Pipelines. 
+func extractAzurePipelines() map[string]string { + tags := map[string]string{} + baseURL := fmt.Sprintf("%s%s/_build/results?buildId=%s", os.Getenv("SYSTEM_TEAMFOUNDATIONSERVERURI"), os.Getenv("SYSTEM_TEAMPROJECTID"), os.Getenv("BUILD_BUILDID")) + pipelineURL := baseURL + jobURL := fmt.Sprintf("%s&view=logs&j=%s&t=%s", baseURL, os.Getenv("SYSTEM_JOBID"), os.Getenv("SYSTEM_TASKINSTANCEID")) + branchOrTag := firstEnv("SYSTEM_PULLREQUEST_SOURCEBRANCH", "BUILD_SOURCEBRANCH", "BUILD_SOURCEBRANCHNAME") + branch := "" + tag := "" + if strings.Contains(branchOrTag, "tags/") { + tag = branchOrTag + } else { + branch = branchOrTag + } + tags[constants.CIProviderName] = "azurepipelines" + tags[constants.CIWorkspacePath] = os.Getenv("BUILD_SOURCESDIRECTORY") + + tags[constants.CIPipelineID] = os.Getenv("BUILD_BUILDID") + tags[constants.CIPipelineName] = os.Getenv("BUILD_DEFINITIONNAME") + tags[constants.CIPipelineNumber] = os.Getenv("BUILD_BUILDID") + tags[constants.CIPipelineURL] = pipelineURL + + tags[constants.CIStageName] = os.Getenv("SYSTEM_STAGEDISPLAYNAME") + + tags[constants.CIJobName] = os.Getenv("SYSTEM_JOBDISPLAYNAME") + tags[constants.CIJobURL] = jobURL + + tags[constants.GitRepositoryURL] = firstEnv("SYSTEM_PULLREQUEST_SOURCEREPOSITORYURI", "BUILD_REPOSITORY_URI") + tags[constants.GitCommitSHA] = firstEnv("SYSTEM_PULLREQUEST_SOURCECOMMITID", "BUILD_SOURCEVERSION") + tags[constants.GitBranch] = branch + tags[constants.GitTag] = tag + tags[constants.GitCommitMessage] = os.Getenv("BUILD_SOURCEVERSIONMESSAGE") + tags[constants.GitCommitAuthorName] = os.Getenv("BUILD_REQUESTEDFORID") + tags[constants.GitCommitAuthorEmail] = os.Getenv("BUILD_REQUESTEDFOREMAIL") + + jsonString, err := getEnvVarsJSON("SYSTEM_TEAMPROJECTID", "BUILD_BUILDID", "SYSTEM_JOBID") + if err == nil { + tags[constants.CIEnvVars] = string(jsonString) + } + + return tags +} + +// extractBitrise extracts CI information specific to Bitrise. +func extractBitrise() map[string]string { + tags := map[string]string{} + tags[constants.CIProviderName] = "bitrise" + tags[constants.GitRepositoryURL] = os.Getenv("GIT_REPOSITORY_URL") + tags[constants.GitCommitSHA] = firstEnv("BITRISE_GIT_COMMIT", "GIT_CLONE_COMMIT_HASH") + tags[constants.GitBranch] = firstEnv("BITRISEIO_GIT_BRANCH_DEST", "BITRISE_GIT_BRANCH") + tags[constants.GitTag] = os.Getenv("BITRISE_GIT_TAG") + tags[constants.CIWorkspacePath] = os.Getenv("BITRISE_SOURCE_DIR") + tags[constants.CIPipelineID] = os.Getenv("BITRISE_BUILD_SLUG") + tags[constants.CIPipelineName] = os.Getenv("BITRISE_TRIGGERED_WORKFLOW_ID") + tags[constants.CIPipelineNumber] = os.Getenv("BITRISE_BUILD_NUMBER") + tags[constants.CIPipelineURL] = os.Getenv("BITRISE_BUILD_URL") + tags[constants.GitCommitMessage] = os.Getenv("BITRISE_GIT_MESSAGE") + return tags +} + +// extractBitbucket extracts CI information specific to Bitbucket. 
+func extractBitbucket() map[string]string { + tags := map[string]string{} + url := fmt.Sprintf("https://bitbucket.org/%s/addon/pipelines/home#!/results/%s", os.Getenv("BITBUCKET_REPO_FULL_NAME"), os.Getenv("BITBUCKET_BUILD_NUMBER")) + tags[constants.CIProviderName] = "bitbucket" + tags[constants.GitRepositoryURL] = firstEnv("BITBUCKET_GIT_SSH_ORIGIN", "BITBUCKET_GIT_HTTP_ORIGIN") + tags[constants.GitCommitSHA] = os.Getenv("BITBUCKET_COMMIT") + tags[constants.GitBranch] = os.Getenv("BITBUCKET_BRANCH") + tags[constants.GitTag] = os.Getenv("BITBUCKET_TAG") + tags[constants.CIWorkspacePath] = os.Getenv("BITBUCKET_CLONE_DIR") + tags[constants.CIPipelineID] = strings.Trim(os.Getenv("BITBUCKET_PIPELINE_UUID"), "{}") + tags[constants.CIPipelineNumber] = os.Getenv("BITBUCKET_BUILD_NUMBER") + tags[constants.CIPipelineName] = os.Getenv("BITBUCKET_REPO_FULL_NAME") + tags[constants.CIPipelineURL] = url + tags[constants.CIJobURL] = url + return tags +} + +// extractBuddy extracts CI information specific to Buddy. +func extractBuddy() map[string]string { + tags := map[string]string{} + tags[constants.CIProviderName] = "buddy" + tags[constants.CIPipelineID] = fmt.Sprintf("%s/%s", os.Getenv("BUDDY_PIPELINE_ID"), os.Getenv("BUDDY_EXECUTION_ID")) + tags[constants.CIPipelineName] = os.Getenv("BUDDY_PIPELINE_NAME") + tags[constants.CIPipelineNumber] = os.Getenv("BUDDY_EXECUTION_ID") + tags[constants.CIPipelineURL] = os.Getenv("BUDDY_EXECUTION_URL") + tags[constants.GitCommitSHA] = os.Getenv("BUDDY_EXECUTION_REVISION") + tags[constants.GitRepositoryURL] = os.Getenv("BUDDY_SCM_URL") + tags[constants.GitBranch] = os.Getenv("BUDDY_EXECUTION_BRANCH") + tags[constants.GitTag] = os.Getenv("BUDDY_EXECUTION_TAG") + tags[constants.GitCommitMessage] = os.Getenv("BUDDY_EXECUTION_REVISION_MESSAGE") + tags[constants.GitCommitCommitterName] = os.Getenv("BUDDY_EXECUTION_REVISION_COMMITTER_NAME") + tags[constants.GitCommitCommitterEmail] = os.Getenv("BUDDY_EXECUTION_REVISION_COMMITTER_EMAIL") + return tags +} + +// extractBuildkite extracts CI information specific to Buildkite. 
+func extractBuildkite() map[string]string { + tags := map[string]string{} + tags[constants.GitBranch] = os.Getenv("BUILDKITE_BRANCH") + tags[constants.GitCommitSHA] = os.Getenv("BUILDKITE_COMMIT") + tags[constants.GitRepositoryURL] = os.Getenv("BUILDKITE_REPO") + tags[constants.GitTag] = os.Getenv("BUILDKITE_TAG") + tags[constants.CIPipelineID] = os.Getenv("BUILDKITE_BUILD_ID") + tags[constants.CIPipelineName] = os.Getenv("BUILDKITE_PIPELINE_SLUG") + tags[constants.CIPipelineNumber] = os.Getenv("BUILDKITE_BUILD_NUMBER") + tags[constants.CIPipelineURL] = os.Getenv("BUILDKITE_BUILD_URL") + tags[constants.CIJobURL] = fmt.Sprintf("%s#%s", os.Getenv("BUILDKITE_BUILD_URL"), os.Getenv("BUILDKITE_JOB_ID")) + tags[constants.CIProviderName] = "buildkite" + tags[constants.CIWorkspacePath] = os.Getenv("BUILDKITE_BUILD_CHECKOUT_PATH") + tags[constants.GitCommitMessage] = os.Getenv("BUILDKITE_MESSAGE") + tags[constants.GitCommitAuthorName] = os.Getenv("BUILDKITE_BUILD_AUTHOR") + tags[constants.GitCommitAuthorEmail] = os.Getenv("BUILDKITE_BUILD_AUTHOR_EMAIL") + tags[constants.CINodeName] = os.Getenv("BUILDKITE_AGENT_ID") + + jsonString, err := getEnvVarsJSON("BUILDKITE_BUILD_ID", "BUILDKITE_JOB_ID") + if err == nil { + tags[constants.CIEnvVars] = string(jsonString) + } + + var extraTags []string + envVars := os.Environ() + for _, envVar := range envVars { + if strings.HasPrefix(envVar, "BUILDKITE_AGENT_META_DATA_") { + envVarAsTag := envVar + envVarAsTag = strings.TrimPrefix(envVarAsTag, "BUILDKITE_AGENT_META_DATA_") + envVarAsTag = strings.ToLower(envVarAsTag) + envVarAsTag = strings.Replace(envVarAsTag, "=", ":", 1) + extraTags = append(extraTags, envVarAsTag) + } + } + + if len(extraTags) != 0 { + // HACK: Sorting isn't actually needed, but it simplifies testing if the order is consistent + sort.Sort(sort.Reverse(sort.StringSlice(extraTags))) + jsonString, err = json.Marshal(extraTags) + if err == nil { + tags[constants.CINodeLabels] = string(jsonString) + } + } + + return tags +} + +// extractCircleCI extracts CI information specific to CircleCI. +func extractCircleCI() map[string]string { + tags := map[string]string{} + tags[constants.CIProviderName] = "circleci" + tags[constants.GitRepositoryURL] = os.Getenv("CIRCLE_REPOSITORY_URL") + tags[constants.GitCommitSHA] = os.Getenv("CIRCLE_SHA1") + tags[constants.GitTag] = os.Getenv("CIRCLE_TAG") + tags[constants.GitBranch] = os.Getenv("CIRCLE_BRANCH") + tags[constants.CIWorkspacePath] = os.Getenv("CIRCLE_WORKING_DIRECTORY") + tags[constants.CIPipelineID] = os.Getenv("CIRCLE_WORKFLOW_ID") + tags[constants.CIPipelineName] = os.Getenv("CIRCLE_PROJECT_REPONAME") + tags[constants.CIPipelineNumber] = os.Getenv("CIRCLE_BUILD_NUM") + tags[constants.CIPipelineURL] = fmt.Sprintf("https://app.circleci.com/pipelines/workflows/%s", os.Getenv("CIRCLE_WORKFLOW_ID")) + tags[constants.CIJobName] = os.Getenv("CIRCLE_JOB") + tags[constants.CIJobURL] = os.Getenv("CIRCLE_BUILD_URL") + + jsonString, err := getEnvVarsJSON("CIRCLE_BUILD_NUM", "CIRCLE_WORKFLOW_ID") + if err == nil { + tags[constants.CIEnvVars] = string(jsonString) + } + + return tags +} + +// extractGithubActions extracts CI information specific to GitHub Actions. 
+func extractGithubActions() map[string]string { + tags := map[string]string{} + branchOrTag := firstEnv("GITHUB_HEAD_REF", "GITHUB_REF") + tag := "" + branch := "" + if strings.Contains(branchOrTag, "tags/") { + tag = branchOrTag + } else { + branch = branchOrTag + } + + serverURL := os.Getenv("GITHUB_SERVER_URL") + if serverURL == "" { + serverURL = "https://github.com" + } + serverURL = strings.TrimSuffix(serverURL, "/") + + rawRepository := fmt.Sprintf("%s/%s", serverURL, os.Getenv("GITHUB_REPOSITORY")) + pipelineID := os.Getenv("GITHUB_RUN_ID") + commitSha := os.Getenv("GITHUB_SHA") + + tags[constants.CIProviderName] = "github" + tags[constants.GitRepositoryURL] = rawRepository + ".git" + tags[constants.GitCommitSHA] = commitSha + tags[constants.GitBranch] = branch + tags[constants.GitTag] = tag + tags[constants.CIWorkspacePath] = os.Getenv("GITHUB_WORKSPACE") + tags[constants.CIPipelineID] = pipelineID + tags[constants.CIPipelineNumber] = os.Getenv("GITHUB_RUN_NUMBER") + tags[constants.CIPipelineName] = os.Getenv("GITHUB_WORKFLOW") + tags[constants.CIJobURL] = fmt.Sprintf("%s/commit/%s/checks", rawRepository, commitSha) + tags[constants.CIJobName] = os.Getenv("GITHUB_JOB") + + attempts := os.Getenv("GITHUB_RUN_ATTEMPT") + if attempts == "" { + tags[constants.CIPipelineURL] = fmt.Sprintf("%s/actions/runs/%s", rawRepository, pipelineID) + } else { + tags[constants.CIPipelineURL] = fmt.Sprintf("%s/actions/runs/%s/attempts/%s", rawRepository, pipelineID, attempts) + } + + jsonString, err := getEnvVarsJSON("GITHUB_SERVER_URL", "GITHUB_REPOSITORY", "GITHUB_RUN_ID", "GITHUB_RUN_ATTEMPT") + if err == nil { + tags[constants.CIEnvVars] = string(jsonString) + } + + return tags +} + +// extractGitlab extracts CI information specific to GitLab. +func extractGitlab() map[string]string { + tags := map[string]string{} + url := os.Getenv("CI_PIPELINE_URL") + + tags[constants.CIProviderName] = "gitlab" + tags[constants.GitRepositoryURL] = os.Getenv("CI_REPOSITORY_URL") + tags[constants.GitCommitSHA] = os.Getenv("CI_COMMIT_SHA") + tags[constants.GitBranch] = firstEnv("CI_COMMIT_BRANCH", "CI_COMMIT_REF_NAME") + tags[constants.GitTag] = os.Getenv("CI_COMMIT_TAG") + tags[constants.CIWorkspacePath] = os.Getenv("CI_PROJECT_DIR") + tags[constants.CIPipelineID] = os.Getenv("CI_PIPELINE_ID") + tags[constants.CIPipelineName] = os.Getenv("CI_PROJECT_PATH") + tags[constants.CIPipelineNumber] = os.Getenv("CI_PIPELINE_IID") + tags[constants.CIPipelineURL] = url + tags[constants.CIJobURL] = os.Getenv("CI_JOB_URL") + tags[constants.CIJobName] = os.Getenv("CI_JOB_NAME") + tags[constants.CIStageName] = os.Getenv("CI_JOB_STAGE") + tags[constants.GitCommitMessage] = os.Getenv("CI_COMMIT_MESSAGE") + tags[constants.CINodeName] = os.Getenv("CI_RUNNER_ID") + tags[constants.CINodeLabels] = os.Getenv("CI_RUNNER_TAGS") + + author := os.Getenv("CI_COMMIT_AUTHOR") + authorArray := strings.FieldsFunc(author, func(s rune) bool { + return s == '<' || s == '>' + }) + tags[constants.GitCommitAuthorName] = strings.TrimSpace(authorArray[0]) + tags[constants.GitCommitAuthorEmail] = strings.TrimSpace(authorArray[1]) + tags[constants.GitCommitAuthorDate] = os.Getenv("CI_COMMIT_TIMESTAMP") + + jsonString, err := getEnvVarsJSON("CI_PROJECT_URL", "CI_PIPELINE_ID", "CI_JOB_ID") + if err == nil { + tags[constants.CIEnvVars] = string(jsonString) + } + + return tags +} + +// extractJenkins extracts CI information specific to Jenkins. 
+func extractJenkins() map[string]string { + tags := map[string]string{} + tags[constants.CIProviderName] = "jenkins" + tags[constants.GitRepositoryURL] = firstEnv("GIT_URL", "GIT_URL_1") + tags[constants.GitCommitSHA] = os.Getenv("GIT_COMMIT") + + branchOrTag := os.Getenv("GIT_BRANCH") + empty := []byte("") + name, hasName := os.LookupEnv("JOB_NAME") + + if strings.Contains(branchOrTag, "tags/") { + tags[constants.GitTag] = branchOrTag + } else { + tags[constants.GitBranch] = branchOrTag + // remove branch for job name + removeBranch := regexp.MustCompile(fmt.Sprintf("/%s", normalizeRef(branchOrTag))) + name = string(removeBranch.ReplaceAll([]byte(name), empty)) + } + + if hasName { + removeVars := regexp.MustCompile("/[^/]+=[^/]*") + name = string(removeVars.ReplaceAll([]byte(name), empty)) + } + + tags[constants.CIWorkspacePath] = os.Getenv("WORKSPACE") + tags[constants.CIPipelineID] = os.Getenv("BUILD_TAG") + tags[constants.CIPipelineNumber] = os.Getenv("BUILD_NUMBER") + tags[constants.CIPipelineName] = name + tags[constants.CIPipelineURL] = os.Getenv("BUILD_URL") + tags[constants.CINodeName] = os.Getenv("NODE_NAME") + + jsonString, err := getEnvVarsJSON("DD_CUSTOM_TRACE_ID") + if err == nil { + tags[constants.CIEnvVars] = string(jsonString) + } + + nodeLabels := os.Getenv("NODE_LABELS") + if len(nodeLabels) > 0 { + labelsArray := strings.Split(nodeLabels, " ") + jsonString, err := json.Marshal(labelsArray) + if err == nil { + tags[constants.CINodeLabels] = string(jsonString) + } + } + + return tags +} + +// extractTeamcity extracts CI information specific to TeamCity. +func extractTeamcity() map[string]string { + tags := map[string]string{} + tags[constants.CIProviderName] = "teamcity" + tags[constants.CIJobURL] = os.Getenv("BUILD_URL") + tags[constants.CIJobName] = os.Getenv("TEAMCITY_BUILDCONF_NAME") + return tags +} + +// extractCodefresh extracts CI information specific to Codefresh. +func extractCodefresh() map[string]string { + tags := map[string]string{} + tags[constants.CIProviderName] = "codefresh" + tags[constants.CIPipelineID] = os.Getenv("CF_BUILD_ID") + tags[constants.CIPipelineName] = os.Getenv("CF_PIPELINE_NAME") + tags[constants.CIPipelineURL] = os.Getenv("CF_BUILD_URL") + tags[constants.CIJobName] = os.Getenv("CF_STEP_NAME") + + jsonString, err := getEnvVarsJSON("CF_BUILD_ID") + if err == nil { + tags[constants.CIEnvVars] = string(jsonString) + } + + cfBranch := os.Getenv("CF_BRANCH") + isTag := strings.Contains(cfBranch, "tags/") + var refKey string + if isTag { + refKey = constants.GitTag + } else { + refKey = constants.GitBranch + } + tags[refKey] = normalizeRef(cfBranch) + + return tags +} + +// extractTravis extracts CI information specific to Travis CI. 
+func extractTravis() map[string]string { + tags := map[string]string{} + prSlug := os.Getenv("TRAVIS_PULL_REQUEST_SLUG") + repoSlug := prSlug + if strings.TrimSpace(repoSlug) == "" { + repoSlug = os.Getenv("TRAVIS_REPO_SLUG") + } + tags[constants.CIProviderName] = "travisci" + tags[constants.GitRepositoryURL] = fmt.Sprintf("https://github.com/%s.git", repoSlug) + tags[constants.GitCommitSHA] = os.Getenv("TRAVIS_COMMIT") + tags[constants.GitTag] = os.Getenv("TRAVIS_TAG") + tags[constants.GitBranch] = firstEnv("TRAVIS_PULL_REQUEST_BRANCH", "TRAVIS_BRANCH") + tags[constants.CIWorkspacePath] = os.Getenv("TRAVIS_BUILD_DIR") + tags[constants.CIPipelineID] = os.Getenv("TRAVIS_BUILD_ID") + tags[constants.CIPipelineNumber] = os.Getenv("TRAVIS_BUILD_NUMBER") + tags[constants.CIPipelineName] = repoSlug + tags[constants.CIPipelineURL] = os.Getenv("TRAVIS_BUILD_WEB_URL") + tags[constants.CIJobURL] = os.Getenv("TRAVIS_JOB_WEB_URL") + tags[constants.GitCommitMessage] = os.Getenv("TRAVIS_COMMIT_MESSAGE") + return tags +} + +// extractAwsCodePipeline extracts CI information specific to AWS CodePipeline. +func extractAwsCodePipeline() map[string]string { + tags := map[string]string{} + + if !strings.HasPrefix(os.Getenv("CODEBUILD_INITIATOR"), "codepipeline") { + // CODEBUILD_INITIATOR is defined but this is not a codepipeline build + return tags + } + + tags[constants.CIProviderName] = "awscodepipeline" + tags[constants.CIPipelineID] = os.Getenv("DD_PIPELINE_EXECUTION_ID") + + jsonString, err := getEnvVarsJSON("CODEBUILD_BUILD_ARN", "DD_ACTION_EXECUTION_ID", "DD_PIPELINE_EXECUTION_ID") + if err == nil { + tags[constants.CIEnvVars] = string(jsonString) + } + + return tags +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/codeowners.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/codeowners.go new file mode 100644 index 00000000..06749451 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/codeowners.go @@ -0,0 +1,307 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package utils + +import ( + "bufio" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants" + + logger "gopkg.in/DataDog/dd-trace-go.v1/internal/log" +) + +// This is a port of https://github.com/DataDog/dd-trace-dotnet/blob/v2.53.0/tracer/src/Datadog.Trace/Ci/CodeOwners.cs + +type ( + // CodeOwners represents a structured data type that holds sections of code owners. + // Each section maps to a slice of entries, where each entry includes a pattern and a list of owners. + CodeOwners struct { + Sections []*Section + } + + // Section represents a block of structured data of multiple entries in a single section + Section struct { + Name string + Entries []Entry + } + + // Entry represents a single entry in a CODEOWNERS file. + // It includes the pattern for matching files, the list of owners, and the section to which it belongs. + Entry struct { + Pattern string + Owners []string + Section string + } +) + +var ( + // codeowners holds the parsed CODEOWNERS file data. + codeowners *CodeOwners + codeownersMutex sync.Mutex +) + +// GetCodeOwners retrieves and caches the CODEOWNERS data. +// It looks for the CODEOWNERS file in various standard locations within the CI workspace. 
+// This function is thread-safe due to the use of a mutex. +// +// Returns: +// +// A pointer to a CodeOwners struct containing the parsed CODEOWNERS data, or nil if not found. +func GetCodeOwners() *CodeOwners { + codeownersMutex.Lock() + defer codeownersMutex.Unlock() + + if codeowners != nil { + return codeowners + } + + tags := GetCITags() + if v, ok := tags[constants.CIWorkspacePath]; ok { + paths := []string{ + filepath.Join(v, "CODEOWNERS"), + filepath.Join(v, ".github", "CODEOWNERS"), + filepath.Join(v, ".gitlab", "CODEOWNERS"), + filepath.Join(v, ".docs", "CODEOWNERS"), + } + for _, path := range paths { + if _, err := os.Stat(path); err == nil { + codeowners, err = NewCodeOwners(path) + if err == nil { + if logger.DebugEnabled() { + logger.Debug("civisibility: codeowner file '%v' was loaded successfully.", path) + } + return codeowners + } + logger.Debug("Error parsing codeowners: %s", err) + } + } + } + + return nil +} + +// NewCodeOwners creates a new instance of CodeOwners by parsing a CODEOWNERS file located at the given filePath. +// It returns an error if the file cannot be read or parsed properly. +func NewCodeOwners(filePath string) (*CodeOwners, error) { + if filePath == "" { + return nil, fmt.Errorf("filePath cannot be empty") + } + + file, err := os.Open(filePath) + if err != nil { + return nil, err + } + defer func() { + err = file.Close() + if err != nil && !errors.Is(os.ErrClosed, err) { + logger.Warn("Error closing codeowners file: %s", err.Error()) + } + }() + + var entriesList []Entry + var sectionsList []string + var currentSectionName string + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + if len(line) == 0 || line[0] == '#' { + continue + } + + // Identify section headers, which are lines enclosed in square brackets + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSectionName = line[1 : len(line)-1] + foundSectionName := findSectionIgnoreCase(sectionsList, currentSectionName) + if foundSectionName == "" { + sectionsList = append(sectionsList, currentSectionName) + } else { + currentSectionName = foundSectionName + } + continue + } + + finalLine := line + var ownersList []string + terms := strings.Fields(line) + for _, term := range terms { + if len(term) == 0 { + continue + } + + // Identify owners by their prefixes (either @ for usernames or containing @ for emails) + if term[0] == '@' || strings.Contains(term, "@") { + ownersList = append(ownersList, term) + pos := strings.Index(finalLine, term) + if pos > 0 { + finalLine = finalLine[:pos] + finalLine[pos+len(term):] + } + } + } + + finalLine = strings.TrimSpace(finalLine) + if len(finalLine) == 0 { + continue + } + + entriesList = append(entriesList, Entry{Pattern: finalLine, Owners: ownersList, Section: currentSectionName}) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + // Reverse the entries list to maintain the order of appearance in the file + for i, j := 0, len(entriesList)-1; i < j; i, j = i+1, j-1 { + entriesList[i], entriesList[j] = entriesList[j], entriesList[i] + } + + codeOwners := &CodeOwners{} + for _, entry := range entriesList { + var section *Section + for _, val := range codeOwners.Sections { + if val.Name == entry.Section { + section = val + break + } + } + + if section == nil { + section = &Section{Name: entry.Section, Entries: []Entry{}} + codeOwners.Sections = append(codeOwners.Sections, section) + } + + section.Entries = append(section.Entries, entry) + } + + return codeOwners, nil +} + +// 
findSectionIgnoreCase searches for a section name in a case-insensitive manner. +// It returns the section name if found, otherwise returns an empty string. +func findSectionIgnoreCase(sections []string, section string) string { + sectionLower := strings.ToLower(section) + for _, s := range sections { + if strings.ToLower(s) == sectionLower { + return s + } + } + return "" +} + +// GetSection gets the first Section entry in the CodeOwners that matches the section name. +// It returns a pointer to the matched entry, or nil if no match is found +func (co *CodeOwners) GetSection(section string) *Section { + for _, value := range co.Sections { + if value.Name == section { + return value + } + } + + return nil +} + +// Match finds the first entry in the CodeOwners that matches the given value. +// It returns a pointer to the matched entry, or nil if no match is found. +func (co *CodeOwners) Match(value string) (*Entry, bool) { + var matchedEntries []Entry + + for _, section := range co.Sections { + for _, entry := range section.Entries { + pattern := entry.Pattern + finalPattern := pattern + + var includeAnythingBefore, includeAnythingAfter bool + + if strings.HasPrefix(pattern, "/") { + includeAnythingBefore = false + } else { + if strings.HasPrefix(finalPattern, "*") { + finalPattern = finalPattern[1:] + } + includeAnythingBefore = true + } + + if strings.HasSuffix(pattern, "/") { + includeAnythingAfter = true + } else if strings.HasSuffix(pattern, "/*") { + includeAnythingAfter = true + finalPattern = finalPattern[:len(finalPattern)-1] + } else { + includeAnythingAfter = false + } + + if includeAnythingAfter { + found := includeAnythingBefore && strings.Contains(value, finalPattern) || strings.HasPrefix(value, finalPattern) + if !found { + continue + } + + if !strings.HasSuffix(pattern, "/*") { + matchedEntries = append(matchedEntries, entry) + break + } + + patternEnd := strings.Index(value, finalPattern) + if patternEnd != -1 { + patternEnd += len(finalPattern) + remainingString := value[patternEnd:] + if strings.Index(remainingString, "/") == -1 { + matchedEntries = append(matchedEntries, entry) + break + } + } + } else { + if includeAnythingBefore { + if strings.HasSuffix(value, finalPattern) { + matchedEntries = append(matchedEntries, entry) + break + } + } else if value == finalPattern { + matchedEntries = append(matchedEntries, entry) + break + } + } + } + } + + switch len(matchedEntries) { + case 0: + return nil, false + case 1: + return &matchedEntries[0], true + default: + patterns := make([]string, 0) + owners := make([]string, 0) + sections := make([]string, 0) + for _, entry := range matchedEntries { + patterns = append(patterns, entry.Pattern) + owners = append(owners, entry.Owners...) + sections = append(sections, entry.Section) + } + return &Entry{ + Pattern: strings.Join(patterns, " | "), + Owners: owners, + Section: strings.Join(sections, " | "), + }, true + } +} + +// GetOwnersString returns a formatted string of the owners list in an Entry. +// It returns an empty string if there are no owners. 
+func (e Entry) GetOwnersString() string { + if e.Owners == nil || len(e.Owners) == 0 { + return "" + } + + return "[\"" + strings.Join(e.Owners, "\",\"") + "\"]" +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/environmentTags.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/environmentTags.go new file mode 100644 index 00000000..8904596e --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/environmentTags.go @@ -0,0 +1,193 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package utils + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "runtime" + "strings" + "sync" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants" + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" + "gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo" +) + +var ( + // ciTags holds the CI/CD environment variable information. + ciTags map[string]string + ciTagsMutex sync.Mutex + + // ciMetrics holds the CI/CD environment numeric variable information + ciMetrics map[string]float64 + ciMetricsMutex sync.Mutex +) + +// GetCITags retrieves and caches the CI/CD tags from environment variables. +// It initializes the ciTags map if it is not already initialized. +// This function is thread-safe due to the use of a mutex. +// +// Returns: +// +// A map[string]string containing the CI/CD tags. +func GetCITags() map[string]string { + ciTagsMutex.Lock() + defer ciTagsMutex.Unlock() + + if ciTags == nil { + ciTags = createCITagsMap() + } + + return ciTags +} + +// GetCIMetrics retrieves and caches the CI/CD metrics from environment variables. +// It initializes the ciMetrics map if it is not already initialized. +// This function is thread-safe due to the use of a mutex. +// +// Returns: +// +// A map[string]float64 containing the CI/CD metrics. +func GetCIMetrics() map[string]float64 { + ciMetricsMutex.Lock() + defer ciMetricsMutex.Unlock() + + if ciMetrics == nil { + ciMetrics = createCIMetricsMap() + } + + return ciMetrics +} + +// GetRelativePathFromCITagsSourceRoot calculates the relative path from the CI workspace root to the specified path. +// If the CI workspace root is not available in the tags, it returns the original path. +// +// Parameters: +// +// path - The absolute or relative file path for which the relative path should be calculated. +// +// Returns: +// +// The relative path from the CI workspace root to the specified path, or the original path if an error occurs. +func GetRelativePathFromCITagsSourceRoot(path string) string { + tags := GetCITags() + if v, ok := tags[constants.CIWorkspacePath]; ok { + relPath, err := filepath.Rel(v, path) + if err == nil { + return filepath.ToSlash(relPath) + } + } + + return path +} + +// createCITagsMap creates a map of CI/CD tags by extracting information from environment variables and the local Git repository. +// It also adds OS and runtime information to the tags. +// +// Returns: +// +// A map[string]string containing the extracted CI/CD tags. 
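// Editorial note (not part of the vendored diff): createCITagsMap, defined below,
// derives the test session name with a simple precedence: the DD_TEST_SESSION_NAME
// environment variable wins, otherwise the CI job name joined with the test command,
// otherwise the test command alone. A tiny standalone restatement of that precedence;
// function and variable names here are illustrative:
package main

import (
	"fmt"
	"os"
)

func sessionName(jobName, testCommand string) string {
	if v, ok := os.LookupEnv("DD_TEST_SESSION_NAME"); ok {
		return v
	}
	if jobName != "" {
		return jobName + "-" + testCommand
	}
	return testCommand
}

func main() {
	fmt.Println(sessionName("deploy-webhooks", "go test ./..."))
}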
+func createCITagsMap() map[string]string { + localTags := getProviderTags() + + // Populate runtime values + localTags[constants.OSPlatform] = runtime.GOOS + localTags[constants.OSVersion] = osinfo.OSVersion() + localTags[constants.OSArchitecture] = runtime.GOARCH + localTags[constants.RuntimeName] = runtime.Compiler + localTags[constants.RuntimeVersion] = runtime.Version() + log.Debug("civisibility: os platform: %v", runtime.GOOS) + log.Debug("civisibility: os architecture: %v", runtime.GOARCH) + log.Debug("civisibility: runtime version: %v", runtime.Version()) + + // Get command line test command + var cmd string + if len(os.Args) == 1 { + cmd = filepath.Base(os.Args[0]) + } else { + cmd = fmt.Sprintf("%s %s ", filepath.Base(os.Args[0]), strings.Join(os.Args[1:], " ")) + } + + // Filter out some parameters to make the command more stable. + cmd = regexp.MustCompile(`(?si)-test.gocoverdir=(.*)\s`).ReplaceAllString(cmd, "") + cmd = regexp.MustCompile(`(?si)-test.v=(.*)\s`).ReplaceAllString(cmd, "") + cmd = regexp.MustCompile(`(?si)-test.testlogfile=(.*)\s`).ReplaceAllString(cmd, "") + cmd = strings.TrimSpace(cmd) + localTags[constants.TestCommand] = cmd + log.Debug("civisibility: test command: %v", cmd) + + // Populate the test session name + if testSessionName, ok := os.LookupEnv(constants.CIVisibilityTestSessionNameEnvironmentVariable); ok { + localTags[constants.TestSessionName] = testSessionName + } else if jobName, ok := localTags[constants.CIJobName]; ok { + localTags[constants.TestSessionName] = fmt.Sprintf("%s-%s", jobName, cmd) + } else { + localTags[constants.TestSessionName] = cmd + } + log.Debug("civisibility: test session name: %v", localTags[constants.TestSessionName]) + + // Populate missing git data + gitData, _ := getLocalGitData() + + // Populate Git metadata from the local Git repository if not already present in localTags + if _, ok := localTags[constants.CIWorkspacePath]; !ok { + localTags[constants.CIWorkspacePath] = gitData.SourceRoot + } + if _, ok := localTags[constants.GitRepositoryURL]; !ok { + localTags[constants.GitRepositoryURL] = gitData.RepositoryURL + } + if _, ok := localTags[constants.GitCommitSHA]; !ok { + localTags[constants.GitCommitSHA] = gitData.CommitSha + } + if _, ok := localTags[constants.GitBranch]; !ok { + localTags[constants.GitBranch] = gitData.Branch + } + + // If the commit SHA matches, populate additional Git metadata + if localTags[constants.GitCommitSHA] == gitData.CommitSha { + if _, ok := localTags[constants.GitCommitAuthorDate]; !ok { + localTags[constants.GitCommitAuthorDate] = gitData.AuthorDate.String() + } + if _, ok := localTags[constants.GitCommitAuthorName]; !ok { + localTags[constants.GitCommitAuthorName] = gitData.AuthorName + } + if _, ok := localTags[constants.GitCommitAuthorEmail]; !ok { + localTags[constants.GitCommitAuthorEmail] = gitData.AuthorEmail + } + if _, ok := localTags[constants.GitCommitCommitterDate]; !ok { + localTags[constants.GitCommitCommitterDate] = gitData.CommitterDate.String() + } + if _, ok := localTags[constants.GitCommitCommitterName]; !ok { + localTags[constants.GitCommitCommitterName] = gitData.CommitterName + } + if _, ok := localTags[constants.GitCommitCommitterEmail]; !ok { + localTags[constants.GitCommitCommitterEmail] = gitData.CommitterEmail + } + if _, ok := localTags[constants.GitCommitMessage]; !ok { + localTags[constants.GitCommitMessage] = gitData.CommitMessage + } + } + + log.Debug("civisibility: workspace directory: %v", localTags[constants.CIWorkspacePath]) + 
log.Debug("civisibility: common tags created with %v items", len(localTags)) + return localTags +} + +// createCIMetricsMap creates a map of CI/CD tags by extracting information from environment variables and runtime information. +// +// Returns: +// +// A map[string]float64 containing the metrics extracted +func createCIMetricsMap() map[string]float64 { + localMetrics := make(map[string]float64) + localMetrics[constants.LogicalCPUCores] = float64(runtime.NumCPU()) + + log.Debug("civisibility: common metrics created with %v items", len(localMetrics)) + return localMetrics +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/git.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/git.go new file mode 100644 index 00000000..fb62a375 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/git.go @@ -0,0 +1,104 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package utils + +import ( + "errors" + "os/exec" + "regexp" + "strconv" + "strings" + "time" +) + +// localGitData holds various pieces of information about the local Git repository, +// including the source root, repository URL, branch, commit SHA, author and committer details, and commit message. +type localGitData struct { + SourceRoot string + RepositoryURL string + Branch string + CommitSha string + AuthorDate time.Time + AuthorName string + AuthorEmail string + CommitterDate time.Time + CommitterName string + CommitterEmail string + CommitMessage string +} + +// regexpSensitiveInfo is a regular expression used to match and filter out sensitive information from URLs. +var regexpSensitiveInfo = regexp.MustCompile("(https?://|ssh?://)[^/]*@") + +// getLocalGitData retrieves information about the local Git repository from the current HEAD. +// It gathers details such as the repository URL, current branch, latest commit SHA, author and committer details, and commit message. +// +// Returns: +// +// A localGitData struct populated with the retrieved Git data. +// An error if any Git command fails or the retrieved data is incomplete. 
+func getLocalGitData() (localGitData, error) { + gitData := localGitData{} + + // Extract the absolute path to the Git directory + out, err := exec.Command("git", "rev-parse", "--absolute-git-dir").Output() + if err == nil { + gitData.SourceRoot = strings.ReplaceAll(strings.Trim(string(out), "\n"), ".git", "") + } + + // Extract the repository URL + out, err = exec.Command("git", "ls-remote", "--get-url").Output() + if err == nil { + gitData.RepositoryURL = filterSensitiveInfo(strings.Trim(string(out), "\n")) + } + + // Extract the current branch name + out, err = exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD").Output() + if err == nil { + gitData.Branch = strings.Trim(string(out), "\n") + } + + // Get commit details from the latest commit using git log (git log -1 --pretty='%H","%aI","%an","%ae","%cI","%cn","%ce","%B') + out, err = exec.Command("git", "log", "-1", "--pretty=%H\",\"%at\",\"%an\",\"%ae\",\"%ct\",\"%cn\",\"%ce\",\"%B").Output() + if err != nil { + return gitData, err + } + + // Split the output into individual components + outArray := strings.Split(string(out), "\",\"") + if len(outArray) < 8 { + return gitData, errors.New("git log failed") + } + + // Parse author and committer dates from Unix timestamp + authorUnixDate, _ := strconv.ParseInt(outArray[1], 10, 64) + committerUnixDate, _ := strconv.ParseInt(outArray[4], 10, 64) + + // Populate the localGitData struct with the parsed information + gitData.CommitSha = outArray[0] + gitData.AuthorDate = time.Unix(authorUnixDate, 0) + gitData.AuthorName = outArray[2] + gitData.AuthorEmail = outArray[3] + gitData.CommitterDate = time.Unix(committerUnixDate, 0) + gitData.CommitterName = outArray[5] + gitData.CommitterEmail = outArray[6] + gitData.CommitMessage = strings.Trim(outArray[7], "\n") + return gitData, nil +} + +// filterSensitiveInfo removes sensitive information from a given URL using a regular expression. +// It replaces the user credentials part of the URL (if present) with an empty string. +// +// Parameters: +// +// url - The URL string from which sensitive information should be filtered out. +// +// Returns: +// +// The sanitized URL string with sensitive information removed. +func filterSensitiveInfo(url string) string { + return string(regexpSensitiveInfo.ReplaceAll([]byte(url), []byte("$1"))[:]) +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/home.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/home.go new file mode 100644 index 00000000..fd2000c8 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/home.go @@ -0,0 +1,126 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. + +package utils + +import ( + "bytes" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + + "gopkg.in/DataDog/dd-trace-go.v1/internal/log" +) + +// This code is based on: https://github.com/mitchellh/go-homedir/blob/v1.1.0/homedir.go (MIT License) + +// ExpandPath expands a file path that starts with '~' to the user's home directory. +// If the path does not start with '~', it is returned unchanged. +// +// Parameters: +// +// path - The file path to be expanded. +// +// Returns: +// +// The expanded file path, with '~' replaced by the user's home directory, if applicable. 
+func ExpandPath(path string) string { + if len(path) == 0 || path[0] != '~' { + return path + } + + // If the second character is not '/' or '\', return the path unchanged + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return path + } + + homeFolder := getHomeDir() + if len(homeFolder) > 0 { + return filepath.Join(homeFolder, path[1:]) + } + + return path +} + +// getHomeDir returns the home directory of the current user. +// The method used to determine the home directory depends on the operating system. +// +// On Windows, it prefers the HOME environment variable, then USERPROFILE, and finally combines HOMEDRIVE and HOMEPATH. +// On Unix-like systems, it prefers the HOME environment variable, and falls back to various shell commands +// to determine the home directory if necessary. +// +// Returns: +// +// The home directory of the current user. +func getHomeDir() (homeDir string) { + defer func() { + log.Debug("civisibility: home directory: %v", homeDir) + }() + + if runtime.GOOS == "windows" { + if home := os.Getenv("HOME"); home != "" { + // First prefer the HOME environment variable + return home + } + if userProfile := os.Getenv("USERPROFILE"); userProfile != "" { + // Prefer the USERPROFILE environment variable + return userProfile + } + + homeDrive := os.Getenv("HOMEDRIVE") + homePath := os.Getenv("HOMEPATH") + return homeDrive + homePath + } + + homeEnv := "HOME" + if runtime.GOOS == "plan9" { + // On plan9, environment variables are lowercase. + homeEnv = "home" + } + + if home := os.Getenv(homeEnv); home != "" { + // Prefer the HOME environment variable + return home + } + + var stdout bytes.Buffer + if runtime.GOOS == "darwin" { + // On macOS, use dscl to read the NFSHomeDirectory + cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + result := strings.TrimSpace(stdout.String()) + if result != "" { + return result + } + } + } else { + // On other Unix-like systems, use getent to read the passwd entry for the current user + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // The passwd entry is in the format: username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5] + } + } + } + } + + // If all else fails, use the shell to determine the home directory + stdout.Reset() + cmd := exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + return strings.TrimSpace(stdout.String()) + } + + return "" +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/names.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/names.go new file mode 100644 index 00000000..94eacdb1 --- /dev/null +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/names.go @@ -0,0 +1,93 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024 Datadog, Inc. 
+
+package utils
+
+import (
+	"bytes"
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"slices"
+	"strings"
+)
+
+var (
+	// ignoredFunctionsFromStackTrace lists functions we want to ignore in the final stacktrace (because they don't add anything useful)
+	ignoredFunctionsFromStackTrace = []string{"runtime.gopanic", "runtime.panicmem", "runtime.sigpanic"}
+)
+
+// GetModuleAndSuiteName extracts the module name and suite name from a given program counter (pc).
+// This function utilizes runtime.FuncForPC to retrieve the full function name associated with the
+// program counter, then splits the string to separate the package name from the function name.
+//
+// Example 1:
+//
+//	Input: github.com/DataDog/dd-sdk-go-testing.TestRun
+//	Output:
+//	  module: github.com/DataDog/dd-sdk-go-testing
+//	  suite: testing_test.go
+//
+// Example 2:
+//
+//	Input: github.com/DataDog/dd-sdk-go-testing.TestRun.func1
+//	Output:
+//	  module: github.com/DataDog/dd-sdk-go-testing
+//	  suite: testing_test.go
+//
+// Parameters:
+//
+//	pc - The program counter for which the module and suite name should be retrieved.
+//
+// Returns:
+//
+//	module - The module name extracted from the full function name.
+//	suite - The base name of the file where the function is located.
+func GetModuleAndSuiteName(pc uintptr) (module string, suite string) {
+	funcValue := runtime.FuncForPC(pc)
+	funcFullName := funcValue.Name()
+	lastSlash := strings.LastIndexByte(funcFullName, '/')
+	if lastSlash < 0 {
+		lastSlash = 0
+	}
+	firstDot := strings.IndexByte(funcFullName[lastSlash:], '.') + lastSlash
+	file, _ := funcValue.FileLine(funcValue.Entry())
+	return funcFullName[:firstDot], filepath.Base(file)
+}
+
+// GetStacktrace retrieves the current stack trace, skipping a specified number of frames.
+//
+// This function captures the stack trace of the current goroutine, formats it, and returns it as a string.
+// It uses runtime.Callers to capture the program counters of the stack frames and runtime.CallersFrames
+// to convert these program counters into readable frames. The stack trace is formatted to include the function
+// name, file name, and line number of each frame.
+//
+// Parameters:
+//
+//	skip - The number of stack frames to skip before capturing the stack trace.
+//
+// Returns:
+//
+//	A string representation of the current stack trace, with each frame on a new line.
+func GetStacktrace(skip int) string { + pcs := make([]uintptr, 256) + total := runtime.Callers(skip+2, pcs) + frames := runtime.CallersFrames(pcs[:total]) + buffer := new(bytes.Buffer) + for { + if frame, ok := frames.Next(); ok { + // let's check if we need to ignore this frame + if slices.Contains(ignoredFunctionsFromStackTrace, frame.Function) { + continue + } + // writing frame to the buffer + _, _ = fmt.Fprintf(buffer, "%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line) + } else { + break + } + + } + return buffer.String() +} diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/processor.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/processor.go index 654c72db..10a8b8af 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/processor.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams/processor.go @@ -340,7 +340,11 @@ func (p *Processor) Start() { } p.stop = make(chan struct{}) p.flushRequest = make(chan chan<- struct{}) - go p.reportStats() + p.wg.Add(1) + go func() { + defer p.wg.Done() + p.reportStats() + }() p.wg.Add(1) go func() { defer p.wg.Done() @@ -372,7 +376,14 @@ func (p *Processor) Stop() { } func (p *Processor) reportStats() { - for range time.NewTicker(time.Second * 10).C { + tick := time.NewTicker(time.Second * 10) + defer tick.Stop() + for { + select { + case <-p.stop: + return + case <-tick.C: + } p.statsd.Count("datadog.datastreams.processor.payloads_in", atomic.SwapInt64(&p.stats.payloadsIn, 0), nil, 1) p.statsd.Count("datadog.datastreams.processor.flushed_payloads", atomic.SwapInt64(&p.stats.flushedPayloads, 0), nil, 1) p.statsd.Count("datadog.datastreams.processor.flushed_buckets", atomic.SwapInt64(&p.stats.flushedBuckets, 0), nil, 1) diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig/globalconfig.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig/globalconfig.go index 35626580..a36f5003 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig/globalconfig.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig/globalconfig.go @@ -80,7 +80,10 @@ func SetDogstatsdAddr(addr string) { func StatsTags() []string { cfg.mu.RLock() defer cfg.mu.RUnlock() - return cfg.statsTags + // Copy the slice before returning it, so that callers cannot pollute the underlying array + tags := make([]string, len(cfg.statsTags)) + copy(tags, cfg.statsTags) + return tags } // SetStatsTags configures the list of tags that should be applied to contribs' statsd.Client as global tags @@ -88,7 +91,10 @@ func StatsTags() []string { func SetStatsTags(tags []string) { cfg.mu.Lock() defer cfg.mu.Unlock() - cfg.statsTags = tags + // Copy the slice before setting it, so that any changes to the slice provided to SetStatsTags does not pollute the underlying array of statsTags + statsTags := make([]string, len(tags)) + copy(statsTags, tags) + cfg.statsTags = statsTags } // RuntimeID returns this process's unique runtime id. 
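Note on the reportStats hunk above: the old code ranged over an unstoppable ticker channel, so the goroutine (and its ticker) kept running after Stop. The new hunk gives the loop a stop channel and registers it with the processor's WaitGroup so Stop can wait for it to exit. A minimal, self-contained sketch of that shutdown pattern follows; it is illustrative only, with hypothetical names, not the vendored processor code.

package main

import (
	"fmt"
	"sync"
	"time"
)

type reporter struct {
	stop chan struct{}
	wg   sync.WaitGroup
}

func (r *reporter) Start() {
	r.stop = make(chan struct{})
	r.wg.Add(1)
	go func() {
		defer r.wg.Done()
		tick := time.NewTicker(100 * time.Millisecond) // the loop owns the ticker ...
		defer tick.Stop()                              // ... and stops it on exit
		for {
			select {
			case <-r.stop:
				return // Stop was called: exit instead of leaking
			case <-tick.C:
			}
			fmt.Println("flushing stats counters") // stand-in for the statsd.Count calls
		}
	}()
}

func (r *reporter) Stop() {
	close(r.stop)
	r.wg.Wait() // returns once the reporting goroutine has exited
}

func main() {
	r := &reporter{}
	r.Start()
	time.Sleep(250 * time.Millisecond)
	r.Stop()
}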
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig/remoteconfig.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig/remoteconfig.go
index 86b8f692..4076c2ad 100644
--- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig/remoteconfig.go
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig/remoteconfig.go
@@ -70,16 +70,48 @@ const (
 	APMTracingHTTPHeaderTags
 	// APMTracingCustomTags enables APM client to set custom tags on all spans
 	APMTracingCustomTags
-	// ASMRASPSSRF enables ASM support for runtime protection against SSRF attacks
-	ASMRASPSSRF = 23
-)
-
-// Additional capability bit index values that are non-consecutive from above.
-const (
+	// ASMProcessorOverrides adds support for processor overrides through the ASM RC Product
+	ASMProcessorOverrides
+	// ASMCustomDataScanners adds support for custom data scanners through the ASM RC Product
+	ASMCustomDataScanners
+	// ASMExclusionData adds support for configurable exclusion filter data from the ASM_DATA Product
+	ASMExclusionData
 	// APMTracingEnabled enables APM tracing
-	APMTracingEnabled Capability = 19
+	APMTracingEnabled
+	// APMTracingDataStreamsEnabled enables Data Streams Monitoring
+	APMTracingDataStreamsEnabled
+	// ASMRASPSQLI enables ASM support for runtime protection against SQL Injection attacks
+	ASMRASPSQLI
+	// ASMRASPLFI enables ASM support for runtime protection against Local File Inclusion attacks
+	ASMRASPLFI
+	// ASMRASPSSRF enables ASM support for runtime protection against SSRF attacks
+	ASMRASPSSRF
+	// ASMRASPSHI enables ASM support for runtime protection against Shell Injection attacks
+	ASMRASPSHI
+	// ASMRASPXXE enables ASM support for runtime protection against XXE attacks
+	ASMRASPXXE
+	// ASMRASPRCE enables ASM support for runtime protection against Remote Code Execution
+	ASMRASPRCE
+	// ASMRASPNOSQLI enables ASM support for runtime protection against NoSQL Injection attacks
+	ASMRASPNOSQLI
+	// ASMRASPXSS enables ASM support for runtime protection against Cross Site Scripting attacks
+	ASMRASPXSS
 	// APMTracingSampleRules represents the sampling rate using matching rules from APM client libraries
-	APMTracingSampleRules = 29
+	APMTracingSampleRules
+	// CSMActivation represents the capability to activate CSM through remote configuration
+	CSMActivation
+	// ASMAutoUserInstrumMode represents the capability to enable the automatic user instrumentation mode
+	ASMAutoUserInstrumMode
+	// ASMEndpointFingerprinting represents the capability to enable endpoint fingerprinting
+	ASMEndpointFingerprinting
+	// ASMSessionFingerprinting represents the capability to enable session fingerprinting
+	ASMSessionFingerprinting
+	// ASMNetworkFingerprinting represents the capability to enable network fingerprinting
+	ASMNetworkFingerprinting
+	// ASMHeaderFingerprinting represents the capability to enable header fingerprinting
+	ASMHeaderFingerprinting
+	// ASMTruncationRules adds support for payload truncation rules
+	ASMTruncationRules
 )
 
 // ErrClientNotStarted is returned when the remote config client is not started.
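With the explicit "= 19", "= 23" and "= 29" assignments removed, the capability list above reads as one consecutive block, so each new entry takes the next value automatically (assuming the const block is iota-driven, as the surrounding context suggests). A stand-alone sketch of that pattern follows; the names and values are hypothetical and are not the real remote-config capability indices.

package main

import "fmt"

// Capability illustrates a bit-index style enum; this is a sketch, not the vendored type.
type Capability uint32

const (
	capReserved Capability = iota // 0
	CapAlpha                      // 1
	CapBeta                       // 2
	CapGamma                      // 3: appending at the end never renumbers earlier entries
)

func main() {
	fmt.Println(CapAlpha, CapBeta, CapGamma) // prints: 1 2 3
}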
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace/event.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace/event.go index ac292f13..0b6ff818 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace/event.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace/event.go @@ -27,6 +27,8 @@ const ( ExploitEvent EventCategory = "exploit" ) +const SpanKey = "_dd.stack" + // Event is the toplevel structure to contain a stacktrace and the additional information needed to correlate it with other data type Event struct { // Category is a well-known type of the event, not optional @@ -82,30 +84,32 @@ func WithID(id string) Options { } } -// AddToSpan adds the event to the given span's root span as a tag if stacktrace collection is enabled -func AddToSpan(span ddtrace.Span, events ...*Event) { +// GetSpanValue returns the value to be set as a tag on a span for the given stacktrace events +func GetSpanValue(events ...*Event) any { if !Enabled() { - return + return nil } - // TODO(eliott.bouhana): switch to a map[EventCategory][]*Event type when the tinylib/msgp@1.1.10 is out - groupByCategory := make(map[string]any, 3) - + groupByCategory := make(map[string][]*Event, 3) for _, event := range events { if _, ok := groupByCategory[string(event.Category)]; !ok { groupByCategory[string(event.Category)] = []*Event{event} continue } - - groupByCategory[string(event.Category)] = append(groupByCategory[string(event.Category)].([]*Event), event) + groupByCategory[string(event.Category)] = append(groupByCategory[string(event.Category)], event) } + return internal.MetaStructValue{Value: groupByCategory} +} + +// AddToSpan adds the event to the given span's root span as a tag if stacktrace collection is enabled +func AddToSpan(span ddtrace.Span, events ...*Event) { + value := GetSpanValue(events...) type rooter interface { Root() ddtrace.Span } if lrs, ok := span.(rooter); ok { span = lrs.Root() } - - span.SetTag("_dd.stack", internal.MetaStructValue{Value: groupByCategory}) + span.SetTag(SpanKey, value) } diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace/stacktrace.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace/stacktrace.go index 9b9ec8c5..060aebe1 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace/stacktrace.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace/stacktrace.go @@ -33,6 +33,7 @@ var ( "github.com/DataDog/datadog-agent", "github.com/DataDog/appsec-internal-go", "github.com/datadog/orchestrion", + "github.com/DataDog/orchestrion", } ) diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/version/version.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/version/version.go index 3f19da4f..2d4a2161 100644 --- a/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/version/version.go +++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/version/version.go @@ -13,7 +13,7 @@ import ( // Tag specifies the current release tag. It needs to be manually // updated. A test checks that the value of Tag never points to a // git tag that is older than HEAD. -const Tag = "v1.67.0" +const Tag = "v1.69.0" // Dissected version number. 
Filled during init() var ( diff --git a/vendor/modules.txt b/vendor/modules.txt index 62427cfd..f9abaea4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,9 +1,9 @@ -# cloud.google.com/go v0.115.1 -## explicit; go 1.20 +# cloud.google.com/go v0.116.0 +## explicit; go 1.21 cloud.google.com/go/internal/detect cloud.google.com/go/internal/optional cloud.google.com/go/internal/pubsub -# cloud.google.com/go/auth v0.9.1 +# cloud.google.com/go/auth v0.9.9 ## explicit; go 1.21 cloud.google.com/go/auth cloud.google.com/go/auth/credentials @@ -15,6 +15,7 @@ cloud.google.com/go/auth/credentials/internal/stsexchange cloud.google.com/go/auth/grpctransport cloud.google.com/go/auth/httptransport cloud.google.com/go/auth/internal +cloud.google.com/go/auth/internal/compute cloud.google.com/go/auth/internal/credsfile cloud.google.com/go/auth/internal/jwt cloud.google.com/go/auth/internal/transport @@ -22,14 +23,14 @@ cloud.google.com/go/auth/internal/transport/cert # cloud.google.com/go/auth/oauth2adapt v0.2.4 ## explicit; go 1.20 cloud.google.com/go/auth/oauth2adapt -# cloud.google.com/go/compute/metadata v0.5.0 -## explicit; go 1.20 +# cloud.google.com/go/compute/metadata v0.5.2 +## explicit; go 1.21 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v1.1.13 -## explicit; go 1.20 +# cloud.google.com/go/iam v1.2.1 +## explicit; go 1.21 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb -# cloud.google.com/go/pubsub v1.42.0 +# cloud.google.com/go/pubsub v1.45.1 ## explicit; go 1.21 cloud.google.com/go/pubsub cloud.google.com/go/pubsub/apiv1 @@ -37,7 +38,7 @@ cloud.google.com/go/pubsub/apiv1/pubsubpb cloud.google.com/go/pubsub/internal cloud.google.com/go/pubsub/internal/distribution cloud.google.com/go/pubsub/internal/scheduler -# github.com/DataDog/appsec-internal-go v1.7.0 +# github.com/DataDog/appsec-internal-go v1.8.0 ## explicit; go 1.21 github.com/DataDog/appsec-internal-go/appsec github.com/DataDog/appsec-internal-go/httpsec @@ -47,13 +48,13 @@ github.com/DataDog/appsec-internal-go/netip # github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1 ## explicit; go 1.21 github.com/DataDog/datadog-agent/pkg/obfuscate -# github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1 -## explicit; go 1.21 +# github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 +## explicit; go 1.22.0 github.com/DataDog/datadog-agent/pkg/remoteconfig/state # github.com/DataDog/datadog-go/v5 v5.5.0 ## explicit; go 1.13 github.com/DataDog/datadog-go/v5/statsd -# github.com/DataDog/go-libddwaf/v3 v3.3.0 +# github.com/DataDog/go-libddwaf/v3 v3.4.0 ## explicit; go 1.21 github.com/DataDog/go-libddwaf/v3 github.com/DataDog/go-libddwaf/v3/errors @@ -170,7 +171,7 @@ github.com/google/s2a-go/stream # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.3.2 +# github.com/googleapis/enterprise-certificate-proxy v0.3.4 ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util @@ -181,6 +182,7 @@ github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto github.com/googleapis/gax-go/v2/callctx github.com/googleapis/gax-go/v2/internal +github.com/googleapis/gax-go/v2/iterator # github.com/gorilla/mux v1.8.1 ## explicit; go 1.20 github.com/gorilla/mux @@ -207,8 +209,8 @@ github.com/mitchellh/mapstructure github.com/outcaste-io/ristretto github.com/outcaste-io/ristretto/z 
github.com/outcaste-io/ristretto/z/simd -# github.com/philhofer/fwd v1.1.2 -## explicit; go 1.15 +# github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 +## explicit; go 1.20 github.com/philhofer/fwd # github.com/pkg/errors v0.9.1 ## explicit @@ -226,10 +228,10 @@ github.com/secure-systems-lab/go-securesystemslib/cjson ## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/require -# github.com/tinylib/msgp v1.1.9 -## explicit; go 1.18 +# github.com/tinylib/msgp v1.2.1 +## explicit; go 1.20 github.com/tinylib/msgp/msgp -# github.com/xanzy/go-gitlab v0.108.0 +# github.com/xanzy/go-gitlab v0.112.0 ## explicit; go 1.19 github.com/xanzy/go-gitlab # go.opencensus.io v0.24.0 @@ -251,16 +253,17 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.28.0 +# go.opentelemetry.io/otel v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -274,12 +277,12 @@ go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/metric v1.28.0 +# go.opentelemetry.io/otel/metric v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/trace v1.28.0 +# go.opentelemetry.io/otel/trace v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -300,7 +303,7 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# golang.org/x/crypto v0.26.0 +# golang.org/x/crypto v0.28.0 ## explicit; go 1.20 golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20poly1305 @@ -309,10 +312,10 @@ golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/hkdf golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 -# golang.org/x/mod v0.17.0 +# golang.org/x/mod v0.18.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.28.0 +# golang.org/x/net v0.30.0 ## explicit; go 1.18 golang.org/x/net/http/httpguts golang.org/x/net/http2 @@ -320,7 +323,7 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.22.0 +# golang.org/x/oauth2 v0.23.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -336,26 +339,26 @@ golang.org/x/oauth2/jwt ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.24.0 +# golang.org/x/sys v0.26.0 ## explicit; go 
1.18 golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.17.0 +# golang.org/x/text v0.19.0 ## explicit; go 1.18 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.6.0 +# golang.org/x/time v0.7.0 ## explicit; go 1.18 golang.org/x/time/rate # golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 ## explicit; go 1.18 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.195.0 +# google.golang.org/api v0.203.0 ## explicit; go 1.21 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -371,19 +374,19 @@ google.golang.org/api/transport google.golang.org/api/transport/grpc google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c +# google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 ## explicit; go 1.21 google.golang.org/genproto/googleapis/type/expr -# google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 +# google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 ## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c +# google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.65.0 +# google.golang.org/grpc v1.67.1 ## explicit; go 1.21 google.golang.org/grpc google.golang.org/grpc/attributes @@ -413,7 +416,9 @@ google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto +google.golang.org/grpc/experimental/stats google.golang.org/grpc/grpclog +google.golang.org/grpc/grpclog/internal google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff google.golang.org/grpc/internal/balancer/gracefulswitch @@ -436,12 +441,14 @@ google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig +google.golang.org/grpc/internal/stats google.golang.org/grpc/internal/status google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/internal/transport/networktype google.golang.org/grpc/internal/xds google.golang.org/grpc/keepalive +google.golang.org/grpc/mem google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/resolver @@ -451,8 +458,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.34.2 -## explicit; go 1.20 +# google.golang.org/protobuf v1.35.1 +## explicit; go 1.21 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire @@ -488,8 +495,8 @@ google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/emptypb google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/timestamppb -# gopkg.in/DataDog/dd-trace-go.v1 v1.67.0 -## explicit; 
go 1.21 +# gopkg.in/DataDog/dd-trace-go.v1 v1.69.0 +## explicit; go 1.22.0 gopkg.in/DataDog/dd-trace-go.v1/appsec/events gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httptrace @@ -504,19 +511,27 @@ gopkg.in/DataDog/dd-trace-go.v1/internal gopkg.in/DataDog/dd-trace-go.v1/internal/appsec gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec -gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/types gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/ossec -gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec -gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec/types +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/usersec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/graphqlsec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/grpcsec gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/ossec -gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sqlsec -gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace -gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/httptrace +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/trace +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/usersec +gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/waf gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants +gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils gopkg.in/DataDog/dd-trace-go.v1/internal/datastreams gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig gopkg.in/DataDog/dd-trace-go.v1/internal/hostname