diff --git a/go.mod b/go.mod index 7f3774608e266..5909de01e9da3 100644 --- a/go.mod +++ b/go.mod @@ -321,9 +321,6 @@ replace github.com/cloudflare/cloudflare-go => github.com/cyriltovena/cloudflare exclude k8s.io/client-go v8.0.0+incompatible -// grpc v1.46.0 removed "WithBalancerName()" API, still in use by weaveworks/commons. -replace google.golang.org/grpc => google.golang.org/grpc v1.45.0 - // Replace memberlist with our fork which includes some fixes that haven't been // merged upstream yet. replace github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe diff --git a/go.sum b/go.sum index 2aec656b73ce7..86f0120d9a49b 100644 --- a/go.sum +++ b/go.sum @@ -300,10 +300,16 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc= github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -409,7 +415,16 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.0.0-20180919002855-2137d9196328/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= 
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -585,6 +600,8 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -961,6 +978,7 @@ github.com/lstoll/grpce v1.7.0/go.mod h1:XiCWl3R+avNCT7KsTjv3qCblgsSqd0SC4ymySrH github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-validate v0.0.0-20180911180927-64fcb82c878e/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1526,6 +1544,8 @@ golang.org/x/exp v0.0.0-20221031165847-c99f073a8326/go.mod h1:CxIveKay+FTh1D0yPZ golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1555,6 +1575,7 @@ golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1671,6 +1692,7 @@ golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1809,9 +1831,11 @@ golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -2043,8 +2067,49 @@ google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljW google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo= google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/grpc v1.45.0 
h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= +google.golang.org/grpc v0.0.0-20180920234847-8997b5fa0873/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= 
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20210916203835-567da6b86340/go.mod h1:gID3PKrg7pWKntu9Ss6zTLJ0ttC0X9IHgREOCZwbCVU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2128,6 +2193,7 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= honnef.co/go/netdb v0.0.0-20150201073656-a416d700ae39/go.mod h1:rbNo0ST5hSazCG4rGfpHrwnwvzP1QX62WbhzD+ghGzs= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index bcc6f5451c90a..f4f9408f38529 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -27,6 +27,7 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" @@ -192,7 +193,7 @@ type BuildOptions struct { // server can ignore this field. Authority string // ChannelzParentID is the parent ClientConn's channelz ID. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. @@ -243,7 +244,7 @@ type DoneInfo struct { // ServerLoad is the load received from server. It's usually sent as part of // trailing metadata. // - // The only supported type now is *orca_v1.LoadReport. + // The only supported type now is *orca_v3.LoadReport. ServerLoad interface{} } @@ -370,56 +371,3 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") - -// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns -// and returns one aggregated connectivity state. -// -// It's not thread safe. -type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transient failure state. - numIdle uint64 // Number of addrConns in idle state. -} - -// RecordTransition records state change happening in subConn and based on that -// it evaluates what aggregated state should be. 
-// -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure; -// - Else if at least one SubConn is Idle, the aggregated state is Idle; -// - Else there are no subconns and the aggregated state is Transient Failure -// -// Shutdown is not considered. -func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal - case connectivity.Idle: - cse.numIdle += updateVal - } - } - - // Evaluate. - if cse.numReady > 0 { - return connectivity.Ready - } - if cse.numConnecting > 0 { - return connectivity.Connecting - } - if cse.numTransientFailure > 0 { - return connectivity.TransientFailure - } - if cse.numIdle > 0 { - return connectivity.Idle - } - return connectivity.TransientFailure -} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a67074a3ad06d..e8dfc828aaacb 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -45,6 +45,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, + state: connectivity.Connecting, } // Initialize picker to a picker that always returns // ErrNoSubConnAvailable, because when state of a SubConn changes, we @@ -134,6 +135,9 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) return nil } diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go new file mode 100644 index 0000000000000..a87b6809af386 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import "google.golang.org/grpc/connectivity" + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. 
+    numConnecting       uint64 // Number of addrConns in connecting state.
+    numTransientFailure uint64 // Number of addrConns in transient failure state.
+    numIdle             uint64 // Number of addrConns in idle state.
+}
+
+// RecordTransition records state change happening in subConn and based on that
+// it evaluates what aggregated state should be.
+//
+// - If at least one SubConn is Ready, the aggregated state is Ready;
+// - Else if at least one SubConn is Connecting, the aggregated state is Connecting;
+// - Else if at least one SubConn is Idle, the aggregated state is Idle;
+// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure.
+//
+// Shutdown is not considered.
+func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
+    // Update counters.
+    for idx, state := range []connectivity.State{oldState, newState} {
+        updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+        switch state {
+        case connectivity.Ready:
+            cse.numReady += updateVal
+        case connectivity.Connecting:
+            cse.numConnecting += updateVal
+        case connectivity.TransientFailure:
+            cse.numTransientFailure += updateVal
+        case connectivity.Idle:
+            cse.numIdle += updateVal
+        }
+    }
+
+    // Evaluate.
+    if cse.numReady > 0 {
+        return connectivity.Ready
+    }
+    if cse.numConnecting > 0 {
+        return connectivity.Connecting
+    }
+    if cse.numIdle > 0 {
+        return connectivity.Idle
+    }
+    return connectivity.TransientFailure
+}
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go
index cb4b3c203c514..cf1034830d580 100644
--- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go
@@ -1,3 +1,22 @@
+// Copyright 2015 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines the GRPCLB LoadBalancing protocol.
+//
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/lb/v1/load_balancer.proto
+
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go index 805bbbb789aeb..dab1959418e12 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -35,7 +35,6 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/channelz" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -240,9 +239,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { // Explicitly set pickfirst as the balancer. dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) - if channelz.IsOn() { - dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) - } + dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) // Enable Keepalive for grpclb client. dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 274eb2f85802e..f7031ad2251b3 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: grpcrand.Intn(len(scs)), + next: uint32(grpcrand.Intn(len(scs))), } } @@ -69,15 +69,13 @@ type rrPicker struct { // created. The slice is immutable. Each Get() will do a round robin // selection from it and return the selected SubConn. subConns []balancer.SubConn - - mu sync.Mutex - next int + next uint32 } func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - p.mu.Lock() - sc := p.subConns[p.next] - p.next = (p.next + 1) % len(p.subConns) - p.mu.Unlock() + subConnsLen := uint32(len(p.subConns)) + nextIndex := atomic.AddUint32(&p.next, 1) + + sc := p.subConns[nextIndex%subConnsLen] return balancer.PickResult{SubConn: sc}, nil } diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go index f15dddb56218e..d82b714e07012 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go @@ -26,12 +26,12 @@ import ( // Name is the name of weighted_round_robin balancer. const Name = "weighted_round_robin" -// attributeKey is the type used as the key to store AddrInfo in the Attributes -// field of resolver.Address. +// attributeKey is the type used as the key to store AddrInfo in the +// BalancerAttributes field of resolver.Address. 
type attributeKey struct{} -// AddrInfo will be stored inside Address metadata in order to use weighted -// roundrobin balancer. +// AddrInfo will be stored in the BalancerAttributes field of Address in order +// to use weighted roundrobin balancer. type AddrInfo struct { Weight uint32 } @@ -42,8 +42,8 @@ func (a AddrInfo) Equal(o interface{}) bool { return ok && oa.Weight == a.Weight } -// SetAddrInfo returns a copy of addr in which the Attributes field is updated -// with addrInfo. +// SetAddrInfo returns a copy of addr in which the BalancerAttributes field is +// updated with addrInfo. // // Experimental // @@ -54,7 +54,8 @@ func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { return addr } -// GetAddrInfo returns the AddrInfo stored in the Attributes fields of addr. +// GetAddrInfo returns the AddrInfo stored in the BalancerAttributes field of +// addr. // // Experimental // diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go index 7e1d106e9ff9e..38bd9b223f806 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -68,6 +68,11 @@ type Aggregator struct { // // If an ID is not in map, it's either removed or never added. idToPickerState map[string]*weightedPickerState + // Set when UpdateState call propagation is paused. + pauseUpdateState bool + // Set when UpdateState call propagation is paused and an UpdateState call + // is suppressed. + needUpdateStateOnResume bool } // New creates a new weighted balancer state aggregator. @@ -141,6 +146,27 @@ func (wbsa *Aggregator) UpdateWeight(id string, newWeight uint32) { pState.weight = newWeight } +// PauseStateUpdates causes UpdateState calls to not propagate to the parent +// ClientConn. The last state will be remembered and propagated when +// ResumeStateUpdates is called. +func (wbsa *Aggregator) PauseStateUpdates() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.pauseUpdateState = true + wbsa.needUpdateStateOnResume = false +} + +// ResumeStateUpdates will resume propagating UpdateState calls to the parent, +// and call UpdateState on the parent if any UpdateState call was suppressed. +func (wbsa *Aggregator) ResumeStateUpdates() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.pauseUpdateState = false + if wbsa.needUpdateStateOnResume { + wbsa.cc.UpdateState(wbsa.build()) + } +} + // UpdateState is called to report a balancer state change from sub-balancer. // It's usually called by the balancer group. // @@ -166,6 +192,14 @@ func (wbsa *Aggregator) UpdateState(id string, newState balancer.State) { if !wbsa.started { return } + + if wbsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. + wbsa.needUpdateStateOnResume = true + return + } + wbsa.cc.UpdateState(wbsa.build()) } @@ -191,6 +225,13 @@ func (wbsa *Aggregator) BuildAndUpdate() { if !wbsa.started { return } + if wbsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. 
+ wbsa.needUpdateStateOnResume = true + return + } + wbsa.cc.UpdateState(wbsa.build()) } diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go index b6fa532b5120e..2582c84c54885 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go @@ -90,6 +90,9 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat var rebuildStateAndPicker bool + b.stateAggregator.PauseStateUpdates() + defer b.stateAggregator.ResumeStateUpdates() + // Remove sub-pickers and sub-balancers that are not in the new config. for name := range b.targets { if _, ok := newConfig.Targets[name]; !ok { diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index f4ea61746823c..b1c23eaae0db2 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -20,130 +20,178 @@ package grpc import ( "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { + cc *ClientConn -// exitIdle contains no data and is just a signal sent on the updateCh in -// ccBalancerWrapper to instruct the balancer to exit idle. -type exitIdle struct{} + // Since these fields are accessed only from handleXxx() methods which are + // synchronized by the watcher goroutine, we do not need a mutex to protect + // these fields. + balancer *gracefulswitch.Balancer + curBalancerName string -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. -type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - hasExitIdle bool - updateCh *buffer.Unbounded - closed *grpcsync.Event - done *grpcsync.Event - - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} + updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). + resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. + closed *grpcsync.Event // Indicates if close has been called. + done *grpcsync.Event // Indicates if close has completed its work. 
} -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, updateCh: buffer.NewUnbounded(), + resultCh: buffer.NewUnbounded(), closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) - _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler) + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. + +type ccStateUpdate struct { + ccs *balancer.ClientConnState +} + +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { + err error +} + +type switchToUpdate struct { + name string +} + +type subConnUpdate struct { + acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.updateCh.Get(): + case u := <-ccb.updateCh.Get(): ccb.updateCh.Load() if ccb.closed.HasFired() { break } - switch u := t.(type) { + switch update := u.(type) { + case *ccStateUpdate: + ccb.handleClientConnStateChange(update.ccs) case *scStateUpdate: - ccb.balancerMu.Lock() - ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) - ccb.balancerMu.Unlock() - case *acBalancerWrapper: - ccb.mu.Lock() - if ccb.subConns != nil { - delete(ccb.subConns, u) - ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) - } - ccb.mu.Unlock() - case exitIdle: - if ccb.cc.GetState() == connectivity.Idle { - if ei, ok := ccb.balancer.(balancer.ExitIdler); ok { - // We already checked that the balancer implements - // ExitIdle before pushing the event to updateCh, but - // check conditionally again as defensive programming. 
-                    ccb.balancerMu.Lock()
-                    ei.ExitIdle()
-                    ccb.balancerMu.Unlock()
-                }
-            }
+                ccb.handleSubConnStateChange(update)
+            case *exitIdleUpdate:
+                ccb.handleExitIdle()
+            case *resolverErrorUpdate:
+                ccb.handleResolverError(update.err)
+            case *switchToUpdate:
+                ccb.handleSwitchTo(update.name)
+            case *subConnUpdate:
+                ccb.handleRemoveSubConn(update.acbw)
             default:
-                logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t)
+                logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update)
             }
         case <-ccb.closed.Done():
         }

         if ccb.closed.HasFired() {
-            ccb.balancerMu.Lock()
-            ccb.balancer.Close()
-            ccb.balancerMu.Unlock()
-            ccb.mu.Lock()
-            scs := ccb.subConns
-            ccb.subConns = nil
-            ccb.mu.Unlock()
-            ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
-            ccb.done.Fire()
-            // Fire done before removing the addr conns. We can safely unblock
-            // ccb.close and allow the removeAddrConns to happen
-            // asynchronously.
-            for acbw := range scs {
-                ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
-            }
+            ccb.handleClose()
             return
         }
     }
 }

-func (ccb *ccBalancerWrapper) close() {
-    ccb.closed.Fire()
-    <-ccb.done.Done()
+// updateClientConnState is invoked by grpc to push a ClientConnState update to
+// the underlying balancer.
+//
+// Unlike other methods invoked by grpc to push updates to the underlying
+// balancer, this method cannot simply push the update onto the update channel
+// and return. It needs to return the error returned by the underlying balancer
+// back to grpc which propagates that to the resolver.
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+    ccb.updateCh.Put(&ccStateUpdate{ccs: ccs})
+
+    var res interface{}
+    select {
+    case res = <-ccb.resultCh.Get():
+        ccb.resultCh.Load()
+    case <-ccb.closed.Done():
+        // Return early if the balancer wrapper is closed while we are waiting for
+        // the underlying balancer to process a ClientConnState update.
+        return nil
+    }
+    // If the returned error is nil, attempting to type assert to error leads to
+    // panic. So, this needs to be handled separately.
+    if res == nil {
+        return nil
+    }
+    return res.(error)
 }

-func (ccb *ccBalancerWrapper) exitIdle() bool {
-    if !ccb.hasExitIdle {
-        return false
+// handleClientConnStateChange handles a ClientConnState update from the update
+// channel and invokes the appropriate method on the underlying balancer.
+//
+// If the addresses specified in the update contain addresses of type "grpclb"
+// and the selected LB policy is not "grpclb", these addresses will be filtered
+// out and ccs will be modified with the updated address list.
+func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) {
+    if ccb.curBalancerName != grpclbName {
+        // Filter any grpclb addresses since we don't have the grpclb balancer.
+        var addrs []resolver.Address
+        for _, addr := range ccs.ResolverState.Addresses {
+            if addr.Type == resolver.GRPCLB {
+                continue
+            }
+            addrs = append(addrs, addr)
+        }
+        ccs.ResolverState.Addresses = addrs
     }
-    ccb.updateCh.Put(exitIdle{})
-    return true
+    ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs))
 }

-func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
+// updateSubConnState is invoked by grpc to push a subConn state update to the
+// underlying balancer.
+func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
     // When updating addresses for a SubConn, if the address in use is not in
     // the new addresses, the old ac will be tearDown() and a new ac will be
     // created. tearDown() generates a state change with Shutdown state, we
@@ -161,44 +209,125 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
     })
 }

-func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
-    ccb.balancerMu.Lock()
-    defer ccb.balancerMu.Unlock()
-    return ccb.balancer.UpdateClientConnState(*ccs)
+// handleSubConnStateChange handles a SubConnState update from the update
+// channel and invokes the appropriate method on the underlying balancer.
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) {
+    ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err})
+}
+
+func (ccb *ccBalancerWrapper) exitIdle() {
+    ccb.updateCh.Put(&exitIdleUpdate{})
+}
+
+func (ccb *ccBalancerWrapper) handleExitIdle() {
+    if ccb.cc.GetState() != connectivity.Idle {
+        return
+    }
+    ccb.balancer.ExitIdle()
 }

 func (ccb *ccBalancerWrapper) resolverError(err error) {
-    ccb.balancerMu.Lock()
-    defer ccb.balancerMu.Unlock()
+    ccb.updateCh.Put(&resolverErrorUpdate{err: err})
+}
+
+func (ccb *ccBalancerWrapper) handleResolverError(err error) {
     ccb.balancer.ResolverError(err)
 }

+// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
+// LB policy identified by name.
+//
+// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the
+// first good update from the name resolver, it determines the LB policy to use
+// and invokes the switchTo() method. Upon receipt of every subsequent update
+// from the name resolver, it invokes this method.
+//
+// The ccBalancerWrapper keeps track of the current LB policy name, and skips
+// the graceful balancer switching process if the name does not change.
+func (ccb *ccBalancerWrapper) switchTo(name string) {
+    ccb.updateCh.Put(&switchToUpdate{name: name})
+}
+
+// handleSwitchTo handles a balancer switch update from the update channel. It
+// calls the SwitchTo() method on the gracefulswitch.Balancer with a
+// balancer.Builder corresponding to name. If no balancer.Builder is registered
+// for the given name, it uses the default LB policy which is "pick_first".
+func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
+    // TODO: Other languages use case-insensitive balancer registries. We should
+    // switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
+    if strings.EqualFold(ccb.curBalancerName, name) {
+        return
+    }
+
+    // TODO: Ensure that name is a registered LB policy when we get here.
+    // We currently only validate the `loadBalancingConfig` field. We need to do
+    // the same for the `loadBalancingPolicy` field and reject the service config
+    // if the specified policy is not registered.
+    builder := balancer.Get(name)
+    if builder == nil {
+        channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
+        builder = newPickfirstBuilder()
+    } else {
+        channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
+    }
+
+    if err := ccb.balancer.SwitchTo(builder); err != nil {
+        channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
+        return
+    }
+    ccb.curBalancerName = builder.Name()
+}
+
+// handleRemoveSubConn handles a request from the underlying balancer to remove
+// a subConn.
+//
+// See comments in RemoveSubConn() for more details.
+func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) {
+    ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
+}
+
+func (ccb *ccBalancerWrapper) close() {
+    ccb.closed.Fire()
+    <-ccb.done.Done()
+}
+
+func (ccb *ccBalancerWrapper) handleClose() {
+    ccb.balancer.Close()
+    ccb.done.Fire()
+}
+
 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
     if len(addrs) <= 0 {
         return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
     }
-    ccb.mu.Lock()
-    defer ccb.mu.Unlock()
-    if ccb.subConns == nil {
-        return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
-    }
     ac, err := ccb.cc.newAddrConn(addrs, opts)
     if err != nil {
+        channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
         return nil, err
     }
     acbw := &acBalancerWrapper{ac: ac}
     acbw.ac.mu.Lock()
     ac.acbw = acbw
     acbw.ac.mu.Unlock()
-    ccb.subConns[acbw] = struct{}{}
     return acbw, nil
 }

 func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
-    // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock
-    // during switchBalancer() if the old balancer calls RemoveSubConn() in its
-    // Close().
-    ccb.updateCh.Put(sc)
+    // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it
+    // was required to handle the RemoveSubConn() method asynchronously by pushing
+    // the update onto the update channel. This was done to avoid a deadlock as
+    // switchBalancer() was holding cc.mu when calling Close() on the old
+    // balancer, which would in turn call RemoveSubConn().
+    //
+    // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this
+    // asynchronously is probably not required anymore since the switchTo() method
+    // handles the balancer switch by pushing the update onto the channel.
+    // TODO(easwars): Handle this inline.
+    acbw, ok := sc.(*acBalancerWrapper)
+    if !ok {
+        return
+    }
+    ccb.updateCh.Put(&subConnUpdate{acbw: acbw})
 }

 func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
@@ -210,11 +339,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
 }

 func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
-    ccb.mu.Lock()
-    defer ccb.mu.Unlock()
-    if ccb.subConns == nil {
-        return
-    }
     // Update picker before updating state. Even though the ordering here does
     // not matter, it can lead to multiple calls of Pick in the common start-up
     // case where we wait for ready and then perform an RPC.
If the picker is diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 0000000000000..a220c47c59a50 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index f9af789137109..779b03bca1c31 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -146,6 +146,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + for _, opt := range extraDialOptions { + opt.apply(&cc.dopts) + } + for _, opt := range opts { opt.apply(&cc.dopts) } @@ -159,23 +163,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtInfo, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") + pid := cc.dopts.channelzParentID + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, } - cc.csMgr.channelzID = cc.channelzID } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { return nil, errNoTransportSecurity @@ -281,7 +282,7 @@ func DialContext(ctx context.Context, target string, opts 
...DialOption) (conn * if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerBuildOpts = balancer.BuildOptions{ + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, @@ -289,7 +290,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParentID: cc.channelzID, Target: cc.parsedTarget, - } + }) // Build the resolver. rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) @@ -398,7 +399,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelzID *channelz.Identifier } // updateState updates the connectivity.State of ClientConn. @@ -464,34 +465,36 @@ var _ ClientConnInterface = (*ClientConn)(nil) // handshakes. It also handles errors on established connections by // re-resolving the name and reconnecting. type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number - czData *channelzData + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error @@ -536,14 +539,7 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. 
func (cc *ClientConn) Connect() { - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() { - return - } - for ac := range cc.conns { - go ac.connect() - } + cc.balancerWrapper.exitIdle() } func (cc *ClientConn) scWatcher() { @@ -623,9 +619,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) - if cc.balancerWrapper != nil { - cc.balancerWrapper.resolverError(err) - } + cc.balancerWrapper.resolverError(err) // No addresses are valid with err set; return early. cc.mu.Unlock() @@ -653,16 +647,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState - if cc.balancerWrapper == nil { - var err error - if s.ServiceConfig.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) - } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) - } - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLB(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -670,24 +658,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } - - cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() - if cbn != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is @@ -696,56 +672,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. -// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. +// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. // // Caller must hold cc.mu. 
-func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return - } - - channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") - return - } - if cc.balancerWrapper != nil { - // Don't hold cc.mu while closing the balancers. The balancers may call - // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex - // would cause a deadlock in that case. - cc.mu.Unlock() - cc.balancerWrapper.close() - cc.mu.Lock() - } - - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) } else { - channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) } func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s, err) - cc.mu.Unlock() + cc.balancerWrapper.updateSubConnState(sc, s, err) } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. @@ -764,23 +712,26 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. 
cc.mu.Lock() + defer cc.mu.Unlock() if cc.conns == nil { - cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + cc.conns[ac] = struct{}{} - cc.mu.Unlock() return ac, nil } @@ -853,16 +804,31 @@ func (ac *addrConn) connect() error { return nil } +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + // tryUpdateAddrs tries to update ac.addrs with the new addresses list. // -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// // If ac is TransientFailure, it updates ac.addrs and returns true. The updated // addresses will be picked up by retry in the next iteration after backoff. // // If ac is Shutdown or Idle, it updates ac.addrs and returns true. // +// If the addresses are the same as the old list, it does nothing and returns +// true. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. // - If true, it updates ac.addrs and returns true. The ac will keep using @@ -879,6 +845,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return true } + if equalAddresses(ac.addrs, addrs) { + return true + } + if ac.state == connectivity.Connecting { return false } @@ -959,14 +929,10 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) - if err != nil { - return nil, nil, toRPCErr(err) - } - return t, done, nil } func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { @@ -991,35 +957,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel cc.retryThrottler.Store((*retryThrottler)(nil)) } - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set.
- var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } + cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1070,11 +1027,11 @@ func (cc *ClientConn) Close() error { rWrapper := cc.resolverWrapper cc.resolverWrapper = nil bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil cc.mu.Unlock() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. cc.blockingpicker.close() - if bWrapper != nil { bWrapper.close() } @@ -1085,22 +1042,22 @@ func (cc *ClientConn) Close() error { for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", + ted := &channelz.TraceEventDesc{ + Desc: "Channel deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + return nil } @@ -1130,7 +1087,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. + channelzID *channelz.Identifier czData *channelzData } @@ -1284,6 +1241,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.mu.Lock() defer ac.mu.Unlock() defer connClosed.Fire() + defer hcancel() if !hcStarted || hctx.Err() != nil { // We didn't start the health check or set the state to READY, so // no need to do anything else here. 
@@ -1294,7 +1252,6 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // state, since there may be a new transport in this addrConn. return } - hcancel() ac.transport = nil // Refresh the name resolver ac.cc.resolveNow(resolver.ResolveNowOptions{}) @@ -1312,14 +1269,13 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParentID = ac.channelzID newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err) + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) return err } @@ -1332,7 +1288,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr.Close(transport.ErrConnClosing) if connectCtx.Err() == context.DeadlineExceeded { err := errors.New("failed to receive server preface within timeout") - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err) return err } return nil @@ -1497,19 +1453,18 @@ func (ac *addrConn) tearDown(err error) { curTr.GracefulClose() ac.mu.Lock() } - if channelz.IsOn() { - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go index 8bc7ceee0aff2..7b953a520e5bb 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -158,7 +158,7 @@ type altsHandshaker struct { // stub created using the passed conn and used to talk to the ALTS Handshaker // service in the metadata server. 
func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) + stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) if err != nil { return nil, err } @@ -174,7 +174,7 @@ func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, // stub created using the passed conn and used to talk to the ALTS Handshaker // service in the metadata server. func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) + stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index fd55176b9b698..d3562c6d5e622 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/handshaker.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/vendor/google.golang.org/grpc/credentials/google/xds.go b/vendor/google.golang.org/grpc/credentials/google/xds.go index b8c2e8f9204cf..e32edc0421c3e 100644 --- a/vendor/google.golang.org/grpc/credentials/google/xds.go +++ b/vendor/google.golang.org/grpc/credentials/google/xds.go @@ -21,6 +21,7 @@ package google import ( "context" "net" + "net/url" "strings" "google.golang.org/grpc/credentials" @@ -28,12 +29,16 @@ import ( ) const cfeClusterNamePrefix = "google_cfe_" +const cfeClusterResourceNamePrefix = "/envoy.config.cluster.v3.Cluster/google_cfe_" +const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com" // clusterTransportCreds is a combo of TLS + ALTS. // // On the client, ClientHandshake picks TLS or ALTS based on address attributes. 
// - if attributes has cluster name -// - if cluster name has prefix "google_cfe_", use TLS +// - if cluster name has prefix "google_cfe_", or +// "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_", +// use TLS // - otherwise, use ALTS // - else, do TLS // @@ -50,18 +55,49 @@ func newClusterTransportCreds(tls, alts credentials.TransportCredentials) *clust } } -func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { +// clusterName returns the xDS cluster name stored in the attributes in the +// context. +func clusterName(ctx context.Context) string { chi := credentials.ClientHandshakeInfoFromContext(ctx) if chi.Attributes == nil { - return c.tls.ClientHandshake(ctx, authority, rawConn) + return "" + } + cluster, _ := internal.GetXDSHandshakeClusterName(chi.Attributes) + return cluster +} + +// isDirectPathCluster returns true if the cluster in the context is a +// directpath cluster, meaning ALTS should be used. +func isDirectPathCluster(ctx context.Context) bool { + cluster := clusterName(ctx) + if cluster == "" { + // No cluster; not xDS; use TLS. + return false + } + if strings.HasPrefix(cluster, cfeClusterNamePrefix) { + // xDS cluster prefixed by "google_cfe_"; use TLS. + return false } - cn, ok := internal.GetXDSHandshakeClusterName(chi.Attributes) - if !ok || strings.HasPrefix(cn, cfeClusterNamePrefix) { - return c.tls.ClientHandshake(ctx, authority, rawConn) + if !strings.HasPrefix(cluster, "xdstp:") { + // Other xDS cluster name; use ALTS. + return true + } + u, err := url.Parse(cluster) + if err != nil { + // Shouldn't happen, but assume ALTS. + return true + } + // If authority AND path match our CFE checks, use TLS; otherwise use ALTS. + return u.Host != cfeClusterAuthorityName || !strings.HasPrefix(u.Path, cfeClusterResourceNamePrefix) +} + +func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if isDirectPathCluster(ctx) { + // If attributes have cluster name, and cluster name is not cfe, it's a + // backend address, use ALTS. + return c.alts.ClientHandshake(ctx, authority, rawConn) } - // If attributes have cluster name, and cluster name is not cfe, it's a - // backend address, use ALTS. - return c.alts.ClientHandshake(ctx, authority, rawConn) + return c.tls.ClientHandshake(ctx, authority, rawConn) } func (c *clusterTransportCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index c4bf09f9e9409..9372dc322e803 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -20,22 +20,32 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) +func init() { + internal.AddGlobalDialOptions = func(opt ...DialOption) { + extraDialOptions = append(extraDialOptions, opt...) 
+ } + internal.ClearGlobalDialOptions = func() { + extraDialOptions = nil + } + internal.WithBinaryLogger = withBinaryLogger +} + // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { @@ -45,19 +55,18 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by WithBalancerName dial option. - balancerBuilder balancer.Builder - channelzParentID int64 + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + binaryLogger binarylog.Logger + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -73,10 +82,12 @@ type DialOption interface { apply(*dialOptions) } +var extraDialOptions []DialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -195,25 +206,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -286,7 +278,7 @@ func WithBlock() DialOption { // the context.DeadlineExceeded error. // Implies WithBlock() // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -304,8 +296,8 @@ func WithReturnConnectionError() DialOption { // WithCredentialsBundle or WithPerRPCCredentials) which require transport // security is incompatible and will cause grpc.Dial() to fail. // -// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead. -// Will be supported throughout 1.x. +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.TransportCredentials = insecure.NewCredentials() @@ -315,7 +307,7 @@ func WithInsecure() DialOption { // WithNoProxy returns a DialOption which disables the use of proxies for this // ClientConn. This is ignored if WithDialer or WithContextDialer are used. 
// -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -346,7 +338,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // the ClientConn.WithCreds. This should not be used together with // WithTransportCredentials. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -402,7 +394,21 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { // all the RPCs and underlying network connections in this ClientConn. func WithStatsHandler(h stats.Handler) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.StatsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) + }) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl }) } @@ -414,7 +420,7 @@ func WithStatsHandler(h stats.Handler) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -494,11 +500,11 @@ func WithAuthority(a string) DialOption { // current ClientConn's parent. This function is used in nested channel creation // (e.g. grpclb dial). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. -func WithChannelzParentID(id int64) DialOption { +func WithChannelzParentID(id *channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) @@ -539,9 +545,6 @@ func WithDefaultServiceConfig(s string) DialOption { // service config enables them. This does not impact transparent retries, which // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. -// -// Retry support is currently enabled by default, but may be disabled by -// setting the environment variable "GRPC_GO_RETRY" to "off". func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true @@ -559,7 +562,7 @@ func WithMaxHeaderListSize(s uint32) DialOption { // WithDisableHealthCheck disables the LB channel health checking for all // SubConns of this ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -606,7 +609,7 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { // resolver.Register. They will be matched against the scheme used for the // current Dial only, and will take precedence over the global registry. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
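The dialoptions.go changes above remove WithBalancerName entirely. For anyone tracking this vendor bump, the replacement is the one the deprecation notice names: select the LB policy through service config instead of a dial option. A minimal sketch, assuming a hypothetical target; round_robin is registered by the grpc package itself at init time:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The service config JSON below is the documented replacement for the
	// removed WithBalancerName option. The target is illustrative only.
	conn, err := grpc.Dial(
		"dns:///example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```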
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 6d84f74c7d082..18e530fc90242 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -108,7 +108,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect. func RegisterCodec(codec Codec) { if codec == nil { diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 69f525d1baebf..a332dfd7b54e0 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -1,3 +1,20 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 0000000000000..08666f62a7cbf --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,384 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. 
+func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. 
+ gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + gsb.mu.Lock() + defer gsb.mu.Unlock() + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + return + } + balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. 
+func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help clean up SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held. Does not panic with a nil receiver. +func (bw *balancerWrapper) Close() { + // bw is nil when there is no balancer to close (e.g. no pending balancer), + // so treat Close as a no-op in that case. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() cannot be + // called concurrently with the write in SwitchTo(): the callsites of Close() + // in the graceful switch balancer are never reached until SwitchTo() returns. + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + bw.gsb.cc.RemoveSubConn(sc) + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. + bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, we can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the ClientConn. Ignoring + // state + picker from the current would cause undefined behavior and make the + // system behave incorrectly from the current LB policy's perspective.
+ // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + bw.gsb.cc.RemoveSubConn(sc) + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. + if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go index 3142503a0ad5b..3daad14473eeb 100644 --- a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go +++ b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/cache" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/resolver" @@ -67,7 +68,7 @@ type subBalancerWrapper struct { ccState *balancer.ClientConnState // The dynamic part of sub-balancer. Only used when balancer group is // started. Gets cleared when sub-balancer is closed. - balancer balancer.Balancer + balancer *gracefulswitch.Balancer } // UpdateState overrides balancer.ClientConn, to keep state and picker. 
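The hunk above is where balancergroup adopts the new graceful switch balancer for its children. The driving sequence is easiest to see from the caller's side; a rough sketch follows, illustrative only since gracefulswitch is internal to gRPC and cannot be imported from outside, with `cc` and `state` standing in for values the caller already holds:

```go
package grpcinternal // hypothetical package name inside the grpc module

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/internal/balancer/gracefulswitch"
	"google.golang.org/grpc/resolver"
)

// switchPolicies assumes pick_first and round_robin are registered, which the
// grpc and balancer/roundrobin packages do at init time.
func switchPolicies(cc balancer.ClientConn, state resolver.State) error {
	gsb := gracefulswitch.NewBalancer(cc, balancer.BuildOptions{})

	// The first SwitchTo installs the builder as balancerCurrent.
	if err := gsb.SwitchTo(balancer.Get("pick_first")); err != nil {
		return err
	}

	// A second SwitchTo makes round_robin balancerPending; swap() promotes it
	// once it reports READY, or as soon as the current policy leaves READY.
	if err := gsb.SwitchTo(balancer.Get("round_robin")); err != nil {
		return err
	}

	// Resolver updates always flow to the latest policy: pending if present,
	// otherwise current.
	return gsb.UpdateClientConnState(balancer.ClientConnState{ResolverState: state})
}
```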
@@ -93,11 +94,13 @@ func (sbc *subBalancerWrapper) updateBalancerStateWithCachedPicker() { } func (sbc *subBalancerWrapper) startBalancer() { - b := sbc.builder.Build(sbc, sbc.buildOpts) - sbc.group.logger.Infof("Created child policy %p of type %v", b, sbc.builder.Name()) - sbc.balancer = b + if sbc.balancer == nil { + sbc.balancer = gracefulswitch.NewBalancer(sbc, sbc.buildOpts) + } + sbc.group.logger.Infof("Creating child policy of type %v", sbc.builder.Name()) + sbc.balancer.SwitchTo(sbc.builder) if sbc.ccState != nil { - b.UpdateClientConnState(*sbc.ccState) + sbc.balancer.UpdateClientConnState(*sbc.ccState) } } @@ -108,11 +111,8 @@ func (sbc *subBalancerWrapper) exitIdle() (complete bool) { if b == nil { return true } - if ei, ok := b.(balancer.ExitIdler); ok { - ei.ExitIdle() - return true - } - return false + b.ExitIdle() + return true } func (sbc *subBalancerWrapper) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { @@ -160,6 +160,20 @@ func (sbc *subBalancerWrapper) resolverError(err error) { b.ResolverError(err) } +func (sbc *subBalancerWrapper) gracefulSwitch(builder balancer.Builder) { + sbc.builder = builder + b := sbc.balancer + // Even if you get an add and it persists builder but doesn't start + // balancer, this would leave graceful switch being nil, in which we are + // correctly overwriting with the recent builder here as well to use later. + // The graceful switch balancer's presence is an invariant of whether the + // balancer group is closed or not (if closed, nil, if started, present). + if sbc.balancer != nil { + sbc.group.logger.Infof("Switching child policy %v to type %v", sbc.id, sbc.builder.Name()) + b.SwitchTo(sbc.builder) + } +} + func (sbc *subBalancerWrapper) stopBalancer() { sbc.balancer.Close() sbc.balancer = nil @@ -332,6 +346,23 @@ func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { bg.outgoingMu.Unlock() } +// UpdateBuilder updates the builder for a current child, starting the Graceful +// Switch process for that child. +func (bg *BalancerGroup) UpdateBuilder(id string, builder balancer.Builder) { + bg.outgoingMu.Lock() + // This does not deal with the balancer cache because this call should come + // after an Add call for a given child balancer. If the child is removed, + // the caller will call Add if the child balancer comes back which would + // then deal with the balancer cache. + sbc := bg.idToBalancerConfig[id] + if sbc == nil { + // simply ignore it if not present, don't error + return + } + sbc.gracefulSwitch(builder) + bg.outgoingMu.Unlock() +} + // Remove removes the balancer with id from the group. // // But doesn't close the balancer. The balancer is kept in a cache, and will be @@ -374,7 +405,6 @@ func (bg *BalancerGroup) cleanupSubConns(config *subBalancerWrapper) { // sub-balancers. for sc, b := range bg.scToSubBalancer { if b == config { - bg.cc.RemoveSubConn(sc) delete(bg.scToSubBalancer, sc) } } diff --git a/vendor/google.golang.org/grpc/internal/balancergroup/balancerstateaggregator.go b/vendor/google.golang.org/grpc/internal/balancergroup/balancerstateaggregator.go index 1163943850599..816869555323d 100644 --- a/vendor/google.golang.org/grpc/internal/balancergroup/balancerstateaggregator.go +++ b/vendor/google.golang.org/grpc/internal/balancergroup/balancerstateaggregator.go @@ -26,7 +26,7 @@ import ( // state. // // It takes care of merging sub-picker into one picker. 
The picking config is -// passed directly from the the parent to the aggregator implementation (instead +// passed directly from the parent to the aggregator implementation (instead // via balancer group). type BalancerStateAggregator interface { // UpdateState updates the state of the id. diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 5cc3aeddb213f..809d73ccafb08 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -31,35 +31,42 @@ import ( // Logger is the global binary logger. It can be used to get binary logger for // each method. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of this should be // built at init time from the configuration (environment variable or flags). // -// It is used to get a methodLogger for each individual method. +// It is used to get a MethodLogger for each individual method. var binLogger Logger var grpclogLogger = grpclog.Component("binarylog") -// SetLogger sets the binarg logger. +// SetLogger sets the binary logger. // // Only call this at init time. func SetLogger(l Logger) { binLogger = l } -// GetMethodLogger returns the methodLogger for the given methodName. +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + +// GetMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -68,17 +75,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. The map fields need to be filled in @@ -88,83 +107,83 @@ func newEmptyLogger() *logger { } // Set method logger for "*". 
-func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // -// New methodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +// New MethodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // -// New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +// New MethodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } -// getMethodLogger returns the methodLogger for the given methodName. +// getMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. 
-func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index d8f4e7602fde1..c5579e65065f6 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger { return l } -// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds // it to the right map in the logger. func (l *logger) fillMethodLoggerWithConfigString(config string) error { // "" is invalid. @@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0cdb41831509d..179f4a26d135b 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -48,7 +48,13 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +type MethodLogger interface { + Log(LogEntryConfig) +} + +// TruncatingMethodLogger is a method logger that truncates headers and messages +// based on configured fields. 
+type TruncatingMethodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -57,8 +63,9 @@ type MethodLogger struct { sink Sink // TODO(blog): make this plugable. } -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +// NewTruncatingMethodLogger returns a new truncating method logger. +func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { + return &TruncatingMethodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -69,8 +76,10 @@ func newMethodLogger(h, m uint64) *MethodLogger { } } -// Log creates a proto binary log entry, and logs it to the sink. -func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal only method for building the proto message out of the +// input event. It's made public to enable other library to reuse as much logic +// in TruncatingMethodLogger as possible. +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -85,11 +94,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { case *pb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -119,7 +132,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index ea660a147cf94..777cbcd7921d9 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -25,6 +25,7 @@ package channelz import ( "context" + "errors" "fmt" "sort" "sync" @@ -184,54 +185,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { id := idGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid) - } else { - db.get().addChannel(id, cn, false, pid) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. -func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a SubChannel's parent id cannot be 0") - return 0 +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") } id := idGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } + sc := &subChannel{ refName: ref, c: c, sockets: make(map[int64]string), id: id, - pid: pid, + pid: pid.Int(), trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid) - return id + db.get().addSubChannel(id, sc, pid.Int()) + return newIdentifer(RefSubChannel, id, pid), nil } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(s Server, ref string) *Identifier { id := idGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } + svr := &server{ refName: ref, s: s, @@ -240,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 { id: id, } db.get().addServer(id, svr) - return id + return newIdentifer(RefServer, id, nil) } // RegisterListenSocket registers the given listen socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a ListenSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be nil") } id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid) - return id + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } + + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addListenSocket(id, ls, pid.Int()) + return newIdentifer(RefListenSocket, id, pid), nil } // RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent +// with ref as its reference name, and adds it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a NormalSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be nil") } id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid) - return id + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } + + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addNormalSocket(id, ns, pid.Int()) + return newIdentifer(RefNormalSocket, id, pid), nil } // RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. -func RemoveEntry(id int64) { - db.get().removeEntry(id) +// +// If channelz is not turned ON, this function is a no-op. +func RemoveEntry(id *Identifier) { + if !IsOn() { + return + } + db.get().removeEntry(id.Int()) } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. type TraceEventDesc struct { Desc string Severity Severity Parent *TraceEventDesc } -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { - for d := desc; d != nil; d = d.Parent { - switch d.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, d.Desc) - case CtWarning: - l.WarningDepth(depth+1, d.Desc) - case CtError: - l.ErrorDepth(depth+1, d.Desc) - } +// AddTraceEvent adds a trace event for the entity with the specified id, using +// the provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { + // Log only the trace description associated with the bottommost entity.
+ switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, withParens(id)+desc.Desc) + case CtWarning: + l.WarningDepth(depth+1, withParens(id)+desc.Desc) + case CtError: + l.ErrorDepth(depth+1, withParens(id)+desc.Desc) } + if getMaxTraceEntry() == 0 { return } - db.get().traceEvent(id, desc) + if IsOn() { + db.get().traceEvent(id.Int(), desc) + } } // channelMap is the storage data structure for channelz. diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 0000000000000..c9a27acd3710e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index b0013f9c88650..8e13a3d2ce7b6 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. 
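The parent-qualified formatting described in id.go, combined with the withParens helper above, produces log prefixes like the following (values illustrative):

	ch := newIdentifer(RefChannel, 7, nil)
	sc := newIdentifer(RefSubChannel, 8, ch)
	_ = ch.String()    // "Channel #7"
	_ = sc.String()    // "Channel #7 SubChannel #8"
	_ = withParens(sc) // "[Channel #7 SubChannel #8] "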
-func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) - } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 3c595d154bd33..ad0ce4dabf06c 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -686,12 +686,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. 
RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket ) +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 0000000000000..821dd0a7c1988 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. + ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 7d996e51b5c17..af09711a3e88d 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -77,16 +77,16 @@ var ( // environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to // "true". - XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") + XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false") // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") // XDSOutlierDetection indicates whether outlier detection support is - // enabled, which can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true". - XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true") + // enabled, which can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". 
+ XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false") // XDSFederation indicates whether federation support is enabled. XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go index d6c9e03fc4c86..6717b757f80dc 100644 --- a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go +++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go @@ -20,13 +20,6 @@ package googlecloud import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "regexp" "runtime" "strings" "sync" @@ -35,43 +28,9 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -const ( - linuxProductNameFile = "/sys/class/dmi/id/product_name" - windowsCheckCommand = "powershell.exe" - windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" - powershellOutputFilter = "Manufacturer" - windowsManufacturerRegex = ":(.*)" - - logPrefix = "[googlecloud]" -) +const logPrefix = "[googlecloud]" var ( - // The following two variables will be reassigned in tests. - runningOS = runtime.GOOS - manufacturerReader = func() (io.Reader, error) { - switch runningOS { - case "linux": - return os.Open(linuxProductNameFile) - case "windows": - cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) - out, err := cmd.Output() - if err != nil { - return nil, err - } - for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { - if strings.HasPrefix(line, powershellOutputFilter) { - re := regexp.MustCompile(windowsManufacturerRegex) - name := re.FindString(line) - name = strings.TrimLeft(name, ":") - return strings.NewReader(name), nil - } - } - return nil, errors.New("cannot determine the machine's manufacturer") - default: - return nil, fmt.Errorf("%s is not supported", runningOS) - } - } - vmOnGCEOnce sync.Once vmOnGCE bool @@ -84,21 +43,21 @@ var ( // package. We keep this to avoid depending on the cloud library module. func OnGCE() bool { vmOnGCEOnce.Do(func() { - vmOnGCE = isRunningOnGCE() + mf, err := manufacturer() + if err != nil { + logger.Infof("failed to read manufacturer, setting onGCE=false: %v", err) + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) }) return vmOnGCE } -// isRunningOnGCE checks whether the local system, without doing a network request is +// isRunningOnGCE checks whether the local system, without doing a network request, is // running on GCP.
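Because the I/O now lives behind per-OS manufacturer() implementations, the detection logic below is a pure function of the BIOS product string and GOOS, which makes it easy to table-test. Illustrative cases for the linux branch:

	isRunningOnGCE([]byte("Google\n"), "linux")                // true (TrimSpace, then exact match)
	isRunningOnGCE([]byte("Google Compute Engine\n"), "linux") // true
	isRunningOnGCE([]byte("QEMU"), "linux")                    // false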
-func isRunningOnGCE() bool { - manufacturer, err := readManufacturer() - if err != nil { - logger.Infof("failed to read manufacturer %v, returning OnGCE=false", err) - return false - } +func isRunningOnGCE(manufacturer []byte, goos string) bool { name := string(manufacturer) - switch runningOS { + switch goos { case "linux": name = strings.TrimSpace(name) return name == "Google" || name == "Google Compute Engine" @@ -111,18 +70,3 @@ func isRunningOnGCE() bool { return false } } - -func readManufacturer() ([]byte, error) { - reader, err := manufacturerReader() - if err != nil { - return nil, err - } - if reader == nil { - return nil, errors.New("got nil reader") - } - manufacturer, err := ioutil.ReadAll(reader) - if err != nil { - return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err) - } - return manufacturer, nil -} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go new file mode 100644 index 0000000000000..ffa0f1ddee5d8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go @@ -0,0 +1,26 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go new file mode 100644 index 0000000000000..e53b8ffc837f7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go @@ -0,0 +1,27 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +import "io/ioutil" + +const linuxProductNameFile = "/sys/class/dmi/id/product_name" + +func manufacturer() ([]byte, error) { + return ioutil.ReadFile(linuxProductNameFile) +} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go new file mode 100644 index 0000000000000..2d7aaaaa70fe4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 740f83c2b7663..517ea70642a1f 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -52,6 +52,13 @@ func Intn(n int) int { return r.Intn(n) } +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { + mu.Lock() + defer mu.Unlock() + return r.Int31n(n) +} + // Float64 implements rand.Float64 on the grpcrand global source. func Float64() float64 { mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index 4e7475060c1c7..e9c4af64830c5 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -39,6 +39,11 @@ func ParseMethod(methodName string) (service, method string, _ error) { return methodName[:pos], methodName[pos+1:], nil } +// baseContentType is the base content-type for gRPC. This is a valid +// content-type on its own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. const baseContentType = "application/grpc" // ContentSubtype returns the content-subtype for the given content-type. The diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 20fb880f344f4..fd0ee3dcaf1ef 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -63,6 +63,70 @@ var ( // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. DrainServerTransports interface{} // func(*grpc.Server, string) + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3.
default values. + AddGlobalServerOptions interface{} // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + AddGlobalDialOptions interface{} // func(opt ...DialOption) + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + ClearGlobalDialOptions func() + // JoinServerOptions combines the server options passed as arguments into a + // single server option. + JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. + RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -85,3 +149,9 @@ const ( // that supports backend returned by grpclb balancer. CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. 
+// +// It currently has an experimental suffix which will be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index b8733dbf340da..b2980f8ac44ad 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -22,6 +22,9 @@ package metadata import ( + "fmt" + "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -72,3 +75,46 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address { addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } + +// Validate returns an error if the input md contains invalid keys or values. +// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. +// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. +// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. +func Validate(md metadata.MD) error { + for k, vals := range md { + // pseudo-headers are ignored + if k[0] == ':' { + continue + } + // check the key; indexing the string avoids the rune conversion a for-range would perform + for i := 0; i < len(k); i++ { + r := k[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k) + } + } + if strings.HasSuffix(k, "-bin") { + continue + } + // check the values + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k) + } + } + } + return nil +} + +// hasNotPrintable returns true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // indexing the string avoids the rune conversion a for-range would perform + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go new file mode 100644 index 0000000000000..437dff2201c9d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go @@ -0,0 +1,362 @@ +// Copyright 2020 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: grpc/lookup/v1/rls.proto + +package grpc_lookup_v1 + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// Possible reasons for making a request. +type RouteLookupRequest_Reason int32 + +const ( + RouteLookupRequest_REASON_UNKNOWN RouteLookupRequest_Reason = 0 // Unused + RouteLookupRequest_REASON_MISS RouteLookupRequest_Reason = 1 // No data available in local cache + RouteLookupRequest_REASON_STALE RouteLookupRequest_Reason = 2 // Data in local cache is stale +) + +// Enum value maps for RouteLookupRequest_Reason. +var ( + RouteLookupRequest_Reason_name = map[int32]string{ + 0: "REASON_UNKNOWN", + 1: "REASON_MISS", + 2: "REASON_STALE", + } + RouteLookupRequest_Reason_value = map[string]int32{ + "REASON_UNKNOWN": 0, + "REASON_MISS": 1, + "REASON_STALE": 2, + } +) + +func (x RouteLookupRequest_Reason) Enum() *RouteLookupRequest_Reason { + p := new(RouteLookupRequest_Reason) + *p = x + return p +} + +func (x RouteLookupRequest_Reason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RouteLookupRequest_Reason) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_lookup_v1_rls_proto_enumTypes[0].Descriptor() +} + +func (RouteLookupRequest_Reason) Type() protoreflect.EnumType { + return &file_grpc_lookup_v1_rls_proto_enumTypes[0] +} + +func (x RouteLookupRequest_Reason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RouteLookupRequest_Reason.Descriptor instead. +func (RouteLookupRequest_Reason) EnumDescriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_proto_rawDescGZIP(), []int{0, 0} +} + +type RouteLookupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Target type allows the client to specify what kind of target format it + // would like from RLS to allow it to find the regional server, e.g. "grpc". + TargetType string `protobuf:"bytes,3,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` + // Reason for making this request. + Reason RouteLookupRequest_Reason `protobuf:"varint,5,opt,name=reason,proto3,enum=grpc.lookup.v1.RouteLookupRequest_Reason" json:"reason,omitempty"` + // For REASON_STALE, the header_data from the stale response, if any. + StaleHeaderData string `protobuf:"bytes,6,opt,name=stale_header_data,json=staleHeaderData,proto3" json:"stale_header_data,omitempty"` + // Map of key values extracted via key builders for the gRPC or HTTP request. 
+ KeyMap map[string]string `protobuf:"bytes,4,rep,name=key_map,json=keyMap,proto3" json:"key_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RouteLookupRequest) Reset() { + *x = RouteLookupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteLookupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteLookupRequest) ProtoMessage() {} + +func (x *RouteLookupRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteLookupRequest.ProtoReflect.Descriptor instead. +func (*RouteLookupRequest) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_proto_rawDescGZIP(), []int{0} +} + +func (x *RouteLookupRequest) GetTargetType() string { + if x != nil { + return x.TargetType + } + return "" +} + +func (x *RouteLookupRequest) GetReason() RouteLookupRequest_Reason { + if x != nil { + return x.Reason + } + return RouteLookupRequest_REASON_UNKNOWN +} + +func (x *RouteLookupRequest) GetStaleHeaderData() string { + if x != nil { + return x.StaleHeaderData + } + return "" +} + +func (x *RouteLookupRequest) GetKeyMap() map[string]string { + if x != nil { + return x.KeyMap + } + return nil +} + +type RouteLookupResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Prioritized list (best one first) of addressable entities to use + // for routing, using syntax requested by the request target_type. + // The targets will be tried in order until a healthy one is found. + Targets []string `protobuf:"bytes,3,rep,name=targets,proto3" json:"targets,omitempty"` + // Optional header value to pass along to AFE in the X-Google-RLS-Data header. + // Cached with "target" and sent with all requests that match the request key. + // Allows the RLS to pass its work product to the eventual target. + HeaderData string `protobuf:"bytes,2,opt,name=header_data,json=headerData,proto3" json:"header_data,omitempty"` +} + +func (x *RouteLookupResponse) Reset() { + *x = RouteLookupResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteLookupResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteLookupResponse) ProtoMessage() {} + +func (x *RouteLookupResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteLookupResponse.ProtoReflect.Descriptor instead. 
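For orientation, a lookup request assembled from these generated types might look like the sketch below; the field values are illustrative, and the client stub that would send the request is not part of this hunk:

	req := &grpc_lookup_v1.RouteLookupRequest{
		TargetType: "grpc",
		Reason:     grpc_lookup_v1.RouteLookupRequest_REASON_MISS,
		KeyMap:     map[string]string{"service": "echo.EchoService"},
	}
	// The matching RouteLookupResponse carries Targets (tried in order) plus opaque HeaderData.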
+func (*RouteLookupResponse) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_proto_rawDescGZIP(), []int{1} +} + +func (x *RouteLookupResponse) GetTargets() []string { + if x != nil { + return x.Targets + } + return nil +} + +func (x *RouteLookupResponse) GetHeaderData() string { + if x != nil { + return x.HeaderData + } + return "" +} + +var File_grpc_lookup_v1_rls_proto protoreflect.FileDescriptor + +var file_grpc_lookup_v1_rls_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, + 0x2f, 0x72, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x22, 0x83, 0x03, 0x0a, 0x12, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x47, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x1a, 0x39, 0x0a, 0x0b, 0x4b, 0x65, + 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, + 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, + 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x53, + 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, + 0x10, 0x03, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x22, 0x5e, 0x0a, 0x13, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, + 0x74, 0x61, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, + 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x52, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_lookup_v1_rls_proto_rawDescOnce sync.Once + file_grpc_lookup_v1_rls_proto_rawDescData = file_grpc_lookup_v1_rls_proto_rawDesc +) + +func file_grpc_lookup_v1_rls_proto_rawDescGZIP() []byte { + file_grpc_lookup_v1_rls_proto_rawDescOnce.Do(func() { + file_grpc_lookup_v1_rls_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_lookup_v1_rls_proto_rawDescData) + }) + return file_grpc_lookup_v1_rls_proto_rawDescData +} + +var file_grpc_lookup_v1_rls_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_grpc_lookup_v1_rls_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_grpc_lookup_v1_rls_proto_goTypes = []interface{}{ + (RouteLookupRequest_Reason)(0), // 0: grpc.lookup.v1.RouteLookupRequest.Reason + (*RouteLookupRequest)(nil), // 1: grpc.lookup.v1.RouteLookupRequest + (*RouteLookupResponse)(nil), // 2: grpc.lookup.v1.RouteLookupResponse + nil, // 3: grpc.lookup.v1.RouteLookupRequest.KeyMapEntry +} +var file_grpc_lookup_v1_rls_proto_depIdxs = []int32{ + 0, // 0: grpc.lookup.v1.RouteLookupRequest.reason:type_name -> grpc.lookup.v1.RouteLookupRequest.Reason + 3, // 1: grpc.lookup.v1.RouteLookupRequest.key_map:type_name -> grpc.lookup.v1.RouteLookupRequest.KeyMapEntry + 1, // 2: grpc.lookup.v1.RouteLookupService.RouteLookup:input_type -> grpc.lookup.v1.RouteLookupRequest + 2, // 3: grpc.lookup.v1.RouteLookupService.RouteLookup:output_type -> grpc.lookup.v1.RouteLookupResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_grpc_lookup_v1_rls_proto_init() } +func file_grpc_lookup_v1_rls_proto_init() { + if File_grpc_lookup_v1_rls_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_lookup_v1_rls_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteLookupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_grpc_lookup_v1_rls_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteLookupResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_lookup_v1_rls_proto_rawDesc, + NumEnums: 1, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_lookup_v1_rls_proto_goTypes, + DependencyIndexes: file_grpc_lookup_v1_rls_proto_depIdxs, + EnumInfos: file_grpc_lookup_v1_rls_proto_enumTypes, + MessageInfos: file_grpc_lookup_v1_rls_proto_msgTypes, + }.Build() + File_grpc_lookup_v1_rls_proto = out.File + file_grpc_lookup_v1_rls_proto_rawDesc = nil + file_grpc_lookup_v1_rls_proto_goTypes = nil + file_grpc_lookup_v1_rls_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go new file mode 100644 index 0000000000000..7e4c932e20ffb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -0,0 +1,940 @@ +// Copyright 2020 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 +// source: grpc/lookup/v1/rls_config.proto + +package grpc_lookup_v1 + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// Extract a key based on a given name (e.g. header name or query parameter +// name). The name must match one of the names listed in the "name" field. If +// the "required_match" field is true, one of the specified names must be +// present for the keybuilder to match. +type NameMatcher struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name that will be used in the RLS key_map to refer to this value. + // If required_match is true, you may omit this field or set it to an empty + // string, in which case the matcher will require a match, but won't update + // the key_map. 
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Ordered list of names (headers or query parameter names) that can supply + // this value; the first one with a non-empty value is used. + Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"` + // If true, make this extraction required; the key builder will not match + // if no value is found. + RequiredMatch bool `protobuf:"varint,3,opt,name=required_match,json=requiredMatch,proto3" json:"required_match,omitempty"` +} + +func (x *NameMatcher) Reset() { + *x = NameMatcher{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NameMatcher) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NameMatcher) ProtoMessage() {} + +func (x *NameMatcher) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NameMatcher.ProtoReflect.Descriptor instead. +func (*NameMatcher) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{0} +} + +func (x *NameMatcher) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *NameMatcher) GetNames() []string { + if x != nil { + return x.Names + } + return nil +} + +func (x *NameMatcher) GetRequiredMatch() bool { + if x != nil { + return x.RequiredMatch + } + return false +} + +// A GrpcKeyBuilder applies to a given gRPC service, name, and headers. +type GrpcKeyBuilder struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Names []*GrpcKeyBuilder_Name `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` + ExtraKeys *GrpcKeyBuilder_ExtraKeys `protobuf:"bytes,3,opt,name=extra_keys,json=extraKeys,proto3" json:"extra_keys,omitempty"` + // Extract keys from all listed headers. + // For gRPC, it is an error to specify "required_match" on the NameMatcher + // protos. + Headers []*NameMatcher `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty"` + // You can optionally set one or more specific key/value pairs to be added to + // the key_map. This can be useful to identify which builder built the key, + // for example if you are suppressing the actual method, but need to + // separately cache and request all the matched methods. 
+ ConstantKeys map[string]string `protobuf:"bytes,4,rep,name=constant_keys,json=constantKeys,proto3" json:"constant_keys,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GrpcKeyBuilder) Reset() { + *x = GrpcKeyBuilder{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcKeyBuilder) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcKeyBuilder) ProtoMessage() {} + +func (x *GrpcKeyBuilder) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcKeyBuilder.ProtoReflect.Descriptor instead. +func (*GrpcKeyBuilder) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{1} +} + +func (x *GrpcKeyBuilder) GetNames() []*GrpcKeyBuilder_Name { + if x != nil { + return x.Names + } + return nil +} + +func (x *GrpcKeyBuilder) GetExtraKeys() *GrpcKeyBuilder_ExtraKeys { + if x != nil { + return x.ExtraKeys + } + return nil +} + +func (x *GrpcKeyBuilder) GetHeaders() []*NameMatcher { + if x != nil { + return x.Headers + } + return nil +} + +func (x *GrpcKeyBuilder) GetConstantKeys() map[string]string { + if x != nil { + return x.ConstantKeys + } + return nil +} + +// An HttpKeyBuilder applies to a given HTTP URL and headers. +// +// Path and host patterns use the matching syntax from gRPC transcoding to +// extract named key/value pairs from the path and host components of the URL: +// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto +// +// It is invalid to specify the same key name in multiple places in a pattern. +// +// For a service where the project id can be expressed either as a subdomain or +// in the path, separate HttpKeyBuilders must be used: +// host_pattern: 'example.com' path_pattern: '/{id}/{object}/**' +// host_pattern: '{id}.example.com' path_pattern: '/{object}/**' +// If the host is exactly 'example.com', the first path segment will be used as +// the id and the second segment as the object. If the host has a subdomain, the +// subdomain will be used as the id and the first segment as the object. If +// neither pattern matches, no keys will be extracted. +type HttpKeyBuilder struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // host_pattern is an ordered list of host template patterns for the desired + // value. If any host_pattern values are specified, then at least one must + // match, and the last one wins and sets any specified variables. A host + // consists of labels separated by dots. Each label is matched against the + // label in the pattern as follows: + // - "*": Matches any single label. + // - "**": Matches zero or more labels (first or last part of host only). + // - "{=...}": One or more label capture, where "..." can be any + // template that does not include a capture. + // - "{}": A single label capture. Identical to {=*}. + // + // Examples: + // - "example.com": Only applies to the exact host example.com. + // - "*.example.com": Matches subdomains of example.com. 
+ // - "**.example.com": matches example.com, and all levels of subdomains. + // - "{project}.example.com": Extracts the third level subdomain. + // - "{project=**}.example.com": Extracts the third level+ subdomains. + // - "{project=**}": Extracts the entire host. + HostPatterns []string `protobuf:"bytes,1,rep,name=host_patterns,json=hostPatterns,proto3" json:"host_patterns,omitempty"` + // path_pattern is an ordered list of path template patterns for the desired + // value. If any path_pattern values are specified, then at least one must + // match, and the last one wins and sets any specified variables. A path + // consists of segments separated by slashes. Each segment is matched against + // the segment in the pattern as follows: + // - "*": Matches any single segment. + // - "**": Matches zero or more segments (first or last part of path only). + // - "{=...}": One or more segment capture, where "..." can be any + // template that does not include a capture. + // - "{}": A single segment capture. Identical to {=*}. + // A custom method may also be specified by appending ":" and the custom + // method name or "*" to indicate any custom method (including no custom + // method). For example, "/*/projects/{project_id}/**:*" extracts + // `{project_id}` for any version, resource and custom method that includes + // it. By default, any custom method will be matched. + // + // Examples: + // - "/v1/{name=messages/*}": extracts a name like "messages/12345". + // - "/v1/messages/{message_id}": extracts a message_id like "12345". + // - "/v1/users/{user_id}/messages/{message_id}": extracts two key values. + PathPatterns []string `protobuf:"bytes,2,rep,name=path_patterns,json=pathPatterns,proto3" json:"path_patterns,omitempty"` + // List of query parameter names to try to match. + // For example: ["parent", "name", "resource.name"] + // We extract all the specified query_parameters (case-sensitively). If any + // are marked as "required_match" and are not present, this keybuilder fails + // to match. If a given parameter appears multiple times (?foo=a&foo=b) we + // will report it as a comma-separated string (foo=a,b). + QueryParameters []*NameMatcher `protobuf:"bytes,3,rep,name=query_parameters,json=queryParameters,proto3" json:"query_parameters,omitempty"` + // List of headers to try to match. + // We extract all the specified header values (case-insensitively). If any + // are marked as "required_match" and are not present, this keybuilder fails + // to match. If a given header appears multiple times in the request we will + // report it as a comma-separated string, in standard HTTP fashion. + Headers []*NameMatcher `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty"` + // You can optionally set one or more specific key/value pairs to be added to + // the key_map. This can be useful to identify which builder built the key, + // for example if you are suppressing a lot of information from the URL, but + // need to separately cache and request URLs with that content. 
+ ConstantKeys map[string]string `protobuf:"bytes,5,rep,name=constant_keys,json=constantKeys,proto3" json:"constant_keys,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *HttpKeyBuilder) Reset() { + *x = HttpKeyBuilder{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpKeyBuilder) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpKeyBuilder) ProtoMessage() {} + +func (x *HttpKeyBuilder) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpKeyBuilder.ProtoReflect.Descriptor instead. +func (*HttpKeyBuilder) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{2} +} + +func (x *HttpKeyBuilder) GetHostPatterns() []string { + if x != nil { + return x.HostPatterns + } + return nil +} + +func (x *HttpKeyBuilder) GetPathPatterns() []string { + if x != nil { + return x.PathPatterns + } + return nil +} + +func (x *HttpKeyBuilder) GetQueryParameters() []*NameMatcher { + if x != nil { + return x.QueryParameters + } + return nil +} + +func (x *HttpKeyBuilder) GetHeaders() []*NameMatcher { + if x != nil { + return x.Headers + } + return nil +} + +func (x *HttpKeyBuilder) GetConstantKeys() map[string]string { + if x != nil { + return x.ConstantKeys + } + return nil +} + +type RouteLookupConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Ordered specifications for constructing keys for HTTP requests. Last + // match wins. If no HttpKeyBuilder matches, an empty key_map will be sent to + // the lookup service; it should likely reply with a global default route + // and raise an alert. + HttpKeybuilders []*HttpKeyBuilder `protobuf:"bytes,1,rep,name=http_keybuilders,json=httpKeybuilders,proto3" json:"http_keybuilders,omitempty"` + // Unordered specifications for constructing keys for gRPC requests. All + // GrpcKeyBuilders on this list must have unique "name" fields so that the + // client is free to prebuild a hash map keyed by name. If no GrpcKeyBuilder + // matches, an empty key_map will be sent to the lookup service; it should + // likely reply with a global default route and raise an alert. + GrpcKeybuilders []*GrpcKeyBuilder `protobuf:"bytes,2,rep,name=grpc_keybuilders,json=grpcKeybuilders,proto3" json:"grpc_keybuilders,omitempty"` + // The name of the lookup service as a gRPC URI. Typically, this will be + // a subdomain of the target, such as "lookup.datastore.googleapis.com". + LookupService string `protobuf:"bytes,3,opt,name=lookup_service,json=lookupService,proto3" json:"lookup_service,omitempty"` + // Configure a timeout value for lookup service requests. + // Defaults to 10 seconds if not specified. + LookupServiceTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=lookup_service_timeout,json=lookupServiceTimeout,proto3" json:"lookup_service_timeout,omitempty"` + // How long are responses valid for (like HTTP Cache-Control). + // If omitted or zero, the longest valid cache time is used. + // This value is clamped to 5 minutes to avoid unflushable bad responses. 
+ MaxAge *durationpb.Duration `protobuf:"bytes,5,opt,name=max_age,json=maxAge,proto3" json:"max_age,omitempty"` + // After a response has been in the client cache for this amount of time + // and is re-requested, start an asynchronous RPC to re-validate it. + // This value should be less than max_age by at least the length of a + // typical RTT to the Route Lookup Service to fully mask the RTT latency. + // If omitted, keys are only re-requested after they have expired. + StaleAge *durationpb.Duration `protobuf:"bytes,6,opt,name=stale_age,json=staleAge,proto3" json:"stale_age,omitempty"` + // Rough indicator of amount of memory to use for the client cache. Some of + // the data structure overhead is not accounted for, so actual memory consumed + // will be somewhat greater than this value. If this field is omitted or set + // to zero, a client default will be used. The value may be capped to a lower + // amount based on client configuration. + CacheSizeBytes int64 `protobuf:"varint,7,opt,name=cache_size_bytes,json=cacheSizeBytes,proto3" json:"cache_size_bytes,omitempty"` + // This is a list of all the possible targets that can be returned by the + // lookup service. If a target not on this list is returned, it will be + // treated the same as an unhealthy target. + ValidTargets []string `protobuf:"bytes,8,rep,name=valid_targets,json=validTargets,proto3" json:"valid_targets,omitempty"` + // This value provides a default target to use if needed. If set, it will be + // used if RLS returns an error, times out, or returns an invalid response. + // Note that requests can be routed only to a subdomain of the original + // target, e.g. "us_east_1.cloudbigtable.googleapis.com". + DefaultTarget string `protobuf:"bytes,9,opt,name=default_target,json=defaultTarget,proto3" json:"default_target,omitempty"` +} + +func (x *RouteLookupConfig) Reset() { + *x = RouteLookupConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteLookupConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteLookupConfig) ProtoMessage() {} + +func (x *RouteLookupConfig) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteLookupConfig.ProtoReflect.Descriptor instead. 
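Given the clamp on max_age and the guidance that stale_age should sit below it by at least one RLS round trip, a hedged configuration sketch follows; the service names and sizes are illustrative, and the import path is again assumed from the go_package:

```go
package main

import (
	"time"

	grpc_lookup_v1 "google.golang.org/grpc/lookup/grpc_lookup_v1"
	"google.golang.org/protobuf/types/known/durationpb"
)

// exampleRouteLookupConfig keeps stale_age comfortably below max_age so the
// asynchronous re-validation RPC can mask the RLS round-trip latency.
func exampleRouteLookupConfig(kbs []*grpc_lookup_v1.GrpcKeyBuilder) *grpc_lookup_v1.RouteLookupConfig {
	return &grpc_lookup_v1.RouteLookupConfig{
		GrpcKeybuilders:      kbs,
		LookupService:        "lookup.datastore.googleapis.com",
		LookupServiceTimeout: durationpb.New(2 * time.Second),
		MaxAge:               durationpb.New(5 * time.Minute), // clamped to 5 minutes anyway
		StaleAge:             durationpb.New(4 * time.Minute), // below max_age by well over one RTT
		CacheSizeBytes:       10 << 20,                        // rough target; the client may cap it lower
		DefaultTarget:        "us_east_1.cloudbigtable.googleapis.com",
	}
}
```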
+func (*RouteLookupConfig) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{3} +} + +func (x *RouteLookupConfig) GetHttpKeybuilders() []*HttpKeyBuilder { + if x != nil { + return x.HttpKeybuilders + } + return nil +} + +func (x *RouteLookupConfig) GetGrpcKeybuilders() []*GrpcKeyBuilder { + if x != nil { + return x.GrpcKeybuilders + } + return nil +} + +func (x *RouteLookupConfig) GetLookupService() string { + if x != nil { + return x.LookupService + } + return "" +} + +func (x *RouteLookupConfig) GetLookupServiceTimeout() *durationpb.Duration { + if x != nil { + return x.LookupServiceTimeout + } + return nil +} + +func (x *RouteLookupConfig) GetMaxAge() *durationpb.Duration { + if x != nil { + return x.MaxAge + } + return nil +} + +func (x *RouteLookupConfig) GetStaleAge() *durationpb.Duration { + if x != nil { + return x.StaleAge + } + return nil +} + +func (x *RouteLookupConfig) GetCacheSizeBytes() int64 { + if x != nil { + return x.CacheSizeBytes + } + return 0 +} + +func (x *RouteLookupConfig) GetValidTargets() []string { + if x != nil { + return x.ValidTargets + } + return nil +} + +func (x *RouteLookupConfig) GetDefaultTarget() string { + if x != nil { + return x.DefaultTarget + } + return "" +} + +// RouteLookupClusterSpecifier is used in xDS to represent a cluster specifier +// plugin for RLS. +type RouteLookupClusterSpecifier struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The RLS config for this cluster specifier plugin instance. + RouteLookupConfig *RouteLookupConfig `protobuf:"bytes,1,opt,name=route_lookup_config,json=routeLookupConfig,proto3" json:"route_lookup_config,omitempty"` +} + +func (x *RouteLookupClusterSpecifier) Reset() { + *x = RouteLookupClusterSpecifier{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteLookupClusterSpecifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteLookupClusterSpecifier) ProtoMessage() {} + +func (x *RouteLookupClusterSpecifier) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteLookupClusterSpecifier.ProtoReflect.Descriptor instead. +func (*RouteLookupClusterSpecifier) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{4} +} + +func (x *RouteLookupClusterSpecifier) GetRouteLookupConfig() *RouteLookupConfig { + if x != nil { + return x.RouteLookupConfig + } + return nil +} + +// To match, one of the given Name fields must match; the service and method +// fields are specified as fixed strings. The service name is required and +// includes the proto package name. The method name may be omitted, in +// which case any method on the given service is matched. 
+type GrpcKeyBuilder_Name struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` +} + +func (x *GrpcKeyBuilder_Name) Reset() { + *x = GrpcKeyBuilder_Name{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcKeyBuilder_Name) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcKeyBuilder_Name) ProtoMessage() {} + +func (x *GrpcKeyBuilder_Name) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcKeyBuilder_Name.ProtoReflect.Descriptor instead. +func (*GrpcKeyBuilder_Name) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *GrpcKeyBuilder_Name) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *GrpcKeyBuilder_Name) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +// If you wish to include the host, service, or method names as keys in the +// generated RouteLookupRequest, specify key names to use in the extra_keys +// submessage. If a key name is empty, no key will be set for that value. +// If this submessage is specified, the normal host/path fields will be left +// unset in the RouteLookupRequest. We are deprecating host/path in the +// RouteLookupRequest, so services should migrate to the ExtraKeys approach. +type GrpcKeyBuilder_ExtraKeys struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` + Method string `protobuf:"bytes,3,opt,name=method,proto3" json:"method,omitempty"` +} + +func (x *GrpcKeyBuilder_ExtraKeys) Reset() { + *x = GrpcKeyBuilder_ExtraKeys{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcKeyBuilder_ExtraKeys) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcKeyBuilder_ExtraKeys) ProtoMessage() {} + +func (x *GrpcKeyBuilder_ExtraKeys) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcKeyBuilder_ExtraKeys.ProtoReflect.Descriptor instead. 
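A small sketch tying Name and ExtraKeys together: one builder that matches every method of a single service and exposes host/service/method under explicit key names, per the migration note above. The service and key names are hypothetical, and the import path is assumed as before:

```go
package main

import (
	grpc_lookup_v1 "google.golang.org/grpc/lookup/grpc_lookup_v1"
)

// exampleGrpcKeyBuilder matches any method of one service and surfaces the
// host/service/method via ExtraKeys instead of the deprecated host/path fields.
func exampleGrpcKeyBuilder() *grpc_lookup_v1.GrpcKeyBuilder {
	return &grpc_lookup_v1.GrpcKeyBuilder{
		Names: []*grpc_lookup_v1.GrpcKeyBuilder_Name{
			{Service: "my.package.EchoService"}, // an empty Method matches any method
		},
		ExtraKeys: &grpc_lookup_v1.GrpcKeyBuilder_ExtraKeys{
			Host:    "host_key",
			Service: "service_key",
			Method:  "method_key",
		},
	}
}
```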
+func (*GrpcKeyBuilder_ExtraKeys) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *GrpcKeyBuilder_ExtraKeys) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *GrpcKeyBuilder_ExtraKeys) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *GrpcKeyBuilder_ExtraKeys) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +var File_grpc_lookup_v1_rls_config_proto protoreflect.FileDescriptor + +var file_grpc_lookup_v1_rls_config_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, + 0x2f, 0x72, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, + 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x5c, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, + 0xf0, 0x03, 0x0a, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, + 0x65, 0x72, 0x12, 0x39, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, + 0x72, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x47, 0x0a, + 0x0a, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, + 0x72, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x09, 0x65, 0x78, 0x74, + 0x72, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x35, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, + 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x55, 0x0a, + 0x0d, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, 0x79, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x1a, 0x38, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, + 
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x51, + 0x0a, 0x09, 0x45, 0x78, 0x74, 0x72, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x1a, 0x3f, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, 0x79, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xf1, 0x02, 0x0a, 0x0e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, + 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x68, 0x6f, + 0x73, 0x74, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x61, + 0x74, 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, + 0x46, 0x0a, 0x10, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x35, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x55, + 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, + 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, + 0x79, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x74, 0x4b, 0x65, 0x79, 0x73, 0x1a, 0x3f, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x74, 0x4b, 0x65, 0x79, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa6, 0x04, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x4c, 0x6f, 0x6f, 0x6b, 
0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x10, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x0f, 0x68, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x67, 0x72, 0x70, 0x63, 0x5f, + 0x6b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, + 0x72, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, + 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4f, 0x0a, 0x16, 0x6c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, + 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x36, + 0x0a, 0x09, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x74, + 0x61, 0x6c, 0x65, 0x41, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, + 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4a, 0x04, 0x08, 0x0a, + 0x10, 0x0b, 0x52, 0x1b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22, + 0x70, 0x0a, 0x1b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x51, + 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 
0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x42, 0x53, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x52, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_lookup_v1_rls_config_proto_rawDescOnce sync.Once + file_grpc_lookup_v1_rls_config_proto_rawDescData = file_grpc_lookup_v1_rls_config_proto_rawDesc +) + +func file_grpc_lookup_v1_rls_config_proto_rawDescGZIP() []byte { + file_grpc_lookup_v1_rls_config_proto_rawDescOnce.Do(func() { + file_grpc_lookup_v1_rls_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_lookup_v1_rls_config_proto_rawDescData) + }) + return file_grpc_lookup_v1_rls_config_proto_rawDescData +} + +var file_grpc_lookup_v1_rls_config_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_grpc_lookup_v1_rls_config_proto_goTypes = []interface{}{ + (*NameMatcher)(nil), // 0: grpc.lookup.v1.NameMatcher + (*GrpcKeyBuilder)(nil), // 1: grpc.lookup.v1.GrpcKeyBuilder + (*HttpKeyBuilder)(nil), // 2: grpc.lookup.v1.HttpKeyBuilder + (*RouteLookupConfig)(nil), // 3: grpc.lookup.v1.RouteLookupConfig + (*RouteLookupClusterSpecifier)(nil), // 4: grpc.lookup.v1.RouteLookupClusterSpecifier + (*GrpcKeyBuilder_Name)(nil), // 5: grpc.lookup.v1.GrpcKeyBuilder.Name + (*GrpcKeyBuilder_ExtraKeys)(nil), // 6: grpc.lookup.v1.GrpcKeyBuilder.ExtraKeys + nil, // 7: grpc.lookup.v1.GrpcKeyBuilder.ConstantKeysEntry + nil, // 8: grpc.lookup.v1.HttpKeyBuilder.ConstantKeysEntry + (*durationpb.Duration)(nil), // 9: google.protobuf.Duration +} +var file_grpc_lookup_v1_rls_config_proto_depIdxs = []int32{ + 5, // 0: grpc.lookup.v1.GrpcKeyBuilder.names:type_name -> grpc.lookup.v1.GrpcKeyBuilder.Name + 6, // 1: grpc.lookup.v1.GrpcKeyBuilder.extra_keys:type_name -> grpc.lookup.v1.GrpcKeyBuilder.ExtraKeys + 0, // 2: grpc.lookup.v1.GrpcKeyBuilder.headers:type_name -> grpc.lookup.v1.NameMatcher + 7, // 3: grpc.lookup.v1.GrpcKeyBuilder.constant_keys:type_name -> grpc.lookup.v1.GrpcKeyBuilder.ConstantKeysEntry + 0, // 4: grpc.lookup.v1.HttpKeyBuilder.query_parameters:type_name -> grpc.lookup.v1.NameMatcher + 0, // 5: grpc.lookup.v1.HttpKeyBuilder.headers:type_name -> grpc.lookup.v1.NameMatcher + 8, // 6: grpc.lookup.v1.HttpKeyBuilder.constant_keys:type_name -> grpc.lookup.v1.HttpKeyBuilder.ConstantKeysEntry + 2, // 7: grpc.lookup.v1.RouteLookupConfig.http_keybuilders:type_name -> grpc.lookup.v1.HttpKeyBuilder + 1, // 8: grpc.lookup.v1.RouteLookupConfig.grpc_keybuilders:type_name -> grpc.lookup.v1.GrpcKeyBuilder + 9, // 9: grpc.lookup.v1.RouteLookupConfig.lookup_service_timeout:type_name -> google.protobuf.Duration + 9, // 10: grpc.lookup.v1.RouteLookupConfig.max_age:type_name -> google.protobuf.Duration + 9, // 11: 
grpc.lookup.v1.RouteLookupConfig.stale_age:type_name -> google.protobuf.Duration + 3, // 12: grpc.lookup.v1.RouteLookupClusterSpecifier.route_lookup_config:type_name -> grpc.lookup.v1.RouteLookupConfig + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_grpc_lookup_v1_rls_config_proto_init() } +func file_grpc_lookup_v1_rls_config_proto_init() { + if File_grpc_lookup_v1_rls_config_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_lookup_v1_rls_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NameMatcher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcKeyBuilder); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpKeyBuilder); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteLookupConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteLookupClusterSpecifier); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcKeyBuilder_Name); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcKeyBuilder_ExtraKeys); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_lookup_v1_rls_config_proto_rawDesc, + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_lookup_v1_rls_config_proto_goTypes, + DependencyIndexes: file_grpc_lookup_v1_rls_config_proto_depIdxs, + MessageInfos: file_grpc_lookup_v1_rls_config_proto_msgTypes, + }.Build() + File_grpc_lookup_v1_rls_config_proto = out.File + file_grpc_lookup_v1_rls_config_proto_rawDesc = nil + file_grpc_lookup_v1_rls_config_proto_goTypes = nil + file_grpc_lookup_v1_rls_config_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go new file mode 100644 index 0000000000000..076b966f34460 --- /dev/null +++ 
b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go @@ -0,0 +1,121 @@ +// Copyright 2020 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.14.0 +// source: grpc/lookup/v1/rls.proto + +package grpc_lookup_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// RouteLookupServiceClient is the client API for RouteLookupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type RouteLookupServiceClient interface { + // Lookup returns a target for a single key. + RouteLookup(ctx context.Context, in *RouteLookupRequest, opts ...grpc.CallOption) (*RouteLookupResponse, error) +} + +type routeLookupServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewRouteLookupServiceClient(cc grpc.ClientConnInterface) RouteLookupServiceClient { + return &routeLookupServiceClient{cc} +} + +func (c *routeLookupServiceClient) RouteLookup(ctx context.Context, in *RouteLookupRequest, opts ...grpc.CallOption) (*RouteLookupResponse, error) { + out := new(RouteLookupResponse) + err := c.cc.Invoke(ctx, "/grpc.lookup.v1.RouteLookupService/RouteLookup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RouteLookupServiceServer is the server API for RouteLookupService service. +// All implementations must embed UnimplementedRouteLookupServiceServer +// for forward compatibility +type RouteLookupServiceServer interface { + // Lookup returns a target for a single key. + RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error) + mustEmbedUnimplementedRouteLookupServiceServer() +} + +// UnimplementedRouteLookupServiceServer must be embedded to have forward compatible implementations. +type UnimplementedRouteLookupServiceServer struct { +} + +func (UnimplementedRouteLookupServiceServer) RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RouteLookup not implemented") +} +func (UnimplementedRouteLookupServiceServer) mustEmbedUnimplementedRouteLookupServiceServer() {} + +// UnsafeRouteLookupServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to RouteLookupServiceServer will +// result in compilation errors. 
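A hedged sketch of driving this generated client directly; the address and key map are hypothetical, and the RouteLookupRequest field names (TargetType, KeyMap) come from the companion rls.pb.go rather than this file:

```go
package main

import (
	"context"
	"errors"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	grpc_lookup_v1 "google.golang.org/grpc/lookup/grpc_lookup_v1"
)

// lookupTarget asks the route lookup service for a target for one key_map.
func lookupTarget(ctx context.Context, addr string) (string, error) {
	conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return "", err
	}
	defer conn.Close()

	client := grpc_lookup_v1.NewRouteLookupServiceClient(conn)
	resp, err := client.RouteLookup(ctx, &grpc_lookup_v1.RouteLookupRequest{
		TargetType: "grpc",
		KeyMap:     map[string]string{"service_key": "my.package.EchoService"},
	})
	if err != nil {
		return "", err
	}
	if len(resp.GetTargets()) == 0 {
		return "", errors.New("RLS returned no targets")
	}
	return resp.GetTargets()[0], nil
}
```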
+type UnsafeRouteLookupServiceServer interface { + mustEmbedUnimplementedRouteLookupServiceServer() +} + +func RegisterRouteLookupServiceServer(s grpc.ServiceRegistrar, srv RouteLookupServiceServer) { + s.RegisterService(&RouteLookupService_ServiceDesc, srv) +} + +func _RouteLookupService_RouteLookup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RouteLookupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouteLookupServiceServer).RouteLookup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.lookup.v1.RouteLookupService/RouteLookup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouteLookupServiceServer).RouteLookup(ctx, req.(*RouteLookupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// RouteLookupService_ServiceDesc is the grpc.ServiceDesc for RouteLookupService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var RouteLookupService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.lookup.v1.RouteLookupService", + HandlerType: (*RouteLookupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "RouteLookup", + Handler: _RouteLookupService_RouteLookup_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc/lookup/v1/rls.proto", +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 20852e59df298..7f1a702cacbef 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -49,8 +49,9 @@ func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolv } addr := resolver.Address{Addr: endpoint} if b.scheme == unixAbstractScheme { - // prepend "\x00" to address for unix-abstract - addr.Addr = "\x00" + addr.Addr + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. + addr.Addr = "@" + addr.Addr } cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) return &nopResolver{}, nil diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 8394d252df03f..409769f48fdcc 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -137,6 +137,7 @@ type earlyAbortStream struct { streamID uint32 contentSubtype string status *status.Status + rst bool } func (*earlyAbortStream) isTransportResponseFrame() bool { return false } @@ -786,6 +787,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { return err } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } return nil } @@ -880,9 +886,9 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. 
- // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possilbe HTTP2 frame size. + // maximum possible HTTP2 frame size. if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame // Client sends out empty data frame with endStream = true diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 1c3459c2b4c58..090120925bb46 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -49,7 +49,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC // from inside an http.Handler. It requires that the http Server // supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { return nil, errors.New("gRPC requires HTTP/2") } @@ -138,7 +138,7 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats stats.Handler + stats []stats.Handler } func (ht *serverHandlerTransport) Close() { @@ -228,10 +228,10 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) if err == nil { // transport has not been closed - if ht.stats != nil { - // Note: The trailer fields are compressed with hpack after this call returns. - // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -314,10 +314,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { }) if err == nil { - if ht.stats != nil { + for _, sh := range ht.stats { // Note: The header fields are compressed with hpack after this call returns. // No WireLength field is set here. 
- ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + sh.HandleRPC(s.Context(), &stats.OutHeader{ Header: md.Copy(), Compression: s.sendCompress, }) @@ -369,14 +369,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) - if ht.stats != nil { - s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range ht.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: ht.RemoteAddr(), Compression: s.recvCompress, } - ht.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index f0c72d3371057..5c2f35b24e75b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -78,6 +78,7 @@ type http2Client struct { framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. + // Do not access controlBuf with mu held. controlBuf *controlBuffer fc *trInFlow // The scheme used: https if TLS is on, http otherwise. @@ -90,7 +91,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandler stats.Handler + statsHandlers []stats.Handler initialWindowSize int32 @@ -109,6 +110,7 @@ type http2Client struct { waitingStreams uint32 nextID uint32 + // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables state transportState activeStreams map[uint32]*Stream @@ -132,7 +134,7 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData onGoAway func(GoAwayReason) @@ -311,7 +313,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandler: opts.StatsHandler, + statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, onPrefaceReceipt: onPrefaceReceipt, nextID: 1, @@ -324,6 +326,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), } + // Add peer information to the http2client context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) if md, ok := addr.Metadata.(*metadata.MD); ok { t.md = *md @@ -341,18 +345,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - if t.statsHandler != nil { - t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -466,7 +471,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ Addr: t.remoteAddr, - AuthInfo: t.authInfo, + AuthInfo: t.authInfo, // Can be nil } } @@ -630,8 +635,8 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // the wire. However, there are two notable exceptions: // // 1. If the stream headers violate the max header list size allowed by the -// server. In this case there is no reason to retry at all, as it is -// assumed the RPC would continue to fail on subsequent attempts. +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. // 2. If the credentials errored when requesting their headers. In this case, // it's possible a retry can fix the problem, but indefinitely transparently // retrying is not appropriate as it is likely the credentials, if they can @@ -639,8 +644,7 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call type NewStreamError struct { Err error - DoNotRetry bool - DoNotTransparentRetry bool + AllowTransparentRetry bool } func (e NewStreamError) Error() string { @@ -649,11 +653,11 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true} + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -685,7 +689,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea cleanup(err) return err } - t.activeStreams[id] = s if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) @@ -719,6 +722,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.nextID += 2 s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.mu.Lock() + if t.activeStreams == nil { // Can be niled from Close(). 
+ t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. + } + t.activeStreams[s.id] = s + t.mu.Unlock() if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -744,22 +754,17 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } for { success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { - if !checkForStreamQuota(it) { - return false - } - if !checkForHeaderListSize(it) { - return false - } - return true + return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { - return nil, &NewStreamError{Err: err} + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { @@ -767,29 +772,32 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea case <-ctx.Done(): return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, &NewStreamError{Err: errStreamDrain} + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, &NewStreamError{Err: ErrConnClosing} + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if t.statsHandler != nil { + if len(t.statsHandlers) != 0 { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) } - t.statsHandler.HandleRPC(s.ctx, outHeader) } return s, nil } @@ -898,9 +906,7 @@ func (t *http2Client) Close(err error) { t.controlBuf.finish() t.cancel() t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. _, goAwayDebugMessage := t.GetGoAwayReason() @@ -917,11 +923,11 @@ func (t *http2Client) Close(err error) { for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { connEnd := &stats.ConnEnd{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } @@ -1001,13 +1007,13 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. 
func (t *http2Client) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.mu.Unlock() updateIWS := func(interface{}) bool { t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() return true } t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) @@ -1213,7 +1219,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { default: t.setGoAwayReason(f) close(t.goAway) - t.controlBuf.put(&incomingGoAway{}) + defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held. // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. @@ -1226,18 +1232,29 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { if upperLimit == 0 { // This is the first GoAway Frame. upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. } + + t.prevGoAwayID = id + if len(t.activeStreams) == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + return + } + + streamsToClose := make([]*Stream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) - t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + if streamID > id && streamID <= upperLimit { + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) + } } } - t.prevGoAwayID = id - active := len(t.activeStreams) t.mu.Unlock() - if active == 0 { - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + // Called outside t.mu because closeStream can take controlBuf's mu, which + // could induce deadlock and is not allowed. 
+ for _, stream := range streamsToClose { + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } } @@ -1433,7 +1450,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { if isHeader { inHeader := &stats.InHeader{ Client: true, @@ -1441,14 +1458,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { Header: metadata.MD(mdata).Copy(), Compression: s.recvCompress, } - t.statsHandler.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), Trailer: metadata.MD(mdata).Copy(), } - t.statsHandler.HandleRPC(s.ctx, inTrailer) + sh.HandleRPC(s.ctx, inTrailer) } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 2c6eaf0e59cfb..3dd15647bc84b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,7 +21,6 @@ package transport import ( "bytes" "context" - "errors" "fmt" "io" "math" @@ -36,6 +35,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -52,10 +52,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -82,7 +82,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats stats.Handler + stats []stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. @@ -117,7 +117,7 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. 
- channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool @@ -231,6 +231,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime @@ -252,7 +257,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*Stream), - stats: config.StatsHandler, + stats: config.StatsHandlers, kp: kp, idle: time.Now(), kep: kep, @@ -260,6 +265,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, czData: new(channelzData), bufferPool: newBufferPool(), } + // Add peer information to the http2server context. + t.ctx = peer.NewContext(t.ctx, t.getPeer()) + t.controlBuf = newControlBuffer(t.done) if dynamicWindow { t.bdpEst = &bdpEstimator{ @@ -267,20 +275,20 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - if t.stats != nil { - t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.stats { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{} - t.stats.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { @@ -443,6 +451,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) return false } @@ -479,14 +488,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } - pr := &peer.Peer{ - Addr: t.remoteAddr, - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. 
if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) @@ -516,14 +518,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if httpMethod != http.MethodPost { t.mu.Unlock() + errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + logger.Infof("transport: %v", errMsg) } - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeProtocol, - onWrite: func() {}, + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) s.cancel() return false @@ -544,6 +548,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: s.id, contentSubtype: s.contentSubtype, status: stat, + rst: !frame.StreamEnded(), }) return false } @@ -561,8 +566,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( t.adjustWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) - if t.stats != nil { - s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: t.remoteAddr, @@ -571,7 +576,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( WireLength: int(frame.Header().Length), Header: metadata.MD(mdata).Copy(), } - t.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) @@ -925,12 +930,27 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { + return t.streamContextErr(s) + } + + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } - s.hdrMu.Lock() + if md.Len() > 0 { if s.header.Len() > 0 { s.header = metadata.Join(s.header, md) @@ -939,10 +959,8 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() - return err + return status.Convert(err).Err() } - s.hdrMu.Unlock() return nil } @@ -973,14 +991,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - if t.stats != nil { + for _, sh := range t.stats { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. outHeader := &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, } - t.stats.HandleRPC(s.Context(), outHeader) + sh.HandleRPC(s.Context(), outHeader) } return nil } @@ -990,17 +1008,19 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. 
func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { return nil } - s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. if !s.updateHeaderSent() { // No headers have been sent. if len(s.header) > 0 { // Send a separate header frame. if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() return err } } else { // Send a trailer only response. @@ -1029,7 +1049,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { endStream: true, onWrite: t.setResetPingStrikes, } - s.hdrMu.Unlock() + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) if !success { if err != nil { @@ -1041,10 +1061,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - if t.stats != nil { + for _, sh := range t.stats { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -1056,23 +1076,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? - s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -1082,12 +1091,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -1210,25 +1214,19 @@ func (t *http2Server) Close() { if err := t.conn.Close(); err != nil && logger.V(logLevel) { logger.Infof("transport: error closing conn during Close: %v", err) } - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() } - if t.stats != nil { + for _, sh := range t.stats { connEnd := &stats.ConnEnd{} - t.stats.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } // deleteStream deletes the stream s from transport's active streams. 
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1250,6 +1248,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1269,6 +1272,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) @@ -1404,6 +1412,13 @@ func (t *http2Server) getOutFlowWindow() int64 { } } +func (t *http2Server) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + } +} + func getJitter(v time.Duration) time.Duration { if v == infinity { return 0 diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index d8247bcdf6924..2c601a864d995 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -20,7 +20,6 @@ package transport import ( "bufio" - "bytes" "encoding/base64" "fmt" "io" @@ -45,14 +44,8 @@ import ( const ( // http2MaxFrameLen specifies the max length of a HTTP2 frame. http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues + // https://httpwg.org/specs/rfc7540.html#SettingValues http2InitHeaderTableSize = 4096 - // baseContentType is the base content-type for gRPC. This is a valid - // content-type on it's own, but can also include a content-subtype such as - // "proto" as a suffix after "+" or ";". See - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests - // for more details. - ) var ( @@ -257,13 +250,13 @@ func encodeGrpcMessage(msg string) string { } func encodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder for len(msg) > 0 { r, size := utf8.DecodeRuneInString(msg) for _, b := range []byte(string(r)) { if size > 1 { // If size > 1, r is not ascii. Always do percent encoding. - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) continue } @@ -272,14 +265,14 @@ func encodeGrpcMessageUnchecked(msg string) string { // // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". 
if b >= spaceByte && b <= tildeByte && b != percentByte { - buf.WriteByte(b) + sb.WriteByte(b) } else { - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) } } msg = msg[size:] } - return buf.String() + return sb.String() } // decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. @@ -297,23 +290,23 @@ func decodeGrpcMessage(msg string) string { } func decodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder lenMsg := len(msg) for i := 0; i < lenMsg; i++ { c := msg[i] if c == percentByte && i+2 < lenMsg { parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) if err != nil { - buf.WriteByte(c) + sb.WriteByte(c) } else { - buf.WriteByte(byte(parsed)) + sb.WriteByte(byte(parsed)) i += 2 } } else { - buf.WriteByte(c) + sb.WriteByte(c) } } - return buf.String() + return sb.String() } type bufWriter struct { @@ -322,8 +315,6 @@ type bufWriter struct { batchSize int conn net.Conn err error - - onFlush func() } func newBufWriter(conn net.Conn, batchSize int) *bufWriter { @@ -360,9 +351,6 @@ func (w *bufWriter) Flush() error { if w.offset == 0 { return nil } - if w.onFlush != nil { - w.onFlush() - } _, w.err = w.conn.Write(w.buf[:w.offset]) w.offset = 0 return w.err diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 0c43efaa64977..6c3ba8515940e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -522,14 +523,14 @@ type ServerConfig struct { ConnectionTimeout time.Duration Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandler stats.Handler + StatsHandlers []stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 } @@ -552,8 +553,8 @@ type ConnectOptions struct { CredsBundle credentials.Bundle // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters - // StatsHandler stores the handler for stats. - StatsHandler stats.Handler + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. @@ -563,7 +564,7 @@ type ConnectOptions struct { // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. 
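The http_util.go hunks above are a pure allocation cleanup: `bytes.Buffer` becomes `strings.Builder` (whose `String()` avoids a byte-slice-to-string copy), and `fmt.Fprintf(&sb, ...)` writes straight into the builder instead of materializing an intermediate string with `Sprintf`. A standalone, simplified byte-wise sketch of the same percent-encoding scheme — printable ASCII passes through, everything else is `%XX`-escaped — assuming the same `spaceByte`/`tildeByte`/`percentByte` boundaries as the patch:

```go
package main

import (
	"fmt"
	"strings"
)

const (
	spaceByte   = ' '
	tildeByte   = '~'
	percentByte = '%'
)

// encode percent-escapes every byte outside the printable-ASCII range (and
// '%' itself), mirroring encodeGrpcMessageUnchecked's output byte for byte.
func encode(msg string) string {
	var sb strings.Builder
	for i := 0; i < len(msg); i++ {
		b := msg[i]
		if b >= spaceByte && b <= tildeByte && b != percentByte {
			sb.WriteByte(b)
		} else {
			// Fprintf writes directly into the builder; no throwaway
			// string as with sb.WriteString(fmt.Sprintf(...)).
			fmt.Fprintf(&sb, "%%%02X", b)
		}
	}
	return sb.String()
}

func main() {
	fmt.Println(encode("résumé 100%")) // r%C3%A9sum%C3%A9 100%25
}
```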
diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go index 6f30c8016e2be..9873da268db6e 100644 --- a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go @@ -122,8 +122,11 @@ func matchersFromPermissions(permissions []*v3rbacpb.Permission) ([]matcher, err } matchers = append(matchers, ¬Matcher{matcherToNot: mList[0]}) case *v3rbacpb.Permission_Metadata: - // Not supported in gRPC RBAC currently - a permission typed as - // Metadata in the initial config will be a no-op. + // Never matches - so no-op if not inverted, always match if + // inverted. + if permission.GetMetadata().GetInvert() { // Test metadata being no-op and also metadata with invert always matching + matchers = append(matchers, &alwaysMatcher{}) + } case *v3rbacpb.Permission_RequestedServerName: // Not supported in gRPC RBAC currently - a permission typed as // requested server name in the initial config will be a no-op. diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 3604c7819fdc4..98d62e0675f61 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -50,7 +50,7 @@ type MD map[string][]string // Keys beginning with "grpc-" are reserved for grpc-internal use only and may // result in errors if set in metadata. func New(m map[string]string) MD { - md := MD{} + md := make(MD, len(m)) for k, val := range m { key := strings.ToLower(k) md[key] = append(md[key], val) @@ -74,7 +74,7 @@ func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } - md := MD{} + md := make(MD, len(kv)/2) for i := 0; i < len(kv); i += 2 { key := strings.ToLower(kv[i]) md[key] = append(md[key], kv[i+1]) @@ -182,17 +182,51 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { if !ok { return nil, false } - out := MD{} + out := make(MD, len(md)) for k, v := range md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + out[key] = copyOf(v) } return out, true } +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Key must be lower-case. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + if strings.ToLower(k) == key { + return copyOf(v) + } + } + return nil +} + +// the returned slice must not be modified in place +func copyOf(v []string) []string { + vals := make([]string, len(v)) + copy(vals, v) + return vals +} + // FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. 
// // Remember to perform strings.ToLower on the keys, for both the returned MD (MD @@ -220,13 +254,18 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { return nil, false } - out := MD{} + mdSize := len(raw.md) + for i := range raw.added { + mdSize += len(raw.added[i]) / 2 + } + + out := make(MD, mdSize) for k, v := range raw.md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + out[key] = copyOf(v) } for _, added := range raw.added { if len(added)%2 == 1 { diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index e8367cb8993b2..843633c910a11 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -131,7 +131,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } if _, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, err + return nil, nil, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -175,3 +175,9 @@ func (pw *pickerWrapper) close() { pw.done = true close(pw.blockingCh) } + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 5168b62b078a8..fb7a99e0a2734 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -44,79 +44,107 @@ func (*pickfirstBuilder) Name() string { } type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) - } if logger.V(2) { logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. 
+ b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + return nil + } + + subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) - b.sc.Connect() + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } - if b.sc != sc { + if b.subConn != subConn { if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + b.state = state.ConnectivityState + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { + switch state.ConnectivityState { case connectivity.Ready: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) case connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: 
s.ConnectivityState, - Picker: &picker{err: s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } } @@ -125,8 +153,8 @@ func (b *pickfirstBalancer) Close() { } func (b *pickfirstBalancer) ExitIdle() { - if b.sc != nil && b.state == connectivity.Idle { - b.sc.Connect() + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() } } @@ -135,18 +163,18 @@ type picker struct { err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } // idlePicker is used when the SubConn is IDLE and kicks the SubConn into // CONNECTING when Pick is called. type idlePicker struct { - sc balancer.SubConn + subConn balancer.SubConn } -func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - i.sc.Connect() +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index 978b89f37a4ab..99db79fafcfb8 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -68,7 +68,6 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto ${WORKDIR}/grpc-proto/grpc/testing/*.proto ${WORKDIR}/grpc-proto/grpc/core/*.proto ) @@ -80,8 +79,7 @@ SOURCES=( # Note that the protos listed here are all for testing purposes. All protos to # be used externally should have a go_package option (and they don't need to be # listed here). -OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ -Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ @@ -121,9 +119,6 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # see grpc_testing_not_regenerate/README.md for details. rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go -# grpc/service_config/service_config.proto does not have a go_package option. -mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config - # grpc/testing does not have a go_package option. mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index e87ecd0eeb38e..efcb7f3efd82d 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -28,25 +28,40 @@ type addressMapEntry struct { // Multiple accesses may not be performed concurrently. Must be created via // NewAddressMap; do not construct directly. 
type AddressMap struct { - m map[string]addressMapEntryList + // The underlying map is keyed by an Address with fields that we don't care + // about being set to their zero values. The only fields that we care about + // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to + // distinguish between addresses with same `Addr` and `ServerName`, but + // different `Attributes`, we cannot store the `Attributes` in the map key. + // + // The comparison operation for structs work as follows: + // Struct values are comparable if all their fields are comparable. Two + // struct values are equal if their corresponding non-blank fields are equal. + // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. + m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} } type addressMapEntryList []*addressMapEntry // NewAddressMap creates a new AddressMap. func NewAddressMap() *AddressMap { - return &AddressMap{m: make(map[string]addressMapEntryList)} + return &AddressMap{m: make(map[Address]addressMapEntryList)} } // find returns the index of addr in the addressMapEntry slice, or -1 if not // present. func (l addressMapEntryList) find(addr Address) int { - if len(l) == 0 { - return -1 - } for i, entry := range l { - if entry.addr.ServerName == addr.ServerName && - entry.addr.Attributes.Equal(addr.Attributes) { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. + if entry.addr.Attributes.Equal(addr.Attributes) { return i } } @@ -55,7 +70,8 @@ func (l addressMapEntryList) find(addr Address) int { // Get returns the value for the address in the map, if present. func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { return entryList[entry].value, true } @@ -64,17 +80,19 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { // Set updates or adds the value to the address in the map. func (a *AddressMap) Set(addr Address, value interface{}) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { - a.m[addr.Addr][entry].value = value + entryList[entry].value = value return } - a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value}) + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) } // Delete removes addr from the map. func (a *AddressMap) Delete(addr Address) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] entry := entryList.find(addr) if entry == -1 { return @@ -85,7 +103,7 @@ func (a *AddressMap) Delete(addr Address) { copy(entryList[entry:], entryList[entry+1:]) entryList = entryList[:len(entryList)-1] } - a.m[addr.Addr] = entryList + a.m[addrKey] = entryList } // Len returns the number of entries in the map. @@ -107,3 +125,14 @@ func (a *AddressMap) Keys() []Address { } return ret } + +// Values returns a slice of all current map values. 
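The AddressMap rewrite above swaps the bare `Addr` string key for a stripped-down `Address` struct holding only the comparable fields (`Addr`, `ServerName`); `Attributes` cannot participate in the key because attribute values are not guaranteed to be `==`-comparable, so addresses that collide on the key are disambiguated by `Attributes.Equal` inside the entry slice. A toy version of the same two-level scheme — the `key`/`entry` types here are hypothetical stand-ins, not the resolver's:

```go
package main

import "fmt"

// key holds only comparable fields; attrs stays out of the key because it
// may wrap values that cannot be compared with == (maps, slices, funcs).
type key struct {
	addr       string
	serverName string
}

type entry struct {
	attrs map[string]string // stand-in for resolver attributes
	value interface{}
}

type addrMap map[key][]entry

func equalAttrs(a, b map[string]string) bool {
	if len(a) != len(b) {
		return false
	}
	for k, v := range a {
		if b[k] != v {
			return false
		}
	}
	return true
}

func (m addrMap) set(k key, attrs map[string]string, v interface{}) {
	for i, e := range m[k] {
		if equalAttrs(e.attrs, attrs) {
			m[k][i].value = v // same key and same attributes: update in place
			return
		}
	}
	m[k] = append(m[k], entry{attrs: attrs, value: v})
}

func main() {
	m := addrMap{}
	k := key{addr: "10.0.0.1:443", serverName: "backend"}
	m.set(k, map[string]string{"zone": "a"}, 1)
	m.set(k, map[string]string{"zone": "b"}, 2) // same key, different attrs
	fmt.Println(len(m[k]))                      // 2
}
```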
+func (a *AddressMap) Values() []interface{} { + ret := make([]interface{}, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index e28b680260627..ca2e35a3596f7 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -139,13 +140,18 @@ type Address struct { // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. -func (a *Address) Equal(o Address) bool { +func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && a.Type == o.Type && a.Metadata == o.Metadata } +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + return pretty.ToJSON(a) +} + // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 2c47cd54f07c9..05a9d4e0bac00 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,7 +19,6 @@ package grpc import ( - "fmt" "strings" "sync" @@ -27,6 +26,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -97,10 +97,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { if ccr.done.HasFired() { return nil } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) - } + ccr.addChannelzTraceEvent(s) ccr.curState = s if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { return balancer.ErrBadResolverState @@ -125,10 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) ccr.curState.Addresses = addrs ccr.cc.updateResolverState(ccr.curState, nil) } @@ -141,7 +135,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) if ccr.cc.dopts.disableServiceConfig { channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return @@ -151,9 +145,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { channelz.Warningf(logger, 
ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) return } - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) ccr.curState.ServiceConfig = scpr ccr.cc.updateResolverState(ccr.curState, nil) } @@ -180,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtInfo, - }) + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index b24b6d53958d4..f4dde72b41f89 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -73,6 +73,14 @@ func init() { internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } + internal.AddGlobalServerOptions = func(opt ...ServerOption) { + extraServerOptions = append(extraServerOptions, opt...) + } + internal.ClearGlobalServerOptions = func() { + extraServerOptions = nil + } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption } var statusOK = status.New(codes.OK, "") @@ -134,7 +142,7 @@ type Server struct { channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData serverWorkerChannels []chan *serverWorkerData @@ -149,8 +157,9 @@ type serverOptions struct { streamInt StreamServerInterceptor chainUnaryInts []UnaryServerInterceptor chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger inTapHandle tap.ServerInHandle - statsHandler stats.Handler + statsHandlers []stats.Handler maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int @@ -174,6 +183,7 @@ var defaultServerOptions = serverOptions{ writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, } +var extraServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -183,7 +193,7 @@ type ServerOption interface { // EmptyServerOption does not alter the server configuration. It can be embedded // in another structure to build custom server options. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -207,6 +217,22 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption { } } +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + // WriteBufferSize determines how much data can be batched before doing a write on the wire. 
// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. // The default value for this buffer is 32KB. @@ -298,7 +324,7 @@ func CustomCodec(codec Codec) ServerOption { // https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. // Will be supported throughout 1.x. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -419,7 +445,7 @@ func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOptio // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -435,7 +461,21 @@ func InTapHandle(h tap.ServerInHandle) ServerOption { // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.statsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.statsHandlers = append(o.statsHandlers, h) + }) +} + +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. +func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl }) } @@ -462,7 +502,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { // new connections. If this is not set, the default is 120 seconds. A zero or // negative value will result in an immediate timeout. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -483,7 +523,7 @@ func MaxHeaderListSize(s uint32) ServerOption { // HeaderTableSize returns a ServerOption that sets the size of dynamic // header table for stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -498,7 +538,7 @@ func HeaderTableSize(s uint32) ServerOption { // zero (default) will disable workers and spawn a new goroutine for each // stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -560,6 +600,9 @@ func (s *Server) stopServerWorkers() { // started to accept requests yet. 
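`joinServerOption` above is a small composite: applying it simply applies each wrapped option in order, which lets internal packages (via `internal.JoinServerOptions`) hand the server several options as a single value, and is also what makes the new global `extraServerOptions` slice workable. The shape is easy to replicate for any functional-options API; a generic sketch under illustrative `Option`/`Config` names:

```go
package main

import "fmt"

type Config struct{ reads, writes int }

type Option interface{ apply(*Config) }

type optionFunc func(*Config)

func (f optionFunc) apply(c *Config) { f(c) }

func WithReads(n int) Option  { return optionFunc(func(c *Config) { c.reads = n }) }
func WithWrites(n int) Option { return optionFunc(func(c *Config) { c.writes = n }) }

// join bundles several options into one, mirroring joinServerOption:
// applying the bundle applies each member in order.
func join(opts ...Option) Option {
	return optionFunc(func(c *Config) {
		for _, o := range opts {
			o.apply(c)
		}
	})
}

func main() {
	var c Config
	preset := join(WithReads(4), WithWrites(8)) // one value, two options
	preset.apply(&c)
	fmt.Printf("%+v\n", c) // {reads:4 writes:8}
}
```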
func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions + for _, o := range extraServerOptions { + o.apply(&opts) + } for _, o := range opt { o.apply(&opts) } @@ -584,9 +627,8 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") return s } @@ -712,7 +754,7 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped") type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -724,9 +766,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -759,11 +800,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -773,8 +809,16 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -866,7 +910,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ConnectionTimeout: s.opts.connectionTimeout, Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, + StatsHandlers: s.opts.statsHandlers, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -887,7 +931,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { if err != credentials.ErrConnDispatched { // Don't log on ErrConnDispatched and io.EOF to prevent log spam. if err != io.EOF { - channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) } c.Close() } @@ -945,24 +989,24 @@ var _ http.Handler = (*Server)(nil) // To share one port (such as 443 for https) between gRPC and an // existing http.Handler, use a root http.Handler such as: // -// if r.ProtoMajor == 2 && strings.HasPrefix( -// r.Header.Get("Content-Type"), "application/grpc") { -// grpcServer.ServeHTTP(w, r) -// } else { -// yourMux.ServeHTTP(w, r) -// } +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } // // Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally // separate from grpc-go's HTTP/2 server. Performance and features may vary // between the two paths. 
ServeHTTP does not support some gRPC features // available through grpc-go's HTTP/2 server. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -1075,8 +1119,10 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) - if err == nil && s.opts.statsHandler != nil { - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + if err == nil { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } } return err } @@ -1123,13 +1169,13 @@ func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerIn } func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - sh := s.opts.statsHandler - if sh != nil || trInfo != nil || channelz.IsOn() { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - if sh != nil { + for _, sh := range shs { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, @@ -1160,7 +1206,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.Finish() } - if sh != nil { + for _, sh := range shs { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1180,9 +1226,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } }() } - - binlog := binarylog.GetMethodLogger(stream.Method()) - if binlog != nil { + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ @@ -1202,7 +1255,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if peer, ok := peer.FromContext(ctx); ok { logEntry.PeerAddr = peer.Addr } - binlog.Log(logEntry) + for _, binlog := range binlogs { + binlog.Log(logEntry) + } } // comp and cp are used for compression. decomp and dc are used for @@ -1242,7 +1297,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } var payInfo *payloadInfo - if sh != nil || binlog != nil { + if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) @@ -1259,7 +1314,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - if sh != nil { + for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ RecvTime: time.Now(), Payload: v, @@ -1268,10 +1323,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Length: len(d), }) } - if binlog != nil { - binlog.Log(&binarylog.ClientMessage{ + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: d, - }) + } + for _, binlog := range binlogs { + binlog.Log(cm) + } } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) @@ -1295,18 +1353,24 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if e := t.WriteStatus(stream, appStatus); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } - if binlog != nil { + if len(binlogs) != 0 { if h, _ := stream.Header(); h.Len() > 0 { // Only log serverHeader if there was header. Otherwise it can // be trailer only. - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + } } - binlog.Log(&binarylog.ServerTrailer{ + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(st) + } } return appErr } @@ -1332,26 +1396,34 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerTrailer{ + } + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + binlog.Log(st) + } } return err } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerMessage{ + } + sm := &binarylog.ServerMessage{ Message: reply, - }) + } + for _, binlog := range binlogs { + binlog.Log(sh) + binlog.Log(sm) + } } if channelz.IsOn() { t.IncrMsgSent() @@ -1363,11 +1435,14 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? 
err = t.WriteStatus(stream, statusOK) - if binlog != nil { - binlog.Log(&binarylog.ServerTrailer{ + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(st) + } } return err } @@ -1417,16 +1492,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if channelz.IsOn() { s.incrCallsStarted() } - sh := s.opts.statsHandler + shs := s.opts.statsHandlers var statsBegin *stats.Begin - if sh != nil { + if len(shs) != 0 { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - sh.HandleRPC(stream.Context(), statsBegin) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), statsBegin) + } } ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ @@ -1438,10 +1515,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: sh, + statsHandler: shs, } - if sh != nil || trInfo != nil || channelz.IsOn() { + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. defer func() { if trInfo != nil { @@ -1455,7 +1532,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } - if sh != nil { + if len(shs) != 0 { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1463,7 +1540,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), end) + } } if channelz.IsOn() { @@ -1476,8 +1555,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp }() } - ss.binlog = binarylog.GetMethodLogger(stream.Method()) - if ss.binlog != nil { + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1496,7 +1582,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if peer, ok := peer.FromContext(ss.Context()); ok { logEntry.PeerAddr = peer.Addr } - ss.binlog.Log(logEntry) + for _, binlog := range ss.binlogs { + binlog.Log(logEntry) + } } // If dc is set and matches the stream's compression, use it. Otherwise, try @@ -1562,11 +1650,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } t.WriteStatus(ss.s, appStatus) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(st) + } } // TODO: Should we log an error from WriteStatus here and below? 
return appErr @@ -1577,11 +1668,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } err = t.WriteStatus(ss.s, statusOK) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(st) + } } return err } @@ -1657,7 +1751,7 @@ type streamKey struct{} // NewContextWithServerTransportStream creates a new context from ctx and // attaches stream to it. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1672,7 +1766,7 @@ func NewContextWithServerTransportStream(ctx context.Context, stream ServerTrans // // See also NewContextWithServerTransportStream. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1687,7 +1781,7 @@ type ServerTransportStream interface { // ctx. Returns nil if the given context has no stream associated with it // (which implies it is not an RPC invocation context). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1709,11 +1803,7 @@ func (s *Server) Stop() { s.done.Fire() }() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() listeners := s.lis @@ -1751,11 +1841,7 @@ func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() @@ -1808,12 +1894,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. 
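The rewritten SetHeader/SendHeader/SetTrailer comments above pin down exactly when server-set headers leave the wire: headers merge across calls and are flushed by an explicit `SendHeader`, by the first response message, or by the final status, whichever comes first. In practice that means all `SetHeader` calls must happen before a unary handler returns (or, for streams, before the first `SendMsg`/`SendHeader`), while trailers may be set any time before the handler returns. A minimal unary handler respecting that ordering — the service and message types are placeholders:

```go
package server

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// Placeholder types standing in for a generated service; the metadata calls
// in the handler body are the point of the example.
type echoServer struct{}
type EchoRequest struct{ Message string }
type EchoResponse struct{ Message string }

func (s *echoServer) Echo(ctx context.Context, req *EchoRequest) (*EchoResponse, error) {
	// SetHeader may be called any number of times before headers are sent;
	// the merged result goes out when the handler returns (at the latest),
	// so both calls below land in one header block.
	if err := grpc.SetHeader(ctx, metadata.Pairs("region", "eu-west")); err != nil {
		return nil, err
	}
	if err := grpc.SetHeader(ctx, metadata.Pairs("cache", "miss")); err != nil {
		return nil, err
	}
	// Trailers can be set right up until the handler returns.
	if err := grpc.SetTrailer(ctx, metadata.Pairs("processed-units", "7")); err != nil {
		return nil, err
	}
	return &EchoResponse{Message: req.Message}, nil
}
```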
func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1825,8 +1925,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1840,6 +1946,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 6926a06dc523b..01bbb2025aed4 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -57,10 +57,9 @@ type lbConfig struct { type ServiceConfig struct { serviceconfig.Config - // LB is the load balancer the service providers recommends. The balancer - // specified via grpc.WithBalancerName will override this. This is deprecated; - // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig - // will be used. + // LB is the load balancer the service providers recommends. This is + // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, + // lbConfig will be used. LB *string // lbConfig is the service config's load balancing configuration. If @@ -381,6 +380,9 @@ func init() { // // If any of them is NOT *ServiceConfig, return false. func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 8cdd652e037b9..0c16cfb2ea807 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/transport" @@ -139,13 +140,13 @@ type ClientStream interface { // To ensure resources are not leaked due to the stream returned, one of the following // actions must be performed: // -// 1. Call Close on the ClientConn. -// 2. Cancel the context provided. -// 3. Call RecvMsg until a non-nil error is returned. 
A protobuf-generated -// client-streaming RPC, for instance, might use the helper function -// CloseAndRecv (note that CloseSend does not Recv, therefore is not -// guaranteed to release all resources). -// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. // // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. @@ -166,6 +167,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -295,20 +301,35 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) } - cs.binlog = binarylog.GetMethodLogger(method) - - if err := cs.newAttemptLocked(false /* isTransparent */); err != nil { - cs.finish(err) - return nil, err + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } } - op := func(a *csAttempt) error { return a.newStream() } + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. + op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) return nil, err } - if cs.binlog != nil { + if len(cs.binlogs) != 0 { md, _ := metadata.FromOutgoingContext(ctx) logEntry := &binarylog.ClientHeader{ OnClientSide: true, @@ -322,7 +343,9 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client logEntry.Timeout = 0 } } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } if desc != unaryStreamDesc { @@ -343,14 +366,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. -func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { +// newAttemptLocked creates a new csAttempt without a transport or stream. 
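The client-stream rework above folds transport selection into the retried operation: the first `op` calls `getTransport` and then `newStream`, and only assigns `cs.attempt` on success, so a failed pick or failed stream creation replays through the same `withRetry` machinery as any later write instead of failing the RPC up front. A stripped-down sketch of that shape — the names are illustrative, and the real loop also buffers and replays prior operations on the new attempt:

```go
package main

import (
	"errors"
	"fmt"
)

type attempt struct{ id int }

// withRetry runs op, creating a fresh attempt and replaying while
// shouldRetry says so. This mirrors the patch's shape: stream creation is
// itself an op, so picking a transport participates in the retry loop.
func withRetry(newAttempt func() *attempt, op func(*attempt) error, shouldRetry func(error) bool) error {
	a := newAttempt()
	for {
		err := op(a)
		if err == nil || !shouldRetry(err) {
			return err
		}
		a = newAttempt() // replay on a brand-new attempt
	}
}

func main() {
	n := 0
	newAttempt := func() *attempt { n++; return &attempt{id: n} }
	op := func(a *attempt) error {
		if a.id < 3 {
			return errors.New("transient")
		}
		fmt.Println("succeeded on attempt", a.id)
		return nil
	}
	err := withRetry(newAttempt, op, func(err error) bool { return err.Error() == "transient" })
	fmt.Println("err:", err) // err: <nil>
}
```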
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) + } + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) method := cs.callHdr.Method - sh := cs.cc.dopts.copts.StatsHandler var beginTime time.Time - if sh != nil { + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) beginTime = time.Now() begin := &stats.Begin{ @@ -379,27 +408,6 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { ctx = trace.NewContext(ctx, trInfo.tr) } - newAttempt := &csAttempt{ - ctx: ctx, - beginTime: beginTime, - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, - } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. - newAttempt.finish(retErr) - } - }() - - if err := ctx.Err(); err != nil { - return toRPCErr(err) - } - if cs.cc.parsedTarget.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. @@ -407,16 +415,32 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } - t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } @@ -425,12 +449,21 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - // Return without converting to an RPC error so retry code can - // inspect. - return err + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. + return err + } + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.p = &parser{r: s} return nil } @@ -456,7 +489,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlogs []binarylog.MethodLogger // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -506,8 +539,13 @@ type csAttempt struct { // and cleared when the finish method is called. 
trInfo *traceInfo - statsHandler stats.Handler - beginTime time.Time + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { @@ -527,41 +565,21 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. If the RPC should be // retried, the bool indicates whether it is being retried transparently. -func (cs *clientStream) shouldRetry(err error) (bool, error) { - if cs.attempt.s == nil { - // Error from NewClientStream. - nse, ok := err.(*transport.NewStreamError) - if !ok { - // Unexpected, but assume no I/O was performed and the RPC is not - // fatal, so retry indefinitely. - return true, nil - } - - // Unwrap and convert error. - err = toRPCErr(nse.Err) - - // Never retry DoNotRetry errors, which indicate the RPC should not be - // retried due to max header list size violation, etc. - if nse.DoNotRetry { - return false, err - } +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs - // In the event of a non-IO operation error from NewStream, we never - // attempted to write anything to the wire, so we can retry - // indefinitely. - if !nse.DoNotTransparentRetry { - return true, nil - } - } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. return false, err } + if a.s == nil && a.allowTransparentRetry { + return true, nil + } // Wait for the trailers. unprocessed := false - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - unprocessed = cs.attempt.s.Unprocessed() + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. @@ -573,14 +591,14 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if !cs.attempt.s.TrailersOnly() { + if a.s != nil { + if !a.s.TrailersOnly() { return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. - sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { @@ -597,10 +615,10 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } rp := cs.methodConfig.RetryPolicy @@ -645,19 +663,24 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } // Returns nil if a retry was performed and succeeded; error otherwise. 
-func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(toRPCErr(lastErr)) - isTransparent, err := cs.shouldRetry(lastErr) + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(isTransparent); err != nil { + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -667,7 +690,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. - return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -681,6 +707,18 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. return toRPCErr(op(cs.attempt)) } + if len(cs.buffer) == 0 { + // For the first op, which controls creation of the stream and + // assigns cs.attempt, we need to create a new attempt inline + // before executing the first op. On subsequent ops, the attempt + // is created immediately before replaying the ops. + var err error + if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.mu.Unlock() + cs.finish(err) + return err + } + } a := cs.attempt cs.mu.Unlock() err := op(a) @@ -697,7 +735,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -715,7 +753,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { cs.finish(err) return nil, err } - if cs.binlog != nil && !cs.serverHeaderBinlogged { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Only log if binary log is on and header has not been logged. logEntry := &binarylog.ServerHeader{ OnClientSide: true, @@ -725,10 +763,12 @@ func (cs *clientStream) Header() (metadata.MD, error) { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } - return m, err + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -746,10 +786,9 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { for _, f := range cs.buffer { - if err := f(a); err != nil { + if err := f(attempt); err != nil { return err } } @@ -797,47 +836,48 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payload), *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. - m, data = nil, nil - return err + return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ClientMessage{ + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, - }) + Message: data, + } + for _, binlog := range cs.binlogs { + binlog.Log(cm) + } } - return + return err } func (cs *clientStream) RecvMsg(m interface{}) error { - if cs.binlog != nil && !cs.serverHeaderBinlogged { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() } var recvInfo *payloadInfo - if cs.binlog != nil { + if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) }, cs.commitAttemptLocked) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ServerMessage{ + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ OnClientSide: true, Message: recvInfo.uncompressedBytes, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(sm) + } } if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - if cs.binlog != nil { + if len(cs.binlogs) != 0 { // finish will not log Trailer. Log Trailer here. logEntry := &binarylog.ServerTrailer{ OnClientSide: true, @@ -850,7 +890,9 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(logEntry) + } } } return err @@ -871,10 +913,13 @@ func (cs *clientStream) CloseSend() error { return nil } cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) - if cs.binlog != nil { - cs.binlog.Log(&binarylog.ClientHalfClose{ + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(chc) + } } // We never returned an error here for reasons. return nil @@ -907,10 +952,13 @@ func (cs *clientStream) finish(err error) { // // Only one of cancel or trailer needs to be logged. In the cases where // users don't call RecvMsg, users must have already canceled the RPC. 
- if cs.binlog != nil && status.Code(err) == codes.Canceled { - cs.binlog.Log(&binarylog.Cancel{ + if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { + c := &binarylog.Cancel{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(c) + } } if err == nil { cs.retryThrottler.successfulRPC() } @@ -943,8 +991,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { } return io.EOF } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -954,7 +1002,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { cs := a.cs - if a.statsHandler != nil && payInfo == nil { + if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} } @@ -982,6 +1030,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } return io.EOF // indicates successful end of stream. } + return toRPCErr(err) } if a.trInfo != nil { @@ -991,8 +1040,8 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -1051,7 +1100,7 @@ func (a *csAttempt) finish(err error) { ServerLoad: balancerload.Parse(tr), }) } - if a.statsHandler != nil { + for _, sh := range a.statsHandlers { end := &stats.End{ Client: true, BeginTime: a.beginTime, @@ -1059,7 +1108,7 @@ func (a *csAttempt) finish(err error) { Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.ctx, end) + sh.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1364,8 +1413,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When called multiple times, all the provided metadata will be merged. @@ -1426,9 +1477,9 @@ type serverStream struct { maxSendMessageSize int trInfo *traceInfo - statsHandler stats.Handler + statsHandler []stats.Handler - binlog *binarylog.MethodLogger + binlogs []binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send().
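The stream.go hunks above replace the single stats handler and binary logger with slices (statsHandlers, binlogs) that are fanned out at every callback site. The following is a minimal client-side sketch of what that enables, assuming grpc-go v1.50's WithStatsHandler dial option appends to that slice (the option itself is not part of this diff); the loggingHandler type and the target address are hypothetical:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// loggingHandler is a hypothetical stats.Handler that only logs RPC ends.
type loggingHandler struct{ name string }

func (h *loggingHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}

func (h *loggingHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	if _, ok := s.(*stats.End); ok {
		log.Printf("%s: RPC finished", h.name)
	}
}

func (h *loggingHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}

func (h *loggingHandler) HandleConn(context.Context, stats.ConnStats) {}

func main() {
	// With the slice-based fields above, every registered handler receives
	// TagRPC/HandleRPC for each attempt; previously only one handler could
	// be configured per ClientConn.
	conn, err := grpc.Dial("localhost:50051", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(&loggingHandler{name: "metrics"}),
		grpc.WithStatsHandler(&loggingHandler{name: "tracing"}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}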
@@ -1448,17 +1499,29 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) - if ss.binlog != nil && !ss.serverHeaderBinlogged { + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(sh) + } } return err } @@ -1467,6 +1530,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } @@ -1512,20 +1578,28 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if ss.binlog != nil { + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(sh) + } } - ss.binlog.Log(&binarylog.ServerMessage{ + sm := &binarylog.ServerMessage{ Message: data, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(sm) + } } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } } return nil } @@ -1559,13 +1633,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } }() var payInfo *payloadInfo - if ss.statsHandler != nil || ss.binlog != nil { + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} } if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { if err == io.EOF { - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientHalfClose{}) + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(chc) + } } return err } @@ -1574,20 +1651,25 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return toRPCErr(err) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), - }) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. 
+ Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), + }) + } } - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientMessage{ + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: payInfo.uncompressedBytes, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(cm) + } } return nil } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 5bd4f534c1d3a..d472ca64307bb 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.45.0" +const Version = "1.50.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index ceb436c6ce47b..c3fc8253b13a6 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -147,7 +147,6 @@ grpc.NewGZIPDecompressor grpc.RPCCompressor grpc.RPCDecompressor grpc.ServiceConfig -grpc.WithBalancerName grpc.WithCompressor grpc.WithDecompressor grpc.WithDialer diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go index 8d81aced2dd56..68ed789f2a4de 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go @@ -20,10 +20,11 @@ package balancer import ( - _ "google.golang.org/grpc/balancer/weightedtarget" // Register the weighted_target balancer - _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer - _ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer - _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer - _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the xds_cluster_resolver balancer - _ "google.golang.org/grpc/xds/internal/balancer/priority" // Register the priority balancer + _ "google.golang.org/grpc/balancer/weightedtarget" // Register the weighted_target balancer + _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer + _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the xds_cluster_resolver balancer + _ "google.golang.org/grpc/xds/internal/balancer/outlierdetection" // Register the outlier_detection balancer + _ "google.golang.org/grpc/xds/internal/balancer/priority" // Register the priority balancer ) diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 0be796c47bade..14c1c2e769aab 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal/buffer" xdsinternal "google.golang.org/grpc/internal/credentials/xds" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" 
"google.golang.org/grpc/internal/pretty" @@ -36,6 +37,7 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/clusterresolver" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -270,6 +272,52 @@ func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanc return provider, nil } +func outlierDetectionToConfig(od *xdsresource.OutlierDetection) outlierdetection.LBConfig { // Already validated - no need to return error + if od == nil { + // "If the outlier_detection field is not set in the Cluster message, a + // "no-op" outlier_detection config will be generated, with interval set + // to the maximum possible value and all other fields unset." - A50 + return outlierdetection.LBConfig{ + Interval: 1<<63 - 1, + } + } + + // "if the enforcing_success_rate field is set to 0, the config + // success_rate_ejection field will be null and all success_rate_* fields + // will be ignored." - A50 + var sre *outlierdetection.SuccessRateEjection + if od.EnforcingSuccessRate != 0 { + sre = &outlierdetection.SuccessRateEjection{ + StdevFactor: od.SuccessRateStdevFactor, + EnforcementPercentage: od.EnforcingSuccessRate, + MinimumHosts: od.SuccessRateMinimumHosts, + RequestVolume: od.SuccessRateRequestVolume, + } + } + + // "If the enforcing_failure_percent field is set to 0 or null, the config + // failure_percent_ejection field will be null and all failure_percent_* + // fields will be ignored." - A50 + var fpe *outlierdetection.FailurePercentageEjection + if od.EnforcingFailurePercentage != 0 { + fpe = &outlierdetection.FailurePercentageEjection{ + Threshold: od.FailurePercentageThreshold, + EnforcementPercentage: od.EnforcingFailurePercentage, + MinimumHosts: od.FailurePercentageMinimumHosts, + RequestVolume: od.FailurePercentageRequestVolume, + } + } + + return outlierdetection.LBConfig{ + Interval: od.Interval, + BaseEjectionTime: od.BaseEjectionTime, + MaxEjectionTime: od.MaxEjectionTime, + MaxEjectionPercent: od.MaxEjectionPercent, + SuccessRateEjection: sre, + FailurePercentageEjection: fpe, + } +} + // handleWatchUpdate handles a watch update from the xDS Client. Good updates // lead to clientConn updates being invoked on the underlying cluster_resolver balancer. 
func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { @@ -342,6 +390,9 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { default: b.logger.Infof("unexpected cluster type %v when handling update from cluster handler", cu.ClusterType) } + if envconfig.XDSOutlierDetection { + dms[i].OutlierDetection = outlierDetectionToConfig(cu.OutlierDetection) + } } lbCfg := &clusterresolver.LBConfig{ DiscoveryMechanisms: dms, diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go index a10d8d772f2be..234511a45dcf9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -24,7 +24,12 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") +const maxDepth = 16 + +var ( + errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") + errExceedsMaxDepth = errors.New("aggregate cluster graph exceeds max depth") +) // clusterHandlerUpdate wraps the information received from the registered CDS // watcher. A non-nil error is propagated to the underlying cluster_resolver @@ -54,9 +59,10 @@ type clusterHandler struct { // A mutex to protect entire tree of clusters. clusterMutex sync.Mutex - root *clusterNode rootClusterName string + createdClusters map[string]*clusterNode + // A way to ping CDS Balancer about any updates or errors to a Node in the // tree. This will either get called from this handler constructing an // update or from a child with an error. Capacity of one as the only update @@ -66,39 +72,48 @@ type clusterHandler struct { func newClusterHandler(parent *cdsBalancer) *clusterHandler { return &clusterHandler{ - parent: parent, - updateChannel: make(chan clusterHandlerUpdate, 1), + parent: parent, + updateChannel: make(chan clusterHandlerUpdate, 1), + createdClusters: make(map[string]*clusterNode), } } func (ch *clusterHandler) updateRootCluster(rootClusterName string) { ch.clusterMutex.Lock() defer ch.clusterMutex.Unlock() - if ch.root == nil { + if ch.createdClusters[ch.rootClusterName] == nil { // Construct a root node on first update. - ch.root = createClusterNode(rootClusterName, ch.parent.xdsClient, ch) + createClusterNode(rootClusterName, ch.parent.xdsClient, ch, 0) ch.rootClusterName = rootClusterName return } // Check if root cluster was changed. If it was, delete old one and start // new one, if not do nothing. if rootClusterName != ch.rootClusterName { - ch.root.delete() - ch.root = createClusterNode(rootClusterName, ch.parent.xdsClient, ch) + ch.createdClusters[ch.rootClusterName].delete() + createClusterNode(rootClusterName, ch.parent.xdsClient, ch, 0) ch.rootClusterName = rootClusterName } } // This function tries to construct a cluster update to send to CDS. func (ch *clusterHandler) constructClusterUpdate() { - if ch.root == nil { + if ch.createdClusters[ch.rootClusterName] == nil { // If root is nil, this handler is closed, ignore the update. 
return } - clusterUpdate, err := ch.root.constructClusterUpdate() + clusterUpdate, err := ch.createdClusters[ch.rootClusterName].constructClusterUpdate(make(map[string]bool)) if err != nil { - // If there was an error received no op, as this simply means one of the - // children hasn't received an update yet. + // If an error was received, no-op, as this can mean one of the + // children hasn't received an update yet, or the graph continued to + // stay in an error state. If the graph continues to stay in an error + // state, no new error needs to be written to the update buffer as that + // would be redundant information. + return + } + if clusterUpdate == nil { + // This means that there was an aggregated cluster with no EDS or DNS as + // leaf nodes. No update to be written. return } // For a ClusterUpdate, the only update CDS cares about is the most @@ -109,8 +124,8 @@ func (ch *clusterHandler) constructClusterUpdate() { default: } ch.updateChannel <- clusterHandlerUpdate{ - securityCfg: ch.root.clusterUpdate.SecurityCfg, - lbPolicy: ch.root.clusterUpdate.LBPolicy, + securityCfg: ch.createdClusters[ch.rootClusterName].clusterUpdate.SecurityCfg, + lbPolicy: ch.createdClusters[ch.rootClusterName].clusterUpdate.LBPolicy, updates: clusterUpdate, } } @@ -120,11 +135,10 @@ func (ch *clusterHandler) constructClusterUpdate() { func (ch *clusterHandler) close() { ch.clusterMutex.Lock() defer ch.clusterMutex.Unlock() - if ch.root == nil { + if ch.createdClusters[ch.rootClusterName] == nil { return } - ch.root.delete() - ch.root = nil + ch.createdClusters[ch.rootClusterName].delete() ch.rootClusterName = "" } @@ -136,7 +150,7 @@ type clusterNode struct { cancelFunc func() // A list of children, as the Node can be an aggregate Cluster. - children []*clusterNode + children []string // A ClusterUpdate in order to build a list of cluster updates for CDS to // send down to child XdsClusterResolverLoadBalancingPolicy. @@ -149,13 +163,30 @@ type clusterNode struct { receivedUpdate bool clusterHandler *clusterHandler + + depth int32 + refCount int32 + + // maxDepthErr is set if this cluster node is an aggregate cluster and has a + // child that causes the graph to exceed the maximum depth allowed. This is + // used to show a cluster graph as being in an error state when it constructs + // a cluster update. + maxDepthErr error } // CreateClusterNode creates a cluster node from a given clusterName. This will // also start the watch for that cluster. -func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLevelHandler *clusterHandler) *clusterNode { +func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLevelHandler *clusterHandler, depth int32) { + // If the cluster has already been created, simply return, which ignores + // duplicates. + if topLevelHandler.createdClusters[clusterName] != nil { + topLevelHandler.createdClusters[clusterName].refCount++ + return + } c := &clusterNode{ clusterHandler: topLevelHandler, + depth: depth, + refCount: 1, } // Communicate with the xds client here. topLevelHandler.parent.logger.Infof("CDS watch started on %v", clusterName) @@ -164,25 +195,43 @@ func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLev topLevelHandler.parent.logger.Infof("CDS watch canceled on %v", clusterName) cancel() } - return c + topLevelHandler.createdClusters[clusterName] = c } // This function cancels the cluster watch on the cluster and all of its // children.
func (c *clusterNode) delete() { - c.cancelFunc() - for _, child := range c.children { - child.delete() + c.refCount-- + if c.refCount == 0 { + c.cancelFunc() + delete(c.clusterHandler.createdClusters, c.clusterUpdate.ClusterName) + for _, child := range c.children { + if c.clusterHandler.createdClusters[child] != nil { + c.clusterHandler.createdClusters[child].delete() + } + } } } // Construct cluster update (potentially a list of ClusterUpdates) for a node. -func (c *clusterNode) constructClusterUpdate() ([]xdsresource.ClusterUpdate, error) { +func (c *clusterNode) constructClusterUpdate(clustersSeen map[string]bool) ([]xdsresource.ClusterUpdate, error) { // If the cluster has not yet received an update, the cluster update is not // yet ready. if !c.receivedUpdate { return nil, errNotReceivedUpdate } + if c.maxDepthErr != nil { + return nil, c.maxDepthErr + } + // Ignore duplicates. It's ok to ignore duplicates because the second + // occurrence of a cluster will never be used. I.e. in [C, D, C], the second + // C will never be used (the only way to fall back to lower priority D is if + // C is down, which means second C will never be chosen). Thus, [C, D, C] is + // logically equivalent to [C, D]. + if clustersSeen[c.clusterUpdate.ClusterName] { + return []xdsresource.ClusterUpdate{}, nil + } + clustersSeen[c.clusterUpdate.ClusterName] = true // Base case - LogicalDNS or EDS. Both of these cluster types will be tied // to a single ClusterUpdate. @@ -194,7 +243,7 @@ func (c *clusterNode) constructClusterUpdate() ([]xdsresource.ClusterUpdate, err // its children. var childrenUpdates []xdsresource.ClusterUpdate for _, child := range c.children { - childUpdateList, err := child.constructClusterUpdate() + childUpdateList, err := c.clusterHandler.createdClusters[child].constructClusterUpdate(clustersSeen) if err != nil { return nil, err } @@ -219,6 +268,8 @@ func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err er default: } c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: err} + c.receivedUpdate = false + c.maxDepthErr = nil return } @@ -233,9 +284,10 @@ func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err er // cluster. if clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate { for _, child := range c.children { - child.delete() + c.clusterHandler.createdClusters[child].delete() } c.children = nil + c.maxDepthErr = nil // This is an update in the one leaf node, should try to send an update // to the parent CDS balancer. // @@ -248,6 +300,22 @@ func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err er } // Aggregate cluster handling. + if len(clusterUpdate.PrioritizedClusterNames) >= 1 { + if c.depth == maxDepth-1 { + // For a ClusterUpdate, the only update CDS cares about is the most + // recent one, so opportunistically drain the update channel before + // sending the new update. + select { + case <-c.clusterHandler.updateChannel: + default: + } + c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: errExceedsMaxDepth} + c.children = []string{} + c.maxDepthErr = errExceedsMaxDepth + return + } + } + newChildren := make(map[string]bool) for _, childName := range clusterUpdate.PrioritizedClusterNames { newChildren[childName] = true @@ -261,59 +329,42 @@ func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err er // the update to build (ex. if a child is created and a watch is started, // that child hasn't received an update yet due to the mutex lock on this // callback).
- var createdChild, deletedChild bool + var createdChild bool // This map will represent the current children of the cluster. It will be // first added to in order to represent the new children. It will then have - // any children deleted that are no longer present. Then, from the cluster - // update received, will be used to construct the new child list. - mapCurrentChildren := make(map[string]*clusterNode) + // any children deleted that are no longer present. + mapCurrentChildren := make(map[string]bool) for _, child := range c.children { - mapCurrentChildren[child.clusterUpdate.ClusterName] = child + mapCurrentChildren[child] = true } // Add and construct any new child nodes. for child := range newChildren { if _, inChildrenAlready := mapCurrentChildren[child]; !inChildrenAlready { - createdChild = true - mapCurrentChildren[child] = createClusterNode(child, c.clusterHandler.parent.xdsClient, c.clusterHandler) + createClusterNode(child, c.clusterHandler.parent.xdsClient, c.clusterHandler, c.depth+1) } } // Delete any child nodes no longer in the aggregate cluster's children. for child := range mapCurrentChildren { if _, stillAChild := newChildren[child]; !stillAChild { - deletedChild = true - mapCurrentChildren[child].delete() + c.clusterHandler.createdClusters[child].delete() delete(mapCurrentChildren, child) } } - // The order of the children list matters, so use the clusterUpdate from - // xdsclient as the ordering, and use that logical ordering for the new - // children list. This will be a mixture of child nodes which are all - // already constructed in the mapCurrentChildrenMap. - var children = make([]*clusterNode, 0, len(clusterUpdate.PrioritizedClusterNames)) - - for _, orderedChild := range clusterUpdate.PrioritizedClusterNames { - // The cluster's already have watches started for them in xds client, so - // you can use these pointers to construct the new children list, you - // just have to put them in the correct order using the original cluster - // update. - currentChild := mapCurrentChildren[orderedChild] - children = append(children, currentChild) - } - - c.children = children + c.children = clusterUpdate.PrioritizedClusterNames + c.maxDepthErr = nil // If the cluster is an aggregate cluster, if this callback created any new // child cluster nodes, then there's no possibility for a full cluster // update to successfully build, as those created children will not have - // received an update yet. However, if there was simply a child deleted, - // then there is a possibility that it will have a full cluster update to - // build and also will have a changed overall cluster update from the - // deleted child. - if deletedChild && !createdChild { + // received an update yet. Even if this update did not delete a child, there + // is still a possibility for the cluster update to build, as the aggregate + // cluster can ignore duplicated children and thus the update can fill out + // the full cluster update tree. 
+ if !createdChild { c.clusterHandler.constructClusterUpdate() } } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go index 8cce075530821..360fc44c9e4da 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go @@ -19,7 +19,6 @@ package clusterimpl import ( - orcapb "github.com/cncf/xds/go/xds/data/orca/v3" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" @@ -27,6 +26,8 @@ import ( "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/load" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" ) // NewRandomWRR is used when calculating drops. It's exported so that tests can @@ -158,7 +159,7 @@ func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { } d.loadStore.CallFinished(lIDStr, info.Err) - load, ok := info.ServerLoad.(*orcapb.OrcaLoadReport) + load, ok := info.ServerLoad.(*v3orcapb.OrcaLoadReport) if !ok { return } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go index 6e0e03299f95a..4b971a3e241be 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go @@ -57,6 +57,11 @@ type balancerStateAggregator struct { // // If an ID is not in map, it's either removed or never added. idToPickerState map[string]*subBalancerState + // Set when UpdateState call propagation is paused. + pauseUpdateState bool + // Set when UpdateState call propagation is paused and an UpdateState call + // is suppressed. + needUpdateStateOnResume bool } func newBalancerStateAggregator(cc balancer.ClientConn, logger *grpclog.PrefixLogger) *balancerStateAggregator { @@ -118,6 +123,27 @@ func (bsa *balancerStateAggregator) remove(id string) { delete(bsa.idToPickerState, id) } +// pauseStateUpdates causes UpdateState calls to not propagate to the parent +// ClientConn. The last state will be remembered and propagated when +// ResumeStateUpdates is called. +func (bsa *balancerStateAggregator) pauseStateUpdates() { + bsa.mu.Lock() + defer bsa.mu.Unlock() + bsa.pauseUpdateState = true + bsa.needUpdateStateOnResume = false +} + +// resumeStateUpdates will resume propagating UpdateState calls to the parent, +// and call UpdateState on the parent if any UpdateState call was suppressed. +func (bsa *balancerStateAggregator) resumeStateUpdates() { + bsa.mu.Lock() + defer bsa.mu.Unlock() + bsa.pauseUpdateState = false + if bsa.needUpdateStateOnResume { + bsa.cc.UpdateState(bsa.build()) + } +} + // UpdateState is called to report a balancer state change from sub-balancer. // It's usually called by the balancer group. // @@ -143,6 +169,12 @@ func (bsa *balancerStateAggregator) UpdateState(id string, state balancer.State) if !bsa.started { return } + if bsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. 
+ bsa.needUpdateStateOnResume = true + return + } bsa.cc.UpdateState(bsa.build()) } @@ -168,6 +200,12 @@ func (bsa *balancerStateAggregator) buildAndUpdate() { if !bsa.started { return } + if bsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. + bsa.needUpdateStateOnResume = true + return + } bsa.cc.UpdateState(bsa.build()) } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go index 8d71200d8c61a..6ac7a39b2b4c6 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go @@ -93,6 +93,11 @@ func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) { b.stateAggregator.add(name) // Then add to the balancer group. b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) + } else { + // Already present, check for type change and if so send down a new builder. + if newT.ChildPolicy.Name != b.children[name].ChildPolicy.Name { + b.bg.UpdateBuilder(name, balancer.Get(newT.ChildPolicy.Name)) + } } // TODO: handle error? How to aggregate errors and return? _ = b.bg.UpdateClientConnState(name, balancer.ClientConnState{ @@ -118,6 +123,8 @@ func (b *bal) UpdateClientConnState(s balancer.ClientConnState) error { } b.logger.Infof("update with config %+v, resolver state %+v", pretty.ToJSON(s.BalancerConfig), s.ResolverState) + b.stateAggregator.pauseStateUpdates() + defer b.stateAggregator.resumeStateUpdates() b.updateChildren(s, newConfig) return nil } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go index d49014cfa4339..9b373fb36970b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -23,10 +23,12 @@ import ( "encoding/json" "errors" "fmt" + "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" @@ -35,6 +37,7 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -99,6 +102,9 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err if err := json.Unmarshal(c, &cfg); err != nil { return nil, fmt.Errorf("unable to unmarshal balancer config %s into cluster-resolver config, error: %v", string(c), err) } + if lbp := cfg.XDSLBPolicy; lbp != nil && !strings.EqualFold(lbp.Name, roundrobin.Name) && !strings.EqualFold(lbp.Name, ringhash.Name) { + return nil, fmt.Errorf("unsupported child policy with name %q, not one of {%q,%q}", lbp.Name, roundrobin.Name, ringhash.Name) + } return &cfg, nil } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go index 
363afd03ab2c4..2458b106772f2 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go @@ -21,12 +21,10 @@ import ( "bytes" "encoding/json" "fmt" - "strings" - "google.golang.org/grpc/balancer/roundrobin" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) @@ -102,6 +100,9 @@ type DiscoveryMechanism struct { // DNSHostname is the DNS name to resolve in "host:port" form. For type // LOGICAL_DNS only. DNSHostname string `json:"dnsHostname,omitempty"` + // OutlierDetection is the Outlier Detection LB configuration for this + // priority. + OutlierDetection outlierdetection.LBConfig `json:"outlierDetection,omitempty"` } // Equal returns whether the DiscoveryMechanism is the same with the parameter. @@ -117,6 +118,8 @@ func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { return false case dm.DNSHostname != b.DNSHostname: return false + case !dm.OutlierDetection.EqualIgnoringChildPolicy(&b.OutlierDetection): + return false } if dm.LoadReportingServer == nil && b.LoadReportingServer == nil { @@ -161,19 +164,3 @@ type LBConfig struct { // is responsible for both locality picking and endpoint picking. XDSLBPolicy *internalserviceconfig.BalancerConfig `json:"xdsLbPolicy,omitempty"` } - -const ( - rrName = roundrobin.Name - rhName = ringhash.Name -) - -func parseConfig(c json.RawMessage) (*LBConfig, error) { - var cfg LBConfig - if err := json.Unmarshal(c, &cfg); err != nil { - return nil, err - } - if lbp := cfg.XDSLBPolicy; lbp != nil && !strings.EqualFold(lbp.Name, rrName) && !strings.EqualFold(lbp.Name, rhName) { - return nil, fmt.Errorf("unsupported child policy with name %q, not one of {%q,%q}", lbp.Name, rrName, rhName) - } - return &cfg, nil -} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go index 4cce16ff9a3d5..a29658ec31414 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go @@ -26,11 +26,13 @@ import ( "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" "google.golang.org/grpc/balancer/weightedtarget" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/hierarchy" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/clusterimpl" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" "google.golang.org/grpc/xds/internal/balancer/priority" "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" @@ -51,6 +53,9 @@ type priorityConfig struct { edsResp xdsresource.EndpointsUpdate // addresses is set only if type is DNS. addresses []string + // Each discovery mechanism has a name generator so that the child policies + // can reuse names between updates (EDS updates for example). 
+ childNameGen *nameGenerator } // buildPriorityConfigJSON builds balancer config for the passed in @@ -118,42 +123,80 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi retConfig = &priority.LBConfig{Children: make(map[string]*priority.Child)} retAddrs []resolver.Address ) - for i, p := range priorities { + for _, p := range priorities { switch p.mechanism.Type { case DiscoveryMechanismTypeEDS: - names, configs, addrs, err := buildClusterImplConfigForEDS(i, p.edsResp, p.mechanism, xdsLBPolicy) + names, configs, addrs, err := buildClusterImplConfigForEDS(p.childNameGen, p.edsResp, p.mechanism, xdsLBPolicy) if err != nil { return nil, nil, err } retConfig.Priorities = append(retConfig.Priorities, names...) + retAddrs = append(retAddrs, addrs...) + var odCfgs map[string]*outlierdetection.LBConfig + if envconfig.XDSOutlierDetection { + odCfgs = convertClusterImplMapToOutlierDetection(configs, p.mechanism.OutlierDetection) + for n, c := range odCfgs { + retConfig.Children[n] = &priority.Child{ + Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: c}, + // Ignore all re-resolution from EDS children. + IgnoreReresolutionRequests: true, + } + } + continue + } for n, c := range configs { retConfig.Children[n] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: c}, // Ignore all re-resolution from EDS children. IgnoreReresolutionRequests: true, } + } - retAddrs = append(retAddrs, addrs...) case DiscoveryMechanismTypeLogicalDNS: - name, config, addrs := buildClusterImplConfigForDNS(i, p.addresses, p.mechanism) + name, config, addrs := buildClusterImplConfigForDNS(p.childNameGen, p.addresses, p.mechanism) retConfig.Priorities = append(retConfig.Priorities, name) + retAddrs = append(retAddrs, addrs...) + var odCfg *outlierdetection.LBConfig + if envconfig.XDSOutlierDetection { + odCfg = makeClusterImplOutlierDetectionChild(config, p.mechanism.OutlierDetection) + retConfig.Children[name] = &priority.Child{ + Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: odCfg}, + // Do not ignore re-resolution from DNS children; they will trigger + // DNS to re-resolve. + IgnoreReresolutionRequests: false, + } + continue + } retConfig.Children[name] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: config}, // Do not ignore re-resolution from DNS children; they will trigger // DNS to re-resolve. IgnoreReresolutionRequests: false, } - retAddrs = append(retAddrs, addrs...)
} } return retConfig, retAddrs, nil } -func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Address) { +func convertClusterImplMapToOutlierDetection(ciCfgs map[string]*clusterimpl.LBConfig, odCfg outlierdetection.LBConfig) map[string]*outlierdetection.LBConfig { + odCfgs := make(map[string]*outlierdetection.LBConfig, len(ciCfgs)) + for n, c := range ciCfgs { + odCfgs[n] = makeClusterImplOutlierDetectionChild(c, odCfg) + } + return odCfgs +} + +func makeClusterImplOutlierDetectionChild(ciCfg *clusterimpl.LBConfig, odCfg outlierdetection.LBConfig) *outlierdetection.LBConfig { + odCfgRet := odCfg + odCfgRet.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: ciCfg} + return &odCfgRet +} + +func buildClusterImplConfigForDNS(g *nameGenerator, addrStrs []string, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Address) { // Endpoint picking policy for DNS is hardcoded to pick_first. const childPolicy = "pick_first" retAddrs := make([]resolver.Address, 0, len(addrStrs)) - pName := fmt.Sprintf("priority-%v", parentPriority) + pName := fmt.Sprintf("priority-%v", g.prefix) for _, addrStr := range addrStrs { retAddrs = append(retAddrs, hierarchy.Set(resolver.Address{Addr: addrStr}, []string{pName})) } @@ -172,7 +215,7 @@ func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string, mechani // - map{"p0":p0_config, "p1":p1_config} // - [p0_address_0, p0_address_1, p1_address_0, p1_address_1] // - p0 addresses' hierarchy attributes are set to p0 -func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { +func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops)) for _, d := range edsResp.Drops { drops = append(drops, clusterimpl.DropConfig{ @@ -181,15 +224,12 @@ func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsresource.Endpoi }) } - priorityChildNames, priorities := groupLocalitiesByPriority(edsResp.Localities) - retNames := make([]string, 0, len(priorityChildNames)) - retAddrs := make([]resolver.Address, 0, len(priorityChildNames)) - retConfigs := make(map[string]*clusterimpl.LBConfig, len(priorityChildNames)) - for _, priorityName := range priorityChildNames { - priorityLocalities := priorities[priorityName] - // Prepend parent priority to the priority names, to avoid duplicates. - pName := fmt.Sprintf("priority-%v-%v", parentPriority, priorityName) - retNames = append(retNames, pName) + priorities := groupLocalitiesByPriority(edsResp.Localities) + retNames := g.generate(priorities) + retConfigs := make(map[string]*clusterimpl.LBConfig, len(retNames)) + var retAddrs []resolver.Address + for i, pName := range retNames { + priorityLocalities := priorities[i] cfg, addrs, err := priorityLocalitiesToClusterImpl(priorityLocalities, pName, mechanism, drops, xdsLBPolicy) if err != nil { return nil, nil, nil, err @@ -202,33 +242,29 @@ func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsresource.Endpoi // groupLocalitiesByPriority returns the localities grouped by priority. 
// -// It also returns a list of strings where each string represents a priority, -// and the list is sorted from higher priority to lower priority. +// The returned list is sorted from higher priority to lower. Each item in the +// list is a group of localities. // // For example, for L0-p0, L1-p0, L2-p1, results will be -// - ["p0", "p1"] -// - map{"p0":[L0, L1], "p1":[L2]} +// - [[L0, L1], [L2]] -func groupLocalitiesByPriority(localities []xdsresource.Locality) ([]string, map[string][]xdsresource.Locality) { +func groupLocalitiesByPriority(localities []xdsresource.Locality) [][]xdsresource.Locality { var priorityIntSlice []int - priorities := make(map[string][]xdsresource.Locality) + priorities := make(map[int][]xdsresource.Locality) for _, locality := range localities { - if locality.Weight == 0 { - continue - } - priorityName := fmt.Sprintf("%v", locality.Priority) - priorities[priorityName] = append(priorities[priorityName], locality) - priorityIntSlice = append(priorityIntSlice, int(locality.Priority)) + priority := int(locality.Priority) + priorities[priority] = append(priorities[priority], locality) + priorityIntSlice = append(priorityIntSlice, priority) } // Sort the priorities based on the int value and deduplicate. The sorted // result is then used to build the returned locality groups, in priority // order. sort.Ints(priorityIntSlice) priorityIntSliceDeduped := dedupSortedIntSlice(priorityIntSlice) - priorityNameSlice := make([]string, 0, len(priorityIntSliceDeduped)) + ret := make([][]xdsresource.Locality, 0, len(priorityIntSliceDeduped)) for _, p := range priorityIntSliceDeduped { - priorityNameSlice = append(priorityNameSlice, fmt.Sprintf("%v", p)) + ret = append(ret, priorities[p]) } - return priorityNameSlice, priorities + return ret } func dedupSortedIntSlice(a []int) []int { @@ -265,22 +301,22 @@ func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priority // ChildPolicy is not set. Will be set based on xdsLBPolicy } - if xdsLBPolicy == nil || xdsLBPolicy.Name == rrName { + if xdsLBPolicy == nil || xdsLBPolicy.Name == roundrobin.Name { // If lb policy is ROUND_ROBIN: // - locality-picking policy is weighted_target // - endpoint-picking policy is round_robin - logger.Infof("xds lb policy is %q, building config with weighted_target + round_robin", rrName) + logger.Infof("xds lb policy is %q, building config with weighted_target + round_robin", roundrobin.Name) // Child of weighted_target is hardcoded to round_robin. wtConfig, addrs := localitiesToWeightedTarget(localities, priorityName, rrBalancerConfig) clusterImplCfg.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: weightedtarget.Name, Config: wtConfig} return clusterImplCfg, addrs, nil } - if xdsLBPolicy.Name == rhName { + if xdsLBPolicy.Name == ringhash.Name { // If lb policy is RING_HASH, will build one ring_hash policy as child. // The endpoints from all localities will be flattened to one addresses // list, and the ring_hash policy will pick endpoints from it. - logger.Infof("xds lb policy is %q, building config with ring_hash", rhName) + logger.Infof("xds lb policy is %q, building config with ring_hash", ringhash.Name) addrs := localitiesToRingHash(localities, priorityName) // Set child to ring_hash, note that the ring_hash config is from // xdsLBPolicy.
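With this change, groupLocalitiesByPriority returns the locality groups themselves ([][]xdsresource.Locality, sorted from highest to lowest priority) instead of priority-name strings; stable child names are then minted by the nameGenerator introduced in configbuilder_childname.go just below. The following standalone sketch mirrors only the grouping step; the Locality type and groupByPriority function are local stand-ins, not the vendored implementation:

package main

import (
	"fmt"
	"sort"
)

// Locality is a minimal stand-in for xdsresource.Locality.
type Locality struct {
	Name     string
	Priority uint32
}

// groupByPriority returns localities grouped by priority, with the lowest
// numeric value (highest priority) first, mirroring the [][]Locality return
// shape introduced in this change.
func groupByPriority(localities []Locality) [][]Locality {
	groups := make(map[int][]Locality)
	var order []int
	for _, l := range localities {
		p := int(l.Priority)
		if _, seen := groups[p]; !seen {
			order = append(order, p) // record each priority once
		}
		groups[p] = append(groups[p], l)
	}
	sort.Ints(order)
	ret := make([][]Locality, 0, len(order))
	for _, p := range order {
		ret = append(ret, groups[p])
	}
	return ret
}

func main() {
	in := []Locality{{"L0", 0}, {"L1", 0}, {"L2", 1}}
	// Prints [[{L0 0} {L1 0}] [{L2 1}]], matching the doc comment's example.
	fmt.Println(groupByPriority(in))
}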
@@ -288,7 +324,7 @@ func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priority return clusterImplCfg, addrs, nil } - return nil, nil, fmt.Errorf("unsupported xds LB policy %q, not one of {%q,%q}", xdsLBPolicy.Name, rrName, rhName) + return nil, nil, fmt.Errorf("unsupported xds LB policy %q, not one of {%q,%q}", xdsLBPolicy.Name, roundrobin.Name, ringhash.Name) } // localitiesToRingHash takes a list of localities (with the same priority), and diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder_childname.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder_childname.go new file mode 100644 index 0000000000000..119f4c4747526 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder_childname.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package clusterresolver + +import ( + "fmt" + + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// nameGenerator generates a child name for a list of priorities (each priority +// is a list of localities). +// +// The purpose of this generator is to reuse names between updates. So the +// struct keeps state between generate() calls, and a later generate() might +// return names returned by the previous call. +type nameGenerator struct { + existingNames map[internal.LocalityID]string + prefix uint64 + nextID uint64 +} + +func newNameGenerator(prefix uint64) *nameGenerator { + return &nameGenerator{prefix: prefix} +} + +// generate returns a list of names for the given list of priorities. +// +// Each priority is a list of localities. The name for the priority is picked as +// - for each locality in this priority, if it exists in the existing names, +// this priority will reuse the name +// - if no reusable name is found for this priority, a new name is generated +// +// For example: +// - update 1: [[L1], [L2], [L3]] --> ["0", "1", "2"] +// - update 2: [[L1], [L2], [L3]] --> ["0", "1", "2"] +// - update 3: [[L1, L2], [L3]] --> ["0", "2"] (Two priorities were merged) +// - update 4: [[L1], [L4]] --> ["0", "3",] (A priority was split, and a new priority was added) +func (ng *nameGenerator) generate(priorities [][]xdsresource.Locality) []string { + var ret []string + usedNames := make(map[string]bool) + newNames := make(map[internal.LocalityID]string) + for _, priority := range priorities { + var nameFound string + for _, locality := range priority { + if name, ok := ng.existingNames[locality.ID]; ok { + if !usedNames[name] { + nameFound = name + // Found a name to use. No need to process the remaining + // localities. + break + } + } + } + + if nameFound == "" { + // No appropriate used name is found. Make a new name. 
+			nameFound = fmt.Sprintf("priority-%d-%d", ng.prefix, ng.nextID)
+			ng.nextID++
+		}
+
+		ret = append(ret, nameFound)
+		// All localities in this priority share the same name. Add them all to
+		// the new map.
+		for _, l := range priority {
+			newNames[l.ID] = nameFound
+		}
+		usedNames[nameFound] = true
+	}
+	ng.existingNames = newNames
+	return ret
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go
index 9d7db26ad14a0..9c2fc6e7c7973 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go
@@ -55,6 +55,8 @@ type resolverMechanismTuple struct {
 	dm    DiscoveryMechanism
 	dmKey discoveryMechanismKey
 	r     discoveryMechanism
+
+	childNameGen *nameGenerator
 }
 
 type resourceResolver struct {
@@ -62,17 +64,28 @@ type resourceResolver struct {
 	updateChannel chan *resourceUpdate
 
 	// mu protects the slice and map, and content of the resolvers in the slice.
-	mu          sync.Mutex
-	mechanisms  []DiscoveryMechanism
-	children    []resolverMechanismTuple
-	childrenMap map[discoveryMechanismKey]discoveryMechanism
+	mu         sync.Mutex
+	mechanisms []DiscoveryMechanism
+	children   []resolverMechanismTuple
+	// childrenMap's value only needs the resolver implementation (type
+	// discoveryMechanism) and the childNameGen. The other two fields are not
+	// used.
+	//
+	// TODO(cleanup): maybe we can make a new type with just the necessary
+	// fields, and use it here instead.
+	childrenMap map[discoveryMechanismKey]resolverMechanismTuple
+	// Each new discovery mechanism needs a child name generator to reuse child
+	// policy names. But to make sure the names across discovery mechanisms
+	// don't conflict, we need a seq ID. This ID is incremented for each new
+	// discovery mechanism.
+	childNameGeneratorSeqID uint64
 }
 
 func newResourceResolver(parent *clusterResolverBalancer) *resourceResolver {
 	return &resourceResolver{
 		parent:        parent,
 		updateChannel: make(chan *resourceUpdate, 1),
-		childrenMap:   make(map[discoveryMechanismKey]discoveryMechanism),
+		childrenMap:   make(map[discoveryMechanismKey]resolverMechanismTuple),
 	}
 }
 
@@ -112,31 +125,54 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) {
 			dmKey := discoveryMechanismKey{typ: dm.Type, name: nameToWatch}
 			newDMs[dmKey] = true
-			r := rr.childrenMap[dmKey]
-			if r == nil {
-				r = newEDSResolver(nameToWatch, rr.parent.xdsClient, rr)
+			r, ok := rr.childrenMap[dmKey]
+			if !ok {
+				r = resolverMechanismTuple{
+					dm:           dm,
+					dmKey:        dmKey,
+					r:            newEDSResolver(nameToWatch, rr.parent.xdsClient, rr),
+					childNameGen: newNameGenerator(rr.childNameGeneratorSeqID),
+				}
 				rr.childrenMap[dmKey] = r
+				rr.childNameGeneratorSeqID++
+			} else {
+				// If this is not new, keep the fields (especially
+				// childNameGen), and only update the DiscoveryMechanism.
+				//
+				// Note that the same dmKey doesn't mean the same
+				// DiscoveryMechanism. There are fields (e.g.
+				// MaxConcurrentRequests) in DiscoveryMechanism that are not
+				// copied to dmKey; we need to keep those updated.
+				r.dm = dm
 			}
-			rr.children[i] = resolverMechanismTuple{dm: dm, dmKey: dmKey, r: r}
+			rr.children[i] = r
 		case DiscoveryMechanismTypeLogicalDNS:
 			// Name to resolve in DNS is the hostname, not the ClientConn
 			// target.
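The reuse rules documented on generate() above, shown concretely as a package-internal sketch; the locality literals are illustrative and assume the exported fields of internal.LocalityID.

ng := newNameGenerator(0) // prefix 0, as the first discovery mechanism would get

l1 := xdsresource.Locality{ID: internal.LocalityID{Zone: "z1"}}
l2 := xdsresource.Locality{ID: internal.LocalityID{Zone: "z2"}}
l3 := xdsresource.Locality{ID: internal.LocalityID{Zone: "z3"}}
l4 := xdsresource.Locality{ID: internal.LocalityID{Zone: "z4"}}

ng.generate([][]xdsresource.Locality{{l1}, {l2}, {l3}}) // ["priority-0-0" "priority-0-1" "priority-0-2"]
ng.generate([][]xdsresource.Locality{{l1, l2}, {l3}})   // ["priority-0-0" "priority-0-2"]: the merged priority reuses l1's name
ng.generate([][]xdsresource.Locality{{l1}, {l4}})       // ["priority-0-0" "priority-0-3"]: l4's priority gets a fresh name

A second discovery mechanism would get a generator with prefix 1, so its names ("priority-1-0", ...) can never collide with these.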
dmKey := discoveryMechanismKey{typ: dm.Type, name: dm.DNSHostname} newDMs[dmKey] = true - r := rr.childrenMap[dmKey] - if r == nil { - r = newDNSResolver(dm.DNSHostname, rr) + r, ok := rr.childrenMap[dmKey] + if !ok { + r = resolverMechanismTuple{ + dm: dm, + dmKey: dmKey, + r: newDNSResolver(dm.DNSHostname, rr), + childNameGen: newNameGenerator(rr.childNameGeneratorSeqID), + } rr.childrenMap[dmKey] = r + rr.childNameGeneratorSeqID++ + } else { + r.dm = dm } - rr.children[i] = resolverMechanismTuple{dm: dm, dmKey: dmKey, r: r} + rr.children[i] = r } } // Stop the resources that were removed. for dm, r := range rr.childrenMap { if !newDMs[dm] { delete(rr.childrenMap, dm) - r.stop() + r.r.stop() } } // Regenerate even if there's no change in discovery mechanism, in case @@ -150,19 +186,27 @@ func (rr *resourceResolver) resolveNow() { rr.mu.Lock() defer rr.mu.Unlock() for _, r := range rr.childrenMap { - r.resolveNow() + r.r.resolveNow() } } func (rr *resourceResolver) stop() { rr.mu.Lock() - defer rr.mu.Unlock() - for dm, r := range rr.childrenMap { - delete(rr.childrenMap, dm) - r.stop() - } + // Save the previous childrenMap to stop the children outside the mutex, + // and reinitialize the map. We only need to reinitialize to allow for the + // policy to be reused if the resource comes back. In practice, this does + // not happen as the parent LB policy will also be closed, causing this to + // be removed entirely, but a future use case might want to reuse the + // policy instead. + cm := rr.childrenMap + rr.childrenMap = make(map[discoveryMechanismKey]resolverMechanismTuple) rr.mechanisms = nil rr.children = nil + rr.mu.Unlock() + + for _, r := range cm { + r.r.stop() + } } // generate collects all the updates from all the resolvers, and push the @@ -174,13 +218,7 @@ func (rr *resourceResolver) stop() { func (rr *resourceResolver) generate() { var ret []priorityConfig for _, rDM := range rr.children { - r, ok := rr.childrenMap[rDM.dmKey] - if !ok { - rr.parent.logger.Infof("resolver for %+v not found, should never happen", rDM.dmKey) - continue - } - - u, ok := r.lastUpdate() + u, ok := rDM.r.lastUpdate() if !ok { // Don't send updates to parent until all resolvers have update to // send. @@ -188,9 +226,9 @@ func (rr *resourceResolver) generate() { } switch uu := u.(type) { case xdsresource.EndpointsUpdate: - ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu}) + ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu, childNameGen: rDM.childNameGen}) case []string: - ret = append(ret, priorityConfig{mechanism: rDM.dm, addresses: uu}) + ret = append(ret, priorityConfig{mechanism: rDM.dm, addresses: uu, childNameGen: rDM.childNameGen}) } } select { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go new file mode 100644 index 0000000000000..062a8e5e48d2e --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go @@ -0,0 +1,894 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package outlierdetection provides an implementation of the outlier detection +// LB policy, as defined in +// https://github.com/grpc/proposal/blob/master/A50-xds-outlier-detection.md. +package outlierdetection + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "sync" + "sync/atomic" + "time" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// Globals to stub out in tests. +var ( + afterFunc = time.AfterFunc + now = time.Now +) + +// Name is the name of the outlier detection balancer. +const Name = "outlier_detection_experimental" + +func init() { + if envconfig.XDSOutlierDetection { + balancer.Register(bb{}) + } +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &outlierDetectionBalancer{ + cc: cc, + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + addrs: make(map[string]*addressInfo), + scWrappers: make(map[balancer.SubConn]*subConnWrapper), + scUpdateCh: buffer.NewUnbounded(), + pickerUpdateCh: buffer.NewUnbounded(), + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + b.child = gracefulswitch.NewBalancer(b, bOpts) + go b.run() + return b +} + +func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var lbCfg *LBConfig + if err := json.Unmarshal(s, &lbCfg); err != nil { // Validates child config if present as well. + return nil, fmt.Errorf("xds: unable to unmarshal LBconfig: %s, error: %v", string(s), err) + } + + // Note: in the xds flow, these validations will never fail. The xdsclient + // performs the same validations as here on the xds Outlier Detection + // resource before parsing into the internal struct which gets marshaled + // into JSON before calling this function. A50 defines two separate places + // for these validations to take place, the xdsclient and this ParseConfig + // method. "When parsing a config from JSON, if any of these requirements is + // violated, that should be treated as a parsing error." - A50 + + switch { + // "The google.protobuf.Duration fields interval, base_ejection_time, and + // max_ejection_time must obey the restrictions in the + // google.protobuf.Duration documentation and they must have non-negative + // values." - A50 + // Approximately 290 years is the maximum time that time.Duration (int64) + // can represent. The restrictions on the protobuf.Duration field are to be + // within +-10000 years. Thus, just check for negative values. 
+ case lbCfg.Interval < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.interval = %s; must be >= 0", lbCfg.Interval) + case lbCfg.BaseEjectionTime < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.base_ejection_time = %s; must be >= 0", lbCfg.BaseEjectionTime) + case lbCfg.MaxEjectionTime < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.max_ejection_time = %s; must be >= 0", lbCfg.MaxEjectionTime) + + // "The fields max_ejection_percent, + // success_rate_ejection.enforcement_percentage, + // failure_percentage_ejection.threshold, and + // failure_percentage.enforcement_percentage must have values less than or + // equal to 100." - A50 + case lbCfg.MaxEjectionPercent > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.max_ejection_percent = %v; must be <= 100", lbCfg.MaxEjectionPercent) + case lbCfg.SuccessRateEjection != nil && lbCfg.SuccessRateEjection.EnforcementPercentage > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage = %v; must be <= 100", lbCfg.SuccessRateEjection.EnforcementPercentage) + case lbCfg.FailurePercentageEjection != nil && lbCfg.FailurePercentageEjection.Threshold > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold = %v; must be <= 100", lbCfg.FailurePercentageEjection.Threshold) + case lbCfg.FailurePercentageEjection != nil && lbCfg.FailurePercentageEjection.EnforcementPercentage > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage = %v; must be <= 100", lbCfg.FailurePercentageEjection.EnforcementPercentage) + case lbCfg.ChildPolicy == nil: + return nil, errors.New("OutlierDetectionLoadBalancingConfig.child_policy must be present") + } + + return lbCfg, nil +} + +func (bb) Name() string { + return Name +} + +// scUpdate wraps a subConn update to be sent to the child balancer. +type scUpdate struct { + scw *subConnWrapper + state balancer.SubConnState +} + +type ejectionUpdate struct { + scw *subConnWrapper + isEjected bool // true for ejected, false for unejected +} + +type lbCfgUpdate struct { + lbCfg *LBConfig + // to make sure picker is updated synchronously. + done chan struct{} +} + +type outlierDetectionBalancer struct { + // These fields are safe to be accessed without holding any mutex because + // they are synchronized in run(), which makes these field accesses happen + // serially. + // + // childState is the latest balancer state received from the child. + childState balancer.State + // recentPickerNoop represents whether the most recent picker sent upward to + // the balancer.ClientConn is a noop picker, which doesn't count RPC's. Used + // to suppress redundant picker updates. + recentPickerNoop bool + + closed *grpcsync.Event + done *grpcsync.Event + cc balancer.ClientConn + logger *grpclog.PrefixLogger + + // childMu guards calls into child (to uphold the balancer.Balancer API + // guarantee of synchronous calls). + childMu sync.Mutex + child *gracefulswitch.Balancer + + // mu guards access to the following fields. It also helps to synchronize + // behaviors of the following events: config updates, firing of the interval + // timer, SubConn State updates, SubConn address updates, and child state + // updates. 
+ // + // For example, when we receive a config update in the middle of the + // interval timer algorithm, which uses knobs present in the config, the + // balancer will wait for the interval timer algorithm to finish before + // persisting the new configuration. + // + // Another example would be the updating of the addrs map, such as from a + // SubConn address update in the middle of the interval timer algorithm + // which uses addrs. This balancer waits for the interval timer algorithm to + // finish before making the update to the addrs map. + // + // This mutex is never held at the same time as childMu (within the context + // of a single goroutine). + mu sync.Mutex + addrs map[string]*addressInfo + cfg *LBConfig + scWrappers map[balancer.SubConn]*subConnWrapper + timerStartTime time.Time + intervalTimer *time.Timer + inhibitPickerUpdates bool + updateUnconditionally bool + numAddrsEjected int // For fast calculations of percentage of addrs ejected + + scUpdateCh *buffer.Unbounded + pickerUpdateCh *buffer.Unbounded +} + +// noopConfig returns whether this balancer is configured with a logical no-op +// configuration or not. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) noopConfig() bool { + return b.cfg.SuccessRateEjection == nil && b.cfg.FailurePercentageEjection == nil +} + +// onIntervalConfig handles logic required specifically on the receipt of a +// configuration which specifies to count RPC's and periodically perform passive +// health checking based on heuristics defined in configuration every configured +// interval. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) onIntervalConfig() { + var interval time.Duration + if b.timerStartTime.IsZero() { + b.timerStartTime = time.Now() + for _, addrInfo := range b.addrs { + addrInfo.callCounter.clear() + } + interval = b.cfg.Interval + } else { + interval = b.cfg.Interval - now().Sub(b.timerStartTime) + if interval < 0 { + interval = 0 + } + } + b.intervalTimer = afterFunc(interval, b.intervalTimerAlgorithm) +} + +// onNoopConfig handles logic required specifically on the receipt of a +// configuration which specifies the balancer to be a noop. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) onNoopConfig() { + // "If a config is provided with both the `success_rate_ejection` and + // `failure_percentage_ejection` fields unset, skip starting the timer and + // do the following:" + // "Unset the timer start timestamp." + b.timerStartTime = time.Time{} + for _, addrInfo := range b.addrs { + // "Uneject all currently ejected addresses." + if !addrInfo.latestEjectionTimestamp.IsZero() { + b.unejectAddress(addrInfo) + } + // "Reset each address's ejection time multiplier to 0." + addrInfo.ejectionTimeMultiplier = 0 + } +} + +func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + lbCfg, ok := s.BalancerConfig.(*LBConfig) + if !ok { + b.logger.Errorf("received config with unexpected type %T: %v", s.BalancerConfig, s.BalancerConfig) + return balancer.ErrBadResolverState + } + + // Reject whole config if child policy doesn't exist, don't persist it for + // later. + bb := balancer.Get(lbCfg.ChildPolicy.Name) + if bb == nil { + return fmt.Errorf("outlier detection: child balancer %q not registered", lbCfg.ChildPolicy.Name) + } + + // It is safe to read b.cfg here without holding the mutex, as the only + // write to b.cfg happens later in this function. 
This function is part of + // the balancer.Balancer API, so it is guaranteed to be called in a + // synchronous manner, so it cannot race with this read. + if b.cfg == nil || b.cfg.ChildPolicy.Name != lbCfg.ChildPolicy.Name { + b.childMu.Lock() + err := b.child.SwitchTo(bb) + if err != nil { + b.childMu.Unlock() + return fmt.Errorf("outlier detection: error switching to child of type %q: %v", lbCfg.ChildPolicy.Name, err) + } + b.childMu.Unlock() + } + + b.mu.Lock() + // Inhibit child picker updates until this UpdateClientConnState() call + // completes. If needed, a picker update containing the no-op config bit + // determined from this config and most recent state from the child will be + // sent synchronously upward at the end of this UpdateClientConnState() + // call. + b.inhibitPickerUpdates = true + b.updateUnconditionally = false + b.cfg = lbCfg + + addrs := make(map[string]bool, len(s.ResolverState.Addresses)) + for _, addr := range s.ResolverState.Addresses { + addrs[addr.Addr] = true + if _, ok := b.addrs[addr.Addr]; !ok { + b.addrs[addr.Addr] = newAddressInfo() + } + } + for addr := range b.addrs { + if !addrs[addr] { + delete(b.addrs, addr) + } + } + + if b.intervalTimer != nil { + b.intervalTimer.Stop() + } + + if b.noopConfig() { + b.onNoopConfig() + } else { + b.onIntervalConfig() + } + b.mu.Unlock() + + b.childMu.Lock() + err := b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + BalancerConfig: b.cfg.ChildPolicy.Config, + }) + b.childMu.Unlock() + + done := make(chan struct{}) + b.pickerUpdateCh.Put(lbCfgUpdate{ + lbCfg: lbCfg, + done: done, + }) + <-done + + return err +} + +func (b *outlierDetectionBalancer) ResolverError(err error) { + b.childMu.Lock() + defer b.childMu.Unlock() + b.child.ResolverError(err) +} + +func (b *outlierDetectionBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + scw, ok := b.scWrappers[sc] + if !ok { + // Shouldn't happen if passed down a SubConnWrapper to child on SubConn + // creation. + b.logger.Errorf("UpdateSubConnState called with SubConn that has no corresponding SubConnWrapper") + return + } + if state.ConnectivityState == connectivity.Shutdown { + delete(b.scWrappers, scw.SubConn) + } + b.scUpdateCh.Put(&scUpdate{ + scw: scw, + state: state, + }) +} + +func (b *outlierDetectionBalancer) Close() { + b.closed.Fire() + <-b.done.Done() + b.childMu.Lock() + b.child.Close() + b.childMu.Unlock() + + b.mu.Lock() + defer b.mu.Unlock() + if b.intervalTimer != nil { + b.intervalTimer.Stop() + } +} + +func (b *outlierDetectionBalancer) ExitIdle() { + b.childMu.Lock() + defer b.childMu.Unlock() + b.child.ExitIdle() +} + +// wrappedPicker delegates to the child policy's picker, and when the request +// finishes, it increments the corresponding counter in the map entry referenced +// by the subConnWrapper that was picked. If both the `success_rate_ejection` +// and `failure_percentage_ejection` fields are unset in the configuration, this +// picker will not count. 
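For orientation, a hedged sketch of a config that ParseConfig above accepts and that noopConfig() treats as a logical no-op. This is package-internal (type bb is unexported); the loadBalancingConfig list form for childPolicy and the raw-nanosecond durations are assumptions based on the json tags in config.go further down in this diff.

// 10s interval, 30s base ejection, 300s max ejection, expressed in
// nanoseconds; both ejection algorithm fields are left unset.
cfg, err := bb{}.ParseConfig(json.RawMessage(`{
	"interval": 10000000000,
	"baseEjectionTime": 30000000000,
	"maxEjectionTime": 300000000000,
	"maxEjectionPercent": 10,
	"childPolicy": [{"round_robin": {}}]
}`))
if err != nil {
	// Validation failed; with the values above it should not.
}
lbCfg := cfg.(*LBConfig)
// With SuccessRateEjection and FailurePercentageEjection both nil, the
// balancer counts nothing and ejects nothing.
_ = lbCfg.SuccessRateEjection == nil && lbCfg.FailurePercentageEjection == nil // true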
+type wrappedPicker struct {
+	childPicker balancer.Picker
+	noopPicker  bool
+}
+
+func (wp *wrappedPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	pr, err := wp.childPicker.Pick(info)
+	if err != nil {
+		return balancer.PickResult{}, err
+	}
+
+	done := func(di balancer.DoneInfo) {
+		if !wp.noopPicker {
+			incrementCounter(pr.SubConn, di)
+		}
+		if pr.Done != nil {
+			pr.Done(di)
+		}
+	}
+	scw, ok := pr.SubConn.(*subConnWrapper)
+	if !ok {
+		// This can never happen, but check is present for defensive
+		// programming.
+		logger.Errorf("Picked SubConn from child picker is not a SubConnWrapper")
+		return balancer.PickResult{
+			SubConn: pr.SubConn,
+			Done:    done,
+		}, nil
+	}
+	return balancer.PickResult{
+		SubConn: scw.SubConn,
+		Done:    done,
+	}, nil
+}
+
+func incrementCounter(sc balancer.SubConn, info balancer.DoneInfo) {
+	scw, ok := sc.(*subConnWrapper)
+	if !ok {
+		// Shouldn't happen, as the SubConn comes from the child picker.
+		return
+	}
+
+	// scw.addressInfo and callCounter.activeBucket can be written to
+	// concurrently (the pointers themselves). Thus, protect the reads here with
+	// atomics to prevent data corruption. There exists a race in which you read
+	// the addressInfo or active bucket pointer and then that pointer points to
+	// deprecated memory. If this goroutine yields the processor, in between
+	// reading the addressInfo pointer and writing to the active bucket,
+	// UpdateAddresses can switch the addressInfo the scw points to. Writing to
+	// an outdated address is a very small race and tolerable. After reading
+	// callCounter.activeBucket in this picker a swap call can concurrently
+	// change what activeBucket points to. A50 says to swap the pointer, which
+	// will cause this race to write to deprecated memory that the interval
+	// timer algorithm will never read, which makes this race alright.
+	addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo))
+	if addrInfo == nil {
+		return
+	}
+	ab := (*bucket)(atomic.LoadPointer(&addrInfo.callCounter.activeBucket))
+
+	if info.Err == nil {
+		atomic.AddUint32(&ab.numSuccesses, 1)
+	} else {
+		atomic.AddUint32(&ab.numFailures, 1)
+	}
+}
+
+func (b *outlierDetectionBalancer) UpdateState(s balancer.State) {
+	b.pickerUpdateCh.Put(s)
+}
+
+func (b *outlierDetectionBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	sc, err := b.cc.NewSubConn(addrs, opts)
+	if err != nil {
+		return nil, err
+	}
+	scw := &subConnWrapper{
+		SubConn:    sc,
+		addresses:  addrs,
+		scUpdateCh: b.scUpdateCh,
+	}
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.scWrappers[sc] = scw
+	if len(addrs) != 1 {
+		return scw, nil
+	}
+	addrInfo, ok := b.addrs[addrs[0].Addr]
+	if !ok {
+		return scw, nil
+	}
+	addrInfo.sws = append(addrInfo.sws, scw)
+	atomic.StorePointer(&scw.addressInfo, unsafe.Pointer(addrInfo))
+	if !addrInfo.latestEjectionTimestamp.IsZero() {
+		scw.eject()
+	}
+	return scw, nil
+}
+
+func (b *outlierDetectionBalancer) RemoveSubConn(sc balancer.SubConn) {
+	scw, ok := sc.(*subConnWrapper)
+	if !ok { // Shouldn't happen
+		return
+	}
+	// Remove the wrapped SubConn from the parent Client Conn. We don't remove
+	// from the map entry until we get a Shutdown state for the SubConn, as we
+	// need that data to forward that state down.
+	b.cc.RemoveSubConn(scw.SubConn)
+}
+
+// appendIfPresent appends the scw to the address, if the address is present in
+// the Outlier Detection balancer's address map. Returns nil if not present,
+// and the map entry if present.
+//
+// Caller must hold b.mu.
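The lock-free counting scheme in incrementCounter above (atomic adds on the active bucket, pointer swap at interval boundaries) is the same pattern the callCounter introduced later in this diff uses. A self-contained, simplified sketch; unlike the real code, the drained counts are not copied into a fresh bucket.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type bucket struct{ successes, failures uint32 }

type counter struct {
	active   unsafe.Pointer // *bucket; swapped out each interval, read by the RPC path
	inactive *bucket        // only touched by the interval goroutine
}

func newCounter() *counter {
	return &counter{active: unsafe.Pointer(&bucket{}), inactive: &bucket{}}
}

// record is the hot path: one atomic load plus one atomic add, no mutex
// per RPC. An increment racing with swap may land in either bucket, which
// is the tolerated race described above.
func (c *counter) record(success bool) {
	b := (*bucket)(atomic.LoadPointer(&c.active))
	if success {
		atomic.AddUint32(&b.successes, 1)
	} else {
		atomic.AddUint32(&b.failures, 1)
	}
}

// swap zeroes the inactive bucket and makes it the new active bucket; the
// old active bucket becomes the snapshot the algorithms evaluate.
func (c *counter) swap() (successes, failures uint32) {
	ib := c.inactive
	*ib = bucket{}
	ab := (*bucket)(atomic.SwapPointer(&c.active, unsafe.Pointer(ib)))
	c.inactive = ab
	return atomic.LoadUint32(&ab.successes), atomic.LoadUint32(&ab.failures)
}

func main() {
	c := newCounter()
	c.record(true)
	c.record(false)
	fmt.Println(c.swap()) // 1 1
}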
+func (b *outlierDetectionBalancer) appendIfPresent(addr string, scw *subConnWrapper) *addressInfo {
+	addrInfo, ok := b.addrs[addr]
+	if !ok {
+		return nil
+	}
+
+	addrInfo.sws = append(addrInfo.sws, scw)
+	atomic.StorePointer(&scw.addressInfo, unsafe.Pointer(addrInfo))
+	return addrInfo
+}
+
+// removeSubConnFromAddressesMapEntry removes the scw from its map entry if
+// present.
+//
+// Caller must hold b.mu.
+func (b *outlierDetectionBalancer) removeSubConnFromAddressesMapEntry(scw *subConnWrapper) {
+	addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo))
+	if addrInfo == nil {
+		return
+	}
+	for i, sw := range addrInfo.sws {
+		if scw == sw {
+			addrInfo.sws = append(addrInfo.sws[:i], addrInfo.sws[i+1:]...)
+			return
+		}
+	}
+}
+
+func (b *outlierDetectionBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+	scw, ok := sc.(*subConnWrapper)
+	if !ok {
+		// Return early; shouldn't happen if a scw was passed up.
+		return
+	}
+
+	b.cc.UpdateAddresses(scw.SubConn, addrs)
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	// Note that 0 addresses is a valid update/state for a SubConn to be in.
+	// This is correctly handled by this algorithm (handled as part of the
+	// non-singular old address/new address cases).
+	switch {
+	case len(scw.addresses) == 1 && len(addrs) == 1: // single address to single address
+		// If the updated address is the same, then there is nothing to do
+		// past this point.
+		if scw.addresses[0].Addr == addrs[0].Addr {
+			return
+		}
+		b.removeSubConnFromAddressesMapEntry(scw)
+		addrInfo := b.appendIfPresent(addrs[0].Addr, scw)
+		if addrInfo == nil { // uneject unconditionally because the scw could have come from an ejected address
+			scw.uneject()
+			break
+		}
+		if addrInfo.latestEjectionTimestamp.IsZero() { // relay new updated subconn state
+			scw.uneject()
+		} else {
+			scw.eject()
+		}
+	case len(scw.addresses) == 1: // single address to multiple/no addresses
+		b.removeSubConnFromAddressesMapEntry(scw)
+		addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo))
+		if addrInfo != nil {
+			addrInfo.callCounter.clear()
+		}
+		scw.uneject()
+	case len(addrs) == 1: // multiple/no addresses to single address
+		addrInfo := b.appendIfPresent(addrs[0].Addr, scw)
+		if addrInfo != nil && !addrInfo.latestEjectionTimestamp.IsZero() {
+			scw.eject()
+		}
+	} // otherwise multiple/no addresses to multiple/no addresses; ignore
+
+	scw.addresses = addrs
+}
+
+func (b *outlierDetectionBalancer) ResolveNow(opts resolver.ResolveNowOptions) {
+	b.cc.ResolveNow(opts)
+}
+
+func (b *outlierDetectionBalancer) Target() string {
+	return b.cc.Target()
+}
+
+func max(x, y int64) int64 {
+	if x < y {
+		return y
+	}
+	return x
+}
+
+func min(x, y int64) int64 {
+	if x < y {
+		return x
+	}
+	return y
+}
+
+// handleSubConnUpdate stores the recent state and forwards the update
+// if the SubConn is not ejected.
+func (b *outlierDetectionBalancer) handleSubConnUpdate(u *scUpdate) {
+	scw := u.scw
+	scw.latestState = u.state
+	if !scw.ejected {
+		b.childMu.Lock()
+		b.child.UpdateSubConnState(scw, u.state)
+		b.childMu.Unlock()
+	}
+}
+
+// handleEjectedUpdate handles any SubConns that get ejected/unejected, and
+// forwards the appropriate corresponding subConnState to the child policy.
+func (b *outlierDetectionBalancer) handleEjectedUpdate(u *ejectionUpdate) {
+	scw := u.scw
+	scw.ejected = u.isEjected
+	// If scw.latestState has never been written to, it will default to
+	// connectivity state IDLE, which is fine.
+	stateToUpdate := scw.latestState
+	if u.isEjected {
+		stateToUpdate = balancer.SubConnState{
+			ConnectivityState: connectivity.TransientFailure,
+		}
+	}
+	b.childMu.Lock()
+	b.child.UpdateSubConnState(scw, stateToUpdate)
+	b.childMu.Unlock()
+}
+
+// handleChildStateUpdate forwards the picker update wrapped in a wrapped picker
+// with the noop picker bit present.
+func (b *outlierDetectionBalancer) handleChildStateUpdate(u balancer.State) {
+	b.childState = u
+	b.mu.Lock()
+	if b.inhibitPickerUpdates {
+		// If a child's state is updated during the suppression of child
+		// updates, the synchronous handleLBConfigUpdate function with respect
+		// to UpdateClientConnState should return a picker unconditionally.
+		b.updateUnconditionally = true
+		b.mu.Unlock()
+		return
+	}
+	noopCfg := b.noopConfig()
+	b.mu.Unlock()
+	b.recentPickerNoop = noopCfg
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: b.childState.ConnectivityState,
+		Picker: &wrappedPicker{
+			childPicker: b.childState.Picker,
+			noopPicker:  noopCfg,
+		},
+	})
+}
+
+// handleLBConfigUpdate compares whether the new config is a noop config or not,
+// to the noop bit in the picker if present. It updates the picker if this bit
+// changed compared to the picker currently in use.
+func (b *outlierDetectionBalancer) handleLBConfigUpdate(u lbCfgUpdate) {
+	lbCfg := u.lbCfg
+	noopCfg := lbCfg.SuccessRateEjection == nil && lbCfg.FailurePercentageEjection == nil
+	// If the child has sent its first update and this config flips the noop
+	// bit compared to the most recent picker update sent upward, then a new
+	// picker with this updated bit needs to be forwarded upward. If a child
+	// update was received during the suppression of child updates within
+	// UpdateClientConnState(), then a new picker needs to be forwarded with
+	// this updated state, regardless of whether this new configuration flips
+	// the bit.
+	if b.childState.Picker != nil && noopCfg != b.recentPickerNoop || b.updateUnconditionally {
+		b.recentPickerNoop = noopCfg
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: b.childState.ConnectivityState,
+			Picker: &wrappedPicker{
+				childPicker: b.childState.Picker,
+				noopPicker:  noopCfg,
+			},
+		})
+	}
+	b.inhibitPickerUpdates = false
+	b.updateUnconditionally = false
+	close(u.done)
+}
+
+func (b *outlierDetectionBalancer) run() {
+	defer b.done.Fire()
+	for {
+		select {
+		case update := <-b.scUpdateCh.Get():
+			b.scUpdateCh.Load()
+			if b.closed.HasFired() { // don't send SubConn updates to the child after the balancer has been closed
+				return
+			}
+			switch u := update.(type) {
+			case *scUpdate:
+				b.handleSubConnUpdate(u)
+			case *ejectionUpdate:
+				b.handleEjectedUpdate(u)
+			}
+		case update := <-b.pickerUpdateCh.Get():
+			b.pickerUpdateCh.Load()
+			if b.closed.HasFired() { // don't send picker updates to grpc after the balancer has been closed
+				return
+			}
+			switch u := update.(type) {
+			case balancer.State:
+				b.handleChildStateUpdate(u)
+			case lbCfgUpdate:
+				b.handleLBConfigUpdate(u)
+			}
+		case <-b.closed.Done():
+			return
+		}
+	}
+}
+
+// intervalTimerAlgorithm ejects and unejects addresses based on the Outlier
+// Detection configuration and data about each address from the previous
+// interval.
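Before the algorithm itself, a worked example of the uneject arithmetic in intervalTimerAlgorithm below, using the defaults documented in config.go later in this diff (base_ejection_time 30s, max_ejection_time 300s):

// et  = base_ejection_time * ejectionTimeMultiplier
// met = max(base_ejection_time, max_ejection_time) = 300s
// multiplier 1:  et = 30s  -> ejected for min(30s, 300s)  = 30s
// multiplier 5:  et = 150s -> ejected for min(150s, 300s) = 150s
// multiplier 20: et = 600s -> ejected for min(600s, 300s) = 300s (capped)
// An address is unejected once now() is after latestEjectionTimestamp plus
// that capped duration.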
+func (b *outlierDetectionBalancer) intervalTimerAlgorithm() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.timerStartTime = time.Now()
+
+	for _, addrInfo := range b.addrs {
+		addrInfo.callCounter.swap()
+	}
+
+	if b.cfg.SuccessRateEjection != nil {
+		b.successRateAlgorithm()
+	}
+
+	if b.cfg.FailurePercentageEjection != nil {
+		b.failurePercentageAlgorithm()
+	}
+
+	for _, addrInfo := range b.addrs {
+		if addrInfo.latestEjectionTimestamp.IsZero() && addrInfo.ejectionTimeMultiplier > 0 {
+			addrInfo.ejectionTimeMultiplier--
+			continue
+		}
+		if addrInfo.latestEjectionTimestamp.IsZero() {
+			// Address is already not ejected, so no need to check for whether
+			// to uneject the address below.
+			continue
+		}
+		et := b.cfg.BaseEjectionTime.Nanoseconds() * addrInfo.ejectionTimeMultiplier
+		met := max(b.cfg.BaseEjectionTime.Nanoseconds(), b.cfg.MaxEjectionTime.Nanoseconds())
+		curTimeAfterEt := now().After(addrInfo.latestEjectionTimestamp.Add(time.Duration(min(et, met))))
+		if curTimeAfterEt {
+			b.unejectAddress(addrInfo)
+		}
+	}
+
+	// This conditional is only for testing (since the interval timer algorithm
+	// is called manually there); it will never be hit in production.
+	if b.intervalTimer != nil {
+		b.intervalTimer.Stop()
+	}
+	b.intervalTimer = afterFunc(b.cfg.Interval, b.intervalTimerAlgorithm)
+}
+
+// addrsWithAtLeastRequestVolume returns a slice of address information for all
+// addresses with at least the given request volume.
+//
+// Caller must hold b.mu.
+func (b *outlierDetectionBalancer) addrsWithAtLeastRequestVolume(requestVolume uint32) []*addressInfo {
+	var addrs []*addressInfo
+	for _, addrInfo := range b.addrs {
+		bucket := addrInfo.callCounter.inactiveBucket
+		rv := bucket.numSuccesses + bucket.numFailures
+		if rv >= requestVolume {
+			addrs = append(addrs, addrInfo)
+		}
+	}
+	return addrs
+}
+
+// meanAndStdDev returns the mean and std dev of the fractions of successful
+// requests of the addresses passed in.
+//
+// Caller must hold b.mu.
+func (b *outlierDetectionBalancer) meanAndStdDev(addrs []*addressInfo) (float64, float64) {
+	var totalFractionOfSuccessfulRequests float64
+	var mean float64
+	for _, addrInfo := range addrs {
+		bucket := addrInfo.callCounter.inactiveBucket
+		rv := bucket.numSuccesses + bucket.numFailures
+		totalFractionOfSuccessfulRequests += float64(bucket.numSuccesses) / float64(rv)
+	}
+	mean = totalFractionOfSuccessfulRequests / float64(len(addrs))
+	var sumOfSquares float64
+	for _, addrInfo := range addrs {
+		bucket := addrInfo.callCounter.inactiveBucket
+		rv := bucket.numSuccesses + bucket.numFailures
+		devFromMean := (float64(bucket.numSuccesses) / float64(rv)) - mean
+		sumOfSquares += devFromMean * devFromMean
+	}
+	variance := sumOfSquares / float64(len(addrs))
+	return mean, math.Sqrt(variance)
+}
+
+// successRateAlgorithm ejects any address whose success rate falls sufficiently
+// below the mean success rate of the other addresses, measured in standard
+// deviations, subject to the other configured heuristics.
+//
+// Caller must hold b.mu.
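A worked example of the statistics above feeding the check in successRateAlgorithm; the numbers are illustrative, and 1900 is the documented default stdevFactor:

// inactive-bucket success fractions: 0.95, 0.96, 0.97, 0.98, 0.60
// mean      = 4.46 / 5 = 0.892
// variance  = (0.058^2 + 0.068^2 + 0.078^2 + 0.088^2 + 0.292^2) / 5 ~= 0.0214
// stddev    = sqrt(0.0214) ~= 0.146
// threshold = mean - stddev*(stdevFactor/1000) = 0.892 - 0.146*1.9 ~= 0.614
// The 0.60 address falls below 0.614, so it is ejected, subject to the
// EnforcementPercentage coin flip and the MaxEjectionPercent cap.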
+func (b *outlierDetectionBalancer) successRateAlgorithm() {
+	addrsToConsider := b.addrsWithAtLeastRequestVolume(b.cfg.SuccessRateEjection.RequestVolume)
+	if len(addrsToConsider) < int(b.cfg.SuccessRateEjection.MinimumHosts) {
+		return
+	}
+	mean, stddev := b.meanAndStdDev(addrsToConsider)
+	for _, addrInfo := range addrsToConsider {
+		bucket := addrInfo.callCounter.inactiveBucket
+		ejectionCfg := b.cfg.SuccessRateEjection
+		if float64(b.numAddrsEjected)/float64(len(b.addrs))*100 >= float64(b.cfg.MaxEjectionPercent) {
+			return
+		}
+		successRate := float64(bucket.numSuccesses) / float64(bucket.numSuccesses+bucket.numFailures)
+		if successRate < (mean - stddev*(float64(ejectionCfg.StdevFactor)/1000)) {
+			if uint32(grpcrand.Int31n(100)) < ejectionCfg.EnforcementPercentage {
+				b.ejectAddress(addrInfo)
+			}
+		}
+	}
+}
+
+// failurePercentageAlgorithm ejects any address whose failure percentage
+// exceeds the configured threshold, subject to the other configured
+// heuristics.
+//
+// Caller must hold b.mu.
+func (b *outlierDetectionBalancer) failurePercentageAlgorithm() {
+	addrsToConsider := b.addrsWithAtLeastRequestVolume(b.cfg.FailurePercentageEjection.RequestVolume)
+	if len(addrsToConsider) < int(b.cfg.FailurePercentageEjection.MinimumHosts) {
+		return
+	}
+
+	for _, addrInfo := range addrsToConsider {
+		bucket := addrInfo.callCounter.inactiveBucket
+		ejectionCfg := b.cfg.FailurePercentageEjection
+		if float64(b.numAddrsEjected)/float64(len(b.addrs))*100 >= float64(b.cfg.MaxEjectionPercent) {
+			return
+		}
+		failurePercentage := (float64(bucket.numFailures) / float64(bucket.numSuccesses+bucket.numFailures)) * 100
+		if failurePercentage > float64(b.cfg.FailurePercentageEjection.Threshold) {
+			if uint32(grpcrand.Int31n(100)) < ejectionCfg.EnforcementPercentage {
+				b.ejectAddress(addrInfo)
+			}
+		}
+	}
+}
+
+// Caller must hold b.mu.
+func (b *outlierDetectionBalancer) ejectAddress(addrInfo *addressInfo) {
+	b.numAddrsEjected++
+	addrInfo.latestEjectionTimestamp = b.timerStartTime
+	addrInfo.ejectionTimeMultiplier++
+	for _, sbw := range addrInfo.sws {
+		sbw.eject()
+	}
+}
+
+// Caller must hold b.mu.
+func (b *outlierDetectionBalancer) unejectAddress(addrInfo *addressInfo) {
+	b.numAddrsEjected--
+	addrInfo.latestEjectionTimestamp = time.Time{}
+	for _, sbw := range addrInfo.sws {
+		sbw.uneject()
+	}
+}
+
+// addressInfo contains the runtime information about an address that pertains
+// to Outlier Detection. This struct and all of its fields are protected by
+// outlierDetectionBalancer.mu in the case where it is accessed through the
+// address map. In the case of Picker callbacks, the writes to the activeBucket
+// of callCounter are protected by atomically loading and storing
+// unsafe.Pointers (see further explanation in incrementCounter()).
+type addressInfo struct {
+	// The call result counter object.
+	callCounter *callCounter
+
+	// The latest ejection timestamp, or zero if the address is currently not
+	// ejected.
+	latestEjectionTimestamp time.Time
+
+	// The current ejection time multiplier, starting at 0.
+	ejectionTimeMultiplier int64
+
+	// A list of subchannel wrapper objects that correspond to this address.
+ sws []*subConnWrapper +} + +func newAddressInfo() *addressInfo { + return &addressInfo{ + callCounter: newCallCounter(), + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/callcounter.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/callcounter.go new file mode 100644 index 0000000000000..4597f727b6e06 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/callcounter.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "sync/atomic" + "unsafe" +) + +type bucket struct { + numSuccesses uint32 + numFailures uint32 +} + +func newCallCounter() *callCounter { + return &callCounter{ + activeBucket: unsafe.Pointer(&bucket{}), + inactiveBucket: &bucket{}, + } +} + +// callCounter has two buckets, which each count successful and failing RPC's. +// The activeBucket is used to actively count any finished RPC's, and the +// inactiveBucket is populated with this activeBucket's data every interval for +// use by the Outlier Detection algorithm. +type callCounter struct { + // activeBucket updates every time a call finishes (from picker passed to + // Client Conn), so protect pointer read with atomic load of unsafe.Pointer + // so picker does not have to grab a mutex per RPC, the critical path. + activeBucket unsafe.Pointer // bucket + inactiveBucket *bucket +} + +func (cc *callCounter) clear() { + atomic.StorePointer(&cc.activeBucket, unsafe.Pointer(&bucket{})) + cc.inactiveBucket = &bucket{} +} + +// "When the timer triggers, the inactive bucket is zeroed and swapped with the +// active bucket. Then the inactive bucket contains the number of successes and +// failures since the last time the timer triggered. Those numbers are used to +// evaluate the ejection criteria." - A50. +func (cc *callCounter) swap() { + ib := cc.inactiveBucket + *ib = bucket{} + ab := (*bucket)(atomic.SwapPointer(&cc.activeBucket, unsafe.Pointer(ib))) + cc.inactiveBucket = &bucket{ + numSuccesses: atomic.LoadUint32(&ab.numSuccesses), + numFailures: atomic.LoadUint32(&ab.numFailures), + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go new file mode 100644 index 0000000000000..c931674ae4096 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/config.go @@ -0,0 +1,180 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "time" + + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +// SuccessRateEjection is parameters for the success rate ejection algorithm. +// This algorithm monitors the request success rate for all endpoints and ejects +// individual endpoints whose success rates are statistical outliers. +type SuccessRateEjection struct { + // StddevFactor is used to determine the ejection threshold for + // success rate outlier ejection. The ejection threshold is the difference + // between the mean success rate, and the product of this factor and the + // standard deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. + StdevFactor uint32 `json:"stdevFactor,omitempty"` + // EnforcementPercentage is the % chance that a host will be actually ejected + // when an outlier status is detected through success rate statistics. This + // setting can be used to disable ejection or to ramp it up slowly. Defaults + // to 100. + EnforcementPercentage uint32 `json:"enforcementPercentage,omitempty"` + // MinimumHosts is the number of hosts in a cluster that must have enough + // request volume to detect success rate outliers. If the number of hosts is + // less than this setting, outlier detection via success rate statistics is + // not performed for any host in the cluster. Defaults to 5. + MinimumHosts uint32 `json:"minimumHosts,omitempty"` + // RequestVolume is the minimum number of total requests that must be + // collected in one interval (as defined by the interval duration above) to + // include this host in success rate based outlier detection. If the volume + // is lower than this setting, outlier detection via success rate statistics + // is not performed for that host. Defaults to 100. + RequestVolume uint32 `json:"requestVolume,omitempty"` +} + +// Equal returns whether the SuccessRateEjection is the same with the parameter. +func (sre *SuccessRateEjection) Equal(sre2 *SuccessRateEjection) bool { + if sre == nil && sre2 == nil { + return true + } + if (sre != nil) != (sre2 != nil) { + return false + } + if sre.StdevFactor != sre2.StdevFactor { + return false + } + if sre.EnforcementPercentage != sre2.EnforcementPercentage { + return false + } + if sre.MinimumHosts != sre2.MinimumHosts { + return false + } + return sre.RequestVolume == sre2.RequestVolume +} + +// FailurePercentageEjection is parameters for the failure percentage algorithm. +// This algorithm ejects individual endpoints whose failure rate is greater than +// some threshold, independently of any other endpoint. +type FailurePercentageEjection struct { + // Threshold is the failure percentage to use when determining failure + // percentage-based outlier detection. If the failure percentage of a given + // host is greater than or equal to this value, it will be ejected. Defaults + // to 85. + Threshold uint32 `json:"threshold,omitempty"` + // EnforcementPercentage is the % chance that a host will be actually + // ejected when an outlier status is detected through failure percentage + // statistics. This setting can be used to disable ejection or to ramp it up + // slowly. Defaults to 0. 
+ EnforcementPercentage uint32 `json:"enforcementPercentage,omitempty"` + // MinimumHosts is the minimum number of hosts in a cluster in order to + // perform failure percentage-based ejection. If the total number of hosts + // in the cluster is less than this value, failure percentage-based ejection + // will not be performed. Defaults to 5. + MinimumHosts uint32 `json:"minimumHosts,omitempty"` + // RequestVolume is the minimum number of total requests that must be + // collected in one interval (as defined by the interval duration above) to + // perform failure percentage-based ejection for this host. If the volume is + // lower than this setting, failure percentage-based ejection will not be + // performed for this host. Defaults to 50. + RequestVolume uint32 `json:"requestVolume,omitempty"` +} + +// Equal returns whether the FailurePercentageEjection is the same with the +// parameter. +func (fpe *FailurePercentageEjection) Equal(fpe2 *FailurePercentageEjection) bool { + if fpe == nil && fpe2 == nil { + return true + } + if (fpe != nil) != (fpe2 != nil) { + return false + } + if fpe.Threshold != fpe2.Threshold { + return false + } + if fpe.EnforcementPercentage != fpe2.EnforcementPercentage { + return false + } + if fpe.MinimumHosts != fpe2.MinimumHosts { + return false + } + return fpe.RequestVolume == fpe2.RequestVolume +} + +// LBConfig is the config for the outlier detection balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // Interval is the time interval between ejection analysis sweeps. This can + // result in both new ejections as well as addresses being returned to + // service. Defaults to 10s. + Interval time.Duration `json:"interval,omitempty"` + // BaseEjectionTime is the base time that a host is ejected for. The real + // time is equal to the base time multiplied by the number of times the host + // has been ejected and is capped by MaxEjectionTime. Defaults to 30s. + BaseEjectionTime time.Duration `json:"baseEjectionTime,omitempty"` + // MaxEjectionTime is the maximum time that an address is ejected for. If + // not specified, the default value (300s) or the BaseEjectionTime value is + // applied, whichever is larger. + MaxEjectionTime time.Duration `json:"maxEjectionTime,omitempty"` + // MaxEjectionPercent is the maximum % of an upstream cluster that can be + // ejected due to outlier detection. Defaults to 10% but will eject at least + // one host regardless of the value. + MaxEjectionPercent uint32 `json:"maxEjectionPercent,omitempty"` + // SuccessRateEjection is the parameters for the success rate ejection + // algorithm. If set, success rate ejections will be performed. + SuccessRateEjection *SuccessRateEjection `json:"successRateEjection,omitempty"` + // FailurePercentageEjection is the parameters for the failure percentage + // algorithm. If set, failure rate ejections will be performed. + FailurePercentageEjection *FailurePercentageEjection `json:"failurePercentageEjection,omitempty"` + // ChildPolicy is the config for the child policy. + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +// EqualIgnoringChildPolicy returns whether the LBConfig is same with the +// parameter outside of the child policy, only comparing the Outlier Detection +// specific configuration. 
+func (lbc *LBConfig) EqualIgnoringChildPolicy(lbc2 *LBConfig) bool { + if lbc == nil && lbc2 == nil { + return true + } + if (lbc != nil) != (lbc2 != nil) { + return false + } + if lbc.Interval != lbc2.Interval { + return false + } + if lbc.BaseEjectionTime != lbc2.BaseEjectionTime { + return false + } + if lbc.MaxEjectionTime != lbc2.MaxEjectionTime { + return false + } + if lbc.MaxEjectionPercent != lbc2.MaxEjectionPercent { + return false + } + if !lbc.SuccessRateEjection.Equal(lbc2.SuccessRateEjection) { + return false + } + return lbc.FailurePercentageEjection.Equal(lbc2.FailurePercentageEjection) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/logging.go new file mode 100644 index 0000000000000..705b0cb6918d1 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package outlierdetection + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[outlier-detection-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *outlierDetectionBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go new file mode 100644 index 0000000000000..8e25eb788b1d1 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go @@ -0,0 +1,68 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/resolver" +) + +// subConnWrapper wraps every created SubConn in the Outlier Detection Balancer, +// to help track the latest state update from the underlying SubConn, and also +// whether or not this SubConn is ejected. +type subConnWrapper struct { + balancer.SubConn + + // addressInfo is a pointer to the subConnWrapper's corresponding address + // map entry, if the map entry exists. 
+ addressInfo unsafe.Pointer // *addressInfo + // These two pieces of state will reach eventual consistency due to sync in + // run(), and child will always have the correctly updated SubConnState. + // latestState is the latest state update from the underlying SubConn. This + // is used whenever a SubConn gets unejected. + latestState balancer.SubConnState + ejected bool + + scUpdateCh *buffer.Unbounded + + // addresses is the list of address(es) this SubConn was created with to + // help support any change in address(es) + addresses []resolver.Address +} + +// eject causes the wrapper to report a state update with the TRANSIENT_FAILURE +// state, and to stop passing along updates from the underlying subchannel. +func (scw *subConnWrapper) eject() { + scw.scUpdateCh.Put(&ejectionUpdate{ + scw: scw, + isEjected: true, + }) +} + +// uneject causes the wrapper to report a state update with the latest update +// from the underlying subchannel, and resume passing along updates from the +// underlying subchannel. +func (scw *subConnWrapper) uneject() { + scw.scUpdateCh.Put(&ejectionUpdate{ + scw: scw, + isEjected: false, + }) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go index 98fd0672af421..b5cace684960c 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go @@ -30,6 +30,8 @@ import ( "time" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancergroup" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" @@ -53,7 +55,6 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba b := &priorityBalancer{ cc: cc, done: grpcsync.NewEvent(), - childToPriority: make(map[string]int), children: make(map[string]*childBalancer), childBalancerStateUpdate: buffer.NewUnbounded(), } @@ -90,22 +91,17 @@ type priorityBalancer struct { mu sync.Mutex childInUse string - // priority of the child that's current in use. Int starting from 0, and 0 - // is the higher priority. - priorityInUse int // priorities is a list of child names from higher to lower priority. priorities []string - // childToPriority is a map from the child name to it's priority. Priority - // is an int start from 0, and 0 is the higher priority. - childToPriority map[string]int // children is a map from child name to sub-balancers. children map[string]*childBalancer - // The timer to give a priority some time to connect. And if the priority - // doesn't go into Ready/Failure, the next priority will be started. - // - // One timer is enough because there can be at most one priority in init - // state. - priorityInitTimer *timerWrapper + + // Set during UpdateClientConnState when calling into sub-balancers. + // Prevents child updates from recomputing the active priority or sending + // an update of the aggregated picker to the parent. Cleared after all + // sub-balancers have finished UpdateClientConnState, after which + // syncPriority is called manually. 
+ inhibitPickerUpdates bool } func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) error { @@ -117,7 +113,6 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err addressesSplit := hierarchy.Group(s.ResolverState.Addresses) b.mu.Lock() - defer b.mu.Unlock() // Create and remove children, since we know all children from the config // are used by some priority. for name, newSubConfig := range newConfig.Children { @@ -152,15 +147,14 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err } // Update config and address, but note that this doesn't send the - // updates to child balancer (the child balancer might not be built, if - // it's a low priority). + // updates to non-started child balancers (the child balancer might not + // be built, if it's a low priority). currentChild.updateConfig(newSubConfig, resolver.State{ Addresses: addressesSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes, }) } - // Remove child from children if it's not in new config. for name, oldChild := range b.children { if _, ok := newConfig.Children[name]; !ok { @@ -170,13 +164,32 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err // Update priorities and handle priority changes. b.priorities = newConfig.Priorities - b.childToPriority = make(map[string]int, len(newConfig.Priorities)) - for pi, pName := range newConfig.Priorities { - b.childToPriority[pName] = pi + + // Everything was removed by the update. + if len(b.priorities) == 0 { + b.childInUse = "" + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(ErrAllPrioritiesRemoved), + }) + b.mu.Unlock() + return nil } - // Sync the states of all children to the new updated priorities. This - // include starting/stopping child balancers when necessary. - b.syncPriority() + + // This will sync the states of all children to the new updated + // priorities. Includes starting/stopping child balancers when necessary. + // Block picker updates until all children have had a chance to call + // UpdateState to prevent races where, e.g., the active priority reports + // transient failure but a higher priority may have reported something that + // made it active, and if the transient failure update is handled first, + // RPCs could fail. + b.inhibitPickerUpdates = true + // Add an item to queue to notify us when the current items in the queue + // are done and syncPriority has been called. + done := make(chan struct{}) + b.childBalancerStateUpdate.Put(resumePickerUpdates{done: done}) + b.mu.Unlock() + <-done return nil } @@ -198,31 +211,21 @@ func (b *priorityBalancer) Close() { // Clear states of the current child in use, so if there's a race in picker // update, it will be dropped. b.childInUse = "" - b.stopPriorityInitTimer() + // Stop the child policies, this is necessary to stop the init timers in the + // children. + for _, child := range b.children { + child.stop() + } } func (b *priorityBalancer) ExitIdle() { b.bg.ExitIdle() } -// stopPriorityInitTimer stops the priorityInitTimer if it's not nil, and set it -// to nil. -// -// Caller must hold b.mu. -func (b *priorityBalancer) stopPriorityInitTimer() { - timerW := b.priorityInitTimer - if timerW == nil { - return - } - b.priorityInitTimer = nil - timerW.stopped = true - timerW.timer.Stop() -} - // UpdateState implements balancergroup.BalancerStateAggregator interface. 
The // balancer group sends new connectivity state and picker here. func (b *priorityBalancer) UpdateState(childName string, state balancer.State) { - b.childBalancerStateUpdate.Put(&childBalancerState{ + b.childBalancerStateUpdate.Put(childBalancerState{ name: childName, s: state, }) @@ -233,6 +236,10 @@ type childBalancerState struct { s balancer.State } +type resumePickerUpdates struct { + done chan struct{} +} + // run handles child update in a separate goroutine, so if the child sends // updates inline (when called by parent), it won't cause deadlocks (by trying // to hold the same mutex). @@ -241,11 +248,22 @@ func (b *priorityBalancer) run() { select { case u := <-b.childBalancerStateUpdate.Get(): b.childBalancerStateUpdate.Load() - s := u.(*childBalancerState) // Needs to handle state update in a goroutine, because each state // update needs to start/close child policy, which could result in // deadlock. - b.handleChildStateUpdate(s.name, s.s) + b.mu.Lock() + if b.done.HasFired() { + b.mu.Unlock() + return + } + switch s := u.(type) { + case childBalancerState: + b.handleChildStateUpdate(s.name, s.s) + case resumePickerUpdates: + b.inhibitPickerUpdates = false + b.syncPriority(b.childInUse) + close(s.done) + } + b.mu.Unlock() case <-b.done.Done(): return } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go index 600705da01af8..34bab34c915cd 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go @@ -19,6 +19,8 @@ package priority import ( + "time" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/connectivity" @@ -36,7 +38,17 @@ type childBalancer struct { rState resolver.State started bool - state balancer.State + // This is set when the child reports TransientFailure, and unset when it + // reports Ready or Idle. It is used to decide whether the failover timer + // should start when the child is transitioning into Connecting. The timer + // will be restarted if the child has not reported TF more recently than it + // reported Ready or Idle. + reportedTF bool + // The latest state the child balancer provided. + state balancer.State + // The timer to give a priority some time to connect. If the priority + // doesn't go into Ready/Failure before it fires, the next priority will be + // started. + initTimer *timerWrapper } // newChildBalancer creates a child balancer placeholder, but doesn't @@ -63,11 +75,14 @@ func (cb *childBalancer) updateBuilder(bb balancer.Builder) { } // updateConfig sets childBalancer's config and state, but doesn't send update to -// the child balancer. +// the child balancer unless it is started. func (cb *childBalancer) updateConfig(child *Child, rState resolver.State) { cb.ignoreReresolutionRequests = child.IgnoreReresolutionRequests cb.config = child.Config.Config cb.rState = rState + if cb.started { + cb.sendUpdate() + } } // start builds the child balancer if it's not already started. @@ -79,6 +94,8 @@ func (cb *childBalancer) start() { } cb.started = true cb.parent.bg.Add(cb.name, cb.bb) + cb.startInitTimer() + cb.sendUpdate() } // sendUpdate sends the addresses and config to the child balancer.
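The run() goroutine above is the "single consumer draining an unbounded queue" pattern: producers enqueue updates while holding their own locks, and one goroutine applies everything under b.mu, so no lock is ever taken inline from a child callback, and the done channel on resumePickerUpdates lets UpdateClientConnState block until the queue has drained. A minimal, self-contained sketch of that pattern follows; miniBalancer, stateMsg, and resumeMsg are illustrative names, and a buffered channel stands in for the internal buffer.Unbounded type.

package main

import (
	"fmt"
	"sync"
)

// stateMsg and resumeMsg play the roles of childBalancerState and
// resumePickerUpdates in the sketch.
type stateMsg struct{ name, state string }
type resumeMsg struct{ done chan struct{} }

type miniBalancer struct {
	mu      sync.Mutex
	inhibit bool
	updates chan interface{} // stands in for buffer.Unbounded
}

// run is the single consumer: it applies every queued update under mu, so a
// producer can enqueue from inside its own locked region without deadlock.
func (b *miniBalancer) run() {
	for u := range b.updates {
		b.mu.Lock()
		switch m := u.(type) {
		case stateMsg:
			if !b.inhibit {
				fmt.Println("handling state", m.state, "from", m.name)
			}
		case resumeMsg:
			b.inhibit = false
			close(m.done) // unblocks the caller waiting for the queue to drain
		}
		b.mu.Unlock()
	}
}

func main() {
	b := &miniBalancer{inhibit: true, updates: make(chan interface{}, 16)}
	go b.run()
	b.updates <- stateMsg{name: "p0", state: "READY"}
	done := make(chan struct{})
	b.updates <- resumeMsg{done: done}
	<-done // everything queued before resumeMsg has now been processed
}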
@@ -103,10 +120,46 @@ func (cb *childBalancer) stop() { if !cb.started { return } + cb.stopInitTimer() cb.parent.bg.Remove(cb.name) cb.started = false cb.state = balancer.State{ ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), } + // Clear child.reportedTF, so that if this child is started later, it will + // be given time to connect. + cb.reportedTF = false +} + +func (cb *childBalancer) startInitTimer() { + if cb.initTimer != nil { + return + } + // Need this local variable to capture timerW in the AfterFunc closure + // to check the stopped boolean. + timerW := &timerWrapper{} + cb.initTimer = timerW + timerW.timer = time.AfterFunc(DefaultPriorityInitTimeout, func() { + cb.parent.mu.Lock() + defer cb.parent.mu.Unlock() + if timerW.stopped { + return + } + cb.initTimer = nil + // Re-sync the priority. This will switch to the next priority if + // there's any. Note that it's important sync() is called after setting + // initTimer to nil. + cb.parent.syncPriority("") + }) +} + +func (cb *childBalancer) stopInitTimer() { + timerW := cb.initTimer + if timerW == nil { + return + } + cb.initTimer = nil + timerW.stopped = true + timerW.timer.Stop() } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go index 3a18f6e10d83b..c12dfe47ffea4 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go @@ -23,7 +23,6 @@ import ( "time" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/connectivity" ) @@ -36,9 +35,10 @@ var ( DefaultPriorityInitTimeout = 10 * time.Second ) -// syncPriority handles priority after a config update. It makes sure the -// balancer state (started or not) is in sync with the priorities (even in -// tricky cases where a child is moved from a priority to another). +// syncPriority handles priority after a config update or a child balancer +// connectivity state update. It makes sure the balancer state (started or not) +// is in sync with the priorities (even in tricky cases where a child is moved +// from a priority to another). // // It's guaranteed that after this function returns: // - If some child is READY, it is childInUse, and all lower priorities are @@ -53,10 +53,13 @@ var ( // set parent ClientConn to TransientFailure // - Otherwise, Scan all children from p0, and check balancer stats: // - For any of the following cases: -// - If balancer is not started (not built), this is either a new child -// with high priority, or a new builder for an existing child. -// - If balancer is READY -// - If this is the lowest priority +// - If balancer is not started (not built), this is either a new child with +// high priority, or a new builder for an existing child. +// - If balancer is Connecting and has non-nil initTimer (meaning it +// transitioned from Ready or Idle to connecting, not from TF, so we +// should give it init-time to connect). +// - If balancer is READY or IDLE +// - If this is the lowest priority // - do the following: // - if this is not the old childInUse, override picker so old picker is no // longer used. @@ -64,21 +67,11 @@ var ( // - forward the new addresses and config // // Caller must hold b.mu. -func (b *priorityBalancer) syncPriority() { - // Everything was removed by the update. 
- if len(b.priorities) == 0 { - b.childInUse = "" - b.priorityInUse = 0 - // Stop the init timer. This can happen if the only priority is removed - // shortly after it's added. - b.stopPriorityInitTimer() - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: base.NewErrPicker(ErrAllPrioritiesRemoved), - }) +func (b *priorityBalancer) syncPriority(childUpdating string) { + if b.inhibitPickerUpdates { + b.logger.Infof("Skipping update from child with name %q", childUpdating) return } - for p, name := range b.priorities { child, ok := b.children[name] if !ok { @@ -89,16 +82,16 @@ func (b *priorityBalancer) syncPriority() { if !child.started || child.state.ConnectivityState == connectivity.Ready || child.state.ConnectivityState == connectivity.Idle || + (child.state.ConnectivityState == connectivity.Connecting && child.initTimer != nil) || p == len(b.priorities)-1 { - if b.childInUse != "" && b.childInUse != child.name { - // childInUse was set and is different from this child, will - // change childInUse later. We need to update picker here - // immediately so parent stops using the old picker. + if b.childInUse != child.name || child.name == childUpdating { + b.logger.Warningf("childInUse, childUpdating: %q, %q", b.childInUse, child.name) + // If we switch children or the child in use just updated its + // picker, push the child's picker to the parent. b.cc.UpdateState(child.state) } b.logger.Infof("switching to (%q, %v) in syncPriority", child.name, p) b.switchToChild(child, p) - child.sendUpdate() break } } @@ -123,8 +116,7 @@ func (b *priorityBalancer) stopSubBalancersLowerThanPriority(p int) { // - stop all child with lower priorities // - if childInUse is not this child // - set childInUse to this child -// - stops init timer -// - if this child is not started, start it, and start a init timer +// - if this child is not started, start it // // Note that it does NOT send the current child state (picker) to the parent // ClientConn. The caller needs to send it if necessary. @@ -154,209 +146,41 @@ func (b *priorityBalancer) switchToChild(child *childBalancer, priority int) { return } b.childInUse = child.name - b.priorityInUse = priority - - // Init timer is always for childInUse. Since we are switching to a - // different child, we will stop the init timer no matter what. If this - // child is not started, we will start the init timer later. - b.stopPriorityInitTimer() if !child.started { child.start() - // Need this local variable to capture timerW in the AfterFunc closure - // to check the stopped boolean. - timerW := &timerWrapper{} - b.priorityInitTimer = timerW - timerW.timer = time.AfterFunc(DefaultPriorityInitTimeout, func() { - b.mu.Lock() - defer b.mu.Unlock() - if timerW.stopped { - return - } - b.priorityInitTimer = nil - // Switch to the next priority if there's any. - if pNext := priority + 1; pNext < len(b.priorities) { - nameNext := b.priorities[pNext] - if childNext, ok := b.children[nameNext]; ok { - b.switchToChild(childNext, pNext) - childNext.sendUpdate() - } - } - }) } } // handleChildStateUpdate start/close priorities based on the connectivity // state. 
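As an aside before handleChildStateUpdate, the selection rule that syncPriority's loop above implements can be restated in isolation: walk the children in priority order and stop at the first one that is unstarted, READY, IDLE, CONNECTING with a live init timer, or the last priority. The following sketch restates just that rule with simplified stand-in types, not the vendored ones.

package main

import "fmt"

type child struct {
	name          string
	started       bool
	state         string // "READY", "IDLE", "CONNECTING", "TRANSIENT_FAILURE"
	initTimerLive bool   // non-nil initTimer in the real code
}

// pickChild returns the index of the first child, in priority order, that
// should be in use per the rules documented above.
func pickChild(children []child) int {
	for i, c := range children {
		switch {
		case !c.started: // never built: start it and give it an init window
			return i
		case c.state == "READY" || c.state == "IDLE":
			return i
		case c.state == "CONNECTING" && c.initTimerLive:
			return i // still within its init window; don't fail over yet
		case i == len(children)-1:
			return i // lowest priority: use it no matter what
		}
	}
	return -1 // unreachable for a non-empty slice
}

func main() {
	cs := []child{
		{name: "p0", started: true, state: "TRANSIENT_FAILURE"},
		{name: "p1", started: true, state: "CONNECTING", initTimerLive: true},
		{name: "p2", started: false},
	}
	fmt.Println(cs[pickChild(cs)].name) // p1: p0 failed, p1 is still connecting
}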
func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.State) { - b.mu.Lock() - defer b.mu.Unlock() - if b.done.HasFired() { - return - } - - priority, ok := b.childToPriority[childName] - if !ok { - b.logger.Warningf("priority: received picker update with unknown child %v", childName) - return - } - - if b.childInUse == "" { - b.logger.Warningf("priority: no child is in use when picker update is received") - return - } - - // priorityInUse is higher than this priority. - if b.priorityInUse < priority { - // Lower priorities should all be closed, this is an unexpected update. - // Can happen if the child policy sends an update after we tell it to - // close. - b.logger.Warningf("priority: received picker update from priority %v, lower than priority in use %v", priority, b.priorityInUse) - return - } - // Update state in child. The updated picker will be sent to parent later if // necessary. child, ok := b.children[childName] if !ok { - b.logger.Warningf("priority: child balancer not found for child %v, priority %v", childName, priority) + b.logger.Warningf("priority: child balancer not found for child %v", childName) return } - oldState := child.state.ConnectivityState child.state = s + // We start/stop the init timer of this child based on the new connectivity + // state. syncPriority() later will need the init timer (to check if it's + // nil or not) to decide which child to switch to. switch s.ConnectivityState { case connectivity.Ready, connectivity.Idle: - // Note that idle is also handled as if it's Ready. It will close the - // lower priorities (which will be kept in a cache, not deleted), and - // new picks will use the Idle picker. - b.handlePriorityWithNewStateReady(child, priority) + child.reportedTF = false + child.stopInitTimer() case connectivity.TransientFailure: - b.handlePriorityWithNewStateTransientFailure(child, priority) + child.reportedTF = true + child.stopInitTimer() case connectivity.Connecting: - b.handlePriorityWithNewStateConnecting(child, priority, oldState) + if !child.reportedTF { + child.startInitTimer() + } default: // New state is Shutdown, should never happen. Don't forward. } -} - -// handlePriorityWithNewStateReady handles state Ready from a higher or equal -// priority. -// -// An update with state Ready: -// - If it's from higher priority: -// - Switch to this priority -// - Forward the update -// - If it's from priorityInUse: -// - Forward only -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold mu. -func (b *priorityBalancer) handlePriorityWithNewStateReady(child *childBalancer, priority int) { - // If one priority higher or equal to priorityInUse goes Ready, stop the - // init timer. If update is from higher than priorityInUse, priorityInUse - // will be closed, and the init timer will become useless. - b.stopPriorityInitTimer() - - // priorityInUse is lower than this priority, switch to this. - if b.priorityInUse > priority { - b.logger.Infof("Switching priority from %v to %v, because latter became Ready", b.priorityInUse, priority) - b.switchToChild(child, priority) - } - // Forward the update since it's READY. - b.cc.UpdateState(child.state) -} - -// handlePriorityWithNewStateTransientFailure handles state TransientFailure -// from a higher or equal priority. 
-// -// An update with state TransientFailure: -// - If it's from a higher priority: -// - Do not forward, and do nothing -// - If it's from priorityInUse: -// - If there's no lower: -// - Forward and do nothing else -// - If there's a lower priority: -// - Switch to the lower -// - Forward the lower child's state -// - Do NOT forward this update -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold mu. -func (b *priorityBalancer) handlePriorityWithNewStateTransientFailure(child *childBalancer, priority int) { - // priorityInUse is lower than this priority, do nothing. - if b.priorityInUse > priority { - return - } - // priorityInUse sends a failure. Stop its init timer. - b.stopPriorityInitTimer() - priorityNext := priority + 1 - if priorityNext >= len(b.priorities) { - // Forward this update. - b.cc.UpdateState(child.state) - return - } - b.logger.Infof("Switching priority from %v to %v, because former became TransientFailure", priority, priorityNext) - nameNext := b.priorities[priorityNext] - childNext := b.children[nameNext] - b.switchToChild(childNext, priorityNext) - b.cc.UpdateState(childNext.state) - childNext.sendUpdate() -} -// handlePriorityWithNewStateConnecting handles state Connecting from a higher -// than or equal priority. -// -// An update with state Connecting: -// - If it's from a higher priority -// - Do nothing -// - If it's from priorityInUse, the behavior depends on previous state. -// -// When new state is Connecting, the behavior depends on previous state. If the -// previous state was Ready, this is a transition out from Ready to Connecting. -// Assuming there are multiple backends in the same priority, this mean we are -// in a bad situation and we should failover to the next priority (Side note: -// the current connectivity state aggregating algorithm (e.g. round-robin) is -// not handling this right, because if many backends all go from Ready to -// Connecting, the overall situation is more like TransientFailure, not -// Connecting). -// -// If the previous state was Idle, we don't do anything special with failure, -// and simply forward the update. The init timer should be in process, will -// handle failover if it timeouts. If the previous state was TransientFailure, -// we do not forward, because the lower priority is in use. -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold mu. -func (b *priorityBalancer) handlePriorityWithNewStateConnecting(child *childBalancer, priority int, oldState connectivity.State) { - // priorityInUse is lower than this priority, do nothing. - if b.priorityInUse > priority { - return - } - - switch oldState { - case connectivity.Ready: - // Handling transition from Ready to Connecting, is same as handling - // TransientFailure. There's no need to stop the init timer, because it - // should have been stopped when state turned Ready. - priorityNext := priority + 1 - if priorityNext >= len(b.priorities) { - // Forward this update. - b.cc.UpdateState(child.state) - return - } - b.logger.Infof("Switching priority from %v to %v, because former became TransientFailure", priority, priorityNext) - nameNext := b.priorities[priorityNext] - childNext := b.children[nameNext] - b.switchToChild(childNext, priorityNext) - b.cc.UpdateState(childNext.state) - childNext.sendUpdate() - case connectivity.Idle: - b.cc.UpdateState(child.state) - default: - // Old state is Connecting, TransientFailure or Shutdown. Don't forward. 
- } + child.parent.syncPriority(childName) } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/logging.go index 64a1d467f5545..3e0f0adf58eb9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/logging.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/logging.go @@ -32,3 +32,7 @@ var logger = grpclog.Component("xds") func prefixLogger(p *ringhashBalancer) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) } + +func subConnPrefixLogger(p *ringhashBalancer, sc *subConn) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)+fmt.Sprintf("[subConn %p] ", sc)) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go index dcea6d46e5172..ec3b5605690d1 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go @@ -143,6 +143,8 @@ func (p *picker) handleTransientFailure(e *ringEntry) (balancer.PickResult, erro return balancer.PickResult{}, fmt.Errorf("no connection is Ready") } +// nextSkippingDuplicates finds the next entry in the ring, with a different +// subconn from the given entry. func nextSkippingDuplicates(ring *ring, entry *ringEntry) *ringEntry { for next := ring.next(entry); next != entry; next = ring.next(next) { if next.sc != entry.sc { @@ -152,3 +154,28 @@ func nextSkippingDuplicates(ring *ring, entry *ringEntry) *ringEntry { // There's no qualifying next entry. return nil } + +// nextSkippingDuplicatesSubConn finds the next subconn in the ring, that's +// different from the given subconn. +func nextSkippingDuplicatesSubConn(ring *ring, sc *subConn) *subConn { + var entry *ringEntry + for _, it := range ring.items { + if it.sc == sc { + entry = it + break + } + } + if entry == nil { + // If the given subconn is not in the ring (e.g. it was deleted), return + // the first one. + if len(ring.items) > 0 { + return ring.items[0].sc + } + return nil + } + ee := nextSkippingDuplicates(ring, entry) + if ee == nil { + return nil + } + return ee.sc +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go index 68e844cfb4873..71d31eaeb8b01 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go @@ -19,7 +19,6 @@ package ringhash import ( - "fmt" "math" "sort" "strconv" @@ -43,8 +42,8 @@ type ringEntry struct { sc *subConn } -// newRing creates a ring from the subConns. The ring size is limited by the -// passed in max/min. +// newRing creates a ring from the subConns stored in the AddressMap. The ring +// size is limited by the passed in max/min. // // ring entries will be created for each subConn, and subConn with high weight // (specified by the address) may have multiple entries. @@ -64,12 +63,12 @@ type ringEntry struct { // // To pick from a ring, a binary search will be done for the given target hash, // and first item with hash >= given hash will be returned. -func newRing(subConns map[resolver.Address]*subConn, minRingSize, maxRingSize uint64) (*ring, error) { +// +// Must be called with a non-empty subConns map. 
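Before the newRing implementation that the comment above documents, here is a toy end-to-end illustration of the ring layout and the binary-search pick: entries are (hash, address) pairs sorted by hash, and a request hash selects the first entry at or after it, wrapping around. It uses hash/fnv from the standard library instead of xxhash, and a fixed replica count instead of weight scaling, purely to keep the sketch short.

package main

import (
	"fmt"
	"hash/fnv"
	"sort"
	"strconv"
)

type entry struct {
	hash uint64
	addr string
}

func hash64(s string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(s))
	return h.Sum64()
}

func main() {
	// Two replicas per address; the real code derives replica counts from
	// normalized weights and hashes addr+index, as newRing does below.
	var ring []entry
	for _, addr := range []string{"10.0.0.1:80", "10.0.0.2:80"} {
		for i := 0; i < 2; i++ {
			ring = append(ring, entry{hash: hash64(addr + strconv.Itoa(i)), addr: addr})
		}
	}
	sort.Slice(ring, func(i, j int) bool { return ring[i].hash < ring[j].hash })

	// pick: first entry with hash >= the request hash, wrapping around.
	req := hash64("some-request-key")
	i := sort.Search(len(ring), func(i int) bool { return ring[i].hash >= req })
	if i == len(ring) {
		i = 0
	}
	fmt.Println("picked", ring[i].addr)
}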
+func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64) *ring { // https://github.com/envoyproxy/envoy/blob/765c970f06a4c962961a0e03a467e165b276d50f/source/common/upstream/ring_hash_lb.cc#L114 - normalizedWeights, minWeight, err := normalizeWeights(subConns) - if err != nil { - return nil, err - } + normalizedWeights, minWeight := normalizeWeights(subConns) + // Normalized weights for {3,3,4} are {0.3,0.3,0.4}. // Scale up the size of the ring such that the least-weighted host gets a @@ -95,7 +94,7 @@ for _, scw := range normalizedWeights { targetIdx += scale * scw.weight for float64(idx) < targetIdx { - h := xxhash.Sum64String(scw.sc.addr + strconv.Itoa(len(items))) + h := xxhash.Sum64String(scw.sc.addr + strconv.Itoa(idx)) items = append(items, &ringEntry{idx: idx, hash: h, sc: scw.sc}) idx++ } @@ -106,31 +105,30 @@ for i, ii := range items { ii.idx = i } - return &ring{items: items}, nil + return &ring{items: items} } // normalizeWeights divides all the weights by the sum, so that the total weight // is 1. -func normalizeWeights(subConns map[resolver.Address]*subConn) (_ []subConnWithWeight, min float64, _ error) { - if len(subConns) == 0 { - return nil, 0, fmt.Errorf("number of subconns is 0") - } +// +// Must be called with a non-empty subConns map. +func normalizeWeights(subConns *resolver.AddressMap) ([]subConnWithWeight, float64) { var weightSum uint32 - for a := range subConns { - // The address weight was moved from attributes to the Metadata field. - // This is necessary (all the attributes need to be stripped) for the - // balancer to detect identical {address+weight} combination. - weightSum += a.Metadata.(uint32) - } - if weightSum == 0 { - return nil, 0, fmt.Errorf("total weight of all subconns is 0") + keys := subConns.Keys() + for _, a := range keys { + weightSum += getWeightAttribute(a) } - weightSumF := float64(weightSum) - ret := make([]subConnWithWeight, 0, len(subConns)) - min = math.MaxFloat64 - for a, sc := range subConns { - nw := float64(a.Metadata.(uint32)) / weightSumF - ret = append(ret, subConnWithWeight{sc: sc, weight: nw}) + ret := make([]subConnWithWeight, 0, len(keys)) + min := float64(1.0) + for _, a := range keys { + v, _ := subConns.Get(a) + scInfo := v.(*subConn) + // getWeightAttribute() returns 1 if the weight attribute is not found + // on the address. And since this function is guaranteed to be called + // with a non-empty subConns map, weightSum is guaranteed to be + // non-zero. So, we need not worry about a divide-by-zero error here. + nw := float64(getWeightAttribute(a)) / float64(weightSum) + ret = append(ret, subConnWithWeight{sc: scInfo, weight: nw}) if nw < min { min = nw } @@ -142,7 +140,7 @@ func normalizeWeights(subConns map[resolver.Address]*subConn) (_ []subConnWithWe // where an address is added and then removed, the RPCs will still pick the // same old SubConn. sort.Slice(ret, func(i, j int) bool { return ret[i].sc.addr < ret[j].sc.addr }) - return ret, min, nil + return ret, min } // pick does a binary search.
It returns the item with smallest index i that diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go index f8a47f165bdfe..59ccd0127a2a9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go @@ -47,7 +47,7 @@ type bb struct{} func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { b := &ringhashBalancer{ cc: cc, - subConns: make(map[resolver.Address]*subConn), + subConns: resolver.NewAddressMap(), scStates: make(map[balancer.SubConn]*subConn), csEvltr: &connectivityStateEvaluator{}, } @@ -65,8 +65,10 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err } type subConn struct { - addr string - sc balancer.SubConn + addr string + weight uint32 + sc balancer.SubConn + logger *grpclog.PrefixLogger mu sync.RWMutex // This is the actual state of this SubConn (as updated by the ClientConn). @@ -98,6 +100,10 @@ type subConn struct { // When connectivity state is updated to Idle for this SubConn, if // connectQueued is true, Connect() will be called on the SubConn. connectQueued bool + // attemptingToConnect indicates if this subconn is attempting to connect. + // It's set when queueConnect is called. It's unset when the state is + // changed to Ready/Shutdown, or Idle (and if connectQueued is false). + attemptingToConnect bool } // setState updates the state of this SubConn. @@ -112,7 +118,10 @@ func (sc *subConn) setState(s connectivity.State) { // Trigger Connect() if new state is Idle, and there is a queued connect. if sc.connectQueued { sc.connectQueued = false + sc.logger.Infof("Executing a queued connect for subConn moving to state: %v", sc.state) sc.sc.Connect() + } else { + sc.attemptingToConnect = false } case connectivity.Connecting: // Clear connectQueued if the SubConn isn't failing. This state @@ -122,11 +131,14 @@ func (sc *subConn) setState(s connectivity.State) { // Clear connectQueued if the SubConn isn't failing. This state // transition is unlikely to happen, but handle this just in case. sc.connectQueued = false + sc.attemptingToConnect = false // Set to a non-failing state. sc.failing = false case connectivity.TransientFailure: // Set to a failing state. sc.failing = true + case connectivity.Shutdown: + sc.attemptingToConnect = false } sc.state = s } @@ -149,22 +161,30 @@ func (sc *subConn) effectiveState() connectivity.State { func (sc *subConn) queueConnect() { sc.mu.Lock() defer sc.mu.Unlock() + sc.attemptingToConnect = true if sc.state == connectivity.Idle { + sc.logger.Infof("Executing a queued connect for subConn in state: %v", sc.state) sc.sc.Connect() return } // Queue this connect, and when this SubConn switches back to Idle (happens // after backoff in TransientFailure), it will Connect(). + sc.logger.Infof("Queueing a connect for subConn in state: %v", sc.state) sc.connectQueued = true } +func (sc *subConn) isAttemptingToConnect() bool { + sc.mu.Lock() + defer sc.mu.Unlock() + return sc.attemptingToConnect +} + type ringhashBalancer struct { cc balancer.ClientConn logger *grpclog.PrefixLogger - config *LBConfig - - subConns map[resolver.Address]*subConn // `attributes` is stripped from the keys of this map (the addresses) + config *LBConfig + subConns *resolver.AddressMap // Map from resolver.Address to `*subConn`. 
scStates map[balancer.SubConn]*subConn // ring is always in sync with subConns. When subConns change, a new ring is @@ -192,55 +212,48 @@ type ringhashBalancer struct { // SubConn states are Idle. func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { var addrsUpdated bool - // addrsSet is the set converted from addrs, it's used for quick lookup of - // an address. - // - // Addresses in this map all have attributes stripped, but metadata set to - // the weight. So that weight change can be detected. - // - // TODO: this won't be necessary if there are ways to compare address - // attributes. - addrsSet := make(map[resolver.Address]struct{}) - for _, a := range addrs { - aNoAttrs := a - // Strip attributes but set Metadata to the weight. - aNoAttrs.Attributes = nil - w := weightedroundrobin.GetAddrInfo(a).Weight - if w == 0 { - // If weight is not set, use 1. - w = 1 - } - aNoAttrs.Metadata = w - addrsSet[aNoAttrs] = struct{}{} - if scInfo, ok := b.subConns[aNoAttrs]; !ok { - // When creating SubConn, the original address with attributes is - // passed through. So that connection configurations in attributes - // (like creds) will be used. - sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: true}) + // addrsSet is the set converted from addrs, used for quick lookup. + addrsSet := resolver.NewAddressMap() + for _, addr := range addrs { + addrsSet.Set(addr, true) + newWeight := getWeightAttribute(addr) + if val, ok := b.subConns.Get(addr); !ok { + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{HealthCheckEnabled: true}) if err != nil { - logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + b.logger.Warningf("Failed to create new SubConn: %v", err) continue } - scs := &subConn{addr: a.Addr, sc: sc} + scs := &subConn{addr: addr.Addr, weight: newWeight, sc: sc} + scs.logger = subConnPrefixLogger(b, scs) scs.setState(connectivity.Idle) b.state = b.csEvltr.recordTransition(connectivity.Shutdown, connectivity.Idle) - b.subConns[aNoAttrs] = scs + b.subConns.Set(addr, scs) b.scStates[sc] = scs addrsUpdated = true } else { - // Always update the subconn's address in case the attributes - // changed. The SubConn does a reflect.DeepEqual of the new and old - // addresses. So this is a noop if the current address is the same - // as the old one (including attributes). - b.subConns[aNoAttrs] = scInfo - b.cc.UpdateAddresses(scInfo.sc, []resolver.Address{a}) + // We have seen this address before and created a subConn for it. If the + // weight associated with the address has changed, update the subConns map + // with the new weight. This will be used when a new ring is created. + // + // There is no need to call UpdateAddresses on the subConn at this point + // since *only* the weight attribute has changed, and that does not affect + // subConn uniqueness. + scInfo := val.(*subConn) + if oldWeight := scInfo.weight; oldWeight != newWeight { + scInfo.weight = newWeight + b.subConns.Set(addr, scInfo) + // Return true to force recreation of the ring. + addrsUpdated = true + } } } - for a, scInfo := range b.subConns { - // a was removed by resolver. - if _, ok := addrsSet[a]; !ok { + for _, addr := range b.subConns.Keys() { + // addr was removed by resolver. 
+ if _, ok := addrsSet.Get(addr); !ok { + v, _ := b.subConns.Get(addr) + scInfo := v.(*subConn) b.cc.RemoveSubConn(scInfo.sc) - delete(b.subConns, a) + b.subConns.Delete(addr) addrsUpdated = true // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. @@ -251,28 +264,22 @@ func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) - if b.config == nil { - newConfig, ok := s.BalancerConfig.(*LBConfig) - if !ok { - return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) - } - b.config = newConfig + newConfig, ok := s.BalancerConfig.(*LBConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } - // Successful resolution; clear resolver error and ensure we return nil. - b.resolverErr = nil - if b.updateAddresses(s.ResolverState.Addresses) { - // If addresses were updated, no matter whether it resulted in SubConn - // creation/deletion, or just weight update, we will need to regenerate - // the ring. - var err error - b.ring, err = newRing(b.subConns, b.config.MinRingSize, b.config.MaxRingSize) - if err != nil { - panic(err) - } - b.regeneratePicker() - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + // If addresses were updated, whether it resulted in SubConn + // creation/deletion, or just weight update, we need to regenerate the ring + // and send a new picker. + regenerateRing := b.updateAddresses(s.ResolverState.Addresses) + + // If the ring configuration has changed, we need to regenerate the ring and + // send a new picker. + if b.config == nil || b.config.MinRingSize != newConfig.MinRingSize || b.config.MaxRingSize != newConfig.MaxRingSize { + regenerateRing = true } + b.config = newConfig // If resolver state contains no addresses, return an error so ClientConn // will trigger re-resolve. Also records this as a resolver error, so when @@ -282,12 +289,23 @@ func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) err b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + + if regenerateRing { + // Ring creation is guaranteed not to fail because we call newRing() + // with a non-empty subConns map. + b.ring = newRing(b.subConns, b.config.MinRingSize, b.config.MaxRingSize) + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + } + + // Successful resolution; clear resolver error and return nil. + b.resolverErr = nil return nil } func (b *ringhashBalancer) ResolverError(err error) { b.resolverErr = err - if len(b.subConns) == 0 { + if b.subConns.Len() == 0 { b.state = connectivity.TransientFailure } @@ -315,15 +333,18 @@ // for some RPCs.
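The updateAddresses logic above is a set-diff over resolver.AddressMap: build a set from the incoming address list, create per-address state for addresses not seen before, then delete state for addresses that disappeared. A small standalone sketch of that pattern, with illustrative weight values:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	current := resolver.NewAddressMap()
	current.Set(resolver.Address{Addr: "10.0.0.1:80"}, uint32(1))
	current.Set(resolver.Address{Addr: "10.0.0.2:80"}, uint32(2))

	incoming := []resolver.Address{{Addr: "10.0.0.2:80"}, {Addr: "10.0.0.3:80"}}

	seen := resolver.NewAddressMap()
	for _, a := range incoming {
		seen.Set(a, true)
		if _, ok := current.Get(a); !ok {
			current.Set(a, uint32(1)) // new address: create per-address state
			fmt.Println("added", a.Addr)
		}
	}
	for _, a := range current.Keys() { // Keys returns a snapshot, safe to Delete inside the loop
		if _, ok := seen.Get(a); !ok {
			current.Delete(a) // removed by the resolver
			fmt.Println("removed", a.Addr)
		}
	}
}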
func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState - b.logger.Infof("handle SubConn state change: %p, %v", sc, s) + if logger.V(2) { + b.logger.Infof("Handle SubConn state change: %p, %v", sc, s) + } scs, ok := b.scStates[sc] if !ok { - b.logger.Infof("got state changes for an unknown SubConn: %p, %v", sc, s) + b.logger.Infof("Received state change for an unknown SubConn: %p, %v", sc, s) return } oldSCState := scs.effectiveState() scs.setState(s) newSCState := scs.effectiveState() + b.logger.Infof("SubConn's effective old state was: %v, new state is %v", oldSCState, newSCState) var sendUpdate bool oldBalancerState := b.state @@ -334,27 +355,21 @@ func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balance switch s { case connectivity.Idle: - // When the overall state is TransientFailure, this will never get picks - // if there's a lower priority. Need to keep the SubConns connecting so - // there's a chance it will recover. - if b.state == connectivity.TransientFailure { - scs.queueConnect() - } // No need to send an update. No queued RPC can be unblocked. If the // overall state changed because of this, sendUpdate is already true. case connectivity.Connecting: // No need to send an update. No queued RPC can be unblocked. If the // overall state changed because of this, sendUpdate is already true. case connectivity.Ready: - // Resend the picker, there's no need to regenerate the picker because - // the ring didn't change. + // We need to regenerate the picker even if the ring has not changed + // because we could be moving from TRANSIENT_FAILURE to READY, in which + // case, we need to update the error picker returned earlier. + b.regeneratePicker() sendUpdate = true case connectivity.TransientFailure: // Save error to be reported via picker. b.connErr = state.ConnectionError - // Regenerate picker to update error message. b.regeneratePicker() - sendUpdate = true case connectivity.Shutdown: // When an address was removed by resolver, b called RemoveSubConn but // kept the sc's state in scStates. Remove state for this sc here. @@ -362,8 +377,46 @@ func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balance } if sendUpdate { + b.logger.Infof("Pushing new state %v and picker %p", b.state, b.picker) b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } + + switch b.state { + case connectivity.Connecting, connectivity.TransientFailure: + // When overall state is TransientFailure, we need to make sure at least + // one SubConn is attempting to connect, otherwise this balancer may + // never get picks if the parent is priority. + // + // Because we report Connecting as the overall state when only one + // SubConn is in TransientFailure, we do the same check for Connecting + // here. + // + // Note that this check also covers deleting SubConns due to address + // change. E.g. if the SubConn attempting to connect is deleted, and the + // overall state is TF. Since there must be at least one SubConn + // attempting to connect, we need to trigger one. But since the deleted + // SubConn will eventually send a shutdown update, this code will run + // and trigger the next SubConn to connect. + for _, v := range b.subConns.Values() { + sc := v.(*subConn) + if sc.isAttemptingToConnect() { + return + } + } + // Trigger a SubConn (this updated SubConn's next SubConn in the ring) + // to connect if nobody is attempting to connect. 
+ sc := nextSkippingDuplicatesSubConn(b.ring, scs) + if sc != nil { + sc.queueConnect() + return + } + // This handles the edge case where we have a single subConn in the + // ring. nextSkippingDuplicatesSubConn() would have returned nil. We + // still need to ensure that some subConn is attempting to connect, in + // order to give the LB policy a chance to move out of + // TRANSIENT_FAILURE. Hence, we try connecting on the current subConn. + scs.queueConnect() + } } // mergeErrors builds an error from the last connection error and the last @@ -390,11 +443,17 @@ func (b *ringhashBalancer) regeneratePicker() { func (b *ringhashBalancer) Close() {} +func (b *ringhashBalancer) ExitIdle() { + // ExitIdle implementation is a no-op because connections are triggered + // either by picks or by subConn state changes. +} + // connectivityStateEvaluator takes the connectivity states of multiple SubConns // and returns one aggregated connectivity state. // // It's not thread safe. type connectivityStateEvaluator struct { + sum uint64 nums [5]uint64 } @@ -404,6 +463,7 @@ type connectivityStateEvaluator struct { // - If there is at least one subchannel in READY state, report READY. // - If there are 2 or more subchannels in TRANSIENT_FAILURE state, report TRANSIENT_FAILURE. // - If there is at least one subchannel in CONNECTING state, report CONNECTING. +// - If there is one subchannel in TRANSIENT_FAILURE and there is more than one subchannel, report state CONNECTING. // - If there is at least one subchannel in Idle state, report Idle. // - Otherwise, report TRANSIENT_FAILURE. // @@ -417,6 +477,14 @@ func (cse *connectivityStateEvaluator) recordTransition(oldState, newState conne updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. cse.nums[state] += updateVal } + if oldState == connectivity.Shutdown { + // There's technically no transition from Shutdown. But we record a + // Shutdown->Idle transition when a new SubConn is created. + cse.sum++ + } + if newState == connectivity.Shutdown { + cse.sum-- + } if cse.nums[connectivity.Ready] > 0 { return connectivity.Ready @@ -427,8 +495,26 @@ func (cse *connectivityStateEvaluator) recordTransition(oldState, newState conne if cse.nums[connectivity.Connecting] > 0 { return connectivity.Connecting } + if cse.nums[connectivity.TransientFailure] > 0 && cse.sum > 1 { + return connectivity.Connecting + } if cse.nums[connectivity.Idle] > 0 { return connectivity.Idle } return connectivity.TransientFailure } + +// getWeightAttribute is a convenience function which returns the value of the +// weight attribute stored in the BalancerAttributes field of addr, using the +// weightedroundrobin package. +// +// When used in the xDS context, the weight attribute is guaranteed to be +// non-zero. But, when used in a non-xDS context, the weight attribute could be +// unset. A default of 1 is used in the latter case.
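Stepping back, the aggregation rules listed in the recordTransition comment above reduce to a priority-ordered switch over counters, including the new rule that a single failing subchannel among several reports CONNECTING. A sketch of those rules on plain counts, not the vendored evaluator type:

package main

import "fmt"

type counts struct {
	ready, connecting, tf, idle, total uint64
}

// aggregate applies the rules in the documented order.
func aggregate(c counts) string {
	switch {
	case c.ready > 0:
		return "READY"
	case c.tf > 1:
		return "TRANSIENT_FAILURE"
	case c.connecting > 0:
		return "CONNECTING"
	case c.tf == 1 && c.total > 1:
		// A single failing subchannel out of many: report CONNECTING so a
		// parent policy (e.g. priority) doesn't fail over prematurely.
		return "CONNECTING"
	case c.idle > 0:
		return "IDLE"
	default:
		return "TRANSIENT_FAILURE"
	}
}

func main() {
	fmt.Println(aggregate(counts{tf: 1, idle: 2, total: 3})) // CONNECTING
}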
+func getWeightAttribute(addr resolver.Address) uint32 { + w := weightedroundrobin.GetAddrInfo(addr).Weight + if w == 0 { + return 1 + } + return w +} diff --git a/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go index 54776f20cf0b9..b95a101116ed0 100644 --- a/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go +++ b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go @@ -37,7 +37,7 @@ type ClusterSpecifier interface { // ParseClusterSpecifierConfig parses the provided configuration // proto.Message from the top level RDS configuration. The resulting // BalancerConfig will be used as configuration for a child LB Policy of the - // Cluster Manager LB Policy. + // Cluster Manager LB Policy. A nil BalancerConfig is invalid. ParseClusterSpecifierConfig(proto.Message) (BalancerConfig, error) } @@ -65,3 +65,8 @@ func Register(cs ClusterSpecifier) { func Get(typeURL string) ClusterSpecifier { return m[typeURL] } + +// UnregisterForTesting unregisters the ClusterSpecifier for testing purposes. +func UnregisterForTesting(typeURL string) { + delete(m, typeURL) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go new file mode 100644 index 0000000000000..a167cc5fa2c9a --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rls implements the RLS cluster specifier plugin. +package rls + +import ( + "encoding/json" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/anypb" +) + +func init() { + if envconfig.XDSRLS { + clusterspecifier.Register(rls{}) + } + + // TODO: Remove these once the RLS env var is removed. + internal.RegisterRLSClusterSpecifierPluginForTesting = func() { + clusterspecifier.Register(rls{}) + } + internal.UnregisterRLSClusterSpecifierPluginForTesting = func() { + for _, typeURL := range rls.TypeURLs(rls{}) { + clusterspecifier.UnregisterForTesting(typeURL) + } + } +} + +type rls struct{} + +func (rls) TypeURLs() []string { + return []string{"type.googleapis.com/grpc.lookup.v1.RouteLookupClusterSpecifier"} +} + +// lbConfigJSON is the RLS LB Policies configuration in JSON format. +// RouteLookupConfig will be a raw JSON string from the passed in proto +// configuration, and the other fields will be hardcoded. 
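For a concrete sense of what ParseClusterSpecifierConfig emits via the lbConfigJSON struct that follows, this standalone snippet marshals the same struct shape; the lookupService value is made up purely for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// cfg reproduces the shape of the lbConfigJSON struct below.
type cfg struct {
	RouteLookupConfig                json.RawMessage              `json:"routeLookupConfig"`
	ChildPolicy                      []map[string]json.RawMessage `json:"childPolicy"`
	ChildPolicyConfigTargetFieldName string                       `json:"childPolicyConfigTargetFieldName"`
}

func main() {
	c := cfg{
		// The routeLookupConfig contents come from the incoming proto; this
		// value is hypothetical.
		RouteLookupConfig:                json.RawMessage(`{"lookupService":"rls.example.com"}`),
		ChildPolicy:                      []map[string]json.RawMessage{{"cds_experimental": json.RawMessage("{}")}},
		ChildPolicyConfigTargetFieldName: "cluster",
	}
	out, _ := json.Marshal(c)
	fmt.Println(string(out))
	// {"routeLookupConfig":{"lookupService":"rls.example.com"},"childPolicy":[{"cds_experimental":{}}],"childPolicyConfigTargetFieldName":"cluster"}
}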
+type lbConfigJSON struct { + RouteLookupConfig json.RawMessage `json:"routeLookupConfig"` + ChildPolicy []map[string]json.RawMessage `json:"childPolicy"` + ChildPolicyConfigTargetFieldName string `json:"childPolicyConfigTargetFieldName"` +} + +func (rls) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.BalancerConfig, error) { + if cfg == nil { + return nil, fmt.Errorf("rls_csp: nil configuration message provided") + } + any, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("rls_csp: error parsing config %v: unknown type %T", cfg, cfg) + } + rlcs := new(rlspb.RouteLookupClusterSpecifier) + + if err := ptypes.UnmarshalAny(any, rlcs); err != nil { + return nil, fmt.Errorf("rls_csp: error parsing config %v: %v", cfg, err) + } + rlcJSON, err := protojson.Marshal(rlcs.GetRouteLookupConfig()) + if err != nil { + return nil, fmt.Errorf("rls_csp: error marshaling route lookup config: %v: %v", rlcs.GetRouteLookupConfig(), err) + } + lbCfgJSON := &lbConfigJSON{ + RouteLookupConfig: rlcJSON, // "JSON form of RouteLookupClusterSpecifier.config" - RLS in xDS Design Doc + ChildPolicy: []map[string]json.RawMessage{ + { + "cds_experimental": json.RawMessage("{}"), + }, + }, + ChildPolicyConfigTargetFieldName: "cluster", + } + + rawJSON, err := json.Marshal(lbCfgJSON) + if err != nil { + return nil, fmt.Errorf("rls_csp: error marshaling load balancing config %v: %v", lbCfgJSON, err) + } + + rlsBB := balancer.Get(internal.RLSLoadBalancingPolicyName) + if rlsBB == nil { + return nil, fmt.Errorf("RLS LB policy not registered") + } + if _, err = rlsBB.(balancer.ConfigParser).ParseConfig(rawJSON); err != nil { + return nil, fmt.Errorf("rls_csp: validation error from rls lb policy parsing %v", err) + } + + return clusterspecifier.BalancerConfig{{internal.RLSLoadBalancingPolicyName: lbCfgJSON}}, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go index bb85dc80d4603..209283c3bf598 100644 --- a/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go @@ -27,6 +27,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/xds/rbac" @@ -41,21 +42,15 @@ func init() { if envconfig.XDSRBAC { httpfilter.Register(builder{}) } -} - -// RegisterForTesting registers the RBAC HTTP Filter for testing purposes, regardless -// of the RBAC environment variable. This is needed because there is no way to set the RBAC -// environment variable to true in a test before init() in this package is run. -func RegisterForTesting() { - httpfilter.Register(builder{}) -} -// UnregisterForTesting unregisters the RBAC HTTP Filter for testing purposes. This is needed because -// there is no way to unregister the HTTP Filter after registering it solely for testing purposes using -// rbac.RegisterForTesting() -func UnregisterForTesting() { - for _, typeURL := range builder.TypeURLs(builder{}) { - httpfilter.UnregisterForTesting(typeURL) + // TODO: Remove these once the RBAC env var is removed. 
+ internal.RegisterRBACHTTPFilterForTesting = func() { + httpfilter.Register(builder{}) + } + internal.UnregisterRBACHTTPFilterForTesting = func() { + for _, typeURL := range builder.TypeURLs(builder{}) { + httpfilter.UnregisterForTesting(typeURL) + } } } diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go index fd75af2104576..d1dd79354ae0c 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go @@ -245,9 +245,8 @@ func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies [ generatedHash = true generatedPolicyHash = true case xdsresource.HashPolicyTypeChannelID: - // Hash the ClientConn pointer which logically uniquely - // identifies the client. - policyHash = xxhash.Sum64String(fmt.Sprintf("%p", &cs.r.cc)) + // Use the static channel ID as the hash for this policy. + policyHash = cs.r.channelID generatedHash = true generatedPolicyHash = true } diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go index 3db9be1cac073..4f8609ce9df52 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go @@ -58,6 +58,10 @@ type ldsConfig struct { // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. +// +// TODO(easwars): Make this function a method on the xdsResolver type. +// Currently, there is a single call site for this function, and all arguments +// passed to it are fields of the xdsResolver type. func watchService(c xdsclient.XDSClient, serviceName string, cb func(serviceUpdate, error), logger *grpclog.PrefixLogger) (cancel func()) { w := &serviceUpdateWatcher{ logger: logger, @@ -139,7 +143,7 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsresource.ListenerUpdate, // update before reporting this LDS config. if w.lastUpdate.virtualHost != nil { // We want to send an update with the new fields from the new LDS - // (e.g. max stream duration), and old fields from the the previous + // (e.g. max stream duration), and old fields from the previous // RDS. // // But note that this should only happen when virtual host is set, diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go index c4b147d21efb4..f473fcbaa733a 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go @@ -25,7 +25,9 @@ import ( "strings" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" @@ -37,10 +39,10 @@ import ( const xdsScheme = "xds" -// NewBuilderForTesting creates a new xds resolver builder using a specific xds +// newBuilderForTesting creates a new xds resolver builder using a specific xds // bootstrap config, so tests can use multiple xds clients in different // ClientConns at the same time. 
-func NewBuilderForTesting(config []byte) (resolver.Builder, error) { +func newBuilderForTesting(config []byte) (resolver.Builder, error) { return &xdsResolverBuilder{ newXDSClient: func() (xdsclient.XDSClient, error) { return xdsclient.NewWithBootstrapContentsForTesting(config) @@ -53,6 +55,7 @@ var newXDSClient = func() (xdsclient.XDSClient, error) { return xdsclient.New() func init() { resolver.Register(&xdsResolverBuilder{}) + internal.NewXDSResolverWithConfigForTesting = newBuilderForTesting } type xdsResolverBuilder struct { @@ -63,13 +66,13 @@ type xdsResolverBuilder struct { // // The xds bootstrap process is performed (and a new xds client is built) every // time an xds resolver is built. -func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (_ resolver.Resolver, retErr error) { +func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (_ resolver.Resolver, retErr error) { r := &xdsResolver{ - target: t, cc: cc, closed: grpcsync.NewEvent(), updateCh: make(chan suWithError, 1), activeClusters: make(map[string]*clusterInfo), + channelID: grpcrand.Uint64(), } defer func() { if retErr != nil { @@ -77,7 +80,7 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op } }() r.logger = prefixLogger(r) - r.logger.Infof("Creating resolver for target: %+v", t) + r.logger.Infof("Creating resolver for target: %+v", target) newXDSClient := newXDSClient if b.newXDSClient != nil { @@ -115,7 +118,7 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op // - If authority is not set in the target, use the top level template // - If authority is set, use the template from the authority map. template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate - if authority := r.target.URL.Host; authority != "" { + if authority := target.URL.Host; authority != "" { a := bootstrapConfig.Authorities[authority] if a == nil { return nil, fmt.Errorf("xds: authority %q is not found in the bootstrap file", authority) @@ -127,19 +130,19 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op template = a.ClientListenerResourceNameTemplate } } - endpoint := r.target.URL.Path + endpoint := target.URL.Path if endpoint == "" { - endpoint = r.target.URL.Opaque + endpoint = target.URL.Opaque } endpoint = strings.TrimPrefix(endpoint, "/") - resourceName := bootstrap.PopulateResourceTemplate(template, endpoint) + r.ldsResourceName = bootstrap.PopulateResourceTemplate(template, endpoint) - // Register a watch on the xdsClient for the user's dial target. - cancelWatch := watchService(r.client, resourceName, r.handleServiceUpdate, r.logger) - r.logger.Infof("Watch started on resource name %v with xds-client %p", r.target.Endpoint, r.client) + // Register a watch on the xdsClient for the resource name determined above. + cancelWatch := watchService(r.client, r.ldsResourceName, r.handleServiceUpdate, r.logger) + r.logger.Infof("Watch started on resource name %v with xds-client %p", r.ldsResourceName, r.client) r.cancelWatch = func() { cancelWatch() - r.logger.Infof("Watch cancel on resource name %v with xds-client %p", r.target.Endpoint, r.client) + r.logger.Infof("Watch cancel on resource name %v with xds-client %p", r.ldsResourceName, r.client) } go r.run() @@ -165,11 +168,10 @@ type suWithError struct { // (which performs LDS/RDS queries for the same), and passes the received // updates to the ClientConn. 
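The channelID field introduced on xdsResolver below replaces pointer-based hashing for the CHANNEL_ID policy (see the serviceconfig.go hunk above): a random uint64 drawn once per resolver gives the same per-channel stability without formatting a pointer. A trivial sketch of the idea, with illustrative names:

package main

import (
	"fmt"
	"math/rand"
)

type miniResolver struct {
	channelID uint64 // fixed for the lifetime of the channel
}

func newMiniResolver() *miniResolver {
	return &miniResolver{channelID: rand.Uint64()}
}

// hashForChannelIDPolicy is stable across picker rebuilds and RPCs, which is
// the property the pointer-based hash only provided incidentally.
func (r *miniResolver) hashForChannelIDPolicy() uint64 { return r.channelID }

func main() {
	r := newMiniResolver()
	fmt.Println(r.hashForChannelIDPolicy() == r.hashForChannelIDPolicy()) // true
}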
type xdsResolver struct { - target resolver.Target - cc resolver.ClientConn - closed *grpcsync.Event - - logger *grpclog.PrefixLogger + cc resolver.ClientConn + closed *grpcsync.Event + logger *grpclog.PrefixLogger + ldsResourceName string // The underlying xdsClient which performs all xDS requests and responses. client xdsclient.XDSClient @@ -184,6 +186,10 @@ type xdsResolver struct { activeClusters map[string]*clusterInfo curConfigSelector *configSelector + + // A random number which uniquely identifies the channel which owns this + // resolver. + channelID uint64 } // sendNewServiceConfig prunes active clusters, generates a new service config @@ -212,7 +218,7 @@ func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool { r.cc.ReportError(err) return false } - r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.target.Endpoint, r.client, pretty.FormatJSON(sc)) + r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.ldsResourceName, r.client, pretty.FormatJSON(sc)) // Send the update to the ClientConn. state := iresolver.SetConfigSelector(resolver.State{ @@ -231,7 +237,7 @@ func (r *xdsResolver) run() { return case update := <-r.updateCh: if update.err != nil { - r.logger.Warningf("Watch error on resource %v from xds-client %p, %v", r.target.Endpoint, r.client, update.err) + r.logger.Warningf("Watch error on resource %v from xds-client %p, %v", r.ldsResourceName, r.client, update.err) if xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound { // If error is resource-not-found, it means the LDS // resource was removed. Ultimately send an empty service @@ -259,7 +265,7 @@ func (r *xdsResolver) run() { // Create the config selector for this update. cs, err := r.newConfigSelector(update.su) if err != nil { - r.logger.Warningf("Error parsing update on resource %v from xds-client %p: %v", r.target.Endpoint, r.client, err) + r.logger.Warningf("Error parsing update on resource %v from xds-client %p: %v", r.ldsResourceName, r.client, err) r.cc.ReportError(err) continue } diff --git a/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go index c90f9672ea32b..c6ab885fcf904 100644 --- a/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go +++ b/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go @@ -111,6 +111,7 @@ func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan stru drainCallback: params.DrainCallback, isUnspecifiedAddr: params.Listener.Addr().(*net.TCPAddr).IP.IsUnspecified(), + mode: connectivity.ServingModeStarting, closed: grpcsync.NewEvent(), goodUpdate: grpcsync.NewEvent(), ldsUpdateCh: make(chan ldsUpdateWithError, 1), @@ -429,14 +430,24 @@ func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { } } +// switchMode updates the value of serving mode and filter chains stored in the +// listenerWrapper. And if the serving mode has changed, it invokes the +// registered mode change callback. func (l *listenerWrapper) switchMode(fcs *xdsresource.FilterChainManager, newMode connectivity.ServingMode, err error) { l.mu.Lock() defer l.mu.Unlock() l.filterChains = fcs + if l.mode == newMode && l.mode == connectivity.ServingModeServing { + // Redundant updates are suppressed only when we are SERVING and the new + // mode is also SERVING. 
In the other case (where we are NOT_SERVING and the
+ // new mode is also NOT_SERVING), the update is not suppressed as:
+ // 1. the error may have changed
+ // 2. it provides a timestamp of the last backoff attempt
+ return
+ }
 l.mode = newMode
 if l.modeCallback != nil {
 l.modeCallback(l.Listener.Addr(), newMode, err)
 }
- l.logger.Warningf("Listener %q entering mode: %q due to error: %v", l.Addr(), newMode, err)
 }
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go
index 5141816273619..9076a76fd0dc0 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go
@@ -17,36 +17,12 @@
 package xdsclient
-import (
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
- "google.golang.org/grpc/xds/internal/xdsclient/load"
- "google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
-)
+import "google.golang.org/grpc/resolver"
 type clientKeyType string
 const clientKey = clientKeyType("grpc.xds.internal.client.Client")
-// XDSClient is a full fledged gRPC client which queries a set of discovery APIs
-// (collectively termed as xDS) on a remote management server, to discover
-// various dynamic resources.
-type XDSClient interface {
- WatchListener(string, func(xdsresource.ListenerUpdate, error)) func()
- WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func()
- WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func()
- WatchEndpoints(clusterName string, edsCb func(xdsresource.EndpointsUpdate, error)) (cancel func())
- ReportLoad(server *bootstrap.ServerConfig) (*load.Store, func())
-
- DumpLDS() map[string]xdsresource.UpdateWithMD
- DumpRDS() map[string]xdsresource.UpdateWithMD
- DumpCDS() map[string]xdsresource.UpdateWithMD
- DumpEDS() map[string]xdsresource.UpdateWithMD
-
- BootstrapConfig() *bootstrap.Config
- Close()
-}
-
 // FromResolverState returns the Client from state, or nil if not present.
 func FromResolverState(state resolver.State) XDSClient {
 cs, _ := state.Attributes.Value(clientKey).(XDSClient)
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go
index 1a236849c377a..817cb7338f5a5 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go
@@ -18,126 +18,12 @@
 package xdsclient
 import (
- "errors"
- "fmt"
-
 "google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
 "google.golang.org/grpc/xds/internal/xdsclient/load"
 "google.golang.org/grpc/xds/internal/xdsclient/pubsub"
 "google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
 )
-// findAuthority returns the authority for this name. If it doesn't already
-// exist, one will be created.
-//
-// Note that this doesn't always create new authority. authorities with the same
-// config but different names are shared.
-//
-// The returned unref function must be called when the caller is done using this
-// authority, without holding c.authorityMu.
-//
-// Caller must not hold c.authorityMu.
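The suppression rule added to switchMode above is worth spelling out: repeated SERVING updates carry no new information, but repeated NOT_SERVING updates are still delivered because the underlying error (and the implied backoff timestamp) may differ. A stripped-down sketch of that guard, with hypothetical stand-ins for the listenerWrapper types:

```go
package main

import (
	"fmt"
	"sync"
)

type servingMode int

const (
	modeStarting servingMode = iota
	modeServing
	modeNotServing
)

type wrapper struct {
	mu           sync.Mutex
	mode         servingMode
	modeCallback func(mode servingMode, err error)
}

// switchMode mirrors the rule from the diff: only a SERVING -> SERVING
// transition is dropped; NOT_SERVING repeats are delivered so callers see
// the latest error and a fresh timestamp.
func (w *wrapper) switchMode(newMode servingMode, err error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.mode == newMode && w.mode == modeServing {
		return
	}
	w.mode = newMode
	if w.modeCallback != nil {
		w.modeCallback(newMode, err)
	}
}

func main() {
	w := &wrapper{modeCallback: func(m servingMode, err error) {
		fmt.Printf("mode=%v err=%v\n", m, err)
	}}
	w.switchMode(modeServing, nil)
	w.switchMode(modeServing, nil) // suppressed: nothing new to report
	w.switchMode(modeNotServing, fmt.Errorf("resource deleted"))
	w.switchMode(modeNotServing, fmt.Errorf("management server unreachable")) // delivered: error changed
}
```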
-func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref func(), _ error) { - scheme, authority := n.Scheme, n.Authority - - c.authorityMu.Lock() - defer c.authorityMu.Unlock() - if c.done.HasFired() { - return nil, nil, errors.New("the xds-client is closed") - } - - config := c.config.XDSServer - if scheme == xdsresource.FederationScheme { - cfg, ok := c.config.Authorities[authority] - if !ok { - return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) - } - config = cfg.XDSServer - } - - a, err := c.newAuthority(config) - if err != nil { - return nil, nil, fmt.Errorf("xds: failed to connect to the control plane for authority %q: %v", authority, err) - } - // All returned authority from this function will be used by a watch, - // holding the ref here. - // - // Note that this must be done while c.authorityMu is held, to avoid the - // race that an authority is returned, but before the watch starts, the - // old last watch is canceled (in another goroutine), causing this - // authority to be removed, and then a watch will start on a removed - // authority. - // - // unref() will be done when the watch is canceled. - a.ref() - return a, func() { c.unrefAuthority(a) }, nil -} - -// newAuthority creates a new authority for the config. But before that, it -// checks the cache to see if an authority for this config already exists. -// -// The caller must take a reference of the returned authority before using, and -// unref afterwards. -// -// caller must hold c.authorityMu -func (c *clientImpl) newAuthority(config *bootstrap.ServerConfig) (_ *authority, retErr error) { - // First check if there's already an authority for this config. If found, it - // means this authority is used by other watches (could be the same - // authority name, or a different authority name but the same server - // config). Return it. - configStr := config.String() - if a, ok := c.authorities[configStr]; ok { - return a, nil - } - // Second check if there's an authority in the idle cache. If found, it - // means this authority was created, but moved to the idle cache because the - // watch was canceled. Move it from idle cache to the authority cache, and - // return. - if old, ok := c.idleAuthorities.Remove(configStr); ok { - oldA, _ := old.(*authority) - if oldA != nil { - c.authorities[configStr] = oldA - return oldA, nil - } - } - - // Make a new authority since there's no existing authority for this config. - ret := &authority{config: config, pubsub: pubsub.New(c.watchExpiryTimeout, c.logger)} - defer func() { - if retErr != nil { - ret.close() - } - }() - ctr, err := newController(config, ret.pubsub, c.updateValidator, c.logger) - if err != nil { - return nil, err - } - ret.controller = ctr - // Add it to the cache, so it will be reused. - c.authorities[configStr] = ret - return ret, nil -} - -// unrefAuthority unrefs the authority. It also moves the authority to idle -// cache if it's ref count is 0. -// -// This function doesn't need to called explicitly. It's called by the returned -// unref from findAuthority(). -// -// Caller must not hold c.authorityMu. -func (c *clientImpl) unrefAuthority(a *authority) { - c.authorityMu.Lock() - defer c.authorityMu.Unlock() - if a.unref() > 0 { - return - } - configStr := a.config.String() - delete(c.authorities, configStr) - c.idleAuthorities.Add(configStr, a, func() { - a.close() - }) -} - // authority is a combination of pubsub and the controller for this authority. 
// // Note that it might make sense to use one pubsub for all the resources (for diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/bootstrap.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/bootstrap.go index 4523a6131fd40..97fe4a8b0792c 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/bootstrap.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -53,6 +53,7 @@ const ( gRPCUserAgentName = "gRPC Go" clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" + clientFeatureResourceWrapper = "xds.config.resource-in-sotw" ) func init() { @@ -499,7 +500,7 @@ func (c *Config) updateNodeProto(node *v3corepb.Node) error { } v3.UserAgentName = gRPCUserAgentName v3.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} - v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning) + v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning, clientFeatureResourceWrapper) v3bytes, err := proto.Marshal(v3) if err != nil { diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go index 817a4507eb349..8ae7301fb7e37 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go @@ -21,135 +21,26 @@ package xdsclient import ( - "fmt" - "sync" - "time" - - "google.golang.org/grpc/internal/cache" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -// clientImpl is the real implementation of the xds client. The exported Client -// is a wrapper of this struct with a ref count. -// -// Implements UpdateHandler interface. -// TODO(easwars): Make a wrapper struct which implements this interface in the -// style of ccBalancerWrapper so that the Client type does not implement these -// exported methods. -type clientImpl struct { - done *grpcsync.Event - config *bootstrap.Config - - // authorityMu protects the authority fields. It's necessary because an - // authority is created when it's used. - authorityMu sync.Mutex - // authorities is a map from ServerConfig to authority. So that - // different authorities sharing the same ServerConfig can share the - // authority. - // - // The key is **ServerConfig.String()**, not the authority name. - // - // An authority is either in authorities, or idleAuthorities, - // never both. - authorities map[string]*authority - // idleAuthorities keeps the authorities that are not used (the last - // watch on it was canceled). They are kept in the cache and will be deleted - // after a timeout. The key is ServerConfig.String(). - // - // An authority is either in authorities, or idleAuthorities, - // never both. - idleAuthorities *cache.TimeoutCache - - logger *grpclog.PrefixLogger - watchExpiryTimeout time.Duration -} - -// newWithConfig returns a new xdsClient with the given config. 
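The clientImpl fields removed here (and re-added in clientimpl.go further down) describe a two-collection lifecycle: an authority lives either in the active authorities map or in the timeout-based idleAuthorities cache, never both, so a watch that restarts quickly can revive a cached authority instead of redialing the management server. A rough sketch of that pattern, using a plain timer map rather than the real internal/cache.TimeoutCache:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type authority struct{ refs int }

func (a *authority) close() { fmt.Println("authority closed") }

type idleEntry struct {
	a *authority
	t *time.Timer
}

// pool keeps an authority either in active or in idle, never both,
// mirroring the authorities/idleAuthorities split in clientImpl.
type pool struct {
	mu     sync.Mutex
	active map[string]*authority
	idle   map[string]*idleEntry
}

func (p *pool) acquire(key string) *authority {
	p.mu.Lock()
	defer p.mu.Unlock()
	if e, ok := p.idle[key]; ok { // revive from the idle cache
		e.t.Stop()
		delete(p.idle, key)
		p.active[key] = e.a
	}
	a, ok := p.active[key]
	if !ok {
		a = &authority{}
		p.active[key] = a
	}
	a.refs++
	return a
}

func (p *pool) release(key string, a *authority) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if a.refs--; a.refs > 0 {
		return
	}
	delete(p.active, key)
	// Keep it around briefly so a quickly restarted watch can reuse it.
	p.idle[key] = &idleEntry{a: a, t: time.AfterFunc(time.Second, func() {
		p.mu.Lock()
		delete(p.idle, key)
		p.mu.Unlock()
		a.close()
	})}
}

func main() {
	p := &pool{active: map[string]*authority{}, idle: map[string]*idleEntry{}}
	a := p.acquire("server:443")
	p.release("server:443", a)
	b := p.acquire("server:443") // revived before eviction
	fmt.Println(a == b)          // true
	p.release("server:443", b)
	time.Sleep(1500 * time.Millisecond) // let the idle timer evict and close
}
```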
-func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) { - c := &clientImpl{ - done: grpcsync.NewEvent(), - config: config, - watchExpiryTimeout: watchExpiryTimeout, - authorities: make(map[string]*authority), - idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout), - } - - c.logger = prefixLogger(c) - c.logger.Infof("Created ClientConn to xDS management server: %s", config.XDSServer) - c.logger.Infof("Created") - return c, nil -} - -// BootstrapConfig returns the configuration read from the bootstrap file. -// Callers must treat the return value as read-only. -func (c *clientRefCounted) BootstrapConfig() *bootstrap.Config { - return c.config -} - -// Close closes the gRPC connection to the management server. -func (c *clientImpl) Close() { - if c.done.HasFired() { - return - } - c.done.Fire() - // TODO: Should we invoke the registered callbacks here with an error that - // the client is closed? - - // Note that Close needs to check for nils even if some of them are always - // set in the constructor. This is because the constructor defers Close() in - // error cases, and the fields might not be set when the error happens. - - c.authorityMu.Lock() - for _, a := range c.authorities { - a.close() - } - c.idleAuthorities.Clear(true) - c.authorityMu.Unlock() - - c.logger.Infof("Shutdown") -} - -func (c *clientImpl) filterChainUpdateValidator(fc *xdsresource.FilterChain) error { - if fc == nil { - return nil - } - return c.securityConfigUpdateValidator(fc.SecurityCfg) -} - -func (c *clientImpl) securityConfigUpdateValidator(sc *xdsresource.SecurityConfig) error { - if sc == nil { - return nil - } - if sc.IdentityInstanceName != "" { - if _, ok := c.config.CertProviderConfigs[sc.IdentityInstanceName]; !ok { - return fmt.Errorf("identitiy certificate provider instance name %q missing in bootstrap configuration", sc.IdentityInstanceName) - } - } - if sc.RootInstanceName != "" { - if _, ok := c.config.CertProviderConfigs[sc.RootInstanceName]; !ok { - return fmt.Errorf("root certificate provider instance name %q missing in bootstrap configuration", sc.RootInstanceName) - } - } - return nil -} - -func (c *clientImpl) updateValidator(u interface{}) error { - switch update := u.(type) { - case xdsresource.ListenerUpdate: - if update.InboundListenerCfg == nil || update.InboundListenerCfg.FilterChains == nil { - return nil - } - return update.InboundListenerCfg.FilterChains.Validate(c.filterChainUpdateValidator) - case xdsresource.ClusterUpdate: - return c.securityConfigUpdateValidator(update.SecurityCfg) - default: - // We currently invoke this update validation function only for LDS and - // CDS updates. In the future, if we wish to invoke it for other xDS - // updates, corresponding plumbing needs to be added to those unmarshal - // functions. - } - return nil +// XDSClient is a full fledged gRPC client which queries a set of discovery APIs +// (collectively termed as xDS) on a remote management server, to discover +// various dynamic resources. 
+type XDSClient interface {
+ WatchListener(string, func(xdsresource.ListenerUpdate, error)) func()
+ WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func()
+ WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func()
+ WatchEndpoints(string, func(xdsresource.EndpointsUpdate, error)) func()
+ ReportLoad(*bootstrap.ServerConfig) (*load.Store, func())
+
+ DumpLDS() map[string]xdsresource.UpdateWithMD
+ DumpRDS() map[string]xdsresource.UpdateWithMD
+ DumpCDS() map[string]xdsresource.UpdateWithMD
+ DumpEDS() map[string]xdsresource.UpdateWithMD
+
+ BootstrapConfig() *bootstrap.Config
+ Close()
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go
new file mode 100644
index 0000000000000..0631d3b0fadb1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package xdsclient
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc/internal/cache"
+ "google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
+)
+
+// New returns a new xDS client configured by the bootstrap file specified in env
+// variable GRPC_XDS_BOOTSTRAP or GRPC_XDS_BOOTSTRAP_CONFIG.
+//
+// The returned client is a reference counted singleton instance. This function
+// creates a new client only when one doesn't already exist.
+//
+// Note that the first invocation of New() or NewWithConfig() sets the client
+// singleton. The following calls will return the singleton client without
+// checking or using the config.
+func New() (XDSClient, error) {
+ return newRefCountedWithConfig(nil)
+}
+
+// NewWithConfig returns a new xDS client configured by the given config.
+//
+// Internal/Testing Only
+//
+// This function should ONLY be used for internal (c2p resolver) and/or testing
+// purposes. DO NOT use this elsewhere. Use New() instead.
+func NewWithConfig(config *bootstrap.Config) (XDSClient, error) {
+ return newRefCountedWithConfig(config)
+}
+
+// newWithConfig returns a new xdsClient with the given config.
+func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) {
+ c := &clientImpl{
+ done: grpcsync.NewEvent(),
+ config: config,
+ watchExpiryTimeout: watchExpiryTimeout,
+ authorities: make(map[string]*authority),
+ idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout),
+ }
+
+ c.logger = prefixLogger(c)
+ c.logger.Infof("Created ClientConn to xDS management server: %s", config.XDSServer)
+ c.logger.Infof("Created")
+ return c, nil
+}
+
+// NewWithConfigForTesting returns an xDS client for the specified bootstrap
+// config, separate from the global singleton.
+//
+// Testing Only
+//
+// This function should ONLY be used for testing purposes.
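Taken together with the interface above, the contract is: New() hands out references to a shared client, each watch returns its own cancel func, and Close() only tears down the management-server connection when the last reference drops. A hypothetical usage sketch against a stand-in interface (the real package is internal to grpc-go, so the types here are illustrative, not importable):

```go
package main

import "fmt"

// listenerUpdate and xdsClient reduce the XDSClient contract to the
// essentials: a watch returns a cancel func, and Close releases this
// caller's reference on the shared client.
type listenerUpdate struct{ RouteConfigName string }

type xdsClient interface {
	WatchListener(name string, cb func(listenerUpdate, error)) (cancel func())
	Close()
}

func watchOnce(c xdsClient, resource string) {
	done := make(chan struct{})
	cancel := c.WatchListener(resource, func(u listenerUpdate, err error) {
		if err != nil {
			fmt.Println("watch error:", err)
		} else {
			fmt.Println("routes via:", u.RouteConfigName)
		}
		close(done)
	})
	<-done
	cancel()  // stop this watch
	c.Close() // drop our reference; the connection closes at refcount zero
}

// fakeClient simulates a client that answers every listener watch once.
type fakeClient struct{}

func (fakeClient) WatchListener(name string, cb func(listenerUpdate, error)) func() {
	go cb(listenerUpdate{RouteConfigName: "route-" + name}, nil)
	return func() {}
}
func (fakeClient) Close() {}

func main() { watchOnce(fakeClient{}, "myservice") }
```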
+func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.Duration) (XDSClient, error) { + cl, err := newWithConfig(config, watchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) + if err != nil { + return nil, err + } + return &clientRefCounted{clientImpl: cl, refCount: 1}, nil +} + +// NewWithBootstrapContentsForTesting returns an xDS client for this config, +// separate from the global singleton. +// +// +// Testing Only +// +// This function should ONLY be used for testing purposes. +func NewWithBootstrapContentsForTesting(contents []byte) (XDSClient, error) { + // Normalize the contents + buf := bytes.Buffer{} + err := json.Indent(&buf, contents, "", "") + if err != nil { + return nil, fmt.Errorf("xds: error normalizing JSON: %v", err) + } + contents = bytes.TrimSpace(buf.Bytes()) + + clientsMu.Lock() + defer clientsMu.Unlock() + if c := clients[string(contents)]; c != nil { + c.mu.Lock() + // Since we don't remove the *Client from the map when it is closed, we + // need to recreate the impl if the ref count dropped to zero. + if c.refCount > 0 { + c.refCount++ + c.mu.Unlock() + return c, nil + } + c.mu.Unlock() + } + + bcfg, err := bootstrap.NewConfigFromContentsForTesting(contents) + if err != nil { + return nil, fmt.Errorf("xds: error with bootstrap config: %v", err) + } + + cImpl, err := newWithConfig(bcfg, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) + if err != nil { + return nil, err + } + + c := &clientRefCounted{clientImpl: cImpl, refCount: 1} + clients[string(contents)] = c + return c, nil +} + +var ( + clients = map[string]*clientRefCounted{} + clientsMu sync.Mutex +) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go new file mode 100644 index 0000000000000..800ae91fa311a --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "sync" + "time" + + "google.golang.org/grpc/internal/cache" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" +) + +var _ XDSClient = &clientImpl{} + +// clientImpl is the real implementation of the xds client. The exported Client +// is a wrapper of this struct with a ref count. +// +// Implements UpdateHandler interface. +// TODO(easwars): Make a wrapper struct which implements this interface in the +// style of ccBalancerWrapper so that the Client type does not implement these +// exported methods. +type clientImpl struct { + done *grpcsync.Event + config *bootstrap.Config + + // authorityMu protects the authority fields. It's necessary because an + // authority is created when it's used. + authorityMu sync.Mutex + // authorities is a map from ServerConfig to authority. 
So that + // different authorities sharing the same ServerConfig can share the + // authority. + // + // The key is **ServerConfig.String()**, not the authority name. + // + // An authority is either in authorities, or idleAuthorities, + // never both. + authorities map[string]*authority + // idleAuthorities keeps the authorities that are not used (the last + // watch on it was canceled). They are kept in the cache and will be deleted + // after a timeout. The key is ServerConfig.String(). + // + // An authority is either in authorities, or idleAuthorities, + // never both. + idleAuthorities *cache.TimeoutCache + + logger *grpclog.PrefixLogger + watchExpiryTimeout time.Duration +} + +// BootstrapConfig returns the configuration read from the bootstrap file. +// Callers must treat the return value as read-only. +func (c *clientImpl) BootstrapConfig() *bootstrap.Config { + return c.config +} + +// Close closes the gRPC connection to the management server. +func (c *clientImpl) Close() { + if c.done.HasFired() { + return + } + c.done.Fire() + // TODO: Should we invoke the registered callbacks here with an error that + // the client is closed? + + // Note that Close needs to check for nils even if some of them are always + // set in the constructor. This is because the constructor defers Close() in + // error cases, and the fields might not be set when the error happens. + + c.authorityMu.Lock() + for _, a := range c.authorities { + a.close() + } + c.idleAuthorities.Clear(true) + c.authorityMu.Unlock() + + c.logger.Infof("Shutdown") +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go new file mode 100644 index 0000000000000..623420ccc78f5 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go @@ -0,0 +1,164 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsclient + +import ( + "errors" + "fmt" + "time" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/controller" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/pubsub" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + + v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +type controllerInterface interface { + AddWatch(resourceType xdsresource.ResourceType, resourceName string) + RemoveWatch(resourceType xdsresource.ResourceType, resourceName string) + ReportLoad(server string) (*load.Store, func()) + Close() +} + +var newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger, boff func(int) time.Duration) (controllerInterface, error) { + return controller.New(config, pubsub, validator, logger, boff) +} + +// findAuthority returns the authority for this name. If it doesn't already +// exist, one will be created. +// +// Note that this doesn't always create new authority. authorities with the same +// config but different names are shared. +// +// The returned unref function must be called when the caller is done using this +// authority, without holding c.authorityMu. +// +// Caller must not hold c.authorityMu. +func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref func(), _ error) { + scheme, authority := n.Scheme, n.Authority + + c.authorityMu.Lock() + defer c.authorityMu.Unlock() + if c.done.HasFired() { + return nil, nil, errors.New("the xds-client is closed") + } + + config := c.config.XDSServer + if scheme == xdsresource.FederationScheme { + cfg, ok := c.config.Authorities[authority] + if !ok { + return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) + } + if cfg.XDSServer != nil { + config = cfg.XDSServer + } + } + + a, err := c.newAuthorityLocked(config) + if err != nil { + return nil, nil, fmt.Errorf("xds: failed to connect to the control plane for authority %q: %v", authority, err) + } + // All returned authority from this function will be used by a watch, + // holding the ref here. + // + // Note that this must be done while c.authorityMu is held, to avoid the + // race that an authority is returned, but before the watch starts, the + // old last watch is canceled (in another goroutine), causing this + // authority to be removed, and then a watch will start on a removed + // authority. + // + // unref() will be done when the watch is canceled. + a.ref() + return a, func() { c.unrefAuthority(a) }, nil +} + +// newAuthorityLocked creates a new authority for the config. But before that, it +// checks the cache to see if an authority for this config already exists. +// +// The caller must take a reference of the returned authority before using, and +// unref afterwards. +// +// caller must hold c.authorityMu +func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *authority, retErr error) { + // First check if there's already an authority for this config. If found, it + // means this authority is used by other watches (could be the same + // authority name, or a different authority name but the same server + // config). Return it. 
+ configStr := config.String()
+ if a, ok := c.authorities[configStr]; ok {
+ return a, nil
+ }
+ // Second check if there's an authority in the idle cache. If found, it
+ // means this authority was created, but moved to the idle cache because the
+ // watch was canceled. Move it from idle cache to the authority cache, and
+ // return.
+ if old, ok := c.idleAuthorities.Remove(configStr); ok {
+ oldA, _ := old.(*authority)
+ if oldA != nil {
+ c.authorities[configStr] = oldA
+ return oldA, nil
+ }
+ }
+
+ // Make a new authority since there's no existing authority for this config.
+ nodeID := ""
+ if v3, ok := c.config.XDSServer.NodeProto.(*v3corepb.Node); ok {
+ nodeID = v3.GetId()
+ } else if v2, ok := c.config.XDSServer.NodeProto.(*v2corepb.Node); ok {
+ nodeID = v2.GetId()
+ }
+ ret := &authority{config: config, pubsub: pubsub.New(c.watchExpiryTimeout, nodeID, c.logger)}
+ defer func() {
+ if retErr != nil {
+ ret.close()
+ }
+ }()
+ ctr, err := newController(config, ret.pubsub, c.updateValidator, c.logger, nil)
+ if err != nil {
+ return nil, err
+ }
+ ret.controller = ctr
+ // Add it to the cache, so it will be reused.
+ c.authorities[configStr] = ret
+ return ret, nil
+}
+
+// unrefAuthority unrefs the authority. It also moves the authority to the
+// idle cache if its ref count is 0.
+//
+// This function doesn't need to be called explicitly. It's called by the
+// returned unref from findAuthority().
+//
+// Caller must not hold c.authorityMu.
+func (c *clientImpl) unrefAuthority(a *authority) {
+ c.authorityMu.Lock()
+ defer c.authorityMu.Unlock()
+ if a.unref() > 0 {
+ return
+ }
+ configStr := a.config.String()
+ delete(c.authorities, configStr)
+ c.idleAuthorities.Add(configStr, a, func() {
+ a.close()
+ })
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/dump.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go
similarity index 100%
rename from vendor/google.golang.org/grpc/xds/internal/xdsclient/dump.go
rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go
similarity index 94%
rename from vendor/google.golang.org/grpc/xds/internal/xdsclient/loadreport.go
rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go
index 32c7e9c9d7914..cba5afd454a71 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/loadreport.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go
@@ -28,7 +28,9 @@ import (
 // It returns a Store for the user to report loads, a function to cancel the
 // load reporting stream.
 func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, func()) {
- a, err := c.newAuthority(server)
+ c.authorityMu.Lock()
+ a, err := c.newAuthorityLocked(server)
+ c.authorityMu.Unlock()
 if err != nil {
 c.logger.Infof("xds: failed to connect to the control plane to do load reporting for authority %q: %v", server, err)
 return nil, func() {}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_validator.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_validator.go
new file mode 100644
index 0000000000000..50bdbe4e23f42
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_validator.go
@@ -0,0 +1,67 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package xdsclient
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
+)
+
+func (c *clientImpl) filterChainUpdateValidator(fc *xdsresource.FilterChain) error {
+ if fc == nil {
+ return nil
+ }
+ return c.securityConfigUpdateValidator(fc.SecurityCfg)
+}
+
+func (c *clientImpl) securityConfigUpdateValidator(sc *xdsresource.SecurityConfig) error {
+ if sc == nil {
+ return nil
+ }
+ if sc.IdentityInstanceName != "" {
+ if _, ok := c.config.CertProviderConfigs[sc.IdentityInstanceName]; !ok {
+ return fmt.Errorf("identity certificate provider instance name %q missing in bootstrap configuration", sc.IdentityInstanceName)
+ }
+ }
+ if sc.RootInstanceName != "" {
+ if _, ok := c.config.CertProviderConfigs[sc.RootInstanceName]; !ok {
+ return fmt.Errorf("root certificate provider instance name %q missing in bootstrap configuration", sc.RootInstanceName)
+ }
+ }
+ return nil
+}
+
+func (c *clientImpl) updateValidator(u interface{}) error {
+ switch update := u.(type) {
+ case xdsresource.ListenerUpdate:
+ if update.InboundListenerCfg == nil || update.InboundListenerCfg.FilterChains == nil {
+ return nil
+ }
+ return update.InboundListenerCfg.FilterChains.Validate(c.filterChainUpdateValidator)
+ case xdsresource.ClusterUpdate:
+ return c.securityConfigUpdateValidator(update.SecurityCfg)
+ default:
+ // We currently invoke this update validation function only for LDS and
+ // CDS updates. In the future, if we wish to invoke it for other xDS
+ // updates, corresponding plumbing needs to be added to those unmarshal
+ // functions.
+ }
+ return nil
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/watchers.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go
similarity index 100%
rename from vendor/google.golang.org/grpc/xds/internal/xdsclient/watchers.go
rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller.go
deleted file mode 100644
index 431a14498e1fe..0000000000000
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller.go
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- *
- * Copyright 2021 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package xdsclient - -import ( - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/controller" - "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/grpc/xds/internal/xdsclient/pubsub" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -type controllerInterface interface { - AddWatch(resourceType xdsresource.ResourceType, resourceName string) - RemoveWatch(resourceType xdsresource.ResourceType, resourceName string) - ReportLoad(server string) (*load.Store, func()) - Close() -} - -var newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger) (controllerInterface, error) { - return controller.New(config, pubsub, validator, logger) -} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/controller.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/controller.go index 3f7371ae63c75..4b07dc8d6ac59 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/controller.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/controller.go @@ -72,7 +72,7 @@ type Controller struct { watchMap map[xdsresource.ResourceType]map[string]bool // versionMap contains the version that was acked (the version in the ack // request that was sent on wire). The key is rType, the value is the - // version string, becaues the versions for different resource types should + // version string, because the versions for different resource types should // be independent. versionMap map[xdsresource.ResourceType]string // nonceMap contains the nonce from the most recent received response. @@ -100,7 +100,7 @@ func SetGRPCDial(dialer func(target string, opts ...grpc.DialOption) (*grpc.Clie } // New creates a new controller. -func New(config *bootstrap.ServerConfig, updateHandler pubsub.UpdateHandler, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger) (_ *Controller, retErr error) { +func New(config *bootstrap.ServerConfig, updateHandler pubsub.UpdateHandler, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger, boff func(int) time.Duration) (_ *Controller, retErr error) { switch { case config == nil: return nil, errors.New("xds: no xds_server provided") @@ -120,12 +120,15 @@ func New(config *bootstrap.ServerConfig, updateHandler pubsub.UpdateHandler, val }), } + if boff == nil { + boff = backoff.DefaultExponential.Backoff + } ret := &Controller{ config: config, updateValidator: validator, updateHandler: updateHandler, - backoff: backoff.DefaultExponential.Backoff, // TODO: should this be configurable? + backoff: boff, streamCh: make(chan grpc.ClientStream, 1), sendCh: buffer.NewUnbounded(), watchMap: make(map[xdsresource.ResourceType]map[string]bool), diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/transport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/transport.go index 0b982b0d70575..28641dc874a40 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/transport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/transport.go @@ -59,26 +59,21 @@ func (t *Controller) run(ctx context.Context) { // report error (and log) when stats is transient failure. 
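The run() hunk just below replaces "sleep backoff(retries) after each failure" with "sleep until lastStreamStartTime + backoff(retries)", so a stream that stayed healthy for a while pays little or no penalty when it finally breaks. A self-contained sketch of that timing rule (the linear backoff function here is a placeholder, not gRPC's DefaultExponential):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// backoff is a stand-in for an exponential backoff policy.
func backoff(retries int) time.Duration {
	return time.Duration(retries) * 100 * time.Millisecond
}

// runWithRetries mirrors the new loop shape: the delay is measured from
// when the previous stream was *started*, so a long-lived stream that
// finally breaks is retried almost immediately.
func runWithRetries(ctx context.Context, runStream func() error) {
	retries := 0
	lastStreamStartTime := time.Time{}
	for ctx.Err() == nil {
		if dur := time.Until(lastStreamStartTime.Add(backoff(retries))); dur > 0 {
			timer := time.NewTimer(dur)
			select {
			case <-timer.C:
			case <-ctx.Done():
				timer.Stop()
				return
			}
		}
		retries++
		lastStreamStartTime = time.Now()
		if err := runStream(); err != nil {
			fmt.Println("stream broke:", err)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	runWithRetries(ctx, func() error {
		time.Sleep(150 * time.Millisecond) // pretend the stream lived a while
		return fmt.Errorf("connection reset")
	})
}
```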
retries := 0 - for { - select { - case <-ctx.Done(): - return - default: - } - - if retries != 0 { - timer := time.NewTimer(t.backoff(retries)) + lastStreamStartTime := time.Time{} + for ctx.Err() == nil { + dur := time.Until(lastStreamStartTime.Add(t.backoff(retries))) + if dur > 0 { + timer := time.NewTimer(dur) select { case <-timer.C: case <-ctx.Done(): - if !timer.Stop() { - <-timer.C - } + timer.Stop() return } } retries++ + lastStreamStartTime = time.Now() stream, err := t.vClient.NewStream(ctx, t.cc) if err != nil { t.updateHandler.NewConnectionError(err) @@ -104,10 +99,10 @@ func (t *Controller) run(ctx context.Context) { // new requests to send on the stream. // // For each new request (watchAction), it's -// - processed and added to the watch map -// - so resend will pick them up when there are new streams -// - sent on the current stream if there's one -// - the current stream is cleared when any send on it fails +// - processed and added to the watch map +// so, resend will pick them up when there are new streams +// - sent on the current stream if there's one +// the current stream is cleared when any send on it fails // // For each new stream, all the existing requests will be resent. // @@ -171,12 +166,20 @@ func (t *Controller) sendExisting(stream grpc.ClientStream) bool { t.mu.Lock() defer t.mu.Unlock() - // Reset the ack versions when the stream restarts. - t.versionMap = make(map[xdsresource.ResourceType]string) + // Reset only the nonce when the stream restarts. + // + // xDS spec says the following. See section: + // https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#ack-nack-and-resource-type-instance-version + // + // Note that the version for a resource type is not a property of an + // individual xDS stream but rather a property of the resources themselves. If + // the stream becomes broken and the client creates a new stream, the client’s + // initial request on the new stream should indicate the most recent version + // seen by the client on the previous stream t.nonceMap = make(map[xdsresource.ResourceType]string) for rType, s := range t.watchMap { - if err := t.vClient.SendRequest(stream, mapToSlice(s), rType, "", "", ""); err != nil { + if err := t.vClient.SendRequest(stream, mapToSlice(s), rType, t.versionMap[rType], "", ""); err != nil { t.logger.Warningf("ADS request failed: %v", err) return false } @@ -188,17 +191,17 @@ func (t *Controller) sendExisting(stream grpc.ClientStream) bool { // recv receives xDS responses on the provided ADS stream and branches out to // message specific handlers. func (t *Controller) recv(stream grpc.ClientStream) bool { - success := false + msgReceived := false for { resp, err := t.vClient.RecvResponse(stream) if err != nil { t.updateHandler.NewConnectionError(err) t.logger.Warningf("ADS stream is closed with error: %v", err) - return success + return msgReceived } + msgReceived = true rType, version, nonce, err := t.handleResponse(resp) - if e, ok := err.(xdsresourceversion.ErrResourceTypeUnsupported); ok { t.logger.Warningf("%s", e.ErrStr) continue @@ -221,7 +224,6 @@ func (t *Controller) recv(stream grpc.ClientStream) bool { stream: stream, }) t.logger.Infof("Sending ACK for response type: %v, version: %v, nonce: %v", rType, version, nonce) - success = true } } @@ -302,8 +304,8 @@ func (t *Controller) processWatchInfo(w *watchAction) (target []string, rType xd rType = w.rType target = mapToSlice(current) // We don't reset version or nonce when a new watch is started. 
The version - // and nonce from previous response are carried by the request unless the - // stream is recreated. + // and nonce from previous response are carried by the request. Only the nonce + // is reset when the stream is recreated. ver = t.versionMap[rType] nonce = t.nonceMap[rType] return target, rType, ver, nonce @@ -371,44 +373,48 @@ func (t *Controller) processAckInfo(ack *ackAction, stream grpc.ClientStream) (t // It blocks until the context is cancelled. func (t *Controller) reportLoad(ctx context.Context, cc *grpc.ClientConn, opts controllerversion.LoadReportingOptions) { retries := 0 - for { - if ctx.Err() != nil { - return - } - - if retries != 0 { - timer := time.NewTimer(t.backoff(retries)) + lastStreamStartTime := time.Time{} + for ctx.Err() == nil { + dur := time.Until(lastStreamStartTime.Add(t.backoff(retries))) + if dur > 0 { + timer := time.NewTimer(dur) select { case <-timer.C: case <-ctx.Done(): - if !timer.Stop() { - <-timer.C - } + timer.Stop() return } } retries++ - stream, err := t.vClient.NewLoadStatsStream(ctx, cc) - if err != nil { - t.logger.Warningf("lrs: failed to create stream: %v", err) - continue - } - t.logger.Infof("lrs: created LRS stream") + lastStreamStartTime = time.Now() + func() { + // streamCtx is created and canceled in case we terminate the stream + // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring + // goroutine. + streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := t.vClient.NewLoadStatsStream(streamCtx, cc) + if err != nil { + t.logger.Warningf("lrs: failed to create stream: %v", err) + return + } + t.logger.Infof("lrs: created LRS stream") - if err := t.vClient.SendFirstLoadStatsRequest(stream); err != nil { - t.logger.Warningf("lrs: failed to send first request: %v", err) - continue - } + if err := t.vClient.SendFirstLoadStatsRequest(stream); err != nil { + t.logger.Warningf("lrs: failed to send first request: %v", err) + return + } - clusters, interval, err := t.vClient.HandleLoadStatsResponse(stream) - if err != nil { - t.logger.Warningf("%v", err) - continue - } + clusters, interval, err := t.vClient.HandleLoadStatsResponse(stream) + if err != nil { + t.logger.Warningf("lrs: error from stream: %v", err) + return + } - retries = 0 - t.sendLoads(ctx, stream, opts.LoadStore, clusters, interval) + retries = 0 + t.sendLoads(streamCtx, stream, opts.LoadStore, clusters, interval) + }() } } @@ -422,7 +428,7 @@ func (t *Controller) sendLoads(ctx context.Context, stream grpc.ClientStream, st return } if err := t.vClient.SendLoadStatsRequest(stream, store.Stats(clusterNames)); err != nil { - t.logger.Warningf("%v", err) + t.logger.Warningf("lrs: error from stream: %v", err) return } } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2/loadreport.go index f0034e21c353c..da5128ac456e6 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2/loadreport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2/loadreport.go @@ -22,6 +22,7 @@ import ( "context" "errors" "fmt" + "io" "time" "github.com/golang/protobuf/proto" @@ -59,7 +60,11 @@ func (v2c *client) SendFirstLoadStatsRequest(s grpc.ClientStream) error { req := &lrspb.LoadStatsRequest{Node: node} v2c.logger.Infof("lrs: sending init LoadStatsRequest: %v", pretty.ToJSON(req)) - return stream.Send(req) + err := stream.Send(req) + if err == io.EOF { + return 
getStreamError(stream) + } + return err } func (v2c *client) HandleLoadStatsResponse(s grpc.ClientStream) ([]string, time.Duration, error) { @@ -149,5 +154,17 @@ func (v2c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) req := &lrspb.LoadStatsRequest{ClusterStats: clusterStats} v2c.logger.Infof("lrs: sending LRS loads: %+v", pretty.ToJSON(req)) - return stream.Send(req) + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err +} + +func getStreamError(stream lrsStream) error { + for { + if _, err := stream.Recv(); err != nil { + return err + } + } } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3/loadreport.go index 8cdb5476fbbd4..f8d866bb1a590 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3/loadreport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3/loadreport.go @@ -22,6 +22,7 @@ import ( "context" "errors" "fmt" + "io" "time" "github.com/golang/protobuf/proto" @@ -59,7 +60,11 @@ func (v3c *client) SendFirstLoadStatsRequest(s grpc.ClientStream) error { req := &lrspb.LoadStatsRequest{Node: node} v3c.logger.Infof("lrs: sending init LoadStatsRequest: %v", pretty.ToJSON(req)) - return stream.Send(req) + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err } func (v3c *client) HandleLoadStatsResponse(s grpc.ClientStream) ([]string, time.Duration, error) { @@ -148,5 +153,17 @@ func (v3c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) req := &lrspb.LoadStatsRequest{ClusterStats: clusterStats} v3c.logger.Infof("lrs: sending LRS loads: %+v", pretty.ToJSON(req)) - return stream.Send(req) + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err +} + +func getStreamError(stream lrsStream) error { + for { + if _, err := stream.Recv(); err != nil { + return err + } + } } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/pubsub/pubsub.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/pubsub/pubsub.go index a843fd5f191f9..95e8ac77300ec 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/pubsub/pubsub.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/pubsub/pubsub.go @@ -41,6 +41,7 @@ type Pubsub struct { done *grpcsync.Event logger *grpclog.PrefixLogger watchExpiryTimeout time.Duration + nodeID string updateCh *buffer.Unbounded // chan *watcherInfoWithUpdate // All the following maps are to keep the updates/metadata in a cache. @@ -60,11 +61,14 @@ type Pubsub struct { } // New creates a new Pubsub. -func New(watchExpiryTimeout time.Duration, logger *grpclog.PrefixLogger) *Pubsub { +// +// The passed in nodeID will be attached to all errors sent to the watchers. 
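The getStreamError helper added to both LRS clients above reflects a gRPC streaming idiom: when Send returns io.EOF the stream has already terminated, and the status that actually killed it must be retrieved via Recv. A generic sketch of the idiom against a minimal stream interface (the interface and the fake stream are stand-ins, not the generated LRS stubs):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// stream captures just enough of a gRPC client stream for the idiom.
type stream interface {
	Send(msg string) error
	Recv() (string, error)
}

// sendWithRealError mirrors the diff: on io.EOF from Send, drain Recv
// until it yields the status that actually terminated the stream.
func sendWithRealError(s stream, msg string) error {
	err := s.Send(msg)
	if err == io.EOF {
		for {
			if _, err := s.Recv(); err != nil {
				return err
			}
		}
	}
	return err
}

// deadStream simulates a stream already torn down by the server.
type deadStream struct{}

func (deadStream) Send(string) error { return io.EOF }
func (deadStream) Recv() (string, error) {
	return "", errors.New("rpc error: code = Unavailable desc = connection closed")
}

func main() {
	fmt.Println(sendWithRealError(deadStream{}, "load report"))
}
```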
+func New(watchExpiryTimeout time.Duration, nodeID string, logger *grpclog.PrefixLogger) *Pubsub { pb := &Pubsub{ done: grpcsync.NewEvent(), logger: logger, watchExpiryTimeout: watchExpiryTimeout, + nodeID: nodeID, updateCh: buffer.NewUnbounded(), ldsWatchers: make(map[string]map[*watchInfo]bool), diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/pubsub/update.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/pubsub/update.go index 371405b67972e..9ae6ae9767121 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/pubsub/update.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/pubsub/update.go @@ -232,7 +232,7 @@ func (pb *Pubsub) NewClusters(updates map[string]xdsresource.ClusterUpdateErrTup // from cache, and also send an resource not found error to indicate // resource removed. delete(pb.cdsCache, name) - pb.ldsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + pb.cdsMD[name] = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for wi := range pb.cdsWatchers[name] { wi.resourceNotFound() } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/pubsub/watch.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/pubsub/watch.go index 0baa683175dd2..bef179936a895 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/pubsub/watch.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/pubsub/watch.go @@ -100,9 +100,7 @@ func (wi *watchInfo) timeout() { // Caller must hold wi.mu. func (wi *watchInfo) sendErrorLocked(err error) { - var ( - u interface{} - ) + var u interface{} switch wi.rType { case xdsresource.ListenerResource: u = xdsresource.ListenerUpdate{} @@ -113,6 +111,15 @@ func (wi *watchInfo) sendErrorLocked(err error) { case xdsresource.EndpointsResource: u = xdsresource.EndpointsUpdate{} } + + errMsg := err.Error() + errTyp := xdsresource.ErrType(err) + if errTyp == xdsresource.ErrorTypeUnknown { + err = fmt.Errorf("%v, xDS client nodeID: %s", errMsg, wi.c.nodeID) + } else { + err = xdsresource.NewErrorf(errTyp, "%v, xDS client nodeID: %s", errMsg, wi.c.nodeID) + } + wi.c.scheduleCallback(wi, u, err) } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go index f4951ba8f4886..c07dd4323f769 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go @@ -19,8 +19,6 @@ package xdsclient import ( - "bytes" - "encoding/json" "fmt" "sync" "time" @@ -34,7 +32,7 @@ const ( ) var ( - // This is the Client returned by New(). It contains one client implementation, + // This is the client returned by New(). It contains one client implementation, // and maintains the refcount. singletonClient = &clientRefCounted{} @@ -63,55 +61,6 @@ func (o *onceClosingClient) Close() { o.once.Do(o.XDSClient.Close) } -// clientRefCounted is ref-counted, and to be shared by the xds resolver and -// balancer implementations, across multiple ClientConns and Servers. -type clientRefCounted struct { - *clientImpl - - // This mu protects all the fields, including the embedded clientImpl above. - mu sync.Mutex - refCount int -} - -// New returns a new xdsClient configured by the bootstrap file specified in env -// variable GRPC_XDS_BOOTSTRAP or GRPC_XDS_BOOTSTRAP_CONFIG. -// -// The returned xdsClient is a singleton. This function creates the xds client -// if it doesn't already exist. 
-// -// Note that the first invocation of New() or NewWithConfig() sets the client -// singleton. The following calls will return the singleton xds client without -// checking or using the config. -func New() (XDSClient, error) { - // This cannot just return newRefCounted(), because in error cases, the - // returned nil is a typed nil (*clientRefCounted), which may cause nil - // checks fail. - c, err := newRefCounted() - if err != nil { - return nil, err - } - return c, nil -} - -func newRefCounted() (XDSClient, error) { - return newRefCountedWithConfig(nil) -} - -// NewWithConfig returns a new xdsClient configured by the given config. -// -// The returned xdsClient is a singleton. This function creates the xds client -// if it doesn't already exist. -// -// Note that the first invocation of New() or NewWithConfig() sets the client -// singleton. The following calls will return the singleton xds client without -// checking or using the config. -// -// This function is internal only, for c2p resolver and testing to use. DO NOT -// use this elsewhere. Use New() instead. -func NewWithConfig(config *bootstrap.Config) (XDSClient, error) { - return newRefCountedWithConfig(config) -} - func newRefCountedWithConfig(config *bootstrap.Config) (XDSClient, error) { singletonClient.mu.Lock() defer singletonClient.mu.Unlock() @@ -144,6 +93,16 @@ func newRefCountedWithConfig(config *bootstrap.Config) (XDSClient, error) { return &onceClosingClient{XDSClient: singletonClient}, nil } +// clientRefCounted is ref-counted, and to be shared by the xds resolver and +// balancer implementations, across multiple ClientConns and Servers. +type clientRefCounted struct { + *clientImpl + + // This mu protects all the fields, including the embedded clientImpl above. + mu sync.Mutex + refCount int +} + // Close closes the client. It does ref count of the xds client implementation, // and closes the gRPC connection to the management server when ref count // reaches 0. @@ -159,62 +118,3 @@ func (c *clientRefCounted) Close() { singletonClientImplCloseHook() } } - -// NewWithConfigForTesting returns an xdsClient for the specified bootstrap -// config, separate from the global singleton. -// -// This should be used for testing purposes only. -func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.Duration) (XDSClient, error) { - cl, err := newWithConfig(config, watchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) - if err != nil { - return nil, err - } - return &clientRefCounted{clientImpl: cl, refCount: 1}, nil -} - -// NewWithBootstrapContentsForTesting returns an xdsClient for this config, -// separate from the global singleton. -// -// This should be used for testing purposes only. -func NewWithBootstrapContentsForTesting(contents []byte) (XDSClient, error) { - // Normalize the contents - buf := bytes.Buffer{} - err := json.Indent(&buf, contents, "", "") - if err != nil { - return nil, fmt.Errorf("xds: error normalizing JSON: %v", err) - } - contents = bytes.TrimSpace(buf.Bytes()) - - clientsMu.Lock() - defer clientsMu.Unlock() - if c := clients[string(contents)]; c != nil { - c.mu.Lock() - // Since we don't remove the *Client from the map when it is closed, we - // need to recreate the impl if the ref count dropped to zero. 
- if c.refCount > 0 { - c.refCount++ - c.mu.Unlock() - return c, nil - } - c.mu.Unlock() - } - - bcfg, err := bootstrap.NewConfigFromContentsForTesting(contents) - if err != nil { - return nil, fmt.Errorf("xds: error with bootstrap config: %v", err) - } - - cImpl, err := newWithConfig(bcfg, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) - if err != nil { - return nil, err - } - - c := &clientRefCounted{clientImpl: cImpl, refCount: 1} - clients[string(contents)] = c - return c, nil -} - -var ( - clients = map[string]*clientRefCounted{} - clientsMu sync.Mutex -) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/name.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/name.go index eb1ee323cee9c..80c0efd37b394 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/name.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/name.go @@ -119,7 +119,7 @@ func (n *Name) String() string { path := n.Type if n.ID != "" { - path = path + "/" + n.ID + path = "/" + path + "/" + n.ID } tempURL := &url.URL{ diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go index c64f7c609c62b..faf34f98e3c74 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type.go @@ -20,6 +20,8 @@ package xdsresource import ( "time" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + "github.com/golang/protobuf/proto" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" "google.golang.org/protobuf/types/known/anypb" ) @@ -76,6 +78,21 @@ func IsEndpointsResource(url string) bool { return url == version.V2EndpointsURL || url == version.V3EndpointsURL } +// unwrapResource unwraps and returns the inner resource if it's in a resource +// wrapper. The original resource is returned if it's not wrapped. +func unwrapResource(r *anypb.Any) (*anypb.Any, error) { + url := r.GetTypeUrl() + if url != version.V2ResourceWrapperURL && url != version.V3ResourceWrapperURL { + // Not wrapped. + return r, nil + } + inner := &v3discoverypb.Resource{} + if err := proto.Unmarshal(r.GetValue(), inner); err != nil { + return nil, err + } + return inner.Resource, nil +} + // ServiceStatus is the status of the update. type ServiceStatus int diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go index b61a80b429c43..d459717acd23c 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_cds.go @@ -76,7 +76,7 @@ type OutlierDetection struct { // ejected due to outlier detection. Defaults to 10% but will eject at least // one host regardless of the value. MaxEjectionPercent uint32 - // SuccessRateStddevFactor is used to determine the ejection threshold for + // SuccessRateStdevFactor is used to determine the ejection threshold for // success rate outlier ejection. 
The ejection threshold is the difference // between the mean success rate, and the product of this factor and the // standard deviation of the mean success rate: mean - (stdev * diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go index ad590160f6af3..ec70f32ca4365 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go @@ -64,7 +64,10 @@ type Locality struct { // EndpointsUpdate contains an EDS update. type EndpointsUpdate struct { - Drops []OverloadDropConfig + Drops []OverloadDropConfig + // Localities in the EDS response with `load_balancing_weight` field not set + // or explicitly set to 0 are ignored while parsing the resource, and + // therefore do not show up here. Localities []Locality // Raw is the resource from the xds response. diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go index decffd4ae7673..0504346c399f9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_rds.go @@ -80,7 +80,7 @@ const ( // HashPolicyTypeHeader specifies to hash a Header in the incoming request. HashPolicyTypeHeader HashPolicyType = iota // HashPolicyTypeChannelID specifies to hash a unique Identifier of the - // Channel. In grpc-go, this will be done using the ClientConn pointer. + // Channel. This is a 64-bit random int computed at initialization time. HashPolicyTypeChannelID ) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index eba78716eebd3..3621d61209a0b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -51,6 +51,11 @@ func UnmarshalCluster(opts *UnmarshalOptions) (map[string]ClusterUpdateErrTuple, } func unmarshalClusterResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ClusterUpdate, error) { + r, err := unwrapResource(r) + if err != nil { + return "", ClusterUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + if !IsClusterResource(r.GetTypeUrl()) { return "", ClusterUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) } @@ -150,7 +155,14 @@ func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (Clu // xdsclient bootstrap information now (can be added if necessary). The // ServerConfig will be read and populated by the CDS balancer when // processing this field. - if cluster.GetLrsServer().GetSelf() != nil { + // According to A27: + // If the `lrs_server` field is set, it must have its `self` field set, in + // which case the client should use LRS for load reporting. Otherwise + // (the `lrs_server` field is not set), LRS load reporting will be disabled. 
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go
index f1774dedae434..7d4b89dc9dc10 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go
@@ -42,6 +42,11 @@ func UnmarshalEndpoints(opts *UnmarshalOptions) (map[string]EndpointsUpdateErrTu
 }
 
 func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, EndpointsUpdate, error) {
+	r, err := unwrapResource(r)
+	if err != nil {
+		return "", EndpointsUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err)
+	}
+
 	if !IsEndpointsResource(r.GetTypeUrl()) {
 		return "", EndpointsUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl())
 	}
@@ -52,7 +57,7 @@ func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (str
 	}
 	logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, pretty.ToJSON(cla))
 
-	u, err := parseEDSRespProto(cla)
+	u, err := parseEDSRespProto(cla, logger)
 	if err != nil {
 		return cla.GetClusterName(), EndpointsUpdate{}, err
 	}
@@ -85,39 +90,67 @@ func parseDropPolicy(dropPolicy *v3endpointpb.ClusterLoadAssignment_Policy_DropO
 	}
 }
 
-func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint) []Endpoint {
+func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint) ([]Endpoint, error) {
 	endpoints := make([]Endpoint, 0, len(lbEndpoints))
 	for _, lbEndpoint := range lbEndpoints {
+		// If the load_balancing_weight field is specified, it must be set to a
+		// value of at least 1. If unspecified, each host is presumed to have
+		// equal weight in a locality.
+		weight := uint32(1)
+		if w := lbEndpoint.GetLoadBalancingWeight(); w != nil {
+			if w.GetValue() == 0 {
+				return nil, fmt.Errorf("EDS response contains an endpoint with zero weight: %+v", lbEndpoint)
+			}
+			weight = w.GetValue()
+		}
 		endpoints = append(endpoints, Endpoint{
 			HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()),
 			Address:      parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()),
-			Weight:       lbEndpoint.GetLoadBalancingWeight().GetValue(),
+			Weight:       weight,
 		})
 	}
-	return endpoints
+	return endpoints, nil
 }
 
-func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, error) {
+func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment, logger *grpclog.PrefixLogger) (EndpointsUpdate, error) {
 	ret := EndpointsUpdate{}
 	for _, dropPolicy := range m.GetPolicy().GetDropOverloads() {
 		ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy))
 	}
-	priorities := make(map[uint32]struct{})
+	priorities := make(map[uint32]map[string]bool)
 	for _, locality := range m.Endpoints {
 		l := locality.GetLocality()
 		if l == nil {
 			return EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality)
 		}
+		weight := locality.GetLoadBalancingWeight().GetValue()
+		if weight == 0 {
+			logger.Warningf("Ignoring locality %s with weight 0", pretty.ToJSON(l))
+			continue
+		}
 		lid := internal.LocalityID{
 			Region:  l.Region,
 			Zone:    l.Zone,
 			SubZone: l.SubZone,
 		}
 		priority := locality.GetPriority()
-		priorities[priority] = struct{}{}
+		localitiesWithPriority := priorities[priority]
+		if localitiesWithPriority == nil {
+			localitiesWithPriority = make(map[string]bool)
+			priorities[priority] = localitiesWithPriority
+		}
+		lidStr, _ := lid.ToString()
+		if localitiesWithPriority[lidStr] {
+			return EndpointsUpdate{}, fmt.Errorf("duplicate locality %s with the same priority %v", lidStr, priority)
+		}
+		localitiesWithPriority[lidStr] = true
+		endpoints, err := parseEndpoints(locality.GetLbEndpoints())
+		if err != nil {
+			return EndpointsUpdate{}, err
+		}
 		ret.Localities = append(ret.Localities, Locality{
 			ID:        lid,
-			Endpoints: parseEndpoints(locality.GetLbEndpoints()),
+			Endpoints: endpoints,
 			Weight:    locality.GetLoadBalancingWeight().GetValue(),
 			Priority:  priority,
 		})
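The EDS parsing above encodes two distinct weight rules: an endpoint whose load_balancing_weight is explicitly 0 invalidates the whole resource (NACK), while an unset weight defaults to 1; a locality with weight 0 or unset is merely skipped with a warning. A standalone sketch of the endpoint-level rule, with endpointWeight as a hypothetical extraction of the inline logic in parseEndpoints:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

// endpointWeight applies the defaulting rule: nil means "unspecified, assume
// equal weight (1)", an explicit zero is a hard error, anything else is used
// as-is.
func endpointWeight(w *wrapperspb.UInt32Value) (uint32, error) {
	if w == nil {
		return 1, nil // unspecified: hosts share equal weight in the locality
	}
	if w.GetValue() == 0 {
		return 0, fmt.Errorf("endpoint with zero weight")
	}
	return w.GetValue(), nil
}

func main() {
	for _, w := range []*wrapperspb.UInt32Value{nil, wrapperspb.UInt32(0), wrapperspb.UInt32(7)} {
		weight, err := endpointWeight(w)
		fmt.Println(weight, err) // 1 <nil>; 0 error; 7 <nil>
	}
}
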
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go
index b259c7b87e4c7..2e59c0605c9b6 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_lds.go
@@ -46,6 +46,11 @@ func UnmarshalListener(opts *UnmarshalOptions) (map[string]ListenerUpdateErrTupl
 }
 
 func unmarshalListenerResource(r *anypb.Any, f UpdateValidatorFunc, logger *grpclog.PrefixLogger) (string, ListenerUpdate, error) {
+	r, err := unwrapResource(r)
+	if err != nil {
+		return "", ListenerUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err)
+	}
+
 	if !IsListenerResource(r.GetTypeUrl()) {
 		return "", ListenerUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl())
 	}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go
index f43b18292f0c3..32c48d46b6919 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_rds.go
@@ -46,6 +46,11 @@ func UnmarshalRouteConfig(opts *UnmarshalOptions) (map[string]RouteConfigUpdateE
 }
 
 func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, RouteConfigUpdate, error) {
+	r, err := unwrapResource(r)
+	if err != nil {
+		return "", RouteConfigUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err)
+	}
+
 	if !IsRouteConfigResource(r.GetTypeUrl()) {
 		return "", RouteConfigUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl())
 	}
@@ -142,6 +147,12 @@ func processClusterSpecifierPlugins(csps []*v3routepb.ClusterSpecifierPlugin) (m
 	for _, csp := range csps {
 		cs := clusterspecifier.Get(csp.GetExtension().GetTypedConfig().GetTypeUrl())
 		if cs == nil {
+			if csp.GetIsOptional() {
+				// "If a plugin is not supported but has is_optional set, then
+				// we will ignore any routes that point to that plugin"
+				cspCfgs[csp.GetExtension().GetName()] = nil
+				continue
+			}
 			// "If no plugin is registered for it, the resource will be NACKed."
 			// - RLS in xDS design
 			return nil, fmt.Errorf("cluster specifier %q of type %q was not found", csp.GetExtension().GetName(), csp.GetExtension().GetTypedConfig().GetTypeUrl())
@@ -349,11 +360,23 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif
 			if totalWeight == 0 {
 				return nil, nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a)
 			}
-		case *v3routepb.RouteAction_ClusterHeader:
-			continue
 		case *v3routepb.RouteAction_ClusterSpecifierPlugin:
+			// gRFC A28 was updated to say the following:
+			//
+			// The route’s action field must be route, and its
+			// cluster_specifier:
+			// - Can be Cluster
+			// - Can be Weighted_clusters
+			//   - The sum of weights must add up to the total_weight.
+			// - Can be unset or an unsupported field. The route containing
+			//   this action will be ignored.
+			//
+			// This means that if this env var is not set, we should treat
+			// it as if we didn't know about the cluster_specifier_plugin
+			// at all.
 			if !envconfig.XDSRLS {
-				return nil, nil, fmt.Errorf("route %+v, has an unknown ClusterSpecifier: %+v", r, a)
+				logger.Infof("route %+v contains route_action with unsupported field: cluster_specifier_plugin, the route will be ignored", r)
+				continue
 			}
 			if _, ok := csps[a.ClusterSpecifierPlugin]; !ok {
 				// "When processing RouteActions, if any action includes a
@@ -362,10 +385,15 @@ func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecif
 				// resource will be NACKed." - RLS in xDS design
 				return nil, nil, fmt.Errorf("route %+v, action %+v, specifies a cluster specifier plugin %+v that is not in Route Configuration", r, a, a.ClusterSpecifierPlugin)
 			}
+			if csps[a.ClusterSpecifierPlugin] == nil {
+				logger.Infof("route %+v references optional and unsupported cluster specifier plugin %v, the route will be ignored", r, a.ClusterSpecifierPlugin)
+				continue
+			}
 			cspNames[a.ClusterSpecifierPlugin] = true
 			route.ClusterSpecifierPlugin = a.ClusterSpecifierPlugin
 		default:
-			return nil, nil, fmt.Errorf("route %+v, has an unknown ClusterSpecifier: %+v", r, a)
+			logger.Infof("route %+v references unknown ClusterSpecifier %+v, the route will be ignored", r, a)
+			continue
 		}
 
 		msd := action.GetMaxStreamDuration()
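The is_optional handling above works in two stages: processClusterSpecifierPlugins records an unsupported-but-optional plugin as a nil entry in the config map, and routesProtoToSlice later skips any route pointing at a nil entry, NACKing only when a route names a plugin that is absent from the map entirely. A toy sketch of that three-way disposition (the names here are illustrative, not the vendored ones):

package main

import "fmt"

// routeDisposition mimics the map convention used above: a missing key means
// the plugin was never declared in the RouteConfiguration (NACK), a nil value
// means declared, optional, and unsupported (skip the route), and a non-nil
// value means the plugin is usable.
func routeDisposition(pluginConfigs map[string]interface{}, name string) (string, error) {
	cfg, ok := pluginConfigs[name]
	if !ok {
		return "", fmt.Errorf("cluster specifier plugin %q is not in the RouteConfiguration: NACK", name)
	}
	if cfg == nil {
		return "ignore route", nil // optional and unsupported
	}
	return "use plugin", nil
}

func main() {
	cfgs := map[string]interface{}{
		"rls":                  struct{}{}, // supported plugin with a parsed config
		"optional-unsupported": nil,        // is_optional set, no registered implementation
	}
	for _, name := range []string{"rls", "optional-unsupported", "missing"} {
		d, err := routeDisposition(cfgs, name)
		fmt.Printf("%s: %q %v\n", name, d, err)
	}
}
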
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go
index edfa68762f6e5..2c4819abddc0f 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version/version.go
@@ -42,6 +42,7 @@ const (
 	V2ClusterType     = "envoy.api.v2.Cluster"
 	V2EndpointsType   = "envoy.api.v2.ClusterLoadAssignment"
 
+	V2ResourceWrapperURL = googleapiPrefix + "envoy.api.v2.Resource"
 	V2ListenerURL    = googleapiPrefix + V2ListenerType
 	V2RouteConfigURL = googleapiPrefix + V2RouteConfigType
 	V2ClusterURL     = googleapiPrefix + V2ClusterType
@@ -53,6 +54,7 @@ const (
 	V3ClusterType     = "envoy.config.cluster.v3.Cluster"
 	V3EndpointsType   = "envoy.config.endpoint.v3.ClusterLoadAssignment"
 
+	V3ResourceWrapperURL = googleapiPrefix + "envoy.service.discovery.v3.Resource"
 	V3ListenerURL    = googleapiPrefix + V3ListenerType
 	V3RouteConfigURL = googleapiPrefix + V3RouteConfigType
 	V3ClusterURL     = googleapiPrefix + V3ClusterType
diff --git a/vendor/google.golang.org/grpc/xds/server.go b/vendor/google.golang.org/grpc/xds/server.go
index 0319ddcaf533a..5ab8a5a98008d 100644
--- a/vendor/google.golang.org/grpc/xds/server.go
+++ b/vendor/google.golang.org/grpc/xds/server.go
@@ -105,10 +105,10 @@ func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer {
 	s := &GRPCServer{
 		gs:   newGRPCServer(newOpts...),
 		quit: grpcsync.NewEvent(),
-		opts: handleServerOptions(opts),
 	}
 	s.logger = prefixLogger(s)
 	s.logger.Infof("Created xds.GRPCServer")
+	s.handleServerOptions(opts)
 
 	// We type assert our underlying gRPC server to the real grpc.Server here
 	// before trying to retrieve the configured credentials. This approach
@@ -128,14 +128,35 @@ func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer {
 
 // handleServerOptions iterates through the list of server options passed in by
 // the user, and handles the xDS server specific options.
-func handleServerOptions(opts []grpc.ServerOption) *serverOptions {
-	so := &serverOptions{}
+func (s *GRPCServer) handleServerOptions(opts []grpc.ServerOption) {
+	so := s.defaultServerOptions()
 	for _, opt := range opts {
 		if o, ok := opt.(*serverOption); ok {
 			o.apply(so)
 		}
 	}
-	return so
+	s.opts = so
+}
+
+func (s *GRPCServer) defaultServerOptions() *serverOptions {
+	return &serverOptions{
+		// A default serving mode change callback which simply logs at the
+		// default-visible log level. This will be used if the application does not
+		// register a mode change callback.
+		//
+		// Note that this means that `s.opts.modeCallback` will never be nil and can
+		// safely be invoked directly from `handleServingModeChanges`.
+		modeCallback: s.loggingServerModeChangeCallback,
+	}
+}
+
+func (s *GRPCServer) loggingServerModeChangeCallback(addr net.Addr, args ServingModeChangeArgs) {
+	switch args.Mode {
+	case connectivity.ServingModeServing:
+		s.logger.Errorf("Listener %q entering mode: %q", addr.String(), args.Mode)
+	case connectivity.ServingModeNotServing:
+		s.logger.Errorf("Listener %q entering mode: %q due to error: %v", addr.String(), args.Mode, args.Err)
+	}
 }
 
 // RegisterService registers a service and its implementation to the underlying
@@ -291,12 +312,16 @@ func (s *GRPCServer) handleServingModeChanges(updateCh *buffer.Unbounded) {
 					drainServerTransports(gs, args.addr.String())
 				}
 			}
-			if s.opts.modeCallback != nil {
-				s.opts.modeCallback(args.addr, ServingModeChangeArgs{
-					Mode: args.mode,
-					Err:  args.err,
-				})
-			}
+
+			// The XdsServer API will allow applications to register a "serving state"
+			// callback to be invoked when the server begins serving and when the
+			// server encounters errors that force it to be "not serving". If "not
+			// serving", the callback must be provided error information, for
+			// debugging use by developers - A36.
+			s.opts.modeCallback(args.addr, ServingModeChangeArgs{
+				Mode: args.mode,
+				Err:  args.err,
+			})
 		}
 	}
 }
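With the changes above, s.opts.modeCallback is always non-nil: the logging callback is installed by default and replaced when the application supplies its own through the server option. A sketch of registering a custom callback, assuming only the public xds API shown in this diff (service registration and Serve are elided):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/xds"
)

func main() {
	// Override the default logging callback installed by defaultServerOptions.
	modeCB := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) {
		if args.Mode == connectivity.ServingModeNotServing {
			log.Printf("listener %s stopped serving: %v", addr, args.Err)
			return
		}
		log.Printf("listener %s is serving", addr)
	})
	server := xds.NewGRPCServer(modeCB)
	_ = server // register services and call Serve(lis) as usual
}
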
diff --git a/vendor/google.golang.org/grpc/xds/xds.go b/vendor/google.golang.org/grpc/xds/xds.go
index 744f3f139645e..2fbce34663c05 100644
--- a/vendor/google.golang.org/grpc/xds/xds.go
+++ b/vendor/google.golang.org/grpc/xds/xds.go
@@ -32,16 +32,17 @@ import (
 	v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3"
 
 	"google.golang.org/grpc"
+	_ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin.
+	"google.golang.org/grpc/internal"
 	internaladmin "google.golang.org/grpc/internal/admin"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/xds/csds"
-
-	_ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin.
 	_ "google.golang.org/grpc/xds/internal/balancer"                // Register the balancers.
+	_ "google.golang.org/grpc/xds/internal/clusterspecifier/rls"    // Register the RLS cluster specifier plugin. Note that this does not register the RLS LB policy.
 	_ "google.golang.org/grpc/xds/internal/httpfilter/fault"        // Register the fault injection filter.
 	_ "google.golang.org/grpc/xds/internal/httpfilter/rbac"         // Register the RBAC filter.
 	_ "google.golang.org/grpc/xds/internal/httpfilter/router"       // Register the router filter.
-	xdsresolver "google.golang.org/grpc/xds/internal/resolver"      // Register the xds_resolver.
+	_ "google.golang.org/grpc/xds/internal/resolver"                // Register the xds_resolver
 	_ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // Register the v2 xDS API client.
 	_ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3" // Register the v3 xDS API client.
 )
@@ -75,8 +76,8 @@ func init() {
 	})
 }
 
-// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
-// the provided xds bootstrap config instead of the global configuration from
+// NewXDSResolverWithConfigForTesting creates a new xDS resolver builder using
+// the provided xDS bootstrap config instead of the global configuration from
 // the supported environment variables. The resolver.Builder is meant to be
 // used in conjunction with the grpc.WithResolvers DialOption.
 //
@@ -90,5 +91,5 @@ func init() {
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
 // later release.
 func NewXDSResolverWithConfigForTesting(bootstrapConfig []byte) (resolver.Builder, error) {
-	return xdsresolver.NewBuilderForTesting(bootstrapConfig)
+	return internal.NewXDSResolverWithConfigForTesting.(func([]byte) (resolver.Builder, error))(bootstrapConfig)
 }
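NewXDSResolverWithConfigForTesting now routes through the internal package instead of importing the resolver package directly, which is what lets the resolver import above become a blank registration import. Its intended use is unchanged; a sketch, where the bootstrap JSON is a placeholder rather than a complete working config:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/xds"
)

func main() {
	// Placeholder bootstrap; a real config needs server features, creds, etc.
	bootstrap := []byte(`{"xds_servers": [{"server_uri": "localhost:9999"}]}`)
	resolverBuilder, err := xds.NewXDSResolverWithConfigForTesting(bootstrap)
	if err != nil {
		log.Fatalf("building test resolver: %v", err)
	}
	// Pass the builder per-connection instead of relying on the globally
	// registered xds resolver and its environment-driven bootstrap.
	conn, err := grpc.Dial(
		"xds:///my-test-service",
		grpc.WithResolvers(resolverBuilder),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
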
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ce6a718da7181..07eae41fecb26 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1513,8 +1513,8 @@ google.golang.org/genproto/googleapis/rpc/status
 google.golang.org/genproto/googleapis/type/date
 google.golang.org/genproto/googleapis/type/expr
 google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/grpc v1.50.1 => google.golang.org/grpc v1.45.0
-## explicit; go 1.14
+# google.golang.org/grpc v1.50.1
+## explicit; go 1.17
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -1528,6 +1528,7 @@ google.golang.org/grpc/balancer/weightedroundrobin
 google.golang.org/grpc/balancer/weightedtarget
 google.golang.org/grpc/balancer/weightedtarget/weightedaggregator
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
+google.golang.org/grpc/channelz
 google.golang.org/grpc/codes
 google.golang.org/grpc/connectivity
 google.golang.org/grpc/credentials
@@ -1551,6 +1552,7 @@ google.golang.org/grpc/health/grpc_health_v1
 google.golang.org/grpc/internal
 google.golang.org/grpc/internal/admin
 google.golang.org/grpc/internal/backoff
+google.golang.org/grpc/internal/balancer/gracefulswitch
 google.golang.org/grpc/internal/balancergroup
 google.golang.org/grpc/internal/balancerload
 google.golang.org/grpc/internal/binarylog
@@ -1568,6 +1570,7 @@ google.golang.org/grpc/internal/grpcutil
 google.golang.org/grpc/internal/hierarchy
 google.golang.org/grpc/internal/metadata
 google.golang.org/grpc/internal/pretty
+google.golang.org/grpc/internal/proto/grpc_lookup_v1
 google.golang.org/grpc/internal/resolver
 google.golang.org/grpc/internal/resolver/dns
 google.golang.org/grpc/internal/resolver/passthrough
@@ -1601,9 +1604,11 @@ google.golang.org/grpc/xds/internal/balancer/clusterimpl
 google.golang.org/grpc/xds/internal/balancer/clustermanager
 google.golang.org/grpc/xds/internal/balancer/clusterresolver
 google.golang.org/grpc/xds/internal/balancer/loadstore
+google.golang.org/grpc/xds/internal/balancer/outlierdetection
 google.golang.org/grpc/xds/internal/balancer/priority
 google.golang.org/grpc/xds/internal/balancer/ringhash
 google.golang.org/grpc/xds/internal/clusterspecifier
+google.golang.org/grpc/xds/internal/clusterspecifier/rls
 google.golang.org/grpc/xds/internal/httpfilter
 google.golang.org/grpc/xds/internal/httpfilter/fault
 google.golang.org/grpc/xds/internal/httpfilter/rbac
@@ -1951,6 +1956,5 @@ sigs.k8s.io/yaml
 # github.com/hashicorp/consul => github.com/hashicorp/consul v1.5.1
 # github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85
 # github.com/cloudflare/cloudflare-go => github.com/cyriltovena/cloudflare-go v0.27.1-0.20211118103540-ff77400bcb93
-# google.golang.org/grpc => google.golang.org/grpc v1.45.0
 # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe
 # github.com/go-kit/log => github.com/dannykopping/go-kit-log v0.2.2-0.20221002180827-5591c1641b6b