diff --git a/go.mod b/go.mod
index 1f9c7ca2e1..80a3ba01f7 100644
--- a/go.mod
+++ b/go.mod
@@ -40,7 +40,6 @@ require (
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
- github.com/go-chi/chi v4.1.2+incompatible // indirect
github.com/go-jose/go-jose/v3 v3.0.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -54,15 +53,12 @@ require (
github.com/go-openapi/strfmt v0.21.7 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-openapi/validate v0.22.1 // indirect
- github.com/go-playground/locales v0.14.1 // indirect
- github.com/go-playground/universal-translator v0.18.1 // indirect
- github.com/go-playground/validator/v10 v10.13.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-containerregistry v0.14.0 // indirect
github.com/google/go-intervals v0.0.2 // indirect
- github.com/google/trillian v1.5.1 // indirect
+ github.com/google/trillian v1.5.2 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -75,7 +71,6 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.16.5 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
- github.com/leodido/go-urn v1.2.3 // indirect
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
@@ -99,7 +94,7 @@ require (
github.com/russross/blackfriday v2.0.0+incompatible // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/sigstore/fulcio v1.3.1 // indirect
- github.com/sigstore/rekor v1.1.1 // indirect
+ github.com/sigstore/rekor v1.2.2-0.20230529154427-55a5a338d149 // indirect
github.com/sigstore/sigstore v1.6.4 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
@@ -107,7 +102,6 @@ require (
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
github.com/theupdateframework/go-tuf v0.5.2 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
- github.com/transparency-dev/merkle v0.0.1 // indirect
github.com/ulikunitz/xz v0.5.11 // indirect
github.com/vbatts/tar-split v0.11.3 // indirect
github.com/vbauerster/mpb/v8 v8.4.0 // indirect
@@ -120,23 +114,19 @@ require (
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel v1.15.0 // indirect
go.opentelemetry.io/otel/trace v1.15.0 // indirect
- go.uber.org/atomic v1.10.0 // indirect
- go.uber.org/multierr v1.11.0 // indirect
- go.uber.org/zap v1.24.0 // indirect
- golang.org/x/crypto v0.8.0 // indirect
+ golang.org/x/crypto v0.9.0 // indirect
golang.org/x/mod v0.10.0 // indirect
- golang.org/x/net v0.9.0 // indirect
+ golang.org/x/net v0.10.0 // indirect
golang.org/x/oauth2 v0.7.0 // indirect
golang.org/x/sync v0.2.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
- golang.org/x/tools v0.7.0 // indirect
+ golang.org/x/tools v0.8.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
- google.golang.org/grpc v1.54.0 // indirect
+ google.golang.org/grpc v1.55.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
- k8s.io/klog/v2 v2.90.1 // indirect
)
diff --git a/go.sum b/go.sum
index 9829016cda..e64218a084 100644
--- a/go.sum
+++ b/go.sum
@@ -18,7 +18,6 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat6
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
@@ -77,11 +76,8 @@ github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVB
github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
-github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec=
-github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo=
github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
-github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -122,15 +118,7 @@ github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
-github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
-github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
-github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
-github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
-github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
-github.com/go-playground/validator/v10 v10.13.0 h1:cFRQdfaSMCOSfGCCLB20MHvuoHb/s5G8L5pu2ppK5AQ=
-github.com/go-playground/validator/v10 v10.13.0/go.mod h1:dwu7+CG8/CtBiJFZDz4e+5Upb6OLw04gtBYw0mcG/z4=
github.com/go-rod/rod v0.112.9 h1:uA/yLbB+t0UlqJcLJtK2pZrCNPzd15dOKRUEOnmnt9k=
-github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
@@ -167,7 +155,6 @@ github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -192,7 +179,6 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw=
@@ -200,9 +186,9 @@ github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMd
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
-github.com/google/trillian v1.5.1 h1:2p1l13f0eWd7eOShwarwIxutYYnGzY/5S+xYewQIPkU=
-github.com/google/trillian v1.5.1/go.mod h1:EcDttN8nf+EoAiyLigBAp9ebncZI6rhJPyxZ+dQ6HSo=
+github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c h1:lvddKcYTQ545ADhBujtIJmqQrZBDsGo7XIMbAQe/sNY=
+github.com/google/trillian v1.5.2 h1:roGP6G8aaAch7vP08+oitPkvmZzxjTfIkguozqJ04Ok=
+github.com/google/trillian v1.5.2/go.mod h1:H8vOoa2dxd3xCdMzOOwt9kIz/3MSoJhcqLJGG8iRwbg=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -254,11 +240,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA=
-github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I=
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0=
-github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -296,7 +279,6 @@ github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY=
github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -327,9 +309,9 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
-github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM=
+github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -352,8 +334,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5I
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y=
github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU=
-github.com/sigstore/rekor v1.1.1 h1:JCeSss+qUHnCATmwAZh4zT9k0Frdyq0BjmRwewSfEy4=
-github.com/sigstore/rekor v1.1.1/go.mod h1:x/xK+HK08MiuJv+v4OxY/Oo3bhuz1DtJXNJrV7hrzvs=
+github.com/sigstore/rekor v1.2.2-0.20230529154427-55a5a338d149 h1:nq4M06IMfNREIBMkCGVyQQJMTZi5YNqeoaVV9yzIARU=
+github.com/sigstore/rekor v1.2.2-0.20230529154427-55a5a338d149/go.mod h1:LiLDoAgQf+dFuuRg8y+iXBJekKkQueIrpcKzDYcUnvQ=
github.com/sigstore/sigstore v1.6.4 h1:jH4AzR7qlEH/EWzm+opSpxCfuUcjHL+LJPuQE7h40WE=
github.com/sigstore/sigstore v1.6.4/go.mod h1:pjR64lBxnjoSrAr+Ydye/FV73IfrgtoYlAI11a8xMfA=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -385,7 +367,6 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/sylabs/sif/v2 v2.11.3 h1:EQxi5zl6i5DsbVal9HHpk/zuSx7aNLeZBy8vmvFz838=
@@ -400,8 +381,6 @@ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
-github.com/transparency-dev/merkle v0.0.1 h1:T9/9gYB8uZl7VOJIhdwjALeRWlxUxSfDEysjfmx+L9E=
-github.com/transparency-dev/merkle v0.0.1/go.mod h1:B8FIw5LTq6DaULoHsVFRzYIUDkl8yuSwCdZnOZGKL/A=
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
@@ -452,13 +431,7 @@ go.opentelemetry.io/otel v1.15.0/go.mod h1:qfwLEbWhLPk5gyWrne4XnF0lC8wtywbuJbgfA
go.opentelemetry.io/otel/sdk v1.15.0 h1:jZTCkRRd08nxD6w7rIaZeDNGZGGQstH3SfLQ3ZsKICk=
go.opentelemetry.io/otel/trace v1.15.0 h1:5Fwje4O2ooOxkfyqI/kJwxWotggDLix4BSAvpE1wlpo=
go.opentelemetry.io/otel/trace v1.15.0/go.mod h1:CUsmE2Ht1CRkvE8OsMESvraoZrrcgD1J2W8GV1ev0Y4=
-go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
-go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
-go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
-go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
-go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
@@ -469,8 +442,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
-golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
+golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o=
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
@@ -499,8 +472,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
-golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
@@ -571,8 +544,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
-golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
+golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -591,8 +564,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
-google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
+google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
+google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -634,5 +607,4 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
-k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
diff --git a/vendor/github.com/go-chi/chi/.gitignore b/vendor/github.com/go-chi/chi/.gitignore
deleted file mode 100644
index ba22c99a99..0000000000
--- a/vendor/github.com/go-chi/chi/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-.idea
-*.sw?
-.vscode
diff --git a/vendor/github.com/go-chi/chi/.travis.yml b/vendor/github.com/go-chi/chi/.travis.yml
deleted file mode 100644
index 7b8e26bcee..0000000000
--- a/vendor/github.com/go-chi/chi/.travis.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-language: go
-
-go:
- - 1.10.x
- - 1.11.x
- - 1.12.x
- - 1.13.x
- - 1.14.x
-
-script:
- - go get -d -t ./...
- - go vet ./...
- - go test ./...
- - >
- go_version=$(go version);
- if [ ${go_version:13:4} = "1.12" ]; then
- go get -u golang.org/x/tools/cmd/goimports;
- goimports -d -e ./ | grep '.*' && { echo; echo "Aborting due to non-empty goimports output."; exit 1; } || :;
- fi
-
diff --git a/vendor/github.com/go-chi/chi/CHANGELOG.md b/vendor/github.com/go-chi/chi/CHANGELOG.md
deleted file mode 100644
index 9a64a72eec..0000000000
--- a/vendor/github.com/go-chi/chi/CHANGELOG.md
+++ /dev/null
@@ -1,190 +0,0 @@
-# Changelog
-
-## v4.1.2 (2020-06-02)
-
-- fix that handles MethodNotAllowed with path variables, thank you @caseyhadden for your contribution
-- fix to replace nested wildcards correctly in RoutePattern, thank you @@unmultimedio for your contribution
-- History of changes: see https://github.com/go-chi/chi/compare/v4.1.1...v4.1.2
-
-
-## v4.1.1 (2020-04-16)
-
-- fix for issue https://github.com/go-chi/chi/issues/411 which allows for overlapping regexp
- route to the correct handler through a recursive tree search, thanks to @Jahaja for the PR/fix!
-- new middleware.RouteHeaders as a simple router for request headers with wildcard support
-- History of changes: see https://github.com/go-chi/chi/compare/v4.1.0...v4.1.1
-
-
-## v4.1.0 (2020-04-1)
-
-- middleware.LogEntry: Write method on interface now passes the response header
- and an extra interface type useful for custom logger implementations.
-- middleware.WrapResponseWriter: minor fix
-- middleware.Recoverer: a bit prettier
-- History of changes: see https://github.com/go-chi/chi/compare/v4.0.4...v4.1.0
-
-
-## v4.0.4 (2020-03-24)
-
-- middleware.Recoverer: new pretty stack trace printing (https://github.com/go-chi/chi/pull/496)
-- a few minor improvements and fixes
-- History of changes: see https://github.com/go-chi/chi/compare/v4.0.3...v4.0.4
-
-
-## v4.0.3 (2020-01-09)
-
-- core: fix regexp routing to include default value when param is not matched
-- middleware: rewrite of middleware.Compress
-- middleware: suppress http.ErrAbortHandler in middleware.Recoverer
-- History of changes: see https://github.com/go-chi/chi/compare/v4.0.2...v4.0.3
-
-
-## v4.0.2 (2019-02-26)
-
-- Minor fixes
-- History of changes: see https://github.com/go-chi/chi/compare/v4.0.1...v4.0.2
-
-
-## v4.0.1 (2019-01-21)
-
-- Fixes issue with compress middleware: #382 #385
-- History of changes: see https://github.com/go-chi/chi/compare/v4.0.0...v4.0.1
-
-
-## v4.0.0 (2019-01-10)
-
-- chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8
-- router: respond with 404 on router with no routes (#362)
-- router: additional check to ensure wildcard is at the end of a url pattern (#333)
-- middleware: deprecate use of http.CloseNotifier (#347)
-- middleware: fix RedirectSlashes to include query params on redirect (#334)
-- History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0
-
-
-## v3.3.4 (2019-01-07)
-
-- Minor middleware improvements. No changes to core library/router. Moving v3 into its
-- own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11
-- History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4
-
-
-## v3.3.3 (2018-08-27)
-
-- Minor release
-- See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3
-
-
-## v3.3.2 (2017-12-22)
-
-- Support to route trailing slashes on mounted sub-routers (#281)
-- middleware: new `ContentCharset` to check matching charsets. Thank you
- @csucu for your community contribution!
-
-
-## v3.3.1 (2017-11-20)
-
-- middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types
-- middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value
-- Minor bug fixes
-
-
-## v3.3.0 (2017-10-10)
-
-- New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage
-- Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function
-
-
-## v3.2.1 (2017-08-31)
-
-- Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface
- and `Mux`. Match searches the mux's routing tree for a handler that matches the method/path
-- Add new `RouteMethod` to `*Context`
-- Add new `Routes` pointer to `*Context`
-- Add new `middleware.GetHead` to route missing HEAD requests to GET handler
-- Updated benchmarks (see README)
-
-
-## v3.1.5 (2017-08-02)
-
-- Setup golint and go vet for the project
-- As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler`
- to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler`
-
-
-## v3.1.0 (2017-07-10)
-
-- Fix a few minor issues after v3 release
-- Move `docgen` sub-pkg to https://github.com/go-chi/docgen
-- Move `render` sub-pkg to https://github.com/go-chi/render
-- Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime
- suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in
- https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage.
-
-
-## v3.0.0 (2017-06-21)
-
-- Major update to chi library with many exciting updates, but also some *breaking changes*
-- URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as
- `/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the
- same router
-- Support for regexp for routing patterns, in the form of `/{paramKey:regExp}` for example:
- `r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")`
-- Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as
- `r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like
- in `_examples/custom-handler`
-- Deprecating `mux#FileServer` helper function. Instead, we encourage users to create their
- own using file handler with the stdlib, see `_examples/fileserver` for an example
-- Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()`
-- Moved the chi project to its own organization, to allow chi-related community packages to
- be easily discovered and supported, at: https://github.com/go-chi
-- *NOTE:* please update your import paths to `"github.com/go-chi/chi"`
-- *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2
-
-
-## v2.1.0 (2017-03-30)
-
-- Minor improvements and update to the chi core library
-- Introduced a brand new `chi/render` sub-package to complete the story of building
- APIs to offer a pattern for managing well-defined request / response payloads. Please
- check out the updated `_examples/rest` example for how it works.
-- Added `MethodNotAllowed(h http.HandlerFunc)` to chi.Router interface
-
-
-## v2.0.0 (2017-01-06)
-
-- After many months of v2 being in an RC state with many companies and users running it in
- production, the inclusion of some improvements to the middlewares, we are very pleased to
- announce v2.0.0 of chi.
-
-
-## v2.0.0-rc1 (2016-07-26)
-
-- Huge update! chi v2 is a large refactor targetting Go 1.7+. As of Go 1.7, the popular
- community `"net/context"` package has been included in the standard library as `"context"` and
- utilized by `"net/http"` and `http.Request` to managing deadlines, cancelation signals and other
- request-scoped values. We're very excited about the new context addition and are proud to
- introduce chi v2, a minimal and powerful routing package for building large HTTP services,
- with zero external dependencies. Chi focuses on idiomatic design and encourages the use of
- stdlib HTTP handlers and middlwares.
-- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc`
-- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()`
-- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`,
- which provides direct access to URL routing parameters, the routing path and the matching
- routing patterns.
-- Users upgrading from chi v1 to v2, need to:
- 1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to
- the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)`
- 2. Use `chi.URLParam(r *http.Request, paramKey string) string`
- or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value
-
-
-## v1.0.0 (2016-07-01)
-
-- Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older.
-
-
-## v0.9.0 (2016-03-31)
-
-- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33)
-- BREAKING NOTE: due to subtle API changes, previously `chi.URLParams(ctx)["id"]` used to access url parameters
- has changed to: `chi.URLParam(ctx, "id")`
diff --git a/vendor/github.com/go-chi/chi/CONTRIBUTING.md b/vendor/github.com/go-chi/chi/CONTRIBUTING.md
deleted file mode 100644
index c0ac2dfe85..0000000000
--- a/vendor/github.com/go-chi/chi/CONTRIBUTING.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Contributing
-
-## Prerequisites
-
-1. [Install Go][go-install].
-2. Download the sources and switch the working directory:
-
- ```bash
- go get -u -d github.com/go-chi/chi
- cd $GOPATH/src/github.com/go-chi/chi
- ```
-
-## Submitting a Pull Request
-
-A typical workflow is:
-
-1. [Fork the repository.][fork] [This tip maybe also helpful.][go-fork-tip]
-2. [Create a topic branch.][branch]
-3. Add tests for your change.
-4. Run `go test`. If your tests pass, return to the step 3.
-5. Implement the change and ensure the steps from the previous step pass.
-6. Run `goimports -w .`, to ensure the new code conforms to Go formatting guideline.
-7. [Add, commit and push your changes.][git-help]
-8. [Submit a pull request.][pull-req]
-
-[go-install]: https://golang.org/doc/install
-[go-fork-tip]: http://blog.campoy.cat/2014/03/github-and-go-forking-pull-requests-and.html
-[fork]: https://help.github.com/articles/fork-a-repo
-[branch]: http://learn.github.com/p/branching.html
-[git-help]: https://guides.github.com
-[pull-req]: https://help.github.com/articles/using-pull-requests
diff --git a/vendor/github.com/go-chi/chi/LICENSE b/vendor/github.com/go-chi/chi/LICENSE
deleted file mode 100644
index d99f02ffac..0000000000
--- a/vendor/github.com/go-chi/chi/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc.
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/go-chi/chi/README.md b/vendor/github.com/go-chi/chi/README.md
deleted file mode 100644
index 5a8fc9d096..0000000000
--- a/vendor/github.com/go-chi/chi/README.md
+++ /dev/null
@@ -1,441 +0,0 @@
-#
-
-
-[![GoDoc Widget]][GoDoc] [![Travis Widget]][Travis]
-
-`chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's
-especially good at helping you write large REST API services that are kept maintainable as your
-project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to
-handle signaling, cancelation and request-scoped values across a handler chain.
-
-The focus of the project has been to seek out an elegant and comfortable design for writing
-REST API servers, written during the development of the Pressly API service that powers our
-public API service, which in turn powers all of our client-side applications.
-
-The key considerations of chi's design are: project structure, maintainability, standard http
-handlers (stdlib-only), developer productivity, and deconstructing a large system into many small
-parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also
-included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render) and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too!
-
-## Install
-
-`go get -u github.com/go-chi/chi`
-
-
-## Features
-
-* **Lightweight** - cloc'd in ~1000 LOC for the chi router
-* **Fast** - yes, see [benchmarks](#benchmarks)
-* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http`
-* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and subrouter mounting
-* **Context control** - built on new `context` package, providing value chaining, cancellations and timeouts
-* **Robust** - in production at Pressly, CloudFlare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91))
-* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown
-* **No external dependencies** - plain ol' Go stdlib + net/http
-
-
-## Examples
-
-See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples.
-
-
-**As easy as:**
-
-```go
-package main
-
-import (
- "net/http"
-
- "github.com/go-chi/chi"
- "github.com/go-chi/chi/middleware"
-)
-
-func main() {
- r := chi.NewRouter()
- r.Use(middleware.Logger)
- r.Get("/", func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte("welcome"))
- })
- http.ListenAndServe(":3000", r)
-}
-```
-
-**REST Preview:**
-
-Here is a little preview of how routing looks like with chi. Also take a look at the generated routing docs
-in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in
-Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)).
-
-I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed
-above, they will show you all the features of chi and serve as a good form of documentation.
-
-```go
-import (
- //...
- "context"
- "github.com/go-chi/chi"
- "github.com/go-chi/chi/middleware"
-)
-
-func main() {
- r := chi.NewRouter()
-
- // A good base middleware stack
- r.Use(middleware.RequestID)
- r.Use(middleware.RealIP)
- r.Use(middleware.Logger)
- r.Use(middleware.Recoverer)
-
- // Set a timeout value on the request context (ctx), that will signal
- // through ctx.Done() that the request has timed out and further
- // processing should be stopped.
- r.Use(middleware.Timeout(60 * time.Second))
-
- r.Get("/", func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte("hi"))
- })
-
- // RESTy routes for "articles" resource
- r.Route("/articles", func(r chi.Router) {
- r.With(paginate).Get("/", listArticles) // GET /articles
- r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017
-
- r.Post("/", createArticle) // POST /articles
- r.Get("/search", searchArticles) // GET /articles/search
-
- // Regexp url parameters:
- r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug) // GET /articles/home-is-toronto
-
- // Subrouters:
- r.Route("/{articleID}", func(r chi.Router) {
- r.Use(ArticleCtx)
- r.Get("/", getArticle) // GET /articles/123
- r.Put("/", updateArticle) // PUT /articles/123
- r.Delete("/", deleteArticle) // DELETE /articles/123
- })
- })
-
- // Mount the admin sub-router
- r.Mount("/admin", adminRouter())
-
- http.ListenAndServe(":3333", r)
-}
-
-func ArticleCtx(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- articleID := chi.URLParam(r, "articleID")
- article, err := dbGetArticle(articleID)
- if err != nil {
- http.Error(w, http.StatusText(404), 404)
- return
- }
- ctx := context.WithValue(r.Context(), "article", article)
- next.ServeHTTP(w, r.WithContext(ctx))
- })
-}
-
-func getArticle(w http.ResponseWriter, r *http.Request) {
- ctx := r.Context()
- article, ok := ctx.Value("article").(*Article)
- if !ok {
- http.Error(w, http.StatusText(422), 422)
- return
- }
- w.Write([]byte(fmt.Sprintf("title:%s", article.Title)))
-}
-
-// A completely separate router for administrator routes
-func adminRouter() http.Handler {
- r := chi.NewRouter()
- r.Use(AdminOnly)
- r.Get("/", adminIndex)
- r.Get("/accounts", adminListAccounts)
- return r
-}
-
-func AdminOnly(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- ctx := r.Context()
- perm, ok := ctx.Value("acl.permission").(YourPermissionType)
- if !ok || !perm.IsAdmin() {
- http.Error(w, http.StatusText(403), 403)
- return
- }
- next.ServeHTTP(w, r)
- })
-}
-```
-
-
-## Router design
-
-chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree).
-The router is fully compatible with `net/http`.
-
-Built on top of the tree is the `Router` interface:
-
-```go
-// Router consisting of the core routing methods used by chi's Mux,
-// using only the standard net/http.
-type Router interface {
- http.Handler
- Routes
-
- // Use appends one or more middlewares onto the Router stack.
- Use(middlewares ...func(http.Handler) http.Handler)
-
- // With adds inline middlewares for an endpoint handler.
- With(middlewares ...func(http.Handler) http.Handler) Router
-
- // Group adds a new inline-Router along the current routing
- // path, with a fresh middleware stack for the inline-Router.
- Group(fn func(r Router)) Router
-
- // Route mounts a sub-Router along a `pattern`` string.
- Route(pattern string, fn func(r Router)) Router
-
- // Mount attaches another http.Handler along ./pattern/*
- Mount(pattern string, h http.Handler)
-
- // Handle and HandleFunc adds routes for `pattern` that matches
- // all HTTP methods.
- Handle(pattern string, h http.Handler)
- HandleFunc(pattern string, h http.HandlerFunc)
-
- // Method and MethodFunc adds routes for `pattern` that matches
- // the `method` HTTP method.
- Method(method, pattern string, h http.Handler)
- MethodFunc(method, pattern string, h http.HandlerFunc)
-
- // HTTP-method routing along `pattern`
- Connect(pattern string, h http.HandlerFunc)
- Delete(pattern string, h http.HandlerFunc)
- Get(pattern string, h http.HandlerFunc)
- Head(pattern string, h http.HandlerFunc)
- Options(pattern string, h http.HandlerFunc)
- Patch(pattern string, h http.HandlerFunc)
- Post(pattern string, h http.HandlerFunc)
- Put(pattern string, h http.HandlerFunc)
- Trace(pattern string, h http.HandlerFunc)
-
- // NotFound defines a handler to respond whenever a route could
- // not be found.
- NotFound(h http.HandlerFunc)
-
- // MethodNotAllowed defines a handler to respond whenever a method is
- // not allowed.
- MethodNotAllowed(h http.HandlerFunc)
-}
-
-// Routes interface adds two methods for router traversal, which is also
-// used by the github.com/go-chi/docgen package to generate documentation for Routers.
-type Routes interface {
- // Routes returns the routing tree in an easily traversable structure.
- Routes() []Route
-
- // Middlewares returns the list of middlewares in use by the router.
- Middlewares() Middlewares
-
- // Match searches the routing tree for a handler that matches
- // the method/path - similar to routing a http request, but without
- // executing the handler thereafter.
- Match(rctx *Context, method, path string) bool
-}
-```
-
-Each routing method accepts a URL `pattern` and chain of `handlers`. The URL pattern
-supports named params (ie. `/users/{userID}`) and wildcards (ie. `/admin/*`). URL parameters
-can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters
-and `chi.URLParam(r, "*")` for a wildcard parameter.
-
-
-### Middleware handlers
-
-chi's middlewares are just stdlib net/http middleware handlers. There is nothing special
-about them, which means the router and all the tooling is designed to be compatible and
-friendly with any middleware in the community. This offers much better extensibility and reuse
-of packages and is at the heart of chi's purpose.
-
-Here is an example of a standard net/http middleware handler using the new request context
-available in Go. This middleware sets a hypothetical user identifier on the request
-context and calls the next handler in the chain.
-
-```go
-// HTTP middleware setting a value on the request context
-func MyMiddleware(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- ctx := context.WithValue(r.Context(), "user", "123")
- next.ServeHTTP(w, r.WithContext(ctx))
- })
-}
-```
-
-
-### Request handlers
-
-chi uses standard net/http request handlers. This little snippet is an example of a http.Handler
-func that reads a user identifier from the request context - hypothetically, identifying
-the user sending an authenticated request, validated+set by a previous middleware handler.
-
-```go
-// HTTP handler accessing data from the request context.
-func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
- user := r.Context().Value("user").(string)
- w.Write([]byte(fmt.Sprintf("hi %s", user)))
-}
-```
-
-
-### URL parameters
-
-chi's router parses and stores URL parameters right onto the request context. Here is
-an example of how to access URL params in your net/http handlers. And of course, middlewares
-are able to access the same information.
-
-```go
-// HTTP handler accessing the url routing parameters.
-func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
- userID := chi.URLParam(r, "userID") // from a route like /users/{userID}
-
- ctx := r.Context()
- key := ctx.Value("key").(string)
-
- w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key)))
-}
-```
-
-
-## Middlewares
-
-chi comes equipped with an optional `middleware` package, providing a suite of standard
-`net/http` middlewares. Please note, any middleware in the ecosystem that is also compatible
-with `net/http` can be used with chi's mux.
-
-### Core middlewares
-
------------------------------------------------------------------------------------------------------------
-| chi/middleware Handler | description |
-|:----------------------|:---------------------------------------------------------------------------------
-| AllowContentType | Explicit whitelist of accepted request Content-Types |
-| BasicAuth | Basic HTTP authentication |
-| Compress | Gzip compression for clients that accept compressed responses |
-| GetHead | Automatically route undefined HEAD requests to GET handlers |
-| Heartbeat | Monitoring endpoint to check the servers pulse |
-| Logger | Logs the start and end of each request with the elapsed processing time |
-| NoCache | Sets response headers to prevent clients from caching |
-| Profiler | Easily attach net/http/pprof to your routers |
-| RealIP | Sets a http.Request's RemoteAddr to either X-Forwarded-For or X-Real-IP |
-| Recoverer | Gracefully absorb panics and prints the stack trace |
-| RequestID | Injects a request ID into the context of each request |
-| RedirectSlashes | Redirect slashes on routing paths |
-| SetHeader | Short-hand middleware to set a response header key/value |
-| StripSlashes | Strip slashes on routing paths |
-| Throttle | Puts a ceiling on the number of concurrent requests |
-| Timeout | Signals to the request context when the timeout deadline is reached |
-| URLFormat | Parse extension from url and put it on request context |
-| WithValue | Short-hand middleware to set a key/value on the request context |
------------------------------------------------------------------------------------------------------------
-
-### Extra middlewares & packages
-
-Please see https://github.com/go-chi for additional packages.
-
---------------------------------------------------------------------------------------------------------------------
-| package | description |
-|:---------------------------------------------------|:-------------------------------------------------------------
-| [cors](https://github.com/go-chi/cors) | Cross-origin resource sharing (CORS) |
-| [docgen](https://github.com/go-chi/docgen) | Print chi.Router routes at runtime |
-| [jwtauth](https://github.com/go-chi/jwtauth) | JWT authentication |
-| [hostrouter](https://github.com/go-chi/hostrouter) | Domain/host based request routing |
-| [httplog](https://github.com/go-chi/httplog) | Small but powerful structured HTTP request logging |
-| [httprate](https://github.com/go-chi/httprate) | HTTP request rate limiter |
-| [httptracer](https://github.com/go-chi/httptracer) | HTTP request performance tracing library |
-| [httpvcr](https://github.com/go-chi/httpvcr) | Write deterministic tests for external sources |
-| [stampede](https://github.com/go-chi/stampede) | HTTP request coalescer |
---------------------------------------------------------------------------------------------------------------------
-
-please [submit a PR](./CONTRIBUTING.md) if you'd like to include a link to a chi-compatible middleware
-
-
-## context?
-
-`context` is a tiny pkg that provides simple interface to signal context across call stacks
-and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani)
-and is available in stdlib since go1.7.
-
-Learn more at https://blog.golang.org/context
-
-and..
-* Docs: https://golang.org/pkg/context
-* Source: https://github.com/golang/go/tree/master/src/context
-
-
-## Benchmarks
-
-The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark
-
-Results as of Jan 9, 2019 with Go 1.11.4 on Linux X1 Carbon laptop
-
-```shell
-BenchmarkChi_Param 3000000 475 ns/op 432 B/op 3 allocs/op
-BenchmarkChi_Param5 2000000 696 ns/op 432 B/op 3 allocs/op
-BenchmarkChi_Param20 1000000 1275 ns/op 432 B/op 3 allocs/op
-BenchmarkChi_ParamWrite 3000000 505 ns/op 432 B/op 3 allocs/op
-BenchmarkChi_GithubStatic 3000000 508 ns/op 432 B/op 3 allocs/op
-BenchmarkChi_GithubParam 2000000 669 ns/op 432 B/op 3 allocs/op
-BenchmarkChi_GithubAll 10000 134627 ns/op 87699 B/op 609 allocs/op
-BenchmarkChi_GPlusStatic 3000000 402 ns/op 432 B/op 3 allocs/op
-BenchmarkChi_GPlusParam 3000000 500 ns/op 432 B/op 3 allocs/op
-BenchmarkChi_GPlus2Params 3000000 586 ns/op 432 B/op 3 allocs/op
-BenchmarkChi_GPlusAll 200000 7237 ns/op 5616 B/op 39 allocs/op
-BenchmarkChi_ParseStatic 3000000 408 ns/op 432 B/op 3 allocs/op
-BenchmarkChi_ParseParam 3000000 488 ns/op 432 B/op 3 allocs/op
-BenchmarkChi_Parse2Params 3000000 551 ns/op 432 B/op 3 allocs/op
-BenchmarkChi_ParseAll 100000 13508 ns/op 11232 B/op 78 allocs/op
-BenchmarkChi_StaticAll 20000 81933 ns/op 67826 B/op 471 allocs/op
-```
-
-Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc
-
-NOTE: the allocs in the benchmark above are from the calls to http.Request's
-`WithContext(context.Context)` method that clones the http.Request, sets the `Context()`
-on the duplicated (alloc'd) request and returns it the new request object. This is just
-how setting context on a request in Go works.
-
-
-## Credits
-
-* Carl Jackson for https://github.com/zenazn/goji
- * Parts of chi's thinking comes from goji, and chi's middleware package
- sources from goji.
-* Armon Dadgar for https://github.com/armon/go-radix
-* Contributions: [@VojtechVitek](https://github.com/VojtechVitek)
-
-We'll be more than happy to see [your contributions](./CONTRIBUTING.md)!
-
-
-## Beyond REST
-
-chi is just a http router that lets you decompose request handling into many smaller layers.
-Many companies use chi to write REST services for their public APIs. But, REST is just a convention
-for managing state via HTTP, and there's a lot of other pieces required to write a complete client-server
-system or network of microservices.
-
-Looking beyond REST, I also recommend some newer works in the field:
-* [webrpc](https://github.com/webrpc/webrpc) - Web-focused RPC client+server framework with code-gen
-* [gRPC](https://github.com/grpc/grpc-go) - Google's RPC framework via protobufs
-* [graphql](https://github.com/99designs/gqlgen) - Declarative query language
-* [NATS](https://nats.io) - lightweight pub-sub
-
-
-## License
-
-Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka)
-
-Licensed under [MIT License](./LICENSE)
-
-[GoDoc]: https://godoc.org/github.com/go-chi/chi
-[GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg
-[Travis]: https://travis-ci.org/go-chi/chi
-[Travis Widget]: https://travis-ci.org/go-chi/chi.svg?branch=master
diff --git a/vendor/github.com/go-chi/chi/chain.go b/vendor/github.com/go-chi/chi/chain.go
deleted file mode 100644
index 88e6846138..0000000000
--- a/vendor/github.com/go-chi/chi/chain.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package chi
-
-import "net/http"
-
-// Chain returns a Middlewares type from a slice of middleware handlers.
-func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares {
- return Middlewares(middlewares)
-}
-
-// Handler builds and returns a http.Handler from the chain of middlewares,
-// with `h http.Handler` as the final handler.
-func (mws Middlewares) Handler(h http.Handler) http.Handler {
- return &ChainHandler{mws, h, chain(mws, h)}
-}
-
-// HandlerFunc builds and returns a http.Handler from the chain of middlewares,
-// with `h http.Handler` as the final handler.
-func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler {
- return &ChainHandler{mws, h, chain(mws, h)}
-}
-
-// ChainHandler is a http.Handler with support for handler composition and
-// execution.
-type ChainHandler struct {
- Middlewares Middlewares
- Endpoint http.Handler
- chain http.Handler
-}
-
-func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- c.chain.ServeHTTP(w, r)
-}
-
-// chain builds a http.Handler composed of an inline middleware stack and endpoint
-// handler in the order they are passed.
-func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler {
- // Return ahead of time if there aren't any middlewares for the chain
- if len(middlewares) == 0 {
- return endpoint
- }
-
- // Wrap the end handler with the middleware chain
- h := middlewares[len(middlewares)-1](endpoint)
- for i := len(middlewares) - 2; i >= 0; i-- {
- h = middlewares[i](h)
- }
-
- return h
-}
diff --git a/vendor/github.com/go-chi/chi/chi.go b/vendor/github.com/go-chi/chi/chi.go
deleted file mode 100644
index b7063dc297..0000000000
--- a/vendor/github.com/go-chi/chi/chi.go
+++ /dev/null
@@ -1,134 +0,0 @@
-//
-// Package chi is a small, idiomatic and composable router for building HTTP services.
-//
-// chi requires Go 1.10 or newer.
-//
-// Example:
-// package main
-//
-// import (
-// "net/http"
-//
-// "github.com/go-chi/chi"
-// "github.com/go-chi/chi/middleware"
-// )
-//
-// func main() {
-// r := chi.NewRouter()
-// r.Use(middleware.Logger)
-// r.Use(middleware.Recoverer)
-//
-// r.Get("/", func(w http.ResponseWriter, r *http.Request) {
-// w.Write([]byte("root."))
-// })
-//
-// http.ListenAndServe(":3333", r)
-// }
-//
-// See github.com/go-chi/chi/_examples/ for more in-depth examples.
-//
-// URL patterns allow for easy matching of path components in HTTP
-// requests. The matching components can then be accessed using
-// chi.URLParam(). All patterns must begin with a slash.
-//
-// A simple named placeholder {name} matches any sequence of characters
-// up to the next / or the end of the URL. Trailing slashes on paths must
-// be handled explicitly.
-//
-// A placeholder with a name followed by a colon allows a regular
-// expression match, for example {number:\\d+}. The regular expression
-// syntax is Go's normal regexp RE2 syntax, except that regular expressions
-// including { or } are not supported, and / will never be
-// matched. An anonymous regexp pattern is allowed, using an empty string
-// before the colon in the placeholder, such as {:\\d+}
-//
-// The special placeholder of asterisk matches the rest of the requested
-// URL. Any trailing characters in the pattern are ignored. This is the only
-// placeholder which will match / characters.
-//
-// Examples:
-// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/"
-// "/user/{name}/info" matches "/user/jsmith/info"
-// "/page/*" matches "/page/intro/latest"
-// "/page/*/index" also matches "/page/intro/latest"
-// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01"
-//
-package chi
-
-import "net/http"
-
-// NewRouter returns a new Mux object that implements the Router interface.
-func NewRouter() *Mux {
- return NewMux()
-}
-
-// Router consisting of the core routing methods used by chi's Mux,
-// using only the standard net/http.
-type Router interface {
- http.Handler
- Routes
-
- // Use appends one or more middlewares onto the Router stack.
- Use(middlewares ...func(http.Handler) http.Handler)
-
- // With adds inline middlewares for an endpoint handler.
- With(middlewares ...func(http.Handler) http.Handler) Router
-
- // Group adds a new inline-Router along the current routing
- // path, with a fresh middleware stack for the inline-Router.
- Group(fn func(r Router)) Router
-
- // Route mounts a sub-Router along a `pattern`` string.
- Route(pattern string, fn func(r Router)) Router
-
- // Mount attaches another http.Handler along ./pattern/*
- Mount(pattern string, h http.Handler)
-
- // Handle and HandleFunc adds routes for `pattern` that matches
- // all HTTP methods.
- Handle(pattern string, h http.Handler)
- HandleFunc(pattern string, h http.HandlerFunc)
-
- // Method and MethodFunc adds routes for `pattern` that matches
- // the `method` HTTP method.
- Method(method, pattern string, h http.Handler)
- MethodFunc(method, pattern string, h http.HandlerFunc)
-
- // HTTP-method routing along `pattern`
- Connect(pattern string, h http.HandlerFunc)
- Delete(pattern string, h http.HandlerFunc)
- Get(pattern string, h http.HandlerFunc)
- Head(pattern string, h http.HandlerFunc)
- Options(pattern string, h http.HandlerFunc)
- Patch(pattern string, h http.HandlerFunc)
- Post(pattern string, h http.HandlerFunc)
- Put(pattern string, h http.HandlerFunc)
- Trace(pattern string, h http.HandlerFunc)
-
- // NotFound defines a handler to respond whenever a route could
- // not be found.
- NotFound(h http.HandlerFunc)
-
- // MethodNotAllowed defines a handler to respond whenever a method is
- // not allowed.
- MethodNotAllowed(h http.HandlerFunc)
-}
-
-// Routes interface adds methods for router traversal, which are also
-// used by the `docgen` subpackage to generate documentation for Routers.
-type Routes interface {
- // Routes returns the routing tree in an easily traversable structure.
- Routes() []Route
-
- // Middlewares returns the list of middlewares in use by the router.
- Middlewares() Middlewares
-
- // Match searches the routing tree for a handler that matches
- // the method/path - similar to routing a http request, but without
- // executing the handler thereafter.
- Match(rctx *Context, method, path string) bool
-}
-
-// Middlewares type is a slice of standard middleware handlers with methods
-// to compose middleware chains and http.Handlers.
-type Middlewares []func(http.Handler) http.Handler
diff --git a/vendor/github.com/go-chi/chi/context.go b/vendor/github.com/go-chi/chi/context.go
deleted file mode 100644
index 26c609ea2c..0000000000
--- a/vendor/github.com/go-chi/chi/context.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package chi
-
-import (
- "context"
- "net"
- "net/http"
- "strings"
-)
-
-// URLParam returns the url parameter from a http.Request object.
-func URLParam(r *http.Request, key string) string {
- if rctx := RouteContext(r.Context()); rctx != nil {
- return rctx.URLParam(key)
- }
- return ""
-}
-
-// URLParamFromCtx returns the url parameter from a http.Request Context.
-func URLParamFromCtx(ctx context.Context, key string) string {
- if rctx := RouteContext(ctx); rctx != nil {
- return rctx.URLParam(key)
- }
- return ""
-}
-
-// RouteContext returns chi's routing Context object from a
-// http.Request Context.
-func RouteContext(ctx context.Context) *Context {
- val, _ := ctx.Value(RouteCtxKey).(*Context)
- return val
-}
-
-// ServerBaseContext wraps an http.Handler to set the request context to the
-// `baseCtx`.
-func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler {
- fn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- ctx := r.Context()
- baseCtx := baseCtx
-
- // Copy over default net/http server context keys
- if v, ok := ctx.Value(http.ServerContextKey).(*http.Server); ok {
- baseCtx = context.WithValue(baseCtx, http.ServerContextKey, v)
- }
- if v, ok := ctx.Value(http.LocalAddrContextKey).(net.Addr); ok {
- baseCtx = context.WithValue(baseCtx, http.LocalAddrContextKey, v)
- }
-
- h.ServeHTTP(w, r.WithContext(baseCtx))
- })
- return fn
-}
-
-// NewRouteContext returns a new routing Context object.
-func NewRouteContext() *Context {
- return &Context{}
-}
-
-var (
- // RouteCtxKey is the context.Context key to store the request context.
- RouteCtxKey = &contextKey{"RouteContext"}
-)
-
-// Context is the default routing context set on the root node of a
-// request context to track route patterns, URL parameters and
-// an optional routing path.
-type Context struct {
- Routes Routes
-
- // Routing path/method override used during the route search.
- // See Mux#routeHTTP method.
- RoutePath string
- RouteMethod string
-
- // Routing pattern stack throughout the lifecycle of the request,
- // across all connected routers. It is a record of all matching
- // patterns across a stack of sub-routers.
- RoutePatterns []string
-
- // URLParams are the stack of routeParams captured during the
- // routing lifecycle across a stack of sub-routers.
- URLParams RouteParams
-
- // The endpoint routing pattern that matched the request URI path
- // or `RoutePath` of the current sub-router. This value will update
- // during the lifecycle of a request passing through a stack of
- // sub-routers.
- routePattern string
-
-	// Route parameters matched for the current sub-router. It is
-	// intentionally unexported so it can't be tampered with.
- routeParams RouteParams
-
- // methodNotAllowed hint
- methodNotAllowed bool
-}
-
-// Reset a routing context to its initial state.
-func (x *Context) Reset() {
- x.Routes = nil
- x.RoutePath = ""
- x.RouteMethod = ""
- x.RoutePatterns = x.RoutePatterns[:0]
- x.URLParams.Keys = x.URLParams.Keys[:0]
- x.URLParams.Values = x.URLParams.Values[:0]
-
- x.routePattern = ""
- x.routeParams.Keys = x.routeParams.Keys[:0]
- x.routeParams.Values = x.routeParams.Values[:0]
- x.methodNotAllowed = false
-}
-
-// URLParam returns the corresponding URL parameter value from the request
-// routing context.
-func (x *Context) URLParam(key string) string {
- for k := len(x.URLParams.Keys) - 1; k >= 0; k-- {
- if x.URLParams.Keys[k] == key {
- return x.URLParams.Values[k]
- }
- }
- return ""
-}
-
-// RoutePattern builds the routing pattern string for the particular
-// request, at the particular point during routing. This means the value
-// will change throughout the execution of a request in a router. That is
-// why it's advised to only use this value after calling the next handler.
-//
-// For example,
-//
-// func Instrument(next http.Handler) http.Handler {
-// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-// next.ServeHTTP(w, r)
-// routePattern := chi.RouteContext(r.Context()).RoutePattern()
-// measure(w, r, routePattern)
-// })
-// }
-func (x *Context) RoutePattern() string {
- routePattern := strings.Join(x.RoutePatterns, "")
- return replaceWildcards(routePattern)
-}
-
-// replaceWildcards takes a route pattern and recursively replaces all
-// occurrences of "/*/" to "/".
-func replaceWildcards(p string) string {
- if strings.Contains(p, "/*/") {
- return replaceWildcards(strings.Replace(p, "/*/", "/", -1))
- }
-
- return p
-}
-
-// RouteParams is a structure to track URL routing parameters efficiently.
-type RouteParams struct {
- Keys, Values []string
-}
-
-// Add will append a URL parameter to the end of the route params.
-func (s *RouteParams) Add(key, value string) {
- s.Keys = append(s.Keys, key)
- s.Values = append(s.Values, value)
-}
-
-// contextKey is a value for use with context.WithValue. It's used as
-// a pointer so it fits in an interface{} without allocation. This technique
-// for defining context keys was copied from Go 1.7's new use of context in net/http.
-type contextKey struct {
- name string
-}
-
-func (k *contextKey) String() string {
- return "chi context value " + k.name
-}
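// Minimal usage sketch for the URL-parameter helpers removed above, assuming
// the go-chi/chi v4 import path; the route and port are illustrative values.
package main

import (
	"net/http"

	"github.com/go-chi/chi"
)

func main() {
	r := chi.NewRouter()
	// {name} captures a single path segment; it is read back with chi.URLParam.
	r.Get("/user/{name}", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello, " + chi.URLParam(r, "name")))
	})
	http.ListenAndServe(":3333", r)
}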
diff --git a/vendor/github.com/go-chi/chi/middleware/basic_auth.go b/vendor/github.com/go-chi/chi/middleware/basic_auth.go
deleted file mode 100644
index 87b2641a6a..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/basic_auth.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package middleware
-
-import (
- "fmt"
- "net/http"
-)
-
-// BasicAuth implements a simple middleware handler for adding basic http auth to a route.
-func BasicAuth(realm string, creds map[string]string) func(next http.Handler) http.Handler {
- return func(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- user, pass, ok := r.BasicAuth()
- if !ok {
- basicAuthFailed(w, realm)
- return
- }
-
- credPass, credUserOk := creds[user]
- if !credUserOk || pass != credPass {
- basicAuthFailed(w, realm)
- return
- }
-
- next.ServeHTTP(w, r)
- })
- }
-}
-
-func basicAuthFailed(w http.ResponseWriter, realm string) {
- w.Header().Add("WWW-Authenticate", fmt.Sprintf(`Basic realm="%s"`, realm))
- w.WriteHeader(http.StatusUnauthorized)
-}
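// Minimal usage sketch for the BasicAuth middleware removed above; the realm,
// credentials, and port are illustrative values only.
package main

import (
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()
	// Requests without a matching user/password pair receive 401 Unauthorized.
	r.Use(middleware.BasicAuth("restricted", map[string]string{"admin": "s3cret"}))
	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("authenticated"))
	})
	http.ListenAndServe(":3333", r)
}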
diff --git a/vendor/github.com/go-chi/chi/middleware/compress.go b/vendor/github.com/go-chi/chi/middleware/compress.go
deleted file mode 100644
index 2f40cc15af..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/compress.go
+++ /dev/null
@@ -1,399 +0,0 @@
-package middleware
-
-import (
- "bufio"
- "compress/flate"
- "compress/gzip"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "strings"
- "sync"
-)
-
-var defaultCompressibleContentTypes = []string{
- "text/html",
- "text/css",
- "text/plain",
- "text/javascript",
- "application/javascript",
- "application/x-javascript",
- "application/json",
- "application/atom+xml",
- "application/rss+xml",
- "image/svg+xml",
-}
-
-// Compress is a middleware that compresses the response
-// body for the given content types to a data format based
-// on the Accept-Encoding request header. It uses a given
-// compression level.
-//
-// NOTE: make sure to set the Content-Type header on your response,
-// otherwise this middleware will not compress the response body. For example, in
-// your handler you should set w.Header().Set("Content-Type", http.DetectContentType(yourBody))
-// or set it manually.
-//
-// Passing a compression level of 5 is a sensible value.
-func Compress(level int, types ...string) func(next http.Handler) http.Handler {
- compressor := NewCompressor(level, types...)
- return compressor.Handler
-}
-
-// Compressor represents a set of encoding configurations.
-type Compressor struct {
- level int // The compression level.
- // The mapping of encoder names to encoder functions.
- encoders map[string]EncoderFunc
- // The mapping of pooled encoders to pools.
- pooledEncoders map[string]*sync.Pool
- // The set of content types allowed to be compressed.
- allowedTypes map[string]struct{}
- allowedWildcards map[string]struct{}
- // The list of encoders in order of decreasing precedence.
- encodingPrecedence []string
-}
-
-// NewCompressor creates a new Compressor that will handle encoding responses.
-//
-// The level should be one of the ones defined in the flate package.
-// The types are the content types that are allowed to be compressed.
-func NewCompressor(level int, types ...string) *Compressor {
- // If types are provided, set those as the allowed types. If none are
- // provided, use the default list.
- allowedTypes := make(map[string]struct{})
- allowedWildcards := make(map[string]struct{})
- if len(types) > 0 {
- for _, t := range types {
- if strings.Contains(strings.TrimSuffix(t, "/*"), "*") {
- panic(fmt.Sprintf("middleware/compress: Unsupported content-type wildcard pattern '%s'. Only '/*' supported", t))
- }
- if strings.HasSuffix(t, "/*") {
- allowedWildcards[strings.TrimSuffix(t, "/*")] = struct{}{}
- } else {
- allowedTypes[t] = struct{}{}
- }
- }
- } else {
- for _, t := range defaultCompressibleContentTypes {
- allowedTypes[t] = struct{}{}
- }
- }
-
- c := &Compressor{
- level: level,
- encoders: make(map[string]EncoderFunc),
- pooledEncoders: make(map[string]*sync.Pool),
- allowedTypes: allowedTypes,
- allowedWildcards: allowedWildcards,
- }
-
- // Set the default encoders. The precedence order uses the reverse
- // ordering that the encoders were added. This means adding new encoders
- // will move them to the front of the order.
- //
- // TODO:
- // lzma: Opera.
- // sdch: Chrome, Android. Gzip output + dictionary header.
- // br: Brotli, see https://github.com/go-chi/chi/pull/326
-
- // HTTP 1.1 "deflate" (RFC 2616) stands for DEFLATE data (RFC 1951)
- // wrapped with zlib (RFC 1950). The zlib wrapper uses Adler-32
- // checksum compared to CRC-32 used in "gzip" and thus is faster.
- //
- // But.. some old browsers (MSIE, Safari 5.1) incorrectly expect
- // raw DEFLATE data only, without the mentioned zlib wrapper.
- // Because of this major confusion, most modern browsers try it
- // both ways, first looking for zlib headers.
- // Quote by Mark Adler: http://stackoverflow.com/a/9186091/385548
- //
- // The list of browsers having problems is quite big, see:
- // http://zoompf.com/blog/2012/02/lose-the-wait-http-compression
- // https://web.archive.org/web/20120321182910/http://www.vervestudios.co/projects/compression-tests/results
- //
-	// That's why we prefer gzip over deflate. It's just more reliable
-	// and not significantly slower than deflate.
- c.SetEncoder("deflate", encoderDeflate)
-
- // TODO: Exception for old MSIE browsers that can't handle non-HTML?
- // https://zoompf.com/blog/2012/02/lose-the-wait-http-compression
- c.SetEncoder("gzip", encoderGzip)
-
- // NOTE: Not implemented, intentionally:
- // case "compress": // LZW. Deprecated.
- // case "bzip2": // Too slow on-the-fly.
- // case "zopfli": // Too slow on-the-fly.
- // case "xz": // Too slow on-the-fly.
- return c
-}
-
-// SetEncoder can be used to set the implementation of a compression algorithm.
-//
-// The encoding should be a standardised identifier. See:
-// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding
-//
-// For example, add the Brotli algorithm:
-//
-// import brotli_enc "gopkg.in/kothar/brotli-go.v0/enc"
-//
-// compressor := middleware.NewCompressor(5, "text/html")
-// compressor.SetEncoder("br", func(w http.ResponseWriter, level int) io.Writer {
-// params := brotli_enc.NewBrotliParams()
-// params.SetQuality(level)
-// return brotli_enc.NewBrotliWriter(params, w)
-// })
-func (c *Compressor) SetEncoder(encoding string, fn EncoderFunc) {
- encoding = strings.ToLower(encoding)
- if encoding == "" {
- panic("the encoding can not be empty")
- }
- if fn == nil {
- panic("attempted to set a nil encoder function")
- }
-
- // If we are adding a new encoder that is already registered, we have to
- // clear that one out first.
- if _, ok := c.pooledEncoders[encoding]; ok {
- delete(c.pooledEncoders, encoding)
- }
- if _, ok := c.encoders[encoding]; ok {
- delete(c.encoders, encoding)
- }
-
-	// If the encoder supports resetting (ioResetterWriter), then it can be pooled.
- encoder := fn(ioutil.Discard, c.level)
- if encoder != nil {
- if _, ok := encoder.(ioResetterWriter); ok {
- pool := &sync.Pool{
- New: func() interface{} {
- return fn(ioutil.Discard, c.level)
- },
- }
- c.pooledEncoders[encoding] = pool
- }
- }
- // If the encoder is not in the pooledEncoders, add it to the normal encoders.
- if _, ok := c.pooledEncoders[encoding]; !ok {
- c.encoders[encoding] = fn
- }
-
- for i, v := range c.encodingPrecedence {
- if v == encoding {
- c.encodingPrecedence = append(c.encodingPrecedence[:i], c.encodingPrecedence[i+1:]...)
- }
- }
-
- c.encodingPrecedence = append([]string{encoding}, c.encodingPrecedence...)
-}
-
-// Handler returns a new middleware that will compress the response based on the
-// current Compressor.
-func (c *Compressor) Handler(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- encoder, encoding, cleanup := c.selectEncoder(r.Header, w)
-
- cw := &compressResponseWriter{
- ResponseWriter: w,
- w: w,
- contentTypes: c.allowedTypes,
- contentWildcards: c.allowedWildcards,
- encoding: encoding,
- compressable: false, // determined in post-handler
- }
- if encoder != nil {
- cw.w = encoder
- }
- // Re-add the encoder to the pool if applicable.
- defer cleanup()
- defer cw.Close()
-
- next.ServeHTTP(cw, r)
- })
-}
-
-// selectEncoder returns the encoder, the name of the encoder, and a closer function.
-func (c *Compressor) selectEncoder(h http.Header, w io.Writer) (io.Writer, string, func()) {
- header := h.Get("Accept-Encoding")
-
- // Parse the names of all accepted algorithms from the header.
- accepted := strings.Split(strings.ToLower(header), ",")
-
- // Find supported encoder by accepted list by precedence
- for _, name := range c.encodingPrecedence {
- if matchAcceptEncoding(accepted, name) {
- if pool, ok := c.pooledEncoders[name]; ok {
- encoder := pool.Get().(ioResetterWriter)
- cleanup := func() {
- pool.Put(encoder)
- }
- encoder.Reset(w)
- return encoder, name, cleanup
-
- }
- if fn, ok := c.encoders[name]; ok {
- return fn(w, c.level), name, func() {}
- }
- }
-
- }
-
- // No encoder found to match the accepted encoding
- return nil, "", func() {}
-}
-
-func matchAcceptEncoding(accepted []string, encoding string) bool {
- for _, v := range accepted {
- if strings.Contains(v, encoding) {
- return true
- }
- }
- return false
-}
-
-// An EncoderFunc is a function that wraps the provided io.Writer with a
-// streaming compression algorithm and returns it.
-//
-// In case of failure, the function should return nil.
-type EncoderFunc func(w io.Writer, level int) io.Writer
-
-// Interface for types that allow resetting io.Writers.
-type ioResetterWriter interface {
- io.Writer
- Reset(w io.Writer)
-}
-
-type compressResponseWriter struct {
- http.ResponseWriter
-
- // The streaming encoder writer to be used if there is one. Otherwise,
- // this is just the normal writer.
- w io.Writer
- encoding string
- contentTypes map[string]struct{}
- contentWildcards map[string]struct{}
- wroteHeader bool
- compressable bool
-}
-
-func (cw *compressResponseWriter) isCompressable() bool {
- // Parse the first part of the Content-Type response header.
- contentType := cw.Header().Get("Content-Type")
- if idx := strings.Index(contentType, ";"); idx >= 0 {
- contentType = contentType[0:idx]
- }
-
- // Is the content type compressable?
- if _, ok := cw.contentTypes[contentType]; ok {
- return true
- }
- if idx := strings.Index(contentType, "/"); idx > 0 {
- contentType = contentType[0:idx]
- _, ok := cw.contentWildcards[contentType]
- return ok
- }
- return false
-}
-
-func (cw *compressResponseWriter) WriteHeader(code int) {
- if cw.wroteHeader {
- cw.ResponseWriter.WriteHeader(code) // Allow multiple calls to propagate.
- return
- }
- cw.wroteHeader = true
- defer cw.ResponseWriter.WriteHeader(code)
-
- // Already compressed data?
- if cw.Header().Get("Content-Encoding") != "" {
- return
- }
-
- if !cw.isCompressable() {
- cw.compressable = false
- return
- }
-
- if cw.encoding != "" {
- cw.compressable = true
- cw.Header().Set("Content-Encoding", cw.encoding)
- cw.Header().Set("Vary", "Accept-Encoding")
-
- // The content-length after compression is unknown
- cw.Header().Del("Content-Length")
- }
-}
-
-func (cw *compressResponseWriter) Write(p []byte) (int, error) {
- if !cw.wroteHeader {
- cw.WriteHeader(http.StatusOK)
- }
-
- return cw.writer().Write(p)
-}
-
-func (cw *compressResponseWriter) writer() io.Writer {
- if cw.compressable {
- return cw.w
- } else {
- return cw.ResponseWriter
- }
-}
-
-type compressFlusher interface {
- Flush() error
-}
-
-func (cw *compressResponseWriter) Flush() {
- if f, ok := cw.writer().(http.Flusher); ok {
- f.Flush()
- }
- // If the underlying writer has a compression flush signature,
- // call this Flush() method instead
- if f, ok := cw.writer().(compressFlusher); ok {
- f.Flush()
-
- // Also flush the underlying response writer
- if f, ok := cw.ResponseWriter.(http.Flusher); ok {
- f.Flush()
- }
- }
-}
-
-func (cw *compressResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- if hj, ok := cw.writer().(http.Hijacker); ok {
- return hj.Hijack()
- }
- return nil, nil, errors.New("chi/middleware: http.Hijacker is unavailable on the writer")
-}
-
-func (cw *compressResponseWriter) Push(target string, opts *http.PushOptions) error {
- if ps, ok := cw.writer().(http.Pusher); ok {
- return ps.Push(target, opts)
- }
- return errors.New("chi/middleware: http.Pusher is unavailable on the writer")
-}
-
-func (cw *compressResponseWriter) Close() error {
- if c, ok := cw.writer().(io.WriteCloser); ok {
- return c.Close()
- }
- return errors.New("chi/middleware: io.WriteCloser is unavailable on the writer")
-}
-
-func encoderGzip(w io.Writer, level int) io.Writer {
- gw, err := gzip.NewWriterLevel(w, level)
- if err != nil {
- return nil
- }
- return gw
-}
-
-func encoderDeflate(w io.Writer, level int) io.Writer {
- dw, err := flate.NewWriter(w, level)
- if err != nil {
- return nil
- }
- return dw
-}
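// Minimal usage sketch for the Compress middleware removed above; level 5 and
// the content types shown are illustrative choices.
package main

import (
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()
	r.Use(middleware.Compress(5, "text/html", "application/json"))
	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		// Content-Type must be set, or the middleware will not compress the body.
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"ok":true}`))
	})
	http.ListenAndServe(":3333", r)
}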
diff --git a/vendor/github.com/go-chi/chi/middleware/content_charset.go b/vendor/github.com/go-chi/chi/middleware/content_charset.go
deleted file mode 100644
index 07b5ce6f66..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/content_charset.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package middleware
-
-import (
- "net/http"
- "strings"
-)
-
-// ContentCharset generates a handler that writes a 415 Unsupported Media Type response if none of the charsets match.
-// An empty charset will allow requests with no Content-Type header or no specified charset.
-func ContentCharset(charsets ...string) func(next http.Handler) http.Handler {
- for i, c := range charsets {
- charsets[i] = strings.ToLower(c)
- }
-
- return func(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if !contentEncoding(r.Header.Get("Content-Type"), charsets...) {
- w.WriteHeader(http.StatusUnsupportedMediaType)
- return
- }
-
- next.ServeHTTP(w, r)
- })
- }
-}
-
-// Check the content encoding against a list of acceptable values.
-func contentEncoding(ce string, charsets ...string) bool {
- _, ce = split(strings.ToLower(ce), ";")
- _, ce = split(ce, "charset=")
- ce, _ = split(ce, ";")
- for _, c := range charsets {
- if ce == c {
- return true
- }
- }
-
- return false
-}
-
-// Split a string in two parts, cleaning any whitespace.
-func split(str, sep string) (string, string) {
- var a, b string
- var parts = strings.SplitN(str, sep, 2)
- a = strings.TrimSpace(parts[0])
- if len(parts) == 2 {
- b = strings.TrimSpace(parts[1])
- }
-
- return a, b
-}
diff --git a/vendor/github.com/go-chi/chi/middleware/content_encoding.go b/vendor/github.com/go-chi/chi/middleware/content_encoding.go
deleted file mode 100644
index e0b9ccc08a..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/content_encoding.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package middleware
-
-import (
- "net/http"
- "strings"
-)
-
-// AllowContentEncoding enforces a whitelist of request Content-Encoding values and otherwise
-// responds with a 415 Unsupported Media Type status.
-func AllowContentEncoding(contentEncoding ...string) func(next http.Handler) http.Handler {
- allowedEncodings := make(map[string]struct{}, len(contentEncoding))
- for _, encoding := range contentEncoding {
- allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))] = struct{}{}
- }
- return func(next http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
- requestEncodings := r.Header["Content-Encoding"]
- // skip check for empty content body or no Content-Encoding
- if r.ContentLength == 0 {
- next.ServeHTTP(w, r)
- return
- }
- // All encodings in the request must be allowed
- for _, encoding := range requestEncodings {
- if _, ok := allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))]; !ok {
- w.WriteHeader(http.StatusUnsupportedMediaType)
- return
- }
- }
- next.ServeHTTP(w, r)
- }
- return http.HandlerFunc(fn)
- }
-}
diff --git a/vendor/github.com/go-chi/chi/middleware/content_type.go b/vendor/github.com/go-chi/chi/middleware/content_type.go
deleted file mode 100644
index ee4957874f..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/content_type.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package middleware
-
-import (
- "net/http"
- "strings"
-)
-
-// SetHeader is a convenience handler to set a response header key/value
-func SetHeader(key, value string) func(next http.Handler) http.Handler {
- return func(next http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set(key, value)
- next.ServeHTTP(w, r)
- }
- return http.HandlerFunc(fn)
- }
-}
-
-// AllowContentType enforces a whitelist of request Content-Types and otherwise responds
-// with a 415 Unsupported Media Type status.
-func AllowContentType(contentTypes ...string) func(next http.Handler) http.Handler {
- cT := []string{}
- for _, t := range contentTypes {
- cT = append(cT, strings.ToLower(t))
- }
-
- return func(next http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
- if r.ContentLength == 0 {
- // skip check for empty content body
- next.ServeHTTP(w, r)
- return
- }
-
- s := strings.ToLower(strings.TrimSpace(r.Header.Get("Content-Type")))
- if i := strings.Index(s, ";"); i > -1 {
- s = s[0:i]
- }
-
- for _, t := range cT {
- if t == s {
- next.ServeHTTP(w, r)
- return
- }
- }
-
- w.WriteHeader(http.StatusUnsupportedMediaType)
- }
- return http.HandlerFunc(fn)
- }
-}
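// Minimal usage sketch for the AllowContentType middleware removed above;
// the route and accepted type are illustrative.
package main

import (
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()
	// Non-empty request bodies that are not application/json get a 415 response.
	r.Use(middleware.AllowContentType("application/json"))
	r.Post("/orders", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusCreated)
	})
	http.ListenAndServe(":3333", r)
}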
diff --git a/vendor/github.com/go-chi/chi/middleware/get_head.go b/vendor/github.com/go-chi/chi/middleware/get_head.go
deleted file mode 100644
index 86068a96db..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/get_head.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package middleware
-
-import (
- "net/http"
-
- "github.com/go-chi/chi"
-)
-
-// GetHead automatically routes undefined HEAD requests to GET handlers.
-func GetHead(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.Method == "HEAD" {
- rctx := chi.RouteContext(r.Context())
- routePath := rctx.RoutePath
- if routePath == "" {
- if r.URL.RawPath != "" {
- routePath = r.URL.RawPath
- } else {
- routePath = r.URL.Path
- }
- }
-
- // Temporary routing context to look-ahead before routing the request
- tctx := chi.NewRouteContext()
-
-			// Attempt to find a HEAD handler for the routing path; if not found, traverse
-			// the router as though it's a GET route, but proceed with the request
-			// using the HEAD method.
- if !rctx.Routes.Match(tctx, "HEAD", routePath) {
- rctx.RouteMethod = "GET"
- rctx.RoutePath = routePath
- next.ServeHTTP(w, r)
- return
- }
- }
-
- next.ServeHTTP(w, r)
- })
-}
diff --git a/vendor/github.com/go-chi/chi/middleware/heartbeat.go b/vendor/github.com/go-chi/chi/middleware/heartbeat.go
deleted file mode 100644
index fe822fb536..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/heartbeat.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package middleware
-
-import (
- "net/http"
- "strings"
-)
-
-// Heartbeat endpoint middleware useful for setting up a path like
-// `/ping` to which load balancers or external uptime-testing services
-// can make a request before hitting any routes. It's also convenient
-// to place this above ACL middlewares.
-func Heartbeat(endpoint string) func(http.Handler) http.Handler {
- f := func(h http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
- if r.Method == "GET" && strings.EqualFold(r.URL.Path, endpoint) {
- w.Header().Set("Content-Type", "text/plain")
- w.WriteHeader(http.StatusOK)
- w.Write([]byte("."))
- return
- }
- h.ServeHTTP(w, r)
- }
- return http.HandlerFunc(fn)
- }
- return f
-}
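// Minimal usage sketch for the Heartbeat middleware removed above; "/ping"
// and the port are illustrative values.
package main

import (
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()
	r.Use(middleware.Heartbeat("/ping")) // GET /ping returns 200 with "."
	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("root."))
	})
	http.ListenAndServe(":3333", r)
}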
diff --git a/vendor/github.com/go-chi/chi/middleware/logger.go b/vendor/github.com/go-chi/chi/middleware/logger.go
deleted file mode 100644
index 158a6a3905..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/logger.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package middleware
-
-import (
- "bytes"
- "context"
- "log"
- "net/http"
- "os"
- "time"
-)
-
-var (
- // LogEntryCtxKey is the context.Context key to store the request log entry.
- LogEntryCtxKey = &contextKey{"LogEntry"}
-
- // DefaultLogger is called by the Logger middleware handler to log each request.
-	// It's made a package-level variable so that it can be reconfigured for custom
- // logging configurations.
- DefaultLogger = RequestLogger(&DefaultLogFormatter{Logger: log.New(os.Stdout, "", log.LstdFlags), NoColor: false})
-)
-
-// Logger is a middleware that logs the start and end of each request, along
-// with some useful data about what was requested, what the response status was,
-// and how long it took to return. When standard output is a TTY, Logger will
-// print in color, otherwise it will print in black and white. Logger prints a
-// request ID if one is provided.
-//
-// Alternatively, look at https://github.com/goware/httplog for a more in-depth
-// http logger with structured logging support.
-func Logger(next http.Handler) http.Handler {
- return DefaultLogger(next)
-}
-
-// RequestLogger returns a logger handler using a custom LogFormatter.
-func RequestLogger(f LogFormatter) func(next http.Handler) http.Handler {
- return func(next http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
- entry := f.NewLogEntry(r)
- ww := NewWrapResponseWriter(w, r.ProtoMajor)
-
- t1 := time.Now()
- defer func() {
- entry.Write(ww.Status(), ww.BytesWritten(), ww.Header(), time.Since(t1), nil)
- }()
-
- next.ServeHTTP(ww, WithLogEntry(r, entry))
- }
- return http.HandlerFunc(fn)
- }
-}
-
-// LogFormatter initiates the beginning of a new LogEntry per request.
-// See DefaultLogFormatter for an example implementation.
-type LogFormatter interface {
- NewLogEntry(r *http.Request) LogEntry
-}
-
-// LogEntry records the final log when a request completes.
-// See defaultLogEntry for an example implementation.
-type LogEntry interface {
- Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{})
- Panic(v interface{}, stack []byte)
-}
-
-// GetLogEntry returns the in-context LogEntry for a request.
-func GetLogEntry(r *http.Request) LogEntry {
- entry, _ := r.Context().Value(LogEntryCtxKey).(LogEntry)
- return entry
-}
-
-// WithLogEntry sets the in-context LogEntry for a request.
-func WithLogEntry(r *http.Request, entry LogEntry) *http.Request {
- r = r.WithContext(context.WithValue(r.Context(), LogEntryCtxKey, entry))
- return r
-}
-
-// LoggerInterface accepts printing to stdlib logger or compatible logger.
-type LoggerInterface interface {
- Print(v ...interface{})
-}
-
-// DefaultLogFormatter is a simple logger that implements a LogFormatter.
-type DefaultLogFormatter struct {
- Logger LoggerInterface
- NoColor bool
-}
-
-// NewLogEntry creates a new LogEntry for the request.
-func (l *DefaultLogFormatter) NewLogEntry(r *http.Request) LogEntry {
- useColor := !l.NoColor
- entry := &defaultLogEntry{
- DefaultLogFormatter: l,
- request: r,
- buf: &bytes.Buffer{},
- useColor: useColor,
- }
-
- reqID := GetReqID(r.Context())
- if reqID != "" {
- cW(entry.buf, useColor, nYellow, "[%s] ", reqID)
- }
- cW(entry.buf, useColor, nCyan, "\"")
- cW(entry.buf, useColor, bMagenta, "%s ", r.Method)
-
- scheme := "http"
- if r.TLS != nil {
- scheme = "https"
- }
- cW(entry.buf, useColor, nCyan, "%s://%s%s %s\" ", scheme, r.Host, r.RequestURI, r.Proto)
-
- entry.buf.WriteString("from ")
- entry.buf.WriteString(r.RemoteAddr)
- entry.buf.WriteString(" - ")
-
- return entry
-}
-
-type defaultLogEntry struct {
- *DefaultLogFormatter
- request *http.Request
- buf *bytes.Buffer
- useColor bool
-}
-
-func (l *defaultLogEntry) Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{}) {
- switch {
- case status < 200:
- cW(l.buf, l.useColor, bBlue, "%03d", status)
- case status < 300:
- cW(l.buf, l.useColor, bGreen, "%03d", status)
- case status < 400:
- cW(l.buf, l.useColor, bCyan, "%03d", status)
- case status < 500:
- cW(l.buf, l.useColor, bYellow, "%03d", status)
- default:
- cW(l.buf, l.useColor, bRed, "%03d", status)
- }
-
- cW(l.buf, l.useColor, bBlue, " %dB", bytes)
-
- l.buf.WriteString(" in ")
- if elapsed < 500*time.Millisecond {
- cW(l.buf, l.useColor, nGreen, "%s", elapsed)
- } else if elapsed < 5*time.Second {
- cW(l.buf, l.useColor, nYellow, "%s", elapsed)
- } else {
- cW(l.buf, l.useColor, nRed, "%s", elapsed)
- }
-
- l.Logger.Print(l.buf.String())
-}
-
-func (l *defaultLogEntry) Panic(v interface{}, stack []byte) {
- PrintPrettyStack(v)
-}
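// Minimal usage sketch for the request logger removed above, wiring a custom
// stdlib logger through RequestLogger/DefaultLogFormatter; the prefix and
// flags are illustrative.
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	logger := log.New(os.Stderr, "http: ", log.LstdFlags)
	r := chi.NewRouter()
	r.Use(middleware.RequestLogger(&middleware.DefaultLogFormatter{Logger: logger, NoColor: true}))
	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	http.ListenAndServe(":3333", r)
}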
diff --git a/vendor/github.com/go-chi/chi/middleware/middleware.go b/vendor/github.com/go-chi/chi/middleware/middleware.go
deleted file mode 100644
index cc371e00a8..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/middleware.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package middleware
-
-import "net/http"
-
-// New will create a new middleware handler from a http.Handler.
-func New(h http.Handler) func(next http.Handler) http.Handler {
- return func(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- h.ServeHTTP(w, r)
- })
- }
-}
-
-// contextKey is a value for use with context.WithValue. It's used as
-// a pointer so it fits in an interface{} without allocation. This technique
-// for defining context keys was copied from Go 1.7's new use of context in net/http.
-type contextKey struct {
- name string
-}
-
-func (k *contextKey) String() string {
- return "chi/middleware context value " + k.name
-}
diff --git a/vendor/github.com/go-chi/chi/middleware/nocache.go b/vendor/github.com/go-chi/chi/middleware/nocache.go
deleted file mode 100644
index 2412829e1b..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/nocache.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package middleware
-
-// Ported from Goji's middleware, source:
-// https://github.com/zenazn/goji/tree/master/web/middleware
-
-import (
- "net/http"
- "time"
-)
-
-// Unix epoch time
-var epoch = time.Unix(0, 0).Format(time.RFC1123)
-
-// Taken from https://github.com/mytrile/nocache
-var noCacheHeaders = map[string]string{
- "Expires": epoch,
- "Cache-Control": "no-cache, no-store, no-transform, must-revalidate, private, max-age=0",
- "Pragma": "no-cache",
- "X-Accel-Expires": "0",
-}
-
-var etagHeaders = []string{
- "ETag",
- "If-Modified-Since",
- "If-Match",
- "If-None-Match",
- "If-Range",
- "If-Unmodified-Since",
-}
-
-// NoCache is a simple piece of middleware that sets a number of HTTP headers to prevent
-// a router (or subrouter) from being cached by an upstream proxy and/or client.
-//
-// As per http://wiki.nginx.org/HttpProxyModule - NoCache sets:
-// Expires: Thu, 01 Jan 1970 00:00:00 UTC
-// Cache-Control: no-cache, private, max-age=0
-// X-Accel-Expires: 0
-// Pragma: no-cache (for HTTP/1.0 proxies/clients)
-func NoCache(h http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
-
- // Delete any ETag headers that may have been set
- for _, v := range etagHeaders {
- if r.Header.Get(v) != "" {
- r.Header.Del(v)
- }
- }
-
- // Set our NoCache headers
- for k, v := range noCacheHeaders {
- w.Header().Set(k, v)
- }
-
- h.ServeHTTP(w, r)
- }
-
- return http.HandlerFunc(fn)
-}
diff --git a/vendor/github.com/go-chi/chi/middleware/profiler.go b/vendor/github.com/go-chi/chi/middleware/profiler.go
deleted file mode 100644
index 1d44b8259a..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/profiler.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package middleware
-
-import (
- "expvar"
- "fmt"
- "net/http"
- "net/http/pprof"
-
- "github.com/go-chi/chi"
-)
-
-// Profiler is a convenient subrouter used for mounting net/http/pprof, e.g.:
-//
-// func MyService() http.Handler {
-// r := chi.NewRouter()
-// // ..middlewares
-// r.Mount("/debug", middleware.Profiler())
-// // ..routes
-// return r
-// }
-func Profiler() http.Handler {
- r := chi.NewRouter()
- r.Use(NoCache)
-
- r.Get("/", func(w http.ResponseWriter, r *http.Request) {
- http.Redirect(w, r, r.RequestURI+"/pprof/", 301)
- })
- r.HandleFunc("/pprof", func(w http.ResponseWriter, r *http.Request) {
- http.Redirect(w, r, r.RequestURI+"/", 301)
- })
-
- r.HandleFunc("/pprof/*", pprof.Index)
- r.HandleFunc("/pprof/cmdline", pprof.Cmdline)
- r.HandleFunc("/pprof/profile", pprof.Profile)
- r.HandleFunc("/pprof/symbol", pprof.Symbol)
- r.HandleFunc("/pprof/trace", pprof.Trace)
- r.HandleFunc("/vars", expVars)
-
- return r
-}
-
-// Replicated from expvar.go as not public.
-func expVars(w http.ResponseWriter, r *http.Request) {
- first := true
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, "{\n")
- expvar.Do(func(kv expvar.KeyValue) {
- if !first {
- fmt.Fprintf(w, ",\n")
- }
- first = false
- fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
- })
- fmt.Fprintf(w, "\n}\n")
-}
diff --git a/vendor/github.com/go-chi/chi/middleware/realip.go b/vendor/github.com/go-chi/chi/middleware/realip.go
deleted file mode 100644
index 72db6ca9f5..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/realip.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package middleware
-
-// Ported from Goji's middleware, source:
-// https://github.com/zenazn/goji/tree/master/web/middleware
-
-import (
- "net/http"
- "strings"
-)
-
-var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
-var xRealIP = http.CanonicalHeaderKey("X-Real-IP")
-
-// RealIP is a middleware that sets an http.Request's RemoteAddr to the result
-// of parsing either the X-Real-IP header or the X-Forwarded-For header (in that
-// order).
-//
-// This middleware should be inserted fairly early in the middleware stack to
-// ensure that subsequent layers (e.g., request loggers) which examine the
-// RemoteAddr will see the intended value.
-//
-// You should only use this middleware if you can trust the headers passed to
-// you (in particular, the two headers this middleware uses), for example
-// because you have placed a reverse proxy like HAProxy or nginx in front of
-// chi. If your reverse proxies are configured to pass along arbitrary header
-// values from the client, or if you use this middleware without a reverse
-// proxy, malicious clients will be able to make you very sad (or, depending on
-// how you're using RemoteAddr, vulnerable to an attack of some sort).
-func RealIP(h http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
- if rip := realIP(r); rip != "" {
- r.RemoteAddr = rip
- }
- h.ServeHTTP(w, r)
- }
-
- return http.HandlerFunc(fn)
-}
-
-func realIP(r *http.Request) string {
- var ip string
-
- if xrip := r.Header.Get(xRealIP); xrip != "" {
- ip = xrip
- } else if xff := r.Header.Get(xForwardedFor); xff != "" {
- i := strings.Index(xff, ", ")
- if i == -1 {
- i = len(xff)
- }
- ip = xff[:i]
- }
-
- return ip
-}
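// Minimal usage sketch for the RealIP middleware removed above; only suitable
// behind a reverse proxy you control, as the comment above warns.
package main

import (
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()
	r.Use(middleware.RealIP) // rewrite RemoteAddr first...
	r.Use(middleware.Logger) // ...so later middleware sees the real client IP
	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(r.RemoteAddr))
	})
	http.ListenAndServe(":3333", r)
}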
diff --git a/vendor/github.com/go-chi/chi/middleware/recoverer.go b/vendor/github.com/go-chi/chi/middleware/recoverer.go
deleted file mode 100644
index 785b18c52b..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/recoverer.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package middleware
-
-// The original work was derived from Goji's middleware, source:
-// https://github.com/zenazn/goji/tree/master/web/middleware
-
-import (
- "bytes"
- "errors"
- "fmt"
- "net/http"
- "os"
- "runtime/debug"
- "strings"
-)
-
-// Recoverer is a middleware that recovers from panics, logs the panic (and a
-// backtrace), and returns an HTTP 500 (Internal Server Error) status if
-// possible. Recoverer prints a request ID if one is provided.
-//
-// Alternatively, look at https://github.com/pressly/lg middleware pkgs.
-func Recoverer(next http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
- defer func() {
- if rvr := recover(); rvr != nil && rvr != http.ErrAbortHandler {
-
- logEntry := GetLogEntry(r)
- if logEntry != nil {
- logEntry.Panic(rvr, debug.Stack())
- } else {
- PrintPrettyStack(rvr)
- }
-
- w.WriteHeader(http.StatusInternalServerError)
- }
- }()
-
- next.ServeHTTP(w, r)
- }
-
- return http.HandlerFunc(fn)
-}
-
-func PrintPrettyStack(rvr interface{}) {
- debugStack := debug.Stack()
- s := prettyStack{}
- out, err := s.parse(debugStack, rvr)
- if err == nil {
- os.Stderr.Write(out)
- } else {
- // print stdlib output as a fallback
- os.Stderr.Write(debugStack)
- }
-}
-
-type prettyStack struct {
-}
-
-func (s prettyStack) parse(debugStack []byte, rvr interface{}) ([]byte, error) {
- var err error
- useColor := true
- buf := &bytes.Buffer{}
-
- cW(buf, false, bRed, "\n")
- cW(buf, useColor, bCyan, " panic: ")
- cW(buf, useColor, bBlue, "%v", rvr)
- cW(buf, false, bWhite, "\n \n")
-
- // process debug stack info
- stack := strings.Split(string(debugStack), "\n")
- lines := []string{}
-
- // locate panic line, as we may have nested panics
- for i := len(stack) - 1; i > 0; i-- {
- lines = append(lines, stack[i])
- if strings.HasPrefix(stack[i], "panic(0x") {
- lines = lines[0 : len(lines)-2] // remove boilerplate
- break
- }
- }
-
- // reverse
- for i := len(lines)/2 - 1; i >= 0; i-- {
- opp := len(lines) - 1 - i
- lines[i], lines[opp] = lines[opp], lines[i]
- }
-
- // decorate
- for i, line := range lines {
- lines[i], err = s.decorateLine(line, useColor, i)
- if err != nil {
- return nil, err
- }
- }
-
- for _, l := range lines {
- fmt.Fprintf(buf, "%s", l)
- }
- return buf.Bytes(), nil
-}
-
-func (s prettyStack) decorateLine(line string, useColor bool, num int) (string, error) {
- line = strings.TrimSpace(line)
- if strings.HasPrefix(line, "\t") || strings.Contains(line, ".go:") {
- return s.decorateSourceLine(line, useColor, num)
- } else if strings.HasSuffix(line, ")") {
- return s.decorateFuncCallLine(line, useColor, num)
- } else {
- if strings.HasPrefix(line, "\t") {
- return strings.Replace(line, "\t", " ", 1), nil
- } else {
- return fmt.Sprintf(" %s\n", line), nil
- }
- }
-}
-
-func (s prettyStack) decorateFuncCallLine(line string, useColor bool, num int) (string, error) {
- idx := strings.LastIndex(line, "(")
- if idx < 0 {
- return "", errors.New("not a func call line")
- }
-
- buf := &bytes.Buffer{}
- pkg := line[0:idx]
- // addr := line[idx:]
- method := ""
-
- idx = strings.LastIndex(pkg, string(os.PathSeparator))
- if idx < 0 {
- idx = strings.Index(pkg, ".")
- method = pkg[idx:]
- pkg = pkg[0:idx]
- } else {
- method = pkg[idx+1:]
- pkg = pkg[0 : idx+1]
- idx = strings.Index(method, ".")
- pkg += method[0:idx]
- method = method[idx:]
- }
- pkgColor := nYellow
- methodColor := bGreen
-
- if num == 0 {
- cW(buf, useColor, bRed, " -> ")
- pkgColor = bMagenta
- methodColor = bRed
- } else {
- cW(buf, useColor, bWhite, " ")
- }
- cW(buf, useColor, pkgColor, "%s", pkg)
- cW(buf, useColor, methodColor, "%s\n", method)
- // cW(buf, useColor, nBlack, "%s", addr)
- return buf.String(), nil
-}
-
-func (s prettyStack) decorateSourceLine(line string, useColor bool, num int) (string, error) {
- idx := strings.LastIndex(line, ".go:")
- if idx < 0 {
- return "", errors.New("not a source line")
- }
-
- buf := &bytes.Buffer{}
- path := line[0 : idx+3]
- lineno := line[idx+3:]
-
- idx = strings.LastIndex(path, string(os.PathSeparator))
- dir := path[0 : idx+1]
- file := path[idx+1:]
-
- idx = strings.Index(lineno, " ")
- if idx > 0 {
- lineno = lineno[0:idx]
- }
- fileColor := bCyan
- lineColor := bGreen
-
- if num == 1 {
- cW(buf, useColor, bRed, " -> ")
- fileColor = bRed
- lineColor = bMagenta
- } else {
- cW(buf, false, bWhite, " ")
- }
- cW(buf, useColor, bWhite, "%s", dir)
- cW(buf, useColor, fileColor, "%s", file)
- cW(buf, useColor, lineColor, "%s", lineno)
- if num == 1 {
- cW(buf, false, bWhite, "\n")
- }
- cW(buf, false, bWhite, "\n")
-
- return buf.String(), nil
-}
diff --git a/vendor/github.com/go-chi/chi/middleware/request_id.go b/vendor/github.com/go-chi/chi/middleware/request_id.go
deleted file mode 100644
index 4903ecc214..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/request_id.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package middleware
-
-// Ported from Goji's middleware, source:
-// https://github.com/zenazn/goji/tree/master/web/middleware
-
-import (
- "context"
- "crypto/rand"
- "encoding/base64"
- "fmt"
- "net/http"
- "os"
- "strings"
- "sync/atomic"
-)
-
-// Key to use when setting the request ID.
-type ctxKeyRequestID int
-
-// RequestIDKey is the key that holds the unique request ID in a request context.
-const RequestIDKey ctxKeyRequestID = 0
-
-// RequestIDHeader is the name of the HTTP Header which contains the request id.
-// Exported so that it can be changed by developers
-var RequestIDHeader = "X-Request-Id"
-
-var prefix string
-var reqid uint64
-
-// A quick note on the statistics here: we're trying to calculate the chance that
-// two randomly generated base62 prefixes will collide. We use the formula from
-// http://en.wikipedia.org/wiki/Birthday_problem
-//
-// P[m, n] \approx 1 - e^{-m^2/2n}
-//
-// We ballpark an upper bound for $m$ by imagining (for whatever reason) a server
-// that restarts every second over 10 years, for $m = 86400 * 365 * 10 = 315360000$
-//
-// For a $k$ character base-62 identifier, we have $n(k) = 62^k$
-//
-// Plugging this in, we find $P[m, n(10)] \approx 5.75%$, which is good enough for
-// our purposes, and is surely more than anyone would ever need in practice -- a
-// process that is rebooted a handful of times a day for a hundred years has less
-// than a millionth of a percent chance of generating two colliding IDs.
-
-func init() {
- hostname, err := os.Hostname()
- if hostname == "" || err != nil {
- hostname = "localhost"
- }
- var buf [12]byte
- var b64 string
- for len(b64) < 10 {
- rand.Read(buf[:])
- b64 = base64.StdEncoding.EncodeToString(buf[:])
- b64 = strings.NewReplacer("+", "", "/", "").Replace(b64)
- }
-
- prefix = fmt.Sprintf("%s/%s", hostname, b64[0:10])
-}
-
-// RequestID is a middleware that injects a request ID into the context of each
-// request. A request ID is a string of the form "host.example.com/random-0001",
-// where "random" is a base62 random string that uniquely identifies this go
-// process, and where the last number is an atomically incremented request
-// counter.
-func RequestID(next http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
- ctx := r.Context()
- requestID := r.Header.Get(RequestIDHeader)
- if requestID == "" {
- myid := atomic.AddUint64(&reqid, 1)
- requestID = fmt.Sprintf("%s-%06d", prefix, myid)
- }
- ctx = context.WithValue(ctx, RequestIDKey, requestID)
- next.ServeHTTP(w, r.WithContext(ctx))
- }
- return http.HandlerFunc(fn)
-}
-
-// GetReqID returns a request ID from the given context if one is present.
-// Returns the empty string if a request ID cannot be found.
-func GetReqID(ctx context.Context) string {
- if ctx == nil {
- return ""
- }
- if reqID, ok := ctx.Value(RequestIDKey).(string); ok {
- return reqID
- }
- return ""
-}
-
-// NextRequestID generates the next request ID in the sequence.
-func NextRequestID() uint64 {
- return atomic.AddUint64(&reqid, 1)
-}
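// Minimal usage sketch for the RequestID middleware removed above, reading
// the generated ID back out of the request context.
package main

import (
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()
	r.Use(middleware.RequestID)
	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("request id: " + middleware.GetReqID(r.Context())))
	})
	http.ListenAndServe(":3333", r)
}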
diff --git a/vendor/github.com/go-chi/chi/middleware/route_headers.go b/vendor/github.com/go-chi/chi/middleware/route_headers.go
deleted file mode 100644
index 7ee30c8773..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/route_headers.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package middleware
-
-import (
- "net/http"
- "strings"
-)
-
-// RouteHeaders is a neat little header-based router that allows you to direct
-// the flow of a request through a middleware stack based on a request header.
-//
-// For example, let's say you'd like to set up multiple routers depending on the
-// request Host header; you could then do something like so:
-//
-// r := chi.NewRouter()
-// rSubdomain := chi.NewRouter()
-//
-// r.Use(middleware.RouteHeaders().
-// Route("Host", "example.com", middleware.New(r)).
-// Route("Host", "*.example.com", middleware.New(rSubdomain)).
-// Handler)
-//
-// r.Get("/", h)
-// rSubdomain.Get("/", h2)
-//
-//
-// Another example: imagine you want to set up multiple CORS handlers, where for
-// your origin servers you allow authorized requests, but for third-party public
-// requests, authorization is disabled.
-//
-// r := chi.NewRouter()
-//
-// r.Use(middleware.RouteHeaders().
-// Route("Origin", "https://app.skyweaver.net", cors.Handler(cors.Options{
-// AllowedOrigins: []string{"https://api.skyweaver.net"},
-// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
-// AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
-// AllowCredentials: true, // <----------<<< allow credentials
-// })).
-// Route("Origin", "*", cors.Handler(cors.Options{
-// AllowedOrigins: []string{"*"},
-// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
-// AllowedHeaders: []string{"Accept", "Content-Type"},
-// AllowCredentials: false, // <----------<<< do not allow credentials
-// })).
-// Handler)
-//
-func RouteHeaders() HeaderRouter {
- return HeaderRouter{}
-}
-
-type HeaderRouter map[string][]HeaderRoute
-
-func (hr HeaderRouter) Route(header string, match string, middlewareHandler func(next http.Handler) http.Handler) HeaderRouter {
- header = strings.ToLower(header)
- k := hr[header]
- if k == nil {
- hr[header] = []HeaderRoute{}
- }
- hr[header] = append(hr[header], HeaderRoute{MatchOne: NewPattern(match), Middleware: middlewareHandler})
- return hr
-}
-
-func (hr HeaderRouter) RouteAny(header string, match []string, middlewareHandler func(next http.Handler) http.Handler) HeaderRouter {
- header = strings.ToLower(header)
- k := hr[header]
- if k == nil {
- hr[header] = []HeaderRoute{}
- }
- patterns := []Pattern{}
- for _, m := range match {
- patterns = append(patterns, NewPattern(m))
- }
- hr[header] = append(hr[header], HeaderRoute{MatchAny: patterns, Middleware: middlewareHandler})
- return hr
-}
-
-func (hr HeaderRouter) RouteDefault(handler func(next http.Handler) http.Handler) HeaderRouter {
- hr["*"] = []HeaderRoute{{Middleware: handler}}
- return hr
-}
-
-func (hr HeaderRouter) Handler(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if len(hr) == 0 {
-			next.ServeHTTP(w, r) // no routes set; return to avoid serving next twice below
-			return
-		}
-
- // find first matching header route, and continue
- for header, matchers := range hr {
- headerValue := r.Header.Get(header)
- if headerValue == "" {
- continue
- }
- headerValue = strings.ToLower(headerValue)
- for _, matcher := range matchers {
- if matcher.IsMatch(headerValue) {
- matcher.Middleware(next).ServeHTTP(w, r)
- return
- }
- }
- }
-
- // if no match, check for "*" default route
- matcher, ok := hr["*"]
- if !ok || matcher[0].Middleware == nil {
- next.ServeHTTP(w, r)
- return
- }
- matcher[0].Middleware(next).ServeHTTP(w, r)
- })
-}
-
-type HeaderRoute struct {
- MatchAny []Pattern
- MatchOne Pattern
- Middleware func(next http.Handler) http.Handler
-}
-
-func (r HeaderRoute) IsMatch(value string) bool {
- if len(r.MatchAny) > 0 {
- for _, m := range r.MatchAny {
- if m.Match(value) {
- return true
- }
- }
- } else if r.MatchOne.Match(value) {
- return true
- }
- return false
-}
-
-type Pattern struct {
- prefix string
- suffix string
- wildcard bool
-}
-
-func NewPattern(value string) Pattern {
- p := Pattern{}
- if i := strings.IndexByte(value, '*'); i >= 0 {
- p.wildcard = true
- p.prefix = value[0:i]
- p.suffix = value[i+1:]
- } else {
- p.prefix = value
- }
- return p
-}
-
-func (p Pattern) Match(v string) bool {
- if !p.wildcard {
- if p.prefix == v {
- return true
- } else {
- return false
- }
- }
- return len(v) >= len(p.prefix+p.suffix) && strings.HasPrefix(v, p.prefix) && strings.HasSuffix(v, p.suffix)
-}
diff --git a/vendor/github.com/go-chi/chi/middleware/strip.go b/vendor/github.com/go-chi/chi/middleware/strip.go
deleted file mode 100644
index 2b8b1842ab..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/strip.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package middleware
-
-import (
- "fmt"
- "net/http"
-
- "github.com/go-chi/chi"
-)
-
-// StripSlashes is a middleware that will match request paths with a trailing
-// slash, strip it from the path, and continue routing through the mux; if a route
-// matches, it will serve the handler.
-func StripSlashes(next http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
- var path string
- rctx := chi.RouteContext(r.Context())
- if rctx.RoutePath != "" {
- path = rctx.RoutePath
- } else {
- path = r.URL.Path
- }
- if len(path) > 1 && path[len(path)-1] == '/' {
- rctx.RoutePath = path[:len(path)-1]
- }
- next.ServeHTTP(w, r)
- }
- return http.HandlerFunc(fn)
-}
-
-// RedirectSlashes is a middleware that will match request paths with a trailing
-// slash and redirect to the same path, less the trailing slash.
-//
-// NOTE: RedirectSlashes middleware is *incompatible* with http.FileServer,
-// see https://github.com/go-chi/chi/issues/343
-func RedirectSlashes(next http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
- var path string
- rctx := chi.RouteContext(r.Context())
- if rctx.RoutePath != "" {
- path = rctx.RoutePath
- } else {
- path = r.URL.Path
- }
- if len(path) > 1 && path[len(path)-1] == '/' {
- if r.URL.RawQuery != "" {
- path = fmt.Sprintf("%s?%s", path[:len(path)-1], r.URL.RawQuery)
- } else {
- path = path[:len(path)-1]
- }
- http.Redirect(w, r, path, 301)
- return
- }
- next.ServeHTTP(w, r)
- }
- return http.HandlerFunc(fn)
-}
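// Minimal usage sketch for the StripSlashes middleware removed above; with it
// mounted, "/users/" is routed as "/users".
package main

import (
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()
	r.Use(middleware.StripSlashes)
	r.Get("/users", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("users"))
	})
	http.ListenAndServe(":3333", r)
}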
diff --git a/vendor/github.com/go-chi/chi/middleware/terminal.go b/vendor/github.com/go-chi/chi/middleware/terminal.go
deleted file mode 100644
index 5ead7b9243..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/terminal.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package middleware
-
-// Ported from Goji's middleware, source:
-// https://github.com/zenazn/goji/tree/master/web/middleware
-
-import (
- "fmt"
- "io"
- "os"
-)
-
-var (
- // Normal colors
- nBlack = []byte{'\033', '[', '3', '0', 'm'}
- nRed = []byte{'\033', '[', '3', '1', 'm'}
- nGreen = []byte{'\033', '[', '3', '2', 'm'}
- nYellow = []byte{'\033', '[', '3', '3', 'm'}
- nBlue = []byte{'\033', '[', '3', '4', 'm'}
- nMagenta = []byte{'\033', '[', '3', '5', 'm'}
- nCyan = []byte{'\033', '[', '3', '6', 'm'}
- nWhite = []byte{'\033', '[', '3', '7', 'm'}
- // Bright colors
- bBlack = []byte{'\033', '[', '3', '0', ';', '1', 'm'}
- bRed = []byte{'\033', '[', '3', '1', ';', '1', 'm'}
- bGreen = []byte{'\033', '[', '3', '2', ';', '1', 'm'}
- bYellow = []byte{'\033', '[', '3', '3', ';', '1', 'm'}
- bBlue = []byte{'\033', '[', '3', '4', ';', '1', 'm'}
- bMagenta = []byte{'\033', '[', '3', '5', ';', '1', 'm'}
- bCyan = []byte{'\033', '[', '3', '6', ';', '1', 'm'}
- bWhite = []byte{'\033', '[', '3', '7', ';', '1', 'm'}
-
- reset = []byte{'\033', '[', '0', 'm'}
-)
-
-var IsTTY bool
-
-func init() {
- // This is sort of cheating: if stdout is a character device, we assume
- // that means it's a TTY. Unfortunately, there are many non-TTY
- // character devices, but fortunately stdout is rarely set to any of
- // them.
- //
- // We could solve this properly by pulling in a dependency on
- // code.google.com/p/go.crypto/ssh/terminal, for instance, but as a
- // heuristic for whether to print in color or in black-and-white, I'd
- // really rather not.
- fi, err := os.Stdout.Stat()
- if err == nil {
- m := os.ModeDevice | os.ModeCharDevice
- IsTTY = fi.Mode()&m == m
- }
-}
-
-// colorWrite
-func cW(w io.Writer, useColor bool, color []byte, s string, args ...interface{}) {
- if IsTTY && useColor {
- w.Write(color)
- }
- fmt.Fprintf(w, s, args...)
- if IsTTY && useColor {
- w.Write(reset)
- }
-}
diff --git a/vendor/github.com/go-chi/chi/middleware/throttle.go b/vendor/github.com/go-chi/chi/middleware/throttle.go
deleted file mode 100644
index fdedd3c127..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/throttle.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package middleware
-
-import (
- "net/http"
- "strconv"
- "time"
-)
-
-const (
- errCapacityExceeded = "Server capacity exceeded."
- errTimedOut = "Timed out while waiting for a pending request to complete."
- errContextCanceled = "Context was canceled."
-)
-
-var (
- defaultBacklogTimeout = time.Second * 60
-)
-
-// ThrottleOpts represents a set of throttling options.
-type ThrottleOpts struct {
- Limit int
- BacklogLimit int
- BacklogTimeout time.Duration
- RetryAfterFn func(ctxDone bool) time.Duration
-}
-
-// Throttle is a middleware that limits the number of currently processed requests
-// at a time across all users. Note: Throttle is not a per-user rate limiter;
-// instead it just puts a ceiling on the number of currently in-flight requests
-// being processed from the point where the Throttle middleware is mounted.
-func Throttle(limit int) func(http.Handler) http.Handler {
- return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogTimeout: defaultBacklogTimeout})
-}
-
-// ThrottleBacklog is a middleware that limits the number of currently processed
-// requests at a time and provides a backlog for holding a finite number of
-// pending requests.
-func ThrottleBacklog(limit int, backlogLimit int, backlogTimeout time.Duration) func(http.Handler) http.Handler {
- return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogLimit: backlogLimit, BacklogTimeout: backlogTimeout})
-}
-
-// ThrottleWithOpts is a middleware that limits the number of currently processed requests using the passed ThrottleOpts.
-func ThrottleWithOpts(opts ThrottleOpts) func(http.Handler) http.Handler {
- if opts.Limit < 1 {
- panic("chi/middleware: Throttle expects limit > 0")
- }
-
- if opts.BacklogLimit < 0 {
-		panic("chi/middleware: Throttle expects backlogLimit to be non-negative")
- }
-
- t := throttler{
- tokens: make(chan token, opts.Limit),
- backlogTokens: make(chan token, opts.Limit+opts.BacklogLimit),
- backlogTimeout: opts.BacklogTimeout,
- retryAfterFn: opts.RetryAfterFn,
- }
-
- // Filling tokens.
- for i := 0; i < opts.Limit+opts.BacklogLimit; i++ {
- if i < opts.Limit {
- t.tokens <- token{}
- }
- t.backlogTokens <- token{}
- }
-
- return func(next http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
- ctx := r.Context()
-
- select {
-
- case <-ctx.Done():
- t.setRetryAfterHeaderIfNeeded(w, true)
- http.Error(w, errContextCanceled, http.StatusServiceUnavailable)
- return
-
- case btok := <-t.backlogTokens:
- timer := time.NewTimer(t.backlogTimeout)
-
- defer func() {
- t.backlogTokens <- btok
- }()
-
- select {
- case <-timer.C:
- t.setRetryAfterHeaderIfNeeded(w, false)
- http.Error(w, errTimedOut, http.StatusServiceUnavailable)
- return
- case <-ctx.Done():
- timer.Stop()
- t.setRetryAfterHeaderIfNeeded(w, true)
- http.Error(w, errContextCanceled, http.StatusServiceUnavailable)
- return
- case tok := <-t.tokens:
- defer func() {
- timer.Stop()
- t.tokens <- tok
- }()
- next.ServeHTTP(w, r)
- }
- return
-
- default:
- t.setRetryAfterHeaderIfNeeded(w, false)
- http.Error(w, errCapacityExceeded, http.StatusServiceUnavailable)
- return
- }
- }
-
- return http.HandlerFunc(fn)
- }
-}
-
-// token represents a request that is being processed.
-type token struct{}
-
-// throttler limits number of currently processed requests at a time.
-type throttler struct {
- tokens chan token
- backlogTokens chan token
- backlogTimeout time.Duration
- retryAfterFn func(ctxDone bool) time.Duration
-}
-
-// setRetryAfterHeaderIfNeeded sets Retry-After HTTP header if corresponding retryAfterFn option of throttler is initialized.
-func (t throttler) setRetryAfterHeaderIfNeeded(w http.ResponseWriter, ctxDone bool) {
- if t.retryAfterFn == nil {
- return
- }
- w.Header().Set("Retry-After", strconv.Itoa(int(t.retryAfterFn(ctxDone).Seconds())))
-}
diff --git a/vendor/github.com/go-chi/chi/middleware/timeout.go b/vendor/github.com/go-chi/chi/middleware/timeout.go
deleted file mode 100644
index 8e373536cf..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/timeout.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package middleware
-
-import (
- "context"
- "net/http"
- "time"
-)
-
-// Timeout is a middleware that cancels ctx after a given timeout and returns
-// a 504 Gateway Timeout error to the client.
-//
-// It's required that you select on the ctx.Done() channel to check whether
-// the context has reached its deadline and return early; otherwise the timeout
-// signal will simply be ignored.
-//
-// ie. a route/handler may look like:
-//
-// r.Get("/long", func(w http.ResponseWriter, r *http.Request) {
-// ctx := r.Context()
-// processTime := time.Duration(rand.Intn(4)+1) * time.Second
-//
-// select {
-// case <-ctx.Done():
-// return
-//
-// case <-time.After(processTime):
-// // The above channel simulates some hard work.
-// }
-//
-// w.Write([]byte("done"))
-// })
-//
-func Timeout(timeout time.Duration) func(next http.Handler) http.Handler {
- return func(next http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
- ctx, cancel := context.WithTimeout(r.Context(), timeout)
- defer func() {
- cancel()
- if ctx.Err() == context.DeadlineExceeded {
- w.WriteHeader(http.StatusGatewayTimeout)
- }
- }()
-
- r = r.WithContext(ctx)
- next.ServeHTTP(w, r)
- }
- return http.HandlerFunc(fn)
- }
-}
diff --git a/vendor/github.com/go-chi/chi/middleware/url_format.go b/vendor/github.com/go-chi/chi/middleware/url_format.go
deleted file mode 100644
index 5749e4f32b..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/url_format.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package middleware
-
-import (
- "context"
- "net/http"
- "strings"
-
- "github.com/go-chi/chi"
-)
-
-var (
- // URLFormatCtxKey is the context.Context key to store the URL format data
- // for a request.
- URLFormatCtxKey = &contextKey{"URLFormat"}
-)
-
-// URLFormat is a middleware that parses the url extension from a request path and stores it
-// on the context as a string under the key `middleware.URLFormatCtxKey`. The middleware will
-// trim the suffix from the routing path and continue routing.
-//
-// Routers should not include a url parameter for the suffix when using this middleware.
-//
-// Sample usage for URL paths: `/articles/1`, `/articles/1.json` and `/articles/1.xml`
-//
-// func routes() http.Handler {
-// r := chi.NewRouter()
-// r.Use(middleware.URLFormat)
-//
-// r.Get("/articles/{id}", ListArticles)
-//
-// return r
-// }
-//
-// func ListArticles(w http.ResponseWriter, r *http.Request) {
-// urlFormat, _ := r.Context().Value(middleware.URLFormatCtxKey).(string)
-//
-// switch urlFormat {
-// case "json":
-// render.JSON(w, r, articles)
-//   case "xml":
-// render.XML(w, r, articles)
-// default:
-// render.JSON(w, r, articles)
-// }
-// }
-//
-func URLFormat(next http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
- ctx := r.Context()
-
- var format string
- path := r.URL.Path
-
- if strings.Index(path, ".") > 0 {
- base := strings.LastIndex(path, "/")
- idx := strings.Index(path[base:], ".")
-
- if idx > 0 {
- idx += base
- format = path[idx+1:]
-
- rctx := chi.RouteContext(r.Context())
- rctx.RoutePath = path[:idx]
- }
- }
-
- r = r.WithContext(context.WithValue(ctx, URLFormatCtxKey, format))
-
- next.ServeHTTP(w, r)
- }
- return http.HandlerFunc(fn)
-}
diff --git a/vendor/github.com/go-chi/chi/middleware/value.go b/vendor/github.com/go-chi/chi/middleware/value.go
deleted file mode 100644
index fbbd0393fb..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/value.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package middleware
-
-import (
- "context"
- "net/http"
-)
-
-// WithValue is a middleware that sets a given key/value in a context chain.
-func WithValue(key interface{}, val interface{}) func(next http.Handler) http.Handler {
- return func(next http.Handler) http.Handler {
- fn := func(w http.ResponseWriter, r *http.Request) {
- r = r.WithContext(context.WithValue(r.Context(), key, val))
- next.ServeHTTP(w, r)
- }
- return http.HandlerFunc(fn)
- }
-}
diff --git a/vendor/github.com/go-chi/chi/middleware/wrap_writer.go b/vendor/github.com/go-chi/chi/middleware/wrap_writer.go
deleted file mode 100644
index 382a523e48..0000000000
--- a/vendor/github.com/go-chi/chi/middleware/wrap_writer.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package middleware
-
-// The original work was derived from Goji's middleware, source:
-// https://github.com/zenazn/goji/tree/master/web/middleware
-
-import (
- "bufio"
- "io"
- "net"
- "net/http"
-)
-
-// NewWrapResponseWriter wraps an http.ResponseWriter, returning a proxy that allows you to
-// hook into various parts of the response process.
-func NewWrapResponseWriter(w http.ResponseWriter, protoMajor int) WrapResponseWriter {
- _, fl := w.(http.Flusher)
-
- bw := basicWriter{ResponseWriter: w}
-
- if protoMajor == 2 {
- _, ps := w.(http.Pusher)
- if fl && ps {
- return &http2FancyWriter{bw}
- }
- } else {
- _, hj := w.(http.Hijacker)
- _, rf := w.(io.ReaderFrom)
- if fl && hj && rf {
- return &httpFancyWriter{bw}
- }
- }
- if fl {
- return &flushWriter{bw}
- }
-
- return &bw
-}
-
-// WrapResponseWriter is a proxy around an http.ResponseWriter that allows you to hook
-// into various parts of the response process.
-type WrapResponseWriter interface {
- http.ResponseWriter
- // Status returns the HTTP status of the request, or 0 if one has not
- // yet been sent.
- Status() int
- // BytesWritten returns the total number of bytes sent to the client.
- BytesWritten() int
- // Tee causes the response body to be written to the given io.Writer in
- // addition to proxying the writes through. Only one io.Writer can be
- // tee'd to at once: setting a second one will overwrite the first.
- // Writes will be sent to the proxy before being written to this
- // io.Writer. It is illegal for the tee'd writer to be modified
- // concurrently with writes.
- Tee(io.Writer)
- // Unwrap returns the original proxied target.
- Unwrap() http.ResponseWriter
-}
-
-// basicWriter wraps a http.ResponseWriter that implements the minimal
-// http.ResponseWriter interface.
-type basicWriter struct {
- http.ResponseWriter
- wroteHeader bool
- code int
- bytes int
- tee io.Writer
-}
-
-func (b *basicWriter) WriteHeader(code int) {
- if !b.wroteHeader {
- b.code = code
- b.wroteHeader = true
- b.ResponseWriter.WriteHeader(code)
- }
-}
-
-func (b *basicWriter) Write(buf []byte) (int, error) {
- b.maybeWriteHeader()
- n, err := b.ResponseWriter.Write(buf)
- if b.tee != nil {
- _, err2 := b.tee.Write(buf[:n])
- // Prefer errors generated by the proxied writer.
- if err == nil {
- err = err2
- }
- }
- b.bytes += n
- return n, err
-}
-
-func (b *basicWriter) maybeWriteHeader() {
- if !b.wroteHeader {
- b.WriteHeader(http.StatusOK)
- }
-}
-
-func (b *basicWriter) Status() int {
- return b.code
-}
-
-func (b *basicWriter) BytesWritten() int {
- return b.bytes
-}
-
-func (b *basicWriter) Tee(w io.Writer) {
- b.tee = w
-}
-
-func (b *basicWriter) Unwrap() http.ResponseWriter {
- return b.ResponseWriter
-}
-
-type flushWriter struct {
- basicWriter
-}
-
-func (f *flushWriter) Flush() {
- f.wroteHeader = true
- fl := f.basicWriter.ResponseWriter.(http.Flusher)
- fl.Flush()
-}
-
-var _ http.Flusher = &flushWriter{}
-
-// httpFancyWriter is an HTTP writer that additionally satisfies
-// http.Flusher, http.Hijacker, and io.ReaderFrom. It exists for the common case
-// of wrapping the http.ResponseWriter that package http gives you, in order to
-// make the wrapper support the full method set of the underlying ResponseWriter.
-type httpFancyWriter struct {
- basicWriter
-}
-
-func (f *httpFancyWriter) Flush() {
- f.wroteHeader = true
- fl := f.basicWriter.ResponseWriter.(http.Flusher)
- fl.Flush()
-}
-
-func (f *httpFancyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- hj := f.basicWriter.ResponseWriter.(http.Hijacker)
- return hj.Hijack()
-}
-
-func (f *http2FancyWriter) Push(target string, opts *http.PushOptions) error {
- return f.basicWriter.ResponseWriter.(http.Pusher).Push(target, opts)
-}
-
-func (f *httpFancyWriter) ReadFrom(r io.Reader) (int64, error) {
- if f.basicWriter.tee != nil {
- n, err := io.Copy(&f.basicWriter, r)
- f.basicWriter.bytes += int(n)
- return n, err
- }
- rf := f.basicWriter.ResponseWriter.(io.ReaderFrom)
- f.basicWriter.maybeWriteHeader()
- n, err := rf.ReadFrom(r)
- f.basicWriter.bytes += int(n)
- return n, err
-}
-
-var _ http.Flusher = &httpFancyWriter{}
-var _ http.Hijacker = &httpFancyWriter{}
-var _ http.Pusher = &http2FancyWriter{}
-var _ io.ReaderFrom = &httpFancyWriter{}
-
-// http2FancyWriter is an HTTP/2 writer that additionally satisfies
-// http.Flusher and io.ReaderFrom. It exists for the common case
-// of wrapping the http.ResponseWriter that package http gives you, in order to
-// make the wrapper support the full method set of the underlying ResponseWriter.
-type http2FancyWriter struct {
- basicWriter
-}
-
-func (f *http2FancyWriter) Flush() {
- f.wroteHeader = true
- fl := f.basicWriter.ResponseWriter.(http.Flusher)
- fl.Flush()
-}
-
-var _ http.Flusher = &http2FancyWriter{}
diff --git a/vendor/github.com/go-chi/chi/mux.go b/vendor/github.com/go-chi/chi/mux.go
deleted file mode 100644
index 52950e97b5..0000000000
--- a/vendor/github.com/go-chi/chi/mux.go
+++ /dev/null
@@ -1,466 +0,0 @@
-package chi
-
-import (
- "context"
- "fmt"
- "net/http"
- "strings"
- "sync"
-)
-
-var _ Router = &Mux{}
-
-// Mux is a simple HTTP route multiplexer that parses a request path,
-// records any URL params, and executes an end handler. It implements
-// the http.Handler interface and is friendly with the standard library.
-//
-// Mux is designed to be fast, minimal and offer a powerful API for building
-// modular and composable HTTP services with a large set of handlers. It's
-// particularly useful for writing large REST API services that break a handler
-// into many smaller parts composed of middlewares and end handlers.
-type Mux struct {
- // The radix trie router
- tree *node
-
- // The middleware stack
- middlewares []func(http.Handler) http.Handler
-
- // Controls the behaviour of middleware chain generation when a mux
- // is registered as an inline group inside another mux.
- inline bool
- parent *Mux
-
- // The computed mux handler made of the chained middleware stack and
- // the tree router
- handler http.Handler
-
- // Routing context pool
- pool *sync.Pool
-
- // Custom route not found handler
- notFoundHandler http.HandlerFunc
-
- // Custom method not allowed handler
- methodNotAllowedHandler http.HandlerFunc
-}
-
-// NewMux returns a newly initialized Mux object that implements the Router
-// interface.
-func NewMux() *Mux {
- mux := &Mux{tree: &node{}, pool: &sync.Pool{}}
- mux.pool.New = func() interface{} {
- return NewRouteContext()
- }
- return mux
-}
-
-// ServeHTTP is the single method of the http.Handler interface that makes
-// Mux interoperable with the standard library. It uses a sync.Pool to get and
-// reuse routing contexts for each request.
-func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	// Ensure the mux has some routes defined
- if mx.handler == nil {
- mx.NotFoundHandler().ServeHTTP(w, r)
- return
- }
-
- // Check if a routing context already exists from a parent router.
- rctx, _ := r.Context().Value(RouteCtxKey).(*Context)
- if rctx != nil {
- mx.handler.ServeHTTP(w, r)
- return
- }
-
- // Fetch a RouteContext object from the sync pool, and call the computed
- // mx.handler that is comprised of mx.middlewares + mx.routeHTTP.
- // Once the request is finished, reset the routing context and put it back
- // into the pool for reuse from another request.
- rctx = mx.pool.Get().(*Context)
- rctx.Reset()
- rctx.Routes = mx
-
- // NOTE: r.WithContext() causes 2 allocations and context.WithValue() causes 1 allocation
- r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx))
-
- // Serve the request and once its done, put the request context back in the sync pool
- mx.handler.ServeHTTP(w, r)
- mx.pool.Put(rctx)
-}
-
-// Use appends a middleware handler to the Mux middleware stack.
-//
-// The middleware stack for any Mux will execute before searching for a matching
-// route to a specific handler, which provides opportunity to respond early,
-// change the course of the request execution, or set request-scoped values for
-// the next http.Handler.
-func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) {
- if mx.handler != nil {
- panic("chi: all middlewares must be defined before routes on a mux")
- }
- mx.middlewares = append(mx.middlewares, middlewares...)
-}
-
-// Handle adds the route `pattern` that matches any http method to
-// execute the `handler` http.Handler.
-func (mx *Mux) Handle(pattern string, handler http.Handler) {
- mx.handle(mALL, pattern, handler)
-}
-
-// HandleFunc adds the route `pattern` that matches any http method to
-// execute the `handlerFn` http.HandlerFunc.
-func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) {
- mx.handle(mALL, pattern, handlerFn)
-}
-
-// Method adds the route `pattern` that matches `method` http method to
-// execute the `handler` http.Handler.
-func (mx *Mux) Method(method, pattern string, handler http.Handler) {
- m, ok := methodMap[strings.ToUpper(method)]
- if !ok {
- panic(fmt.Sprintf("chi: '%s' http method is not supported.", method))
- }
- mx.handle(m, pattern, handler)
-}
-
-// MethodFunc adds the route `pattern` that matches `method` http method to
-// execute the `handlerFn` http.HandlerFunc.
-func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) {
- mx.Method(method, pattern, handlerFn)
-}
-
-// Connect adds the route `pattern` that matches a CONNECT http method to
-// execute the `handlerFn` http.HandlerFunc.
-func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) {
- mx.handle(mCONNECT, pattern, handlerFn)
-}
-
-// Delete adds the route `pattern` that matches a DELETE http method to
-// execute the `handlerFn` http.HandlerFunc.
-func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) {
- mx.handle(mDELETE, pattern, handlerFn)
-}
-
-// Get adds the route `pattern` that matches a GET http method to
-// execute the `handlerFn` http.HandlerFunc.
-func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) {
- mx.handle(mGET, pattern, handlerFn)
-}
-
-// Head adds the route `pattern` that matches a HEAD http method to
-// execute the `handlerFn` http.HandlerFunc.
-func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) {
- mx.handle(mHEAD, pattern, handlerFn)
-}
-
-// Options adds the route `pattern` that matches a OPTIONS http method to
-// execute the `handlerFn` http.HandlerFunc.
-func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) {
- mx.handle(mOPTIONS, pattern, handlerFn)
-}
-
-// Patch adds the route `pattern` that matches a PATCH http method to
-// execute the `handlerFn` http.HandlerFunc.
-func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) {
- mx.handle(mPATCH, pattern, handlerFn)
-}
-
-// Post adds the route `pattern` that matches a POST http method to
-// execute the `handlerFn` http.HandlerFunc.
-func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) {
- mx.handle(mPOST, pattern, handlerFn)
-}
-
-// Put adds the route `pattern` that matches a PUT http method to
-// execute the `handlerFn` http.HandlerFunc.
-func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) {
- mx.handle(mPUT, pattern, handlerFn)
-}
-
-// Trace adds the route `pattern` that matches a TRACE http method to
-// execute the `handlerFn` http.HandlerFunc.
-func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) {
- mx.handle(mTRACE, pattern, handlerFn)
-}
-
-// NotFound sets a custom http.HandlerFunc for routing paths that could
-// not be found. The default 404 handler is `http.NotFound`.
-func (mx *Mux) NotFound(handlerFn http.HandlerFunc) {
- // Build NotFound handler chain
- m := mx
- hFn := handlerFn
- if mx.inline && mx.parent != nil {
- m = mx.parent
- hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
- }
-
- // Update the notFoundHandler from this point forward
- m.notFoundHandler = hFn
- m.updateSubRoutes(func(subMux *Mux) {
- if subMux.notFoundHandler == nil {
- subMux.NotFound(hFn)
- }
- })
-}
-
-// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the
-// method is unresolved. The default handler returns a 405 with an empty body.
-func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) {
- // Build MethodNotAllowed handler chain
- m := mx
- hFn := handlerFn
- if mx.inline && mx.parent != nil {
- m = mx.parent
- hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
- }
-
- // Update the methodNotAllowedHandler from this point forward
- m.methodNotAllowedHandler = hFn
- m.updateSubRoutes(func(subMux *Mux) {
- if subMux.methodNotAllowedHandler == nil {
- subMux.MethodNotAllowed(hFn)
- }
- })
-}
-
-// With adds inline middlewares for an endpoint handler.
-func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router {
-	// As in handle(), we must build the mux handler once additional
-	// middleware registration is no longer allowed for this stack, which is the case now.
- if !mx.inline && mx.handler == nil {
- mx.buildRouteHandler()
- }
-
-	// Copy middlewares from parent inline muxes
- var mws Middlewares
- if mx.inline {
- mws = make(Middlewares, len(mx.middlewares))
- copy(mws, mx.middlewares)
- }
- mws = append(mws, middlewares...)
-
- im := &Mux{
- pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws,
- notFoundHandler: mx.notFoundHandler, methodNotAllowedHandler: mx.methodNotAllowedHandler,
- }
-
- return im
-}
-
-// Group creates a new inline-Mux with a fresh middleware stack. It's useful
-// for a group of handlers along the same routing path that use an additional
-// set of middlewares. See _examples/.
-func (mx *Mux) Group(fn func(r Router)) Router {
- im := mx.With().(*Mux)
- if fn != nil {
- fn(im)
- }
- return im
-}
-
-// Route creates a new Mux with a fresh middleware stack and mounts it
-// along the `pattern` as a subrouter. Effectively, this is a short-hand
-// call to Mount. See _examples/.
-func (mx *Mux) Route(pattern string, fn func(r Router)) Router {
- subRouter := NewRouter()
- if fn != nil {
- fn(subRouter)
- }
- mx.Mount(pattern, subRouter)
- return subRouter
-}
-
-// Mount attaches another http.Handler or chi Router as a subrouter along a routing
-// path. It's very useful for splitting up a large API into many independent routers and
-// composing them into a single service using Mount. See _examples/.
-//
-// Note that Mount() simply sets a wildcard along the `pattern` that will continue
-// routing at the `handler`, which in most cases is another chi.Router. As a result,
-// if you define two Mount() routes on the exact same pattern the mount will panic.
-func (mx *Mux) Mount(pattern string, handler http.Handler) {
- // Provide runtime safety for ensuring a pattern isn't mounted on an existing
- // routing pattern.
- if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") {
- panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern))
- }
-
- // Assign sub-Router's with the parent not found & method not allowed handler if not specified.
- subr, ok := handler.(*Mux)
- if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil {
- subr.NotFound(mx.notFoundHandler)
- }
- if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil {
- subr.MethodNotAllowed(mx.methodNotAllowedHandler)
- }
-
- mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- rctx := RouteContext(r.Context())
- rctx.RoutePath = mx.nextRoutePath(rctx)
- handler.ServeHTTP(w, r)
- })
-
- if pattern == "" || pattern[len(pattern)-1] != '/' {
- mx.handle(mALL|mSTUB, pattern, mountHandler)
- mx.handle(mALL|mSTUB, pattern+"/", mountHandler)
- pattern += "/"
- }
-
- method := mALL
- subroutes, _ := handler.(Routes)
- if subroutes != nil {
- method |= mSTUB
- }
- n := mx.handle(method, pattern+"*", mountHandler)
-
- if subroutes != nil {
- n.subroutes = subroutes
- }
-}
-
-// Routes returns a slice of routing information from the tree,
-// useful for traversing available routes of a router.
-func (mx *Mux) Routes() []Route {
- return mx.tree.routes()
-}
-
-// Middlewares returns a slice of middleware handler functions.
-func (mx *Mux) Middlewares() Middlewares {
- return mx.middlewares
-}
-
-// Match searches the routing tree for a handler that matches the method/path.
-// It's similar to routing a http request, but without executing the handler
-// thereafter.
-//
-// Note: the *Context state is updated during execution, so manage
-// the state carefully or make a NewRouteContext().
-func (mx *Mux) Match(rctx *Context, method, path string) bool {
- m, ok := methodMap[method]
- if !ok {
- return false
- }
-
- node, _, h := mx.tree.FindRoute(rctx, m, path)
-
- if node != nil && node.subroutes != nil {
- rctx.RoutePath = mx.nextRoutePath(rctx)
- return node.subroutes.Match(rctx, method, rctx.RoutePath)
- }
-
- return h != nil
-}
-
-// NotFoundHandler returns the default Mux 404 responder whenever a route
-// cannot be found.
-func (mx *Mux) NotFoundHandler() http.HandlerFunc {
- if mx.notFoundHandler != nil {
- return mx.notFoundHandler
- }
- return http.NotFound
-}
-
-// MethodNotAllowedHandler returns the default Mux 405 responder whenever
-// a method cannot be resolved for a route.
-func (mx *Mux) MethodNotAllowedHandler() http.HandlerFunc {
- if mx.methodNotAllowedHandler != nil {
- return mx.methodNotAllowedHandler
- }
- return methodNotAllowedHandler
-}
-
-// buildRouteHandler builds the single mux handler that is a chain of the middleware
-// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this
-// point, no other middlewares can be registered on this Mux's stack. But you can still
-// compose additional middlewares via Group() or by using a chained middleware handler.
-func (mx *Mux) buildRouteHandler() {
- mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP))
-}
-
-// handle registers a http.Handler in the routing tree for a particular http method
-// and routing pattern.
-func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node {
- if len(pattern) == 0 || pattern[0] != '/' {
- panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern))
- }
-
- // Build the computed routing handler for this routing pattern.
- if !mx.inline && mx.handler == nil {
- mx.buildRouteHandler()
- }
-
- // Build endpoint handler with inline middlewares for the route
- var h http.Handler
- if mx.inline {
- mx.handler = http.HandlerFunc(mx.routeHTTP)
- h = Chain(mx.middlewares...).Handler(handler)
- } else {
- h = handler
- }
-
- // Add the endpoint to the tree and return the node
- return mx.tree.InsertRoute(method, pattern, h)
-}
-
-// routeHTTP routes a http.Request through the Mux routing tree to serve
-// the matching handler for a particular http method.
-func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) {
- // Grab the route context object
- rctx := r.Context().Value(RouteCtxKey).(*Context)
-
- // The request routing path
- routePath := rctx.RoutePath
- if routePath == "" {
- if r.URL.RawPath != "" {
- routePath = r.URL.RawPath
- } else {
- routePath = r.URL.Path
- }
- }
-
- // Check if method is supported by chi
- if rctx.RouteMethod == "" {
- rctx.RouteMethod = r.Method
- }
- method, ok := methodMap[rctx.RouteMethod]
- if !ok {
- mx.MethodNotAllowedHandler().ServeHTTP(w, r)
- return
- }
-
- // Find the route
- if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil {
- h.ServeHTTP(w, r)
- return
- }
- if rctx.methodNotAllowed {
- mx.MethodNotAllowedHandler().ServeHTTP(w, r)
- } else {
- mx.NotFoundHandler().ServeHTTP(w, r)
- }
-}
-
-func (mx *Mux) nextRoutePath(rctx *Context) string {
- routePath := "/"
- nx := len(rctx.routeParams.Keys) - 1 // index of last param in list
- if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx {
- routePath = "/" + rctx.routeParams.Values[nx]
- }
- return routePath
-}
-
-// Recursively update data on child routers.
-func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) {
- for _, r := range mx.tree.routes() {
- subMux, ok := r.SubRoutes.(*Mux)
- if !ok {
- continue
- }
- fn(subMux)
- }
-}
-
-// methodNotAllowedHandler is a helper function to respond with a 405,
-// method not allowed.
-func methodNotAllowedHandler(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(405)
- w.Write(nil)
-}
diff --git a/vendor/github.com/go-chi/chi/tree.go b/vendor/github.com/go-chi/chi/tree.go
deleted file mode 100644
index 59b5b5f7b0..0000000000
--- a/vendor/github.com/go-chi/chi/tree.go
+++ /dev/null
@@ -1,865 +0,0 @@
-package chi
-
-// The radix tree implementation below is based on the original work by
-// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go
-// (MIT licensed). It's been heavily modified for use as an HTTP routing tree.
-
-import (
- "fmt"
- "math"
- "net/http"
- "regexp"
- "sort"
- "strconv"
- "strings"
-)
-
-type methodTyp int
-
-const (
- mSTUB methodTyp = 1 << iota
- mCONNECT
- mDELETE
- mGET
- mHEAD
- mOPTIONS
- mPATCH
- mPOST
- mPUT
- mTRACE
-)
-
-var mALL = mCONNECT | mDELETE | mGET | mHEAD |
- mOPTIONS | mPATCH | mPOST | mPUT | mTRACE
-
-var methodMap = map[string]methodTyp{
- http.MethodConnect: mCONNECT,
- http.MethodDelete: mDELETE,
- http.MethodGet: mGET,
- http.MethodHead: mHEAD,
- http.MethodOptions: mOPTIONS,
- http.MethodPatch: mPATCH,
- http.MethodPost: mPOST,
- http.MethodPut: mPUT,
- http.MethodTrace: mTRACE,
-}
-
-// RegisterMethod adds support for custom HTTP method handlers, available
-// via Router#Method and Router#MethodFunc
-func RegisterMethod(method string) {
- if method == "" {
- return
- }
- method = strings.ToUpper(method)
- if _, ok := methodMap[method]; ok {
- return
- }
- n := len(methodMap)
- if n > strconv.IntSize {
- panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize))
- }
- mt := methodTyp(math.Exp2(float64(n)))
- methodMap[method] = mt
- mALL |= mt
-}
-
-type nodeTyp uint8
-
-const (
- ntStatic nodeTyp = iota // /home
- ntRegexp // /{id:[0-9]+}
- ntParam // /{user}
- ntCatchAll // /api/v1/*
-)
-
-type node struct {
- // node type: static, regexp, param, catchAll
- typ nodeTyp
-
- // first byte of the prefix
- label byte
-
- // first byte of the child prefix
- tail byte
-
- // prefix is the common prefix we ignore
- prefix string
-
- // regexp matcher for regexp nodes
- rex *regexp.Regexp
-
- // HTTP handler endpoints on the leaf node
- endpoints endpoints
-
- // subroutes on the leaf node
- subroutes Routes
-
- // child nodes should be stored in-order for iteration,
- // in groups of the node type.
- children [ntCatchAll + 1]nodes
-}
-
-// endpoints is a mapping of http method constants to handlers
-// for a given route.
-type endpoints map[methodTyp]*endpoint
-
-type endpoint struct {
- // endpoint handler
- handler http.Handler
-
- // pattern is the routing pattern for handler nodes
- pattern string
-
- // parameter keys recorded on handler nodes
- paramKeys []string
-}
-
-func (s endpoints) Value(method methodTyp) *endpoint {
- mh, ok := s[method]
- if !ok {
- mh = &endpoint{}
- s[method] = mh
- }
- return mh
-}
-
-func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node {
- var parent *node
- search := pattern
-
- for {
- // Handle key exhaustion
- if len(search) == 0 {
- // Insert or update the node's leaf handler
- n.setEndpoint(method, handler, pattern)
- return n
- }
-
- // We're going to be searching for a wild node next,
- // in this case, we need to get the tail
- var label = search[0]
- var segTail byte
- var segEndIdx int
- var segTyp nodeTyp
- var segRexpat string
- if label == '{' || label == '*' {
- segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search)
- }
-
- var prefix string
- if segTyp == ntRegexp {
- prefix = segRexpat
- }
-
- // Look for the edge to attach to
- parent = n
- n = n.getEdge(segTyp, label, segTail, prefix)
-
- // No edge, create one
- if n == nil {
- child := &node{label: label, tail: segTail, prefix: search}
- hn := parent.addChild(child, search)
- hn.setEndpoint(method, handler, pattern)
-
- return hn
- }
-
- // Found an edge to match the pattern
-
- if n.typ > ntStatic {
- // We found a param node, trim the param from the search path and continue.
- // This param/wild pattern segment would already be on the tree from a previous
- // call to addChild when creating a new node.
- search = search[segEndIdx:]
- continue
- }
-
- // Static nodes fall below here.
- // Determine longest prefix of the search key on match.
- commonPrefix := longestPrefix(search, n.prefix)
- if commonPrefix == len(n.prefix) {
- // the common prefix is as long as the current node's prefix we're attempting to insert.
- // keep the search going.
- search = search[commonPrefix:]
- continue
- }
-
- // Split the node
- child := &node{
- typ: ntStatic,
- prefix: search[:commonPrefix],
- }
- parent.replaceChild(search[0], segTail, child)
-
- // Restore the existing node
- n.label = n.prefix[commonPrefix]
- n.prefix = n.prefix[commonPrefix:]
- child.addChild(n, n.prefix)
-
- // If the new key is a subset, set the method/handler on this node and finish.
- search = search[commonPrefix:]
- if len(search) == 0 {
- child.setEndpoint(method, handler, pattern)
- return child
- }
-
- // Create a new edge for the node
- subchild := &node{
- typ: ntStatic,
- label: search[0],
- prefix: search,
- }
- hn := child.addChild(subchild, search)
- hn.setEndpoint(method, handler, pattern)
- return hn
- }
-}
-
-// addChild appends the new `child` node to the tree using the `pattern` as the trie key.
-// For a URL router like chi's, we split the static, param, regexp and wildcard segments
-// into different nodes. In addition, addChild will recursively call itself until every
-// pattern segment is added to the url pattern tree as individual nodes, depending on type.
-func (n *node) addChild(child *node, prefix string) *node {
- search := prefix
-
- // handler leaf node added to the tree is the child.
- // this may be overridden later down the flow
- hn := child
-
- // Parse next segment
- segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search)
-
- // Add child depending on next up segment
- switch segTyp {
-
- case ntStatic:
- // Search prefix is all static (that is, has no params in path)
- // noop
-
- default:
- // Search prefix contains a param, regexp or wildcard
-
- if segTyp == ntRegexp {
- rex, err := regexp.Compile(segRexpat)
- if err != nil {
- panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat))
- }
- child.prefix = segRexpat
- child.rex = rex
- }
-
- if segStartIdx == 0 {
- // Route starts with a param
- child.typ = segTyp
-
- if segTyp == ntCatchAll {
- segStartIdx = -1
- } else {
- segStartIdx = segEndIdx
- }
- if segStartIdx < 0 {
- segStartIdx = len(search)
- }
- child.tail = segTail // for params, we set the tail
-
- if segStartIdx != len(search) {
- // add static edge for the remaining part, split the end.
-				// it's not possible to have adjacent param nodes, so it's certainly
- // going to be a static node next.
-
- search = search[segStartIdx:] // advance search position
-
- nn := &node{
- typ: ntStatic,
- label: search[0],
- prefix: search,
- }
- hn = child.addChild(nn, search)
- }
-
- } else if segStartIdx > 0 {
- // Route has some param
-
- // starts with a static segment
- child.typ = ntStatic
- child.prefix = search[:segStartIdx]
- child.rex = nil
-
- // add the param edge node
- search = search[segStartIdx:]
-
- nn := &node{
- typ: segTyp,
- label: search[0],
- tail: segTail,
- }
- hn = child.addChild(nn, search)
-
- }
- }
-
- n.children[child.typ] = append(n.children[child.typ], child)
- n.children[child.typ].Sort()
- return hn
-}
-
-func (n *node) replaceChild(label, tail byte, child *node) {
- for i := 0; i < len(n.children[child.typ]); i++ {
- if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail {
- n.children[child.typ][i] = child
- n.children[child.typ][i].label = label
- n.children[child.typ][i].tail = tail
- return
- }
- }
- panic("chi: replacing missing child")
-}
-
-func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node {
- nds := n.children[ntyp]
- for i := 0; i < len(nds); i++ {
- if nds[i].label == label && nds[i].tail == tail {
- if ntyp == ntRegexp && nds[i].prefix != prefix {
- continue
- }
- return nds[i]
- }
- }
- return nil
-}
-
-func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) {
- // Set the handler for the method type on the node
- if n.endpoints == nil {
- n.endpoints = make(endpoints)
- }
-
- paramKeys := patParamKeys(pattern)
-
- if method&mSTUB == mSTUB {
- n.endpoints.Value(mSTUB).handler = handler
- }
- if method&mALL == mALL {
- h := n.endpoints.Value(mALL)
- h.handler = handler
- h.pattern = pattern
- h.paramKeys = paramKeys
- for _, m := range methodMap {
- h := n.endpoints.Value(m)
- h.handler = handler
- h.pattern = pattern
- h.paramKeys = paramKeys
- }
- } else {
- h := n.endpoints.Value(method)
- h.handler = handler
- h.pattern = pattern
- h.paramKeys = paramKeys
- }
-}
-
-func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) {
- // Reset the context routing pattern and params
- rctx.routePattern = ""
- rctx.routeParams.Keys = rctx.routeParams.Keys[:0]
- rctx.routeParams.Values = rctx.routeParams.Values[:0]
-
- // Find the routing handlers for the path
- rn := n.findRoute(rctx, method, path)
- if rn == nil {
- return nil, nil, nil
- }
-
- // Record the routing params in the request lifecycle
- rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...)
- rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...)
-
- // Record the routing pattern in the request lifecycle
- if rn.endpoints[method].pattern != "" {
- rctx.routePattern = rn.endpoints[method].pattern
- rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern)
- }
-
- return rn, rn.endpoints, rn.endpoints[method].handler
-}
-
-// Recursive edge traversal by checking all nodeTyp groups along the way.
-// It's like searching through a multi-dimensional radix trie.
-func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node {
- nn := n
- search := path
-
- for t, nds := range nn.children {
- ntyp := nodeTyp(t)
- if len(nds) == 0 {
- continue
- }
-
- var xn *node
- xsearch := search
-
- var label byte
- if search != "" {
- label = search[0]
- }
-
- switch ntyp {
- case ntStatic:
- xn = nds.findEdge(label)
- if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) {
- continue
- }
- xsearch = xsearch[len(xn.prefix):]
-
- case ntParam, ntRegexp:
- // short-circuit and return no matching route for empty param values
- if xsearch == "" {
- continue
- }
-
- // serially loop through each node grouped by the tail delimiter
- for idx := 0; idx < len(nds); idx++ {
- xn = nds[idx]
-
- // label for param nodes is the delimiter byte
- p := strings.IndexByte(xsearch, xn.tail)
-
- if p < 0 {
- if xn.tail == '/' {
- p = len(xsearch)
- } else {
- continue
- }
- }
-
- if ntyp == ntRegexp && xn.rex != nil {
- if !xn.rex.Match([]byte(xsearch[:p])) {
- continue
- }
- } else if strings.IndexByte(xsearch[:p], '/') != -1 {
- // avoid a match across path segments
- continue
- }
-
- prevlen := len(rctx.routeParams.Values)
- rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p])
- xsearch = xsearch[p:]
-
- if len(xsearch) == 0 {
- if xn.isLeaf() {
- h := xn.endpoints[method]
- if h != nil && h.handler != nil {
- rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
- return xn
- }
-
- // flag that the routing context found a route, but not a corresponding
- // supported method
- rctx.methodNotAllowed = true
- }
- }
-
- // recursively find the next node on this branch
- fin := xn.findRoute(rctx, method, xsearch)
- if fin != nil {
- return fin
- }
-
- // not found on this branch, reset vars
- rctx.routeParams.Values = rctx.routeParams.Values[:prevlen]
- xsearch = search
- }
-
- rctx.routeParams.Values = append(rctx.routeParams.Values, "")
-
- default:
- // catch-all nodes
- rctx.routeParams.Values = append(rctx.routeParams.Values, search)
- xn = nds[0]
- xsearch = ""
- }
-
- if xn == nil {
- continue
- }
-
- // did we find it yet?
- if len(xsearch) == 0 {
- if xn.isLeaf() {
- h := xn.endpoints[method]
- if h != nil && h.handler != nil {
- rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
- return xn
- }
-
- // flag that the routing context found a route, but not a corresponding
- // supported method
- rctx.methodNotAllowed = true
- }
- }
-
- // recursively find the next node..
- fin := xn.findRoute(rctx, method, xsearch)
- if fin != nil {
- return fin
- }
-
- // Did not find final handler, let's remove the param here if it was set
- if xn.typ > ntStatic {
- if len(rctx.routeParams.Values) > 0 {
- rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1]
- }
- }
-
- }
-
- return nil
-}
-
-func (n *node) findEdge(ntyp nodeTyp, label byte) *node {
- nds := n.children[ntyp]
- num := len(nds)
- idx := 0
-
- switch ntyp {
- case ntStatic, ntParam, ntRegexp:
- i, j := 0, num-1
- for i <= j {
- idx = i + (j-i)/2
- if label > nds[idx].label {
- i = idx + 1
- } else if label < nds[idx].label {
- j = idx - 1
- } else {
- i = num // breaks cond
- }
- }
- if nds[idx].label != label {
- return nil
- }
- return nds[idx]
-
- default: // catch all
- return nds[idx]
- }
-}
-
-func (n *node) isLeaf() bool {
- return n.endpoints != nil
-}
-
-func (n *node) findPattern(pattern string) bool {
- nn := n
- for _, nds := range nn.children {
- if len(nds) == 0 {
- continue
- }
-
- n = nn.findEdge(nds[0].typ, pattern[0])
- if n == nil {
- continue
- }
-
- var idx int
- var xpattern string
-
- switch n.typ {
- case ntStatic:
- idx = longestPrefix(pattern, n.prefix)
- if idx < len(n.prefix) {
- continue
- }
-
- case ntParam, ntRegexp:
- idx = strings.IndexByte(pattern, '}') + 1
-
- case ntCatchAll:
- idx = longestPrefix(pattern, "*")
-
- default:
- panic("chi: unknown node type")
- }
-
- xpattern = pattern[idx:]
- if len(xpattern) == 0 {
- return true
- }
-
- return n.findPattern(xpattern)
- }
- return false
-}
-
-func (n *node) routes() []Route {
- rts := []Route{}
-
- n.walk(func(eps endpoints, subroutes Routes) bool {
- if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil {
- return false
- }
-
- // Group methodHandlers by unique patterns
- pats := make(map[string]endpoints)
-
- for mt, h := range eps {
- if h.pattern == "" {
- continue
- }
- p, ok := pats[h.pattern]
- if !ok {
- p = endpoints{}
- pats[h.pattern] = p
- }
- p[mt] = h
- }
-
- for p, mh := range pats {
- hs := make(map[string]http.Handler)
- if mh[mALL] != nil && mh[mALL].handler != nil {
- hs["*"] = mh[mALL].handler
- }
-
- for mt, h := range mh {
- if h.handler == nil {
- continue
- }
- m := methodTypString(mt)
- if m == "" {
- continue
- }
- hs[m] = h.handler
- }
-
- rt := Route{p, hs, subroutes}
- rts = append(rts, rt)
- }
-
- return false
- })
-
- return rts
-}
-
-func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool {
- // Visit the leaf values if any
- if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) {
- return true
- }
-
- // Recurse on the children
- for _, ns := range n.children {
- for _, cn := range ns {
- if cn.walk(fn) {
- return true
- }
- }
- }
- return false
-}
-
-// patNextSegment returns the next segment details from a pattern:
-// node type, param key, regexp string, param tail byte, param starting index, param ending index
-func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) {
- ps := strings.Index(pattern, "{")
- ws := strings.Index(pattern, "*")
-
- if ps < 0 && ws < 0 {
- return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing
- }
-
- // Sanity check
- if ps >= 0 && ws >= 0 && ws < ps {
- panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'")
- }
-
- var tail byte = '/' // Default endpoint tail to / byte
-
- if ps >= 0 {
- // Param/Regexp pattern is next
- nt := ntParam
-
-		// Read to the closing '}', taking into account opens and closes in the curly brace count (cc)
- cc := 0
- pe := ps
- for i, c := range pattern[ps:] {
- if c == '{' {
- cc++
- } else if c == '}' {
- cc--
- if cc == 0 {
- pe = ps + i
- break
- }
- }
- }
- if pe == ps {
- panic("chi: route param closing delimiter '}' is missing")
- }
-
- key := pattern[ps+1 : pe]
- pe++ // set end to next position
-
- if pe < len(pattern) {
- tail = pattern[pe]
- }
-
- var rexpat string
- if idx := strings.Index(key, ":"); idx >= 0 {
- nt = ntRegexp
- rexpat = key[idx+1:]
- key = key[:idx]
- }
-
- if len(rexpat) > 0 {
- if rexpat[0] != '^' {
- rexpat = "^" + rexpat
- }
- if rexpat[len(rexpat)-1] != '$' {
- rexpat += "$"
- }
- }
-
- return nt, key, rexpat, tail, ps, pe
- }
-
- // Wildcard pattern as finale
- if ws < len(pattern)-1 {
- panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead")
- }
- return ntCatchAll, "*", "", 0, ws, len(pattern)
-}
-
-func patParamKeys(pattern string) []string {
- pat := pattern
- paramKeys := []string{}
- for {
- ptyp, paramKey, _, _, _, e := patNextSegment(pat)
- if ptyp == ntStatic {
- return paramKeys
- }
- for i := 0; i < len(paramKeys); i++ {
- if paramKeys[i] == paramKey {
- panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey))
- }
- }
- paramKeys = append(paramKeys, paramKey)
- pat = pat[e:]
- }
-}
-
-// longestPrefix finds the length of the shared prefix
-// of two strings
-func longestPrefix(k1, k2 string) int {
- max := len(k1)
- if l := len(k2); l < max {
- max = l
- }
- var i int
- for i = 0; i < max; i++ {
- if k1[i] != k2[i] {
- break
- }
- }
- return i
-}
-
-func methodTypString(method methodTyp) string {
- for s, t := range methodMap {
- if method == t {
- return s
- }
- }
- return ""
-}
-
-type nodes []*node
-
-// Sort the list of nodes by label
-func (ns nodes) Sort() { sort.Sort(ns); ns.tailSort() }
-func (ns nodes) Len() int { return len(ns) }
-func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
-func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label }
-
-// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes.
-// The list order determines the traversal order.
-func (ns nodes) tailSort() {
- for i := len(ns) - 1; i >= 0; i-- {
- if ns[i].typ > ntStatic && ns[i].tail == '/' {
- ns.Swap(i, len(ns)-1)
- return
- }
- }
-}
-
-func (ns nodes) findEdge(label byte) *node {
- num := len(ns)
- idx := 0
- i, j := 0, num-1
- for i <= j {
- idx = i + (j-i)/2
- if label > ns[idx].label {
- i = idx + 1
- } else if label < ns[idx].label {
- j = idx - 1
- } else {
- i = num // breaks cond
- }
- }
- if ns[idx].label != label {
- return nil
- }
- return ns[idx]
-}
-
-// Route describes the details of a routing handler.
-// Handlers map key is an HTTP method
-type Route struct {
- Pattern string
- Handlers map[string]http.Handler
- SubRoutes Routes
-}
-
-// WalkFunc is the type of the function called for each method and route visited by Walk.
-type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error
-
-// Walk walks any router tree that implements Routes interface.
-func Walk(r Routes, walkFn WalkFunc) error {
- return walk(r, walkFn, "")
-}
-
-func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error {
- for _, route := range r.Routes() {
- mws := make([]func(http.Handler) http.Handler, len(parentMw))
- copy(mws, parentMw)
- mws = append(mws, r.Middlewares()...)
-
- if route.SubRoutes != nil {
- if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil {
- return err
- }
- continue
- }
-
- for method, handler := range route.Handlers {
- if method == "*" {
- // Ignore a "catchAll" method, since we pass down all the specific methods for each route.
- continue
- }
-
- fullRoute := parentRoute + route.Pattern
- fullRoute = strings.Replace(fullRoute, "/*/", "/", -1)
-
- if chain, ok := handler.(*ChainHandler); ok {
- if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil {
- return err
- }
- } else {
- if err := walkFn(method, fullRoute, handler, mws...); err != nil {
- return err
- }
- }
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-playground/locales/.gitignore b/vendor/github.com/go-playground/locales/.gitignore
deleted file mode 100644
index daf913b1b3..0000000000
--- a/vendor/github.com/go-playground/locales/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
diff --git a/vendor/github.com/go-playground/locales/.travis.yml b/vendor/github.com/go-playground/locales/.travis.yml
deleted file mode 100644
index d50237a608..0000000000
--- a/vendor/github.com/go-playground/locales/.travis.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-language: go
-go:
- - 1.13.1
- - tip
-matrix:
- allow_failures:
- - go: tip
-
-notifications:
- email:
- recipients: dean.karn@gmail.com
- on_success: change
- on_failure: always
-
-before_install:
- - go install github.com/mattn/goveralls
-
-# Only clone the most recent commit.
-git:
- depth: 1
-
-script:
- - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
-
-after_success: |
- goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/locales/LICENSE b/vendor/github.com/go-playground/locales/LICENSE
deleted file mode 100644
index 75854ac4f0..0000000000
--- a/vendor/github.com/go-playground/locales/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2016 Go Playground
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/locales/README.md b/vendor/github.com/go-playground/locales/README.md
deleted file mode 100644
index 7b6be2c647..0000000000
--- a/vendor/github.com/go-playground/locales/README.md
+++ /dev/null
@@ -1,170 +0,0 @@
-## locales
-![Project status](https://img.shields.io/badge/version-0.14.1-green.svg)
-[![Build Status](https://travis-ci.org/go-playground/locales.svg?branch=master)](https://travis-ci.org/go-playground/locales)
-[![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales)
-![License](https://img.shields.io/dub/l/vibe-d.svg)
-
-Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within
-an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator).
-
-Features
---------
-- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v36.0.1
-- [x] Contains Cardinal, Ordinal and Range Plural Rules
-- [x] Contains Month, Weekday and Timezone translations built in
-- [x] Contains Date & Time formatting functions
-- [x] Contains Number, Currency, Accounting and Percent formatting functions
-- [x] Supports the "Gregorian" calendar only (my time isn't unlimited, had to draw the line somewhere)
-
-Full Tests
---------------------
-I could sure use your help adding tests for every locale; it is a huge undertaking and I just don't have the free time to do it all at the moment.
-Any help would be **greatly appreciated!** Please see the [issue](https://github.com/go-playground/locales/issues/1) for details.
-
-Installation
------------
-
-Use go get
-
-```shell
-go get github.com/go-playground/locales
-```
-
-NOTES
---------
-You'll notice most return types are []byte. This is because most of the time the results will be concatenated with a larger body
-of text, which can avoid some allocations if you're already appending to a byte slice; otherwise just cast the result to a string.
-
-Usage
--------
-```go
-package main
-
-import (
- "fmt"
- "time"
-
- "github.com/go-playground/locales/currency"
- "github.com/go-playground/locales/en_CA"
-)
-
-func main() {
-
- loc, _ := time.LoadLocation("America/Toronto")
- datetime := time.Date(2016, 02, 03, 9, 0, 1, 0, loc)
-
- l := en_CA.New()
-
- // Dates
- fmt.Println(l.FmtDateFull(datetime))
- fmt.Println(l.FmtDateLong(datetime))
- fmt.Println(l.FmtDateMedium(datetime))
- fmt.Println(l.FmtDateShort(datetime))
-
- // Times
- fmt.Println(l.FmtTimeFull(datetime))
- fmt.Println(l.FmtTimeLong(datetime))
- fmt.Println(l.FmtTimeMedium(datetime))
- fmt.Println(l.FmtTimeShort(datetime))
-
- // Months Wide
- fmt.Println(l.MonthWide(time.January))
- fmt.Println(l.MonthWide(time.February))
- fmt.Println(l.MonthWide(time.March))
- // ...
-
- // Months Abbreviated
- fmt.Println(l.MonthAbbreviated(time.January))
- fmt.Println(l.MonthAbbreviated(time.February))
- fmt.Println(l.MonthAbbreviated(time.March))
- // ...
-
- // Months Narrow
- fmt.Println(l.MonthNarrow(time.January))
- fmt.Println(l.MonthNarrow(time.February))
- fmt.Println(l.MonthNarrow(time.March))
- // ...
-
- // Weekdays Wide
- fmt.Println(l.WeekdayWide(time.Sunday))
- fmt.Println(l.WeekdayWide(time.Monday))
- fmt.Println(l.WeekdayWide(time.Tuesday))
- // ...
-
- // Weekdays Abbreviated
- fmt.Println(l.WeekdayAbbreviated(time.Sunday))
- fmt.Println(l.WeekdayAbbreviated(time.Monday))
- fmt.Println(l.WeekdayAbbreviated(time.Tuesday))
- // ...
-
- // Weekdays Short
- fmt.Println(l.WeekdayShort(time.Sunday))
- fmt.Println(l.WeekdayShort(time.Monday))
- fmt.Println(l.WeekdayShort(time.Tuesday))
- // ...
-
- // Weekdays Narrow
- fmt.Println(l.WeekdayNarrow(time.Sunday))
- fmt.Println(l.WeekdayNarrow(time.Monday))
- fmt.Println(l.WeekdayNarrow(time.Tuesday))
- // ...
-
- var f64 float64
-
- f64 = -10356.4523
-
- // Number
- fmt.Println(l.FmtNumber(f64, 2))
-
- // Currency
- fmt.Println(l.FmtCurrency(f64, 2, currency.CAD))
- fmt.Println(l.FmtCurrency(f64, 2, currency.USD))
-
- // Accounting
- fmt.Println(l.FmtAccounting(f64, 2, currency.CAD))
- fmt.Println(l.FmtAccounting(f64, 2, currency.USD))
-
- f64 = 78.12
-
- // Percent
- fmt.Println(l.FmtPercent(f64, 0))
-
- // Plural Rules for locale, so you know what rules you must cover
- fmt.Println(l.PluralsCardinal())
- fmt.Println(l.PluralsOrdinal())
-
- // Cardinal Plural Rules
- fmt.Println(l.CardinalPluralRule(1, 0))
- fmt.Println(l.CardinalPluralRule(1.0, 0))
- fmt.Println(l.CardinalPluralRule(1.0, 1))
- fmt.Println(l.CardinalPluralRule(3, 0))
-
- // Ordinal Plural Rules
- fmt.Println(l.OrdinalPluralRule(21, 0)) // 21st
- fmt.Println(l.OrdinalPluralRule(22, 0)) // 22nd
- fmt.Println(l.OrdinalPluralRule(33, 0)) // 33rd
- fmt.Println(l.OrdinalPluralRule(34, 0)) // 34th
-
- // Range Plural Rules
- fmt.Println(l.RangePluralRule(1, 0, 1, 0)) // 1-1
- fmt.Println(l.RangePluralRule(1, 0, 2, 0)) // 1-2
- fmt.Println(l.RangePluralRule(5, 0, 8, 0)) // 5-8
-}
-```
-
-NOTES:
--------
-These rules were generated from the [Unicode CLDR Project](http://cldr.unicode.org/). If you encounter any issues,
-I strongly encourage contributing to the CLDR project to get the locale information corrected; the next time
-these locales are regenerated, the fix will come along with them.
-
-I do, however, realize that time constraints are often a factor, so there are two options:
-
-1. Create your own locale, copy, paste and modify, and ensure it complies with the `Translator` interface.
-2. Add an exception directly in the locale generation code and, once regenerated, the fix will be in place.
-
-Please do not make fixes inside the locale files; they WILL get overwritten when the locales are regenerated.
-
-License
-------
-Distributed under MIT License, please see license file in code for more details.
diff --git a/vendor/github.com/go-playground/locales/currency/currency.go b/vendor/github.com/go-playground/locales/currency/currency.go
deleted file mode 100644
index b5a95fb074..0000000000
--- a/vendor/github.com/go-playground/locales/currency/currency.go
+++ /dev/null
@@ -1,311 +0,0 @@
-package currency
-
-// Type is the currency type associated with the locales currency enum
-type Type int
-
-// locale currencies
-const (
- ADP Type = iota
- AED
- AFA
- AFN
- ALK
- ALL
- AMD
- ANG
- AOA
- AOK
- AON
- AOR
- ARA
- ARL
- ARM
- ARP
- ARS
- ATS
- AUD
- AWG
- AZM
- AZN
- BAD
- BAM
- BAN
- BBD
- BDT
- BEC
- BEF
- BEL
- BGL
- BGM
- BGN
- BGO
- BHD
- BIF
- BMD
- BND
- BOB
- BOL
- BOP
- BOV
- BRB
- BRC
- BRE
- BRL
- BRN
- BRR
- BRZ
- BSD
- BTN
- BUK
- BWP
- BYB
- BYN
- BYR
- BZD
- CAD
- CDF
- CHE
- CHF
- CHW
- CLE
- CLF
- CLP
- CNH
- CNX
- CNY
- COP
- COU
- CRC
- CSD
- CSK
- CUC
- CUP
- CVE
- CYP
- CZK
- DDM
- DEM
- DJF
- DKK
- DOP
- DZD
- ECS
- ECV
- EEK
- EGP
- ERN
- ESA
- ESB
- ESP
- ETB
- EUR
- FIM
- FJD
- FKP
- FRF
- GBP
- GEK
- GEL
- GHC
- GHS
- GIP
- GMD
- GNF
- GNS
- GQE
- GRD
- GTQ
- GWE
- GWP
- GYD
- HKD
- HNL
- HRD
- HRK
- HTG
- HUF
- IDR
- IEP
- ILP
- ILR
- ILS
- INR
- IQD
- IRR
- ISJ
- ISK
- ITL
- JMD
- JOD
- JPY
- KES
- KGS
- KHR
- KMF
- KPW
- KRH
- KRO
- KRW
- KWD
- KYD
- KZT
- LAK
- LBP
- LKR
- LRD
- LSL
- LTL
- LTT
- LUC
- LUF
- LUL
- LVL
- LVR
- LYD
- MAD
- MAF
- MCF
- MDC
- MDL
- MGA
- MGF
- MKD
- MKN
- MLF
- MMK
- MNT
- MOP
- MRO
- MRU
- MTL
- MTP
- MUR
- MVP
- MVR
- MWK
- MXN
- MXP
- MXV
- MYR
- MZE
- MZM
- MZN
- NAD
- NGN
- NIC
- NIO
- NLG
- NOK
- NPR
- NZD
- OMR
- PAB
- PEI
- PEN
- PES
- PGK
- PHP
- PKR
- PLN
- PLZ
- PTE
- PYG
- QAR
- RHD
- ROL
- RON
- RSD
- RUB
- RUR
- RWF
- SAR
- SBD
- SCR
- SDD
- SDG
- SDP
- SEK
- SGD
- SHP
- SIT
- SKK
- SLL
- SOS
- SRD
- SRG
- SSP
- STD
- STN
- SUR
- SVC
- SYP
- SZL
- THB
- TJR
- TJS
- TMM
- TMT
- TND
- TOP
- TPE
- TRL
- TRY
- TTD
- TWD
- TZS
- UAH
- UAK
- UGS
- UGX
- USD
- USN
- USS
- UYI
- UYP
- UYU
- UYW
- UZS
- VEB
- VEF
- VES
- VND
- VNN
- VUV
- WST
- XAF
- XAG
- XAU
- XBA
- XBB
- XBC
- XBD
- XCD
- XDR
- XEU
- XFO
- XFU
- XOF
- XPD
- XPF
- XPT
- XRE
- XSU
- XTS
- XUA
- XXX
- YDD
- YER
- YUD
- YUM
- YUN
- YUR
- ZAL
- ZAR
- ZMK
- ZMW
- ZRN
- ZRZ
- ZWD
- ZWL
- ZWR
-)
diff --git a/vendor/github.com/go-playground/locales/logo.png b/vendor/github.com/go-playground/locales/logo.png
deleted file mode 100644
index 3038276e68..0000000000
Binary files a/vendor/github.com/go-playground/locales/logo.png and /dev/null differ
diff --git a/vendor/github.com/go-playground/locales/rules.go b/vendor/github.com/go-playground/locales/rules.go
deleted file mode 100644
index 9202900149..0000000000
--- a/vendor/github.com/go-playground/locales/rules.go
+++ /dev/null
@@ -1,293 +0,0 @@
-package locales
-
-import (
- "strconv"
- "time"
-
- "github.com/go-playground/locales/currency"
-)
-
-// // ErrBadNumberValue is returned when the number passed for
-// // plural rule determination cannot be parsed
-// type ErrBadNumberValue struct {
-// NumberValue string
-// InnerError error
-// }
-
-// // Error returns ErrBadNumberValue error string
-// func (e *ErrBadNumberValue) Error() string {
-// return fmt.Sprintf("Invalid Number Value '%s' %s", e.NumberValue, e.InnerError)
-// }
-
-// var _ error = new(ErrBadNumberValue)
-
-// PluralRule denotes the type of plural rules
-type PluralRule int
-
-// PluralRule's
-const (
- PluralRuleUnknown PluralRule = iota
- PluralRuleZero // zero
- PluralRuleOne // one - singular
- PluralRuleTwo // two - dual
- PluralRuleFew // few - paucal
- PluralRuleMany // many - also used for fractions if they have a separate class
- PluralRuleOther // other - required—general plural form—also used if the language only has a single form
-)
-
-const (
- pluralsString = "UnknownZeroOneTwoFewManyOther"
-)
-
-// Translator encapsulates an instance of a locale
-// NOTE: some values are returned as []byte in case the caller
-// wishes to append more, which can help avoid allocations; otherwise just cast to string
-type Translator interface {
-
- // The following Functions are for overriding, debugging or developing
- // with a Translator Locale
-
- // Locale returns the string value of the translator
- Locale() string
-
- // returns an array of cardinal plural rules associated
- // with this translator
- PluralsCardinal() []PluralRule
-
- // returns an array of ordinal plural rules associated
- // with this translator
- PluralsOrdinal() []PluralRule
-
- // returns an array of range plural rules associated
- // with this translator
- PluralsRange() []PluralRule
-
- // returns the cardinal PluralRule given 'num' and digits/precision of 'v' for locale
- CardinalPluralRule(num float64, v uint64) PluralRule
-
- // returns the ordinal PluralRule given 'num' and digits/precision of 'v' for locale
- OrdinalPluralRule(num float64, v uint64) PluralRule
-
- // returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for locale
- RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) PluralRule
-
- // returns the locales abbreviated month given the 'month' provided
- MonthAbbreviated(month time.Month) string
-
- // returns the locales abbreviated months
- MonthsAbbreviated() []string
-
- // returns the locales narrow month given the 'month' provided
- MonthNarrow(month time.Month) string
-
- // returns the locales narrow months
- MonthsNarrow() []string
-
- // returns the locales wide month given the 'month' provided
- MonthWide(month time.Month) string
-
- // returns the locales wide months
- MonthsWide() []string
-
- // returns the locales abbreviated weekday given the 'weekday' provided
- WeekdayAbbreviated(weekday time.Weekday) string
-
- // returns the locales abbreviated weekdays
- WeekdaysAbbreviated() []string
-
- // returns the locales narrow weekday given the 'weekday' provided
- WeekdayNarrow(weekday time.Weekday) string
-
-	// WeekdaysNarrow returns the locales narrow weekdays
- WeekdaysNarrow() []string
-
- // returns the locales short weekday given the 'weekday' provided
- WeekdayShort(weekday time.Weekday) string
-
- // returns the locales short weekdays
- WeekdaysShort() []string
-
- // returns the locales wide weekday given the 'weekday' provided
- WeekdayWide(weekday time.Weekday) string
-
- // returns the locales wide weekdays
- WeekdaysWide() []string
-
-	// The following Functions are common Formatting functions for the Translator's Locale
-
- // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
- FmtNumber(num float64, v uint64) string
-
- // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
- // NOTE: 'num' passed into FmtPercent is assumed to be in percent already
- FmtPercent(num float64, v uint64) string
-
- // returns the currency representation of 'num' with digits/precision of 'v' for locale
- FmtCurrency(num float64, v uint64, currency currency.Type) string
-
- // returns the currency representation of 'num' with digits/precision of 'v' for locale
- // in accounting notation.
- FmtAccounting(num float64, v uint64, currency currency.Type) string
-
- // returns the short date representation of 't' for locale
- FmtDateShort(t time.Time) string
-
- // returns the medium date representation of 't' for locale
- FmtDateMedium(t time.Time) string
-
- // returns the long date representation of 't' for locale
- FmtDateLong(t time.Time) string
-
- // returns the full date representation of 't' for locale
- FmtDateFull(t time.Time) string
-
- // returns the short time representation of 't' for locale
- FmtTimeShort(t time.Time) string
-
- // returns the medium time representation of 't' for locale
- FmtTimeMedium(t time.Time) string
-
- // returns the long time representation of 't' for locale
- FmtTimeLong(t time.Time) string
-
- // returns the full time representation of 't' for locale
- FmtTimeFull(t time.Time) string
-}
-
-// String returns the string value of PluralRule
-func (p PluralRule) String() string {
-
- switch p {
- case PluralRuleZero:
- return pluralsString[7:11]
- case PluralRuleOne:
- return pluralsString[11:14]
- case PluralRuleTwo:
- return pluralsString[14:17]
- case PluralRuleFew:
- return pluralsString[17:20]
- case PluralRuleMany:
- return pluralsString[20:24]
- case PluralRuleOther:
- return pluralsString[24:]
- default:
- return pluralsString[:7]
- }
-}
-
-//
-// Precision Notes:
-//
-// must specify a precision >= 0, and here is why https://play.golang.org/p/LyL90U0Vyh
-//
-// v := float64(3.141)
-// i := float64(int64(v))
-//
-// fmt.Println(v - i)
-//
-// or
-//
-// s := strconv.FormatFloat(v-i, 'f', -1, 64)
-// fmt.Println(s)
-//
-// these will not print what you'd expect: 0.14100000000000001
-// and so this library requires a precision to be specified, or
-// inaccurate plural rules could be applied.
-//
-//
-//
-// n - absolute value of the source number (integer and decimals).
-// i - integer digits of n.
-// v - number of visible fraction digits in n, with trailing zeros.
-// w - number of visible fraction digits in n, without trailing zeros.
-// f - visible fractional digits in n, with trailing zeros.
-// t - visible fractional digits in n, without trailing zeros.
-//
-//
-// Func(num float64, v uint64) // v = digits/precision and prevents -1 as a special case as this can lead to very unexpected behaviour, see precision notes above.
-//
-// n := math.Abs(num)
-// i := int64(n)
-// v := v
-//
-//
-// w := strconv.FormatFloat(num-float64(i), 'f', int(v), 64) // then parse backwards on string until no more zero's....
-// f := strconv.FormatFloat(n, 'f', int(v), 64) // then turn everything after decimal into an int64
-// t := strconv.FormatFloat(n, 'f', int(v), 64) // then parse backwards on string until no more zero's....
-//
-//
-//
-// General Inclusion Rules
-// - v will always be available inherently
-// - all require n
-// - w requires i
-//
-
-// W returns the number of visible fraction digits in N, without trailing zeros.
-func W(n float64, v uint64) (w int64) {
-
- s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
-
-	// will either be '0' or '0.xxxx', so if the length is 1 then w will be zero
- // otherwise need to parse
- if len(s) != 1 {
-
- s = s[2:]
- end := len(s) + 1
-
- for i := end; i >= 0; i-- {
- if s[i] != '0' {
- end = i + 1
- break
- }
- }
-
- w = int64(len(s[:end]))
- }
-
- return
-}
-
-// F returns the visible fractional digits in N, with trailing zeros.
-func F(n float64, v uint64) (f int64) {
-
- s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
-
-	// will either be '0' or '0.xxxx', so if the length is 1 then f will be zero
- // otherwise need to parse
- if len(s) != 1 {
-
- // ignoring error, because it can't fail as we generated
- // the string internally from a real number
- f, _ = strconv.ParseInt(s[2:], 10, 64)
- }
-
- return
-}
-
-// T returns the visible fractional digits in N, without trailing zeros.
-func T(n float64, v uint64) (t int64) {
-
- s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
-
-	// will either be '0' or '0.xxxx', so if the length is 1 then t will be zero
- // otherwise need to parse
- if len(s) != 1 {
-
- s = s[2:]
- end := len(s) + 1
-
- for i := end; i >= 0; i-- {
- if s[i] != '0' {
- end = i + 1
- break
- }
- }
-
- // ignoring error, because it can't fail as we generated
- // the string internally from a real number
- t, _ = strconv.ParseInt(s[:end], 10, 64)
- }
-
- return
-}
diff --git a/vendor/github.com/go-playground/universal-translator/.gitignore b/vendor/github.com/go-playground/universal-translator/.gitignore
deleted file mode 100644
index bc4e07f34e..0000000000
--- a/vendor/github.com/go-playground/universal-translator/.gitignore
+++ /dev/null
@@ -1,25 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-*.coverprofile
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/universal-translator/.travis.yml b/vendor/github.com/go-playground/universal-translator/.travis.yml
deleted file mode 100644
index 39b8b923e4..0000000000
--- a/vendor/github.com/go-playground/universal-translator/.travis.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-language: go
-go:
- - 1.13.4
- - tip
-matrix:
- allow_failures:
- - go: tip
-
-notifications:
- email:
- recipients: dean.karn@gmail.com
- on_success: change
- on_failure: always
-
-before_install:
- - go install github.com/mattn/goveralls
-
-# Only clone the most recent commit.
-git:
- depth: 1
-
-script:
- - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
-
-after_success: |
- [ $TRAVIS_GO_VERSION = 1.13.4 ] &&
- goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/universal-translator/LICENSE b/vendor/github.com/go-playground/universal-translator/LICENSE
deleted file mode 100644
index 8d8aba15ba..0000000000
--- a/vendor/github.com/go-playground/universal-translator/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2016 Go Playground
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/go-playground/universal-translator/Makefile b/vendor/github.com/go-playground/universal-translator/Makefile
deleted file mode 100644
index ec3455bd59..0000000000
--- a/vendor/github.com/go-playground/universal-translator/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-GOCMD=GO111MODULE=on go
-
-linters-install:
- @golangci-lint --version >/dev/null 2>&1 || { \
- echo "installing linting tools..."; \
- curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.41.1; \
- }
-
-lint: linters-install
- golangci-lint run
-
-test:
- $(GOCMD) test -cover -race ./...
-
-bench:
- $(GOCMD) test -bench=. -benchmem ./...
-
-.PHONY: test lint linters-install
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/universal-translator/README.md b/vendor/github.com/go-playground/universal-translator/README.md
deleted file mode 100644
index d9b6654741..0000000000
--- a/vendor/github.com/go-playground/universal-translator/README.md
+++ /dev/null
@@ -1,87 +0,0 @@
-## universal-translator
-![Project status](https://img.shields.io/badge/version-0.18.1-green.svg)
-[![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator)
-[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator)
-[![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator)
-![License](https://img.shields.io/dub/l/vibe-d.svg)
-
-Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules
-
-Why another i18n library?
---------------------------
-Because none of the plural rules out there seem to be correct, including the previous implementation of this package,
-I took it upon myself to create [locales](https://github.com/go-playground/locales) for everyone to use; this package
-is a thin wrapper around [locales](https://github.com/go-playground/locales) in order to store and translate text for
-use in your applications.
-
-Features
---------
-- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v36.0.1
-- [x] Contains Cardinal, Ordinal and Range Plural Rules
-- [x] Contains Month, Weekday and Timezone translations built in
-- [x] Contains Date & Time formatting functions
-- [x] Contains Number, Currency, Accounting and Percent formatting functions
-- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere )
-- [x] Support loading translations from files
-- [x] Exporting translations to file(s), mainly for getting them professionally translated
-- [ ] Code generation for translation files -> Go code, i.e. after they have been professionally translated
-- [ ] Tests for all languages, I need help with this, please see [here](https://github.com/go-playground/locales/issues/1)
-
-Installation
------------
-
-Use go get
-
-```shell
-go get github.com/go-playground/universal-translator
-```
-
-Usage & Documentation
--------
-
-Please see https://godoc.org/github.com/go-playground/universal-translator for usage docs
-
-##### Examples:
-
-- [Basic](https://github.com/go-playground/universal-translator/tree/master/_examples/basic)
-- [Full - no files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-no-files)
-- [Full - with files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-with-files)
-
-File formatting
---------------
-All translation types (plain substitution, Cardinal, Ordinal and Range) can be contained within the same file(s);
-they are only separated here for easy viewing.
-
-##### Examples:
-
-- [Formats](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats)
-
-##### Basic Makeup
-NOTE: not all fields are needed for all translation types, see [examples](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats)
-```json
-{
- "locale": "en",
- "key": "days-left",
- "trans": "You have {0} day left.",
- "type": "Cardinal",
- "rule": "One",
- "override": false
-}
-```
-|Field|Description|
-|---|---|
-|locale|The locale for which the translation is for.|
-|key|The translation key that will be used to store and lookup each translation; normally it is a string or integer.|
-|trans|The actual translation text.|
-|type|The type of translation: Cardinal, Ordinal, Range or "" for a plain substitution (not required if a plain substitution is used)|
-|rule|The plural rule the translation is for, e.g. One, Two, Few, Many or Other (not required if a plain substitution is used)|
-|override|If you wish to override an existing translation that has already been registered, set this to 'true'. 99% of the time there is no need to define it.|
-
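-As a rough, non-authoritative sketch (added for illustration, not from the original docs), loading such a file at
-start-up and using the `days-left` translation from the example above might look like this; the `./translations`
-directory name is an assumption made for this example.
-
-```go
-package main
-
-import (
-	"fmt"
-	"log"
-
-	"github.com/go-playground/locales/en"
-	ut "github.com/go-playground/universal-translator"
-)
-
-func main() {
-	enLocale := en.New()
-	uni := ut.New(enLocale, enLocale)
-
-	// import every *.json translation file found in ./translations (assumed path)
-	if err := uni.Import(ut.FormatJSON, "./translations"); err != nil {
-		log.Fatal(err)
-	}
-
-	trans, _ := uni.GetTranslator("en")
-
-	// "days-left" is the cardinal translation defined in the JSON example above
-	msg, err := trans.C("days-left", 1, 0, "1")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	fmt.Println(msg) // You have 1 day left.
-}
-```
-
-After importing, `VerifyTranslations()` can additionally be called on the `UniversalTranslator` to confirm that every
-plural rule required by each locale (for 'en', both One and Other) has a registered translation.
-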
-Help With Tests
----------------
-To anyone interested in helping or contributing, I sure could use some help creating tests for each language.
-Please see issue [here](https://github.com/go-playground/locales/issues/1) for details.
-
-License
-------
-Distributed under MIT License, please see license file in code for more details.
diff --git a/vendor/github.com/go-playground/universal-translator/errors.go b/vendor/github.com/go-playground/universal-translator/errors.go
deleted file mode 100644
index 38b163b626..0000000000
--- a/vendor/github.com/go-playground/universal-translator/errors.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package ut
-
-import (
- "errors"
- "fmt"
-
- "github.com/go-playground/locales"
-)
-
-var (
- // ErrUnknowTranslation indicates the translation could not be found
- ErrUnknowTranslation = errors.New("Unknown Translation")
-)
-
-var _ error = new(ErrConflictingTranslation)
-var _ error = new(ErrRangeTranslation)
-var _ error = new(ErrOrdinalTranslation)
-var _ error = new(ErrCardinalTranslation)
-var _ error = new(ErrMissingPluralTranslation)
-var _ error = new(ErrExistingTranslator)
-
-// ErrExistingTranslator is the error representing a conflicting translator
-type ErrExistingTranslator struct {
- locale string
-}
-
-// Error returns ErrExistingTranslator's internal error text
-func (e *ErrExistingTranslator) Error() string {
- return fmt.Sprintf("error: conflicting translator for locale '%s'", e.locale)
-}
-
-// ErrConflictingTranslation is the error representing a conflicting translation
-type ErrConflictingTranslation struct {
- locale string
- key interface{}
- rule locales.PluralRule
- text string
-}
-
-// Error returns ErrConflictingTranslation's internal error text
-func (e *ErrConflictingTranslation) Error() string {
-
- if _, ok := e.key.(string); !ok {
- return fmt.Sprintf("error: conflicting key '%#v' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale)
- }
-
- return fmt.Sprintf("error: conflicting key '%s' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale)
-}
-
-// ErrRangeTranslation is the error representing a range translation error
-type ErrRangeTranslation struct {
- text string
-}
-
-// Error returns ErrRangeTranslation's internal error text
-func (e *ErrRangeTranslation) Error() string {
- return e.text
-}
-
-// ErrOrdinalTranslation is the error representing an ordinal translation error
-type ErrOrdinalTranslation struct {
- text string
-}
-
-// Error returns ErrOrdinalTranslation's internal error text
-func (e *ErrOrdinalTranslation) Error() string {
- return e.text
-}
-
-// ErrCardinalTranslation is the error representing a cardinal translation error
-type ErrCardinalTranslation struct {
- text string
-}
-
-// Error returns ErrCardinalTranslation's internal error text
-func (e *ErrCardinalTranslation) Error() string {
- return e.text
-}
-
-// ErrMissingPluralTranslation is the error signifying a missing translation given
-// the locales plural rules.
-type ErrMissingPluralTranslation struct {
- locale string
- key interface{}
- rule locales.PluralRule
- translationType string
-}
-
-// Error returns ErrMissingPluralTranslation's internal error text
-func (e *ErrMissingPluralTranslation) Error() string {
-
- if _, ok := e.key.(string); !ok {
- return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%#v' and locale '%s'", e.translationType, e.rule, e.key, e.locale)
- }
-
- return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%s' and locale '%s'", e.translationType, e.rule, e.key, e.locale)
-}
-
-// ErrMissingBracket is the error representing a missing bracket in a translation
-// eg. This is a {0 <-- missing ending '}'
-type ErrMissingBracket struct {
- locale string
- key interface{}
- text string
-}
-
-// Error returns ErrMissingBracket error message
-func (e *ErrMissingBracket) Error() string {
- return fmt.Sprintf("error: missing bracket '{}', in translation. locale: '%s' key: '%v' text: '%s'", e.locale, e.key, e.text)
-}
-
-// ErrBadParamSyntax is the error representing a bad parameter definition in a translation
-// eg. This is a {must-be-int}
-type ErrBadParamSyntax struct {
- locale string
- param string
- key interface{}
- text string
-}
-
-// Error returns ErrBadParamSyntax error message
-func (e *ErrBadParamSyntax) Error() string {
- return fmt.Sprintf("error: bad parameter syntax, missing parameter '%s' in translation. locale: '%s' key: '%v' text: '%s'", e.param, e.locale, e.key, e.text)
-}
-
-// import/export errors
-
-// ErrMissingLocale is the error representing an expected locale that could
-// not be found aka locale not registered with the UniversalTranslator Instance
-type ErrMissingLocale struct {
- locale string
-}
-
-// Error returns ErrMissingLocale's internal error text
-func (e *ErrMissingLocale) Error() string {
- return fmt.Sprintf("error: locale '%s' not registered.", e.locale)
-}
-
-// ErrBadPluralDefinition is the error representing an incorrect plural definition
-// usually found within translations defined within files during the import process.
-type ErrBadPluralDefinition struct {
- tl translation
-}
-
-// Error returns ErrBadPluralDefinition's internal error text
-func (e *ErrBadPluralDefinition) Error() string {
- return fmt.Sprintf("error: bad plural definition '%#v'", e.tl)
-}
diff --git a/vendor/github.com/go-playground/universal-translator/import_export.go b/vendor/github.com/go-playground/universal-translator/import_export.go
deleted file mode 100644
index 87a1b465cb..0000000000
--- a/vendor/github.com/go-playground/universal-translator/import_export.go
+++ /dev/null
@@ -1,274 +0,0 @@
-package ut
-
-import (
- "encoding/json"
- "fmt"
- "os"
- "path/filepath"
-
- "io"
-
- "github.com/go-playground/locales"
-)
-
-type translation struct {
- Locale string `json:"locale"`
- Key interface{} `json:"key"` // either string or integer
- Translation string `json:"trans"`
- PluralType string `json:"type,omitempty"`
- PluralRule string `json:"rule,omitempty"`
- OverrideExisting bool `json:"override,omitempty"`
-}
-
-const (
- cardinalType = "Cardinal"
- ordinalType = "Ordinal"
- rangeType = "Range"
-)
-
-// ImportExportFormat is the format of the file import or export
-type ImportExportFormat uint8
-
-// supported Export Formats
-const (
- FormatJSON ImportExportFormat = iota
-)
-
-// Export writes the translations out to a file on disk.
-//
-// NOTE: this currently only works with string or int translations keys.
-func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error {
-
- _, err := os.Stat(dirname)
- if err != nil {
-
- if !os.IsNotExist(err) {
- return err
- }
-
- if err = os.MkdirAll(dirname, 0744); err != nil {
- return err
- }
- }
-
- // build up translations
- var trans []translation
- var b []byte
- var ext string
-
- for _, locale := range t.translators {
-
- for k, v := range locale.(*translator).translations {
- trans = append(trans, translation{
- Locale: locale.Locale(),
- Key: k,
- Translation: v.text,
- })
- }
-
- for k, pluralTrans := range locale.(*translator).cardinalTanslations {
-
- for i, plural := range pluralTrans {
-
- // leave enough for all plural rules
- // but not all are set for all languages.
- if plural == nil {
- continue
- }
-
- trans = append(trans, translation{
- Locale: locale.Locale(),
- Key: k.(string),
- Translation: plural.text,
- PluralType: cardinalType,
- PluralRule: locales.PluralRule(i).String(),
- })
- }
- }
-
- for k, pluralTrans := range locale.(*translator).ordinalTanslations {
-
- for i, plural := range pluralTrans {
-
- // leave enough for all plural rules
- // but not all are set for all languages.
- if plural == nil {
- continue
- }
-
- trans = append(trans, translation{
- Locale: locale.Locale(),
- Key: k.(string),
- Translation: plural.text,
- PluralType: ordinalType,
- PluralRule: locales.PluralRule(i).String(),
- })
- }
- }
-
- for k, pluralTrans := range locale.(*translator).rangeTanslations {
-
- for i, plural := range pluralTrans {
-
- // leave enough for all plural rules
- // but not all are set for all languages.
- if plural == nil {
- continue
- }
-
- trans = append(trans, translation{
- Locale: locale.Locale(),
- Key: k.(string),
- Translation: plural.text,
- PluralType: rangeType,
- PluralRule: locales.PluralRule(i).String(),
- })
- }
- }
-
- switch format {
- case FormatJSON:
- b, err = json.MarshalIndent(trans, "", " ")
- ext = ".json"
- }
-
- if err != nil {
- return err
- }
-
- err = os.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644)
- if err != nil {
- return err
- }
-
- trans = trans[0:0]
- }
-
- return nil
-}
-
-// Import reads the translations out of a file or directory on disk.
-//
-// NOTE: this currently only works with string or int translations keys.
-func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilename string) error {
-
- fi, err := os.Stat(dirnameOrFilename)
- if err != nil {
- return err
- }
-
- processFn := func(filename string) error {
-
- f, err := os.Open(filename)
- if err != nil {
- return err
- }
- defer f.Close()
-
- return t.ImportByReader(format, f)
- }
-
- if !fi.IsDir() {
- return processFn(dirnameOrFilename)
- }
-
- // recursively go through directory
- walker := func(path string, info os.FileInfo, err error) error {
-
- if info.IsDir() {
- return nil
- }
-
- switch format {
- case FormatJSON:
- // skip non JSON files
- if filepath.Ext(info.Name()) != ".json" {
- return nil
- }
- }
-
- return processFn(path)
- }
-
- return filepath.Walk(dirnameOrFilename, walker)
-}
-
-// ImportByReader imports the translations found within the contents read from the supplied reader.
-//
-// NOTE: generally used when assets have been embedded into the binary and are already in memory.
-func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error {
-
- b, err := io.ReadAll(reader)
- if err != nil {
- return err
- }
-
- var trans []translation
-
- switch format {
- case FormatJSON:
- err = json.Unmarshal(b, &trans)
- }
-
- if err != nil {
- return err
- }
-
- for _, tl := range trans {
-
- locale, found := t.FindTranslator(tl.Locale)
- if !found {
- return &ErrMissingLocale{locale: tl.Locale}
- }
-
- pr := stringToPR(tl.PluralRule)
-
- if pr == locales.PluralRuleUnknown {
-
- err = locale.Add(tl.Key, tl.Translation, tl.OverrideExisting)
- if err != nil {
- return err
- }
-
- continue
- }
-
- switch tl.PluralType {
- case cardinalType:
- err = locale.AddCardinal(tl.Key, tl.Translation, pr, tl.OverrideExisting)
- case ordinalType:
- err = locale.AddOrdinal(tl.Key, tl.Translation, pr, tl.OverrideExisting)
- case rangeType:
- err = locale.AddRange(tl.Key, tl.Translation, pr, tl.OverrideExisting)
- default:
- return &ErrBadPluralDefinition{tl: tl}
- }
-
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func stringToPR(s string) locales.PluralRule {
-
- switch s {
- case "Zero":
- return locales.PluralRuleZero
- case "One":
- return locales.PluralRuleOne
- case "Two":
- return locales.PluralRuleTwo
- case "Few":
- return locales.PluralRuleFew
- case "Many":
- return locales.PluralRuleMany
- case "Other":
- return locales.PluralRuleOther
- default:
- return locales.PluralRuleUnknown
- }
-
-}
diff --git a/vendor/github.com/go-playground/universal-translator/logo.png b/vendor/github.com/go-playground/universal-translator/logo.png
deleted file mode 100644
index a37aa8c0cd..0000000000
Binary files a/vendor/github.com/go-playground/universal-translator/logo.png and /dev/null differ
diff --git a/vendor/github.com/go-playground/universal-translator/translator.go b/vendor/github.com/go-playground/universal-translator/translator.go
deleted file mode 100644
index 24b18db92a..0000000000
--- a/vendor/github.com/go-playground/universal-translator/translator.go
+++ /dev/null
@@ -1,420 +0,0 @@
-package ut
-
-import (
- "fmt"
- "strconv"
- "strings"
-
- "github.com/go-playground/locales"
-)
-
-const (
- paramZero = "{0}"
- paramOne = "{1}"
- unknownTranslation = ""
-)
-
-// Translator is universal translators
-// translator instance which is a thin wrapper
-// around locales.Translator instance providing
-// some extra functionality
-type Translator interface {
- locales.Translator
-
- // adds a normal translation for a particular language/locale
- // {#} is the only replacement type accepted and are ad infinitum
- // eg. one: '{0} day left' other: '{0} days left'
- Add(key interface{}, text string, override bool) error
-
- // adds a cardinal plural translation for a particular language/locale
- // {0} is the only replacement type accepted and only one variable is accepted as
- // multiple cannot be used for a plural rule determination, unless it is a range;
- // see AddRange below.
- // eg. in locale 'en' one: '{0} day left' other: '{0} days left'
- AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error
-
- // adds an ordinal plural translation for a particular language/locale
- // {0} is the only replacement type accepted and only one variable is accepted as
- // multiple cannot be used for a plural rule determination, unless it is a range;
- // see AddRange below.
- // eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring'
- // - 1st, 2nd, 3rd...
- AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error
-
- // adds a range plural translation for a particular language/locale
- // {0} and {1} are the only replacement types accepted and only these are accepted.
- // eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left'
- AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error
-
- // creates the translation for the locale given the 'key' and params passed in
- T(key interface{}, params ...string) (string, error)
-
- // creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments
- // and param passed in
- C(key interface{}, num float64, digits uint64, param string) (string, error)
-
- // creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments
- // and param passed in
- O(key interface{}, num float64, digits uint64, param string) (string, error)
-
- // creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and
- // 'digit2' arguments and 'param1' and 'param2' passed in
- R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error)
-
-	// VerifyTranslations checks to ensure that no plural rules have been
- // missed within the translations.
- VerifyTranslations() error
-}
-
-var _ Translator = new(translator)
-var _ locales.Translator = new(translator)
-
-type translator struct {
- locales.Translator
- translations map[interface{}]*transText
- cardinalTanslations map[interface{}][]*transText // array index is mapped to locales.PluralRule index + the locales.PluralRuleUnknown
- ordinalTanslations map[interface{}][]*transText
- rangeTanslations map[interface{}][]*transText
-}
-
-type transText struct {
- text string
- indexes []int
-}
-
-func newTranslator(trans locales.Translator) Translator {
- return &translator{
- Translator: trans,
- translations: make(map[interface{}]*transText), // translation text broken up by byte index
- cardinalTanslations: make(map[interface{}][]*transText),
- ordinalTanslations: make(map[interface{}][]*transText),
- rangeTanslations: make(map[interface{}][]*transText),
- }
-}
-
-// Add adds a normal translation for a particular language/locale
-// {#} is the only replacement type accepted and are ad infinitum
-// eg. one: '{0} day left' other: '{0} days left'
-func (t *translator) Add(key interface{}, text string, override bool) error {
-
- if _, ok := t.translations[key]; ok && !override {
- return &ErrConflictingTranslation{locale: t.Locale(), key: key, text: text}
- }
-
- lb := strings.Count(text, "{")
- rb := strings.Count(text, "}")
-
- if lb != rb {
- return &ErrMissingBracket{locale: t.Locale(), key: key, text: text}
- }
-
- trans := &transText{
- text: text,
- }
-
- var idx int
-
- for i := 0; i < lb; i++ {
- s := "{" + strconv.Itoa(i) + "}"
- idx = strings.Index(text, s)
- if idx == -1 {
- return &ErrBadParamSyntax{locale: t.Locale(), param: s, key: key, text: text}
- }
-
- trans.indexes = append(trans.indexes, idx)
- trans.indexes = append(trans.indexes, idx+len(s))
- }
-
- t.translations[key] = trans
-
- return nil
-}
-
-// AddCardinal adds a cardinal plural translation for a particular language/locale
-// {0} is the only replacement type accepted and only one variable is accepted as
-// multiple cannot be used for a plural rule determination, unless it is a range;
-// see AddRange below.
-// eg. in locale 'en' one: '{0} day left' other: '{0} days left'
-func (t *translator) AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error {
-
- var verified bool
-
- // verify plural rule exists for locale
- for _, pr := range t.PluralsCardinal() {
- if pr == rule {
- verified = true
- break
- }
- }
-
- if !verified {
- return &ErrCardinalTranslation{text: fmt.Sprintf("error: cardinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
- }
-
- tarr, ok := t.cardinalTanslations[key]
- if ok {
- // verify not adding a conflicting record
- if len(tarr) > 0 && tarr[rule] != nil && !override {
- return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
- }
-
- } else {
- tarr = make([]*transText, 7)
- t.cardinalTanslations[key] = tarr
- }
-
- trans := &transText{
- text: text,
- indexes: make([]int, 2),
- }
-
- tarr[rule] = trans
-
- idx := strings.Index(text, paramZero)
- if idx == -1 {
- tarr[rule] = nil
- return &ErrCardinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddCardinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
- }
-
- trans.indexes[0] = idx
- trans.indexes[1] = idx + len(paramZero)
-
- return nil
-}
-
-// AddOrdinal adds an ordinal plural translation for a particular language/locale
-// {0} is the only replacement type accepted and only one variable is accepted as
-// multiple cannot be used for a plural rule determination, unless it is a range;
-// see AddRange below.
-// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' - 1st, 2nd, 3rd...
-func (t *translator) AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error {
-
- var verified bool
-
- // verify plural rule exists for locale
- for _, pr := range t.PluralsOrdinal() {
- if pr == rule {
- verified = true
- break
- }
- }
-
- if !verified {
- return &ErrOrdinalTranslation{text: fmt.Sprintf("error: ordinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
- }
-
- tarr, ok := t.ordinalTanslations[key]
- if ok {
- // verify not adding a conflicting record
- if len(tarr) > 0 && tarr[rule] != nil && !override {
- return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
- }
-
- } else {
- tarr = make([]*transText, 7)
- t.ordinalTanslations[key] = tarr
- }
-
- trans := &transText{
- text: text,
- indexes: make([]int, 2),
- }
-
- tarr[rule] = trans
-
- idx := strings.Index(text, paramZero)
- if idx == -1 {
- tarr[rule] = nil
- return &ErrOrdinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddOrdinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
- }
-
- trans.indexes[0] = idx
- trans.indexes[1] = idx + len(paramZero)
-
- return nil
-}
-
-// AddRange adds a range plural translation for a particular language/locale
-// {0} and {1} are the only replacement types accepted and only these are accepted.
-// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left'
-func (t *translator) AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error {
-
- var verified bool
-
- // verify plural rule exists for locale
- for _, pr := range t.PluralsRange() {
- if pr == rule {
- verified = true
- break
- }
- }
-
- if !verified {
- return &ErrRangeTranslation{text: fmt.Sprintf("error: range plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
- }
-
- tarr, ok := t.rangeTanslations[key]
- if ok {
- // verify not adding a conflicting record
- if len(tarr) > 0 && tarr[rule] != nil && !override {
- return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
- }
-
- } else {
- tarr = make([]*transText, 7)
- t.rangeTanslations[key] = tarr
- }
-
- trans := &transText{
- text: text,
- indexes: make([]int, 4),
- }
-
- tarr[rule] = trans
-
- idx := strings.Index(text, paramZero)
- if idx == -1 {
- tarr[rule] = nil
- return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, are you sure you're adding a Range Translation? locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
- }
-
- trans.indexes[0] = idx
- trans.indexes[1] = idx + len(paramZero)
-
- idx = strings.Index(text, paramOne)
- if idx == -1 {
- tarr[rule] = nil
- return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, a Range Translation requires two parameters. locale: '%s' key: '%v' text: '%s'", paramOne, t.Locale(), key, text)}
- }
-
- trans.indexes[2] = idx
- trans.indexes[3] = idx + len(paramOne)
-
- return nil
-}
-
-// T creates the translation for the locale given the 'key' and params passed in
-func (t *translator) T(key interface{}, params ...string) (string, error) {
-
- trans, ok := t.translations[key]
- if !ok {
- return unknownTranslation, ErrUnknowTranslation
- }
-
- b := make([]byte, 0, 64)
-
- var start, end, count int
-
- for i := 0; i < len(trans.indexes); i++ {
- end = trans.indexes[i]
- b = append(b, trans.text[start:end]...)
- b = append(b, params[count]...)
- i++
- start = trans.indexes[i]
- count++
- }
-
- b = append(b, trans.text[start:]...)
-
- return string(b), nil
-}
-
-// C creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in
-func (t *translator) C(key interface{}, num float64, digits uint64, param string) (string, error) {
-
- tarr, ok := t.cardinalTanslations[key]
- if !ok {
- return unknownTranslation, ErrUnknowTranslation
- }
-
- rule := t.CardinalPluralRule(num, digits)
-
- trans := tarr[rule]
-
- b := make([]byte, 0, 64)
- b = append(b, trans.text[:trans.indexes[0]]...)
- b = append(b, param...)
- b = append(b, trans.text[trans.indexes[1]:]...)
-
- return string(b), nil
-}
-
-// O creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in
-func (t *translator) O(key interface{}, num float64, digits uint64, param string) (string, error) {
-
- tarr, ok := t.ordinalTanslations[key]
- if !ok {
- return unknownTranslation, ErrUnknowTranslation
- }
-
- rule := t.OrdinalPluralRule(num, digits)
-
- trans := tarr[rule]
-
- b := make([]byte, 0, 64)
- b = append(b, trans.text[:trans.indexes[0]]...)
- b = append(b, param...)
- b = append(b, trans.text[trans.indexes[1]:]...)
-
- return string(b), nil
-}
-
-// R creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and 'digit2' arguments
-// and 'param1' and 'param2' passed in
-func (t *translator) R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) {
-
- tarr, ok := t.rangeTanslations[key]
- if !ok {
- return unknownTranslation, ErrUnknowTranslation
- }
-
- rule := t.RangePluralRule(num1, digits1, num2, digits2)
-
- trans := tarr[rule]
-
- b := make([]byte, 0, 64)
- b = append(b, trans.text[:trans.indexes[0]]...)
- b = append(b, param1...)
- b = append(b, trans.text[trans.indexes[1]:trans.indexes[2]]...)
- b = append(b, param2...)
- b = append(b, trans.text[trans.indexes[3]:]...)
-
- return string(b), nil
-}
-
-// VerifyTranslations checks to ensure that no plural rules have been
-// missed within the translations.
-func (t *translator) VerifyTranslations() error {
-
- for k, v := range t.cardinalTanslations {
-
- for _, rule := range t.PluralsCardinal() {
-
- if v[rule] == nil {
- return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "plural", rule: rule, key: k}
- }
- }
- }
-
- for k, v := range t.ordinalTanslations {
-
- for _, rule := range t.PluralsOrdinal() {
-
- if v[rule] == nil {
- return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "ordinal", rule: rule, key: k}
- }
- }
- }
-
- for k, v := range t.rangeTanslations {
-
- for _, rule := range t.PluralsRange() {
-
- if v[rule] == nil {
- return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "range", rule: rule, key: k}
- }
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-playground/universal-translator/universal_translator.go b/vendor/github.com/go-playground/universal-translator/universal_translator.go
deleted file mode 100644
index dbf707f5c7..0000000000
--- a/vendor/github.com/go-playground/universal-translator/universal_translator.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package ut
-
-import (
- "strings"
-
- "github.com/go-playground/locales"
-)
-
-// UniversalTranslator holds all locale & translation data
-type UniversalTranslator struct {
- translators map[string]Translator
- fallback Translator
-}
-
-// New returns a new UniversalTranslator instance set with
-// the fallback locale and locales it should support
-func New(fallback locales.Translator, supportedLocales ...locales.Translator) *UniversalTranslator {
-
- t := &UniversalTranslator{
- translators: make(map[string]Translator),
- }
-
- for _, v := range supportedLocales {
-
- trans := newTranslator(v)
- t.translators[strings.ToLower(trans.Locale())] = trans
-
- if fallback.Locale() == v.Locale() {
- t.fallback = trans
- }
- }
-
- if t.fallback == nil && fallback != nil {
- t.fallback = newTranslator(fallback)
- }
-
- return t
-}
-
-// FindTranslator tries to find a Translator based on an array of locales
-// and returns the first one it can find, otherwise returns the
-// fallback translator.
-func (t *UniversalTranslator) FindTranslator(locales ...string) (trans Translator, found bool) {
-
- for _, locale := range locales {
-
- if trans, found = t.translators[strings.ToLower(locale)]; found {
- return
- }
- }
-
- return t.fallback, false
-}
-
-// GetTranslator returns the specified translator for the given locale,
-// or fallback if not found
-func (t *UniversalTranslator) GetTranslator(locale string) (trans Translator, found bool) {
-
- if trans, found = t.translators[strings.ToLower(locale)]; found {
- return
- }
-
- return t.fallback, false
-}
-
-// GetFallback returns the fallback locale
-func (t *UniversalTranslator) GetFallback() Translator {
- return t.fallback
-}
-
-// AddTranslator adds the supplied translator, if it already exists the override param
-// will be checked and if false an error will be returned, otherwise the translator will be
-// overridden; if the fallback matches the supplied translator it will be overridden as well
-// NOTE: this is normally only used when translator is embedded within a library
-func (t *UniversalTranslator) AddTranslator(translator locales.Translator, override bool) error {
-
- lc := strings.ToLower(translator.Locale())
- _, ok := t.translators[lc]
- if ok && !override {
- return &ErrExistingTranslator{locale: translator.Locale()}
- }
-
- trans := newTranslator(translator)
-
- if t.fallback.Locale() == translator.Locale() {
-
- // because it's optional to have a fallback, I don't impose that limitation
- // don't know why you wouldn't but...
- if !override {
- return &ErrExistingTranslator{locale: translator.Locale()}
- }
-
- t.fallback = trans
- }
-
- t.translators[lc] = trans
-
- return nil
-}
-
-// VerifyTranslations runs through all locales and identifies any issues
-// eg. missing plural rules for a locale
-func (t *UniversalTranslator) VerifyTranslations() (err error) {
-
- for _, trans := range t.translators {
- err = trans.VerifyTranslations()
- if err != nil {
- return
- }
- }
-
- return
-}
diff --git a/vendor/github.com/go-playground/validator/v10/.gitignore b/vendor/github.com/go-playground/validator/v10/.gitignore
deleted file mode 100644
index 2410a91b95..0000000000
--- a/vendor/github.com/go-playground/validator/v10/.gitignore
+++ /dev/null
@@ -1,31 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-bin
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-*.test
-*.out
-*.txt
-cover.html
-README.html
-.idea
diff --git a/vendor/github.com/go-playground/validator/v10/LICENSE b/vendor/github.com/go-playground/validator/v10/LICENSE
deleted file mode 100644
index 6a2ae9aa4d..0000000000
--- a/vendor/github.com/go-playground/validator/v10/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Dean Karn
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md b/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md
deleted file mode 100644
index b809c4ce12..0000000000
--- a/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md
+++ /dev/null
@@ -1,16 +0,0 @@
-## Maintainers Guide
-
-### Semantic Versioning
-Semantic versioning as defined [here](https://semver.org) must be strictly adhered to.
-
-### External Dependencies
-Any new external dependencies MUST:
-- Have a compatible LICENSE present.
-- Be actively maintained.
-- Be approved by @go-playground/admins
-
-### PR Merge Requirements
-- Up-to-date branch.
-- Passing tests and linting.
-- CODEOWNERS approval.
-- Tests that cover both the Happy and Unhappy paths.
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/validator/v10/Makefile b/vendor/github.com/go-playground/validator/v10/Makefile
deleted file mode 100644
index ec3455bd59..0000000000
--- a/vendor/github.com/go-playground/validator/v10/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-GOCMD=GO111MODULE=on go
-
-linters-install:
- @golangci-lint --version >/dev/null 2>&1 || { \
- echo "installing linting tools..."; \
- curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.41.1; \
- }
-
-lint: linters-install
- golangci-lint run
-
-test:
- $(GOCMD) test -cover -race ./...
-
-bench:
- $(GOCMD) test -bench=. -benchmem ./...
-
-.PHONY: test lint linters-install
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md
deleted file mode 100644
index f5a9b75bb3..0000000000
--- a/vendor/github.com/go-playground/validator/v10/README.md
+++ /dev/null
@@ -1,348 +0,0 @@
-Package validator
-=================
-[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-![Project status](https://img.shields.io/badge/version-10.13.0-green.svg)
-[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator)
-[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master)
-[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator)
-[![GoDoc](https://godoc.org/github.com/go-playground/validator?status.svg)](https://pkg.go.dev/github.com/go-playground/validator/v10)
-![License](https://img.shields.io/dub/l/vibe-d.svg)
-
-Package validator implements value validations for structs and individual fields based on tags.
-
-It has the following **unique** features:
-
-- Cross Field and Cross Struct validations by using validation tags or custom validators.
-- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated.
-- Ability to dive into both map keys and values for validation
-- Handles type interface by determining its underlying type prior to validation.
-- Handles custom field types such as sql driver Valuer see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29)
-- Alias validation tags, which allows for mapping of several validations to a single tag for easier defining of validations on structs
-- Extraction of custom defined Field Name e.g. can specify to extract the JSON name while validating and have it available in the resulting FieldError
-- Customizable i18n aware error messages.
-- Default validator for the [gin](https://github.com/gin-gonic/gin) web framework; upgrading from v8 to v9 in gin see [here](https://github.com/go-playground/validator/tree/master/_examples/gin-upgrading-overriding)
-
-Installation
-------------
-
-Use go get.
-
- go get github.com/go-playground/validator/v10
-
-Then import the validator package into your own code.
-
- import "github.com/go-playground/validator/v10"
-
-Error Return Value
--------
-
-Validation functions return type error
-
-They return type error to avoid the issue discussed in the following, where err is always != nil:
-
-* http://stackoverflow.com/a/29138676/3158232
-* https://github.com/go-playground/validator/issues/134
-
-Validator only ever returns InvalidValidationError (for bad validation input), nil, or ValidationErrors as type error; so in your code all you need to do is check whether the returned error is nil, optionally check whether it is an InvalidValidationError (most of the time this isn't necessary), and then type-assert it to ValidationErrors like so:
-
-```go
-err := validate.Struct(mystruct)
-validationErrors := err.(validator.ValidationErrors)
- ```
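-
-A slightly fuller, illustrative sketch of that check (not from the upstream docs); the `User` struct and its tags are
-assumptions made for this example:
-
-```go
-package main
-
-import (
-	"errors"
-	"fmt"
-
-	"github.com/go-playground/validator/v10"
-)
-
-// User is a hypothetical struct used only for this example.
-type User struct {
-	Name  string `validate:"required"`
-	Email string `validate:"required,email"`
-}
-
-func main() {
-	validate := validator.New()
-
-	err := validate.Struct(User{Email: "not-an-email"})
-	if err == nil {
-		return
-	}
-
-	// InvalidValidationError is only returned for bad input to Struct itself
-	var invalid *validator.InvalidValidationError
-	if errors.As(err, &invalid) {
-		fmt.Println("invalid value passed to Struct:", invalid)
-		return
-	}
-
-	// otherwise the error is a ValidationErrors ([]FieldError)
-	for _, fe := range err.(validator.ValidationErrors) {
-		fmt.Printf("field %q failed on the %q tag\n", fe.Field(), fe.Tag())
-	}
-}
-```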
-
-Usage and documentation
-------
-
-Please see https://pkg.go.dev/github.com/go-playground/validator/v10 for detailed usage docs.
-
-##### Examples:
-
-- [Simple](https://github.com/go-playground/validator/blob/master/_examples/simple/main.go)
-- [Custom Field Types](https://github.com/go-playground/validator/blob/master/_examples/custom/main.go)
-- [Struct Level](https://github.com/go-playground/validator/blob/master/_examples/struct-level/main.go)
-- [Translations & Custom Errors](https://github.com/go-playground/validator/blob/master/_examples/translations/main.go)
-- [Gin upgrade and/or override validator](https://github.com/go-playground/validator/tree/v9/_examples/gin-upgrading-overriding)
-- [wash - an example application putting it all together](https://github.com/bluesuncorp/wash)
-
-Baked-in Validations
-------
-
-### Fields:
-
-| Tag | Description |
-| - | - |
-| eqcsfield | Field Equals Another Field (relative)|
-| eqfield | Field Equals Another Field |
-| fieldcontains | Check the indicated characters are present in the Field |
-| fieldexcludes | Check the indicated characters are not present in the field |
-| gtcsfield | Field Greater Than Another Relative Field |
-| gtecsfield | Field Greater Than or Equal To Another Relative Field |
-| gtefield | Field Greater Than or Equal To Another Field |
-| gtfield | Field Greater Than Another Field |
-| ltcsfield | Less Than Another Relative Field |
-| ltecsfield | Less Than or Equal To Another Relative Field |
-| ltefield | Less Than or Equal To Another Field |
-| ltfield | Less Than Another Field |
-| necsfield | Field Does Not Equal Another Field (relative) |
-| nefield | Field Does Not Equal Another Field |
-
-### Network:
-
-| Tag | Description |
-| - | - |
-| cidr | Classless Inter-Domain Routing CIDR |
-| cidrv4 | Classless Inter-Domain Routing CIDRv4 |
-| cidrv6 | Classless Inter-Domain Routing CIDRv6 |
-| datauri | Data URL |
-| fqdn | Fully Qualified Domain Name (FQDN) |
-| hostname | Hostname RFC 952 |
-| hostname_port | HostPort |
-| hostname_rfc1123 | Hostname RFC 1123 |
-| ip | Internet Protocol Address IP |
-| ip4_addr | Internet Protocol Address IPv4 |
-| ip6_addr | Internet Protocol Address IPv6 |
-| ip_addr | Internet Protocol Address IP |
-| ipv4 | Internet Protocol Address IPv4 |
-| ipv6 | Internet Protocol Address IPv6 |
-| mac | Media Access Control Address MAC |
-| tcp4_addr | Transmission Control Protocol Address TCPv4 |
-| tcp6_addr | Transmission Control Protocol Address TCPv6 |
-| tcp_addr | Transmission Control Protocol Address TCP |
-| udp4_addr | User Datagram Protocol Address UDPv4 |
-| udp6_addr | User Datagram Protocol Address UDPv6 |
-| udp_addr | User Datagram Protocol Address UDP |
-| unix_addr | Unix domain socket end point Address |
-| uri | URI String |
-| url | URL String |
-| http_url | HTTP URL String |
-| url_encoded | URL Encoded |
-| urn_rfc2141 | Urn RFC 2141 String |
-
-### Strings:
-
-| Tag | Description |
-| - | - |
-| alpha | Alpha Only |
-| alphanum | Alphanumeric |
-| alphanumunicode | Alphanumeric Unicode |
-| alphaunicode | Alpha Unicode |
-| ascii | ASCII |
-| boolean | Boolean |
-| contains | Contains |
-| containsany | Contains Any |
-| containsrune | Contains Rune |
-| endsnotwith | Ends Not With |
-| endswith | Ends With |
-| excludes | Excludes |
-| excludesall | Excludes All |
-| excludesrune | Excludes Rune |
-| lowercase | Lowercase |
-| multibyte | Multi-Byte Characters |
-| number | Number |
-| numeric | Numeric |
-| printascii | Printable ASCII |
-| startsnotwith | Starts Not With |
-| startswith | Starts With |
-| uppercase | Uppercase |
-
-### Format:
-| Tag | Description |
-| - | - |
-| base64 | Base64 String |
-| base64url | Base64URL String |
-| base64rawurl | Base64RawURL String |
-| bic | Business Identifier Code (ISO 9362) |
-| bcp47_language_tag | Language tag (BCP 47) |
-| btc_addr | Bitcoin Address |
-| btc_addr_bech32 | Bitcoin Bech32 Address (segwit) |
-| credit_card | Credit Card Number |
-| mongodb | MongoDB ObjectID |
-| cron | Cron |
-| datetime | Datetime |
-| e164 | e164 formatted phone number |
-| email | E-mail String |
-| eth_addr | Ethereum Address |
-| hexadecimal | Hexadecimal String |
-| hexcolor | Hexcolor String |
-| hsl | HSL String |
-| hsla | HSLA String |
-| html | HTML Tags |
-| html_encoded | HTML Encoded |
-| isbn | International Standard Book Number |
-| isbn10 | International Standard Book Number 10 |
-| isbn13 | International Standard Book Number 13 |
-| iso3166_1_alpha2 | Two-letter country code (ISO 3166-1 alpha-2) |
-| iso3166_1_alpha3 | Three-letter country code (ISO 3166-1 alpha-3) |
-| iso3166_1_alpha_numeric | Numeric country code (ISO 3166-1 numeric) |
-| iso3166_2 | Country subdivision code (ISO 3166-2) |
-| iso4217 | Currency code (ISO 4217) |
-| json | JSON |
-| jwt | JSON Web Token (JWT) |
-| latitude | Latitude |
-| longitude | Longitude |
-| luhn_checksum | Luhn Algorithm Checksum (for strings and (u)int) |
-| postcode_iso3166_alpha2 | Postcode for the country given as a param (ISO 3166-1 alpha-2) |
-| postcode_iso3166_alpha2_field | Postcode for the country given in another field (ISO 3166-1 alpha-2) |
-| rgb | RGB String |
-| rgba | RGBA String |
-| ssn | Social Security Number SSN |
-| timezone | Timezone |
-| uuid | Universally Unique Identifier UUID |
-| uuid3 | Universally Unique Identifier UUID v3 |
-| uuid3_rfc4122 | Universally Unique Identifier UUID v3 RFC4122 |
-| uuid4 | Universally Unique Identifier UUID v4 |
-| uuid4_rfc4122 | Universally Unique Identifier UUID v4 RFC4122 |
-| uuid5 | Universally Unique Identifier UUID v5 |
-| uuid5_rfc4122 | Universally Unique Identifier UUID v5 RFC4122 |
-| uuid_rfc4122 | Universally Unique Identifier UUID RFC4122 |
-| md4 | MD4 hash |
-| md5 | MD5 hash |
-| sha256 | SHA256 hash |
-| sha384 | SHA384 hash |
-| sha512 | SHA512 hash |
-| ripemd128 | RIPEMD-128 hash |
-| ripemd160 | RIPEMD-160 hash |
-| tiger128 | TIGER128 hash |
-| tiger160 | TIGER160 hash |
-| tiger192 | TIGER192 hash |
-| semver | Semantic Versioning 2.0.0 |
-| ulid | Universally Unique Lexicographically Sortable Identifier ULID |
-| cve | Common Vulnerabilities and Exposures Identifier (CVE id) |
-
-### Comparisons:
-| Tag | Description |
-| - | - |
-| eq | Equals |
-| eq_ignore_case | Equals ignoring case |
-| gt | Greater than |
-| gte | Greater than or equal |
-| lt | Less Than |
-| lte | Less Than or Equal |
-| ne | Not Equal |
-| ne_ignore_case | Not Equal ignoring case |
-
-### Other:
-| Tag | Description |
-| - | - |
-| dir | Existing Directory |
-| dirpath | Directory Path |
-| file | Existing File |
-| filepath | File Path |
-| isdefault | Is Default |
-| len | Length |
-| max | Maximum |
-| min | Minimum |
-| oneof | One Of |
-| required | Required |
-| required_if | Required If |
-| required_unless | Required Unless |
-| required_with | Required With |
-| required_with_all | Required With All |
-| required_without | Required Without |
-| required_without_all | Required Without All |
-| excluded_if | Excluded If |
-| excluded_unless | Excluded Unless |
-| excluded_with | Excluded With |
-| excluded_with_all | Excluded With All |
-| excluded_without | Excluded Without |
-| excluded_without_all | Excluded Without All |
-| unique | Unique |
-
-#### Aliases:
-| Tag | Description |
-| - | - |
-| iscolor | hexcolor\|rgb\|rgba\|hsl\|hsla |
-| country_code | iso3166_1_alpha2\|iso3166_1_alpha3\|iso3166_1_alpha_numeric |
-
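-The tags above are applied via struct field tags and evaluated with a `Validate` instance. A minimal usage sketch, relying on the library's documented `New`/`Struct`/`ValidationErrors` API (the `User` struct and its values are purely illustrative):
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/go-playground/validator/v10"
-)
-
-// User demonstrates a handful of the baked-in tags listed above.
-type User struct {
-	Email   string `validate:"required,email"`
-	Age     uint8  `validate:"gte=18,lte=130"`
-	Country string `validate:"iso3166_1_alpha2"`
-	Color   string `validate:"iscolor"` // alias for hexcolor|rgb|rgba|hsl|hsla
-}
-
-func main() {
-	validate := validator.New()
-
-	err := validate.Struct(User{Email: "not-an-email", Age: 15, Country: "US", Color: "#fff"})
-	if err != nil {
-		// Each failing field is reported as a FieldError.
-		for _, fe := range err.(validator.ValidationErrors) {
-			fmt.Println(fe.Namespace(), fe.Tag())
-		}
-	}
-}
-```
-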
-Benchmarks
-------
-###### Run on MacBook Pro (15-inch, 2017) go version go1.10.2 darwin/amd64
-```go
-goos: darwin
-goarch: amd64
-pkg: github.com/go-playground/validator
-BenchmarkFieldSuccess-8 20000000 83.6 ns/op 0 B/op 0 allocs/op
-BenchmarkFieldSuccessParallel-8 50000000 26.8 ns/op 0 B/op 0 allocs/op
-BenchmarkFieldFailure-8 5000000 291 ns/op 208 B/op 4 allocs/op
-BenchmarkFieldFailureParallel-8 20000000 107 ns/op 208 B/op 4 allocs/op
-BenchmarkFieldArrayDiveSuccess-8 2000000 623 ns/op 201 B/op 11 allocs/op
-BenchmarkFieldArrayDiveSuccessParallel-8 10000000 237 ns/op 201 B/op 11 allocs/op
-BenchmarkFieldArrayDiveFailure-8 2000000 859 ns/op 412 B/op 16 allocs/op
-BenchmarkFieldArrayDiveFailureParallel-8 5000000 335 ns/op 413 B/op 16 allocs/op
-BenchmarkFieldMapDiveSuccess-8 1000000 1292 ns/op 432 B/op 18 allocs/op
-BenchmarkFieldMapDiveSuccessParallel-8 3000000 467 ns/op 432 B/op 18 allocs/op
-BenchmarkFieldMapDiveFailure-8 1000000 1082 ns/op 512 B/op 16 allocs/op
-BenchmarkFieldMapDiveFailureParallel-8 5000000 425 ns/op 512 B/op 16 allocs/op
-BenchmarkFieldMapDiveWithKeysSuccess-8 1000000 1539 ns/op 480 B/op 21 allocs/op
-BenchmarkFieldMapDiveWithKeysSuccessParallel-8 3000000 613 ns/op 480 B/op 21 allocs/op
-BenchmarkFieldMapDiveWithKeysFailure-8 1000000 1413 ns/op 721 B/op 21 allocs/op
-BenchmarkFieldMapDiveWithKeysFailureParallel-8 3000000 575 ns/op 721 B/op 21 allocs/op
-BenchmarkFieldCustomTypeSuccess-8 10000000 216 ns/op 32 B/op 2 allocs/op
-BenchmarkFieldCustomTypeSuccessParallel-8 20000000 82.2 ns/op 32 B/op 2 allocs/op
-BenchmarkFieldCustomTypeFailure-8 5000000 274 ns/op 208 B/op 4 allocs/op
-BenchmarkFieldCustomTypeFailureParallel-8 20000000 116 ns/op 208 B/op 4 allocs/op
-BenchmarkFieldOrTagSuccess-8 2000000 740 ns/op 16 B/op 1 allocs/op
-BenchmarkFieldOrTagSuccessParallel-8 3000000 474 ns/op 16 B/op 1 allocs/op
-BenchmarkFieldOrTagFailure-8 3000000 471 ns/op 224 B/op 5 allocs/op
-BenchmarkFieldOrTagFailureParallel-8 3000000 414 ns/op 224 B/op 5 allocs/op
-BenchmarkStructLevelValidationSuccess-8 10000000 213 ns/op 32 B/op 2 allocs/op
-BenchmarkStructLevelValidationSuccessParallel-8 20000000 91.8 ns/op 32 B/op 2 allocs/op
-BenchmarkStructLevelValidationFailure-8 3000000 473 ns/op 304 B/op 8 allocs/op
-BenchmarkStructLevelValidationFailureParallel-8 10000000 234 ns/op 304 B/op 8 allocs/op
-BenchmarkStructSimpleCustomTypeSuccess-8 5000000 385 ns/op 32 B/op 2 allocs/op
-BenchmarkStructSimpleCustomTypeSuccessParallel-8 10000000 161 ns/op 32 B/op 2 allocs/op
-BenchmarkStructSimpleCustomTypeFailure-8 2000000 640 ns/op 424 B/op 9 allocs/op
-BenchmarkStructSimpleCustomTypeFailureParallel-8 5000000 318 ns/op 440 B/op 10 allocs/op
-BenchmarkStructFilteredSuccess-8 2000000 597 ns/op 288 B/op 9 allocs/op
-BenchmarkStructFilteredSuccessParallel-8 10000000 266 ns/op 288 B/op 9 allocs/op
-BenchmarkStructFilteredFailure-8 3000000 454 ns/op 256 B/op 7 allocs/op
-BenchmarkStructFilteredFailureParallel-8 10000000 214 ns/op 256 B/op 7 allocs/op
-BenchmarkStructPartialSuccess-8 3000000 502 ns/op 256 B/op 6 allocs/op
-BenchmarkStructPartialSuccessParallel-8 10000000 225 ns/op 256 B/op 6 allocs/op
-BenchmarkStructPartialFailure-8 2000000 702 ns/op 480 B/op 11 allocs/op
-BenchmarkStructPartialFailureParallel-8 5000000 329 ns/op 480 B/op 11 allocs/op
-BenchmarkStructExceptSuccess-8 2000000 793 ns/op 496 B/op 12 allocs/op
-BenchmarkStructExceptSuccessParallel-8 10000000 193 ns/op 240 B/op 5 allocs/op
-BenchmarkStructExceptFailure-8 2000000 639 ns/op 464 B/op 10 allocs/op
-BenchmarkStructExceptFailureParallel-8 5000000 300 ns/op 464 B/op 10 allocs/op
-BenchmarkStructSimpleCrossFieldSuccess-8 3000000 417 ns/op 72 B/op 3 allocs/op
-BenchmarkStructSimpleCrossFieldSuccessParallel-8 10000000 163 ns/op 72 B/op 3 allocs/op
-BenchmarkStructSimpleCrossFieldFailure-8 2000000 645 ns/op 304 B/op 8 allocs/op
-BenchmarkStructSimpleCrossFieldFailureParallel-8 5000000 285 ns/op 304 B/op 8 allocs/op
-BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3000000 588 ns/op 80 B/op 4 allocs/op
-BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 10000000 221 ns/op 80 B/op 4 allocs/op
-BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2000000 868 ns/op 320 B/op 9 allocs/op
-BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 5000000 337 ns/op 320 B/op 9 allocs/op
-BenchmarkStructSimpleSuccess-8 5000000 260 ns/op 0 B/op 0 allocs/op
-BenchmarkStructSimpleSuccessParallel-8 20000000 90.6 ns/op 0 B/op 0 allocs/op
-BenchmarkStructSimpleFailure-8 2000000 619 ns/op 424 B/op 9 allocs/op
-BenchmarkStructSimpleFailureParallel-8 5000000 296 ns/op 424 B/op 9 allocs/op
-BenchmarkStructComplexSuccess-8 1000000 1454 ns/op 128 B/op 8 allocs/op
-BenchmarkStructComplexSuccessParallel-8 3000000 579 ns/op 128 B/op 8 allocs/op
-BenchmarkStructComplexFailure-8 300000 4140 ns/op 3041 B/op 53 allocs/op
-BenchmarkStructComplexFailureParallel-8 1000000 2127 ns/op 3041 B/op 53 allocs/op
-BenchmarkOneof-8 10000000 140 ns/op 0 B/op 0 allocs/op
-BenchmarkOneofParallel-8 20000000 70.1 ns/op 0 B/op 0 allocs/op
-```
-
-Complementary Software
-----------------------
-
-Here is a list of software that complements this library, either pre- or post-validation.
-
-* [form](https://github.com/go-playground/form) - Decodes url.Values into Go value(s) and encodes Go value(s) into url.Values. Dual array and full map support.
-* [mold](https://github.com/go-playground/mold) - A general library to help modify or set data within data structures and other objects
-
-How to Contribute
-------
-
-Make a pull request...
-
-License
--------
-Distributed under the MIT License; please see the license file within the code for more details.
-
-Maintainers
------------
-This project has grown large enough that more than one person is required to properly support the community.
-If you are interested in becoming a maintainer, please reach out to me at https://github.com/deankarn
diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go
deleted file mode 100644
index d66980b6e2..0000000000
--- a/vendor/github.com/go-playground/validator/v10/baked_in.go
+++ /dev/null
@@ -1,2785 +0,0 @@
-package validator
-
-import (
- "bytes"
- "context"
- "crypto/sha256"
- "encoding/hex"
- "encoding/json"
- "fmt"
- "io/fs"
- "net"
- "net/url"
- "os"
- "reflect"
- "strconv"
- "strings"
- "sync"
- "syscall"
- "time"
- "unicode/utf8"
-
- "golang.org/x/crypto/sha3"
- "golang.org/x/text/language"
-
- "github.com/leodido/go-urn"
-)
-
-// Func accepts a FieldLevel interface for all validation needs. The return
-// value should be true when validation succeeds.
-type Func func(fl FieldLevel) bool
-
-// FuncCtx accepts a context.Context and FieldLevel interface for all
-// validation needs. The return value should be true when validation succeeds.
-type FuncCtx func(ctx context.Context, fl FieldLevel) bool
-
-// wrapFunc wraps a normal Func to make it compatible with FuncCtx.
-func wrapFunc(fn Func) FuncCtx {
- if fn == nil {
- return nil // be sure not to wrap a bad function.
- }
- return func(ctx context.Context, fl FieldLevel) bool {
- return fn(fl)
- }
-}
-
-var (
- restrictedTags = map[string]struct{}{
- diveTag: {},
- keysTag: {},
- endKeysTag: {},
- structOnlyTag: {},
- omitempty: {},
- skipValidationTag: {},
- utf8HexComma: {},
- utf8Pipe: {},
- noStructLevelTag: {},
- requiredTag: {},
- isdefault: {},
- }
-
- // bakedInAliases is a default mapping of a single validation tag that
- // defines a common or complex set of validation(s) to simplify
- // adding validation to structs.
- bakedInAliases = map[string]string{
- "iscolor": "hexcolor|rgb|rgba|hsl|hsla",
- "country_code": "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric",
- }
-
-	// bakedInValidators is the default map of ValidationFunc.
-	// You can add, remove or even replace items to suit your needs,
-	// or even disregard it and use your own map if so desired.
- bakedInValidators = map[string]Func{
- "required": hasValue,
- "required_if": requiredIf,
- "required_unless": requiredUnless,
- "skip_unless": skipUnless,
- "required_with": requiredWith,
- "required_with_all": requiredWithAll,
- "required_without": requiredWithout,
- "required_without_all": requiredWithoutAll,
- "excluded_if": excludedIf,
- "excluded_unless": excludedUnless,
- "excluded_with": excludedWith,
- "excluded_with_all": excludedWithAll,
- "excluded_without": excludedWithout,
- "excluded_without_all": excludedWithoutAll,
- "isdefault": isDefault,
- "len": hasLengthOf,
- "min": hasMinOf,
- "max": hasMaxOf,
- "eq": isEq,
- "eq_ignore_case": isEqIgnoreCase,
- "ne": isNe,
- "ne_ignore_case": isNeIgnoreCase,
- "lt": isLt,
- "lte": isLte,
- "gt": isGt,
- "gte": isGte,
- "eqfield": isEqField,
- "eqcsfield": isEqCrossStructField,
- "necsfield": isNeCrossStructField,
- "gtcsfield": isGtCrossStructField,
- "gtecsfield": isGteCrossStructField,
- "ltcsfield": isLtCrossStructField,
- "ltecsfield": isLteCrossStructField,
- "nefield": isNeField,
- "gtefield": isGteField,
- "gtfield": isGtField,
- "ltefield": isLteField,
- "ltfield": isLtField,
- "fieldcontains": fieldContains,
- "fieldexcludes": fieldExcludes,
- "alpha": isAlpha,
- "alphanum": isAlphanum,
- "alphaunicode": isAlphaUnicode,
- "alphanumunicode": isAlphanumUnicode,
- "boolean": isBoolean,
- "numeric": isNumeric,
- "number": isNumber,
- "hexadecimal": isHexadecimal,
- "hexcolor": isHEXColor,
- "rgb": isRGB,
- "rgba": isRGBA,
- "hsl": isHSL,
- "hsla": isHSLA,
- "e164": isE164,
- "email": isEmail,
- "url": isURL,
- "http_url": isHttpURL,
- "uri": isURI,
- "urn_rfc2141": isUrnRFC2141, // RFC 2141
- "file": isFile,
- "filepath": isFilePath,
- "base64": isBase64,
- "base64url": isBase64URL,
- "base64rawurl": isBase64RawURL,
- "contains": contains,
- "containsany": containsAny,
- "containsrune": containsRune,
- "excludes": excludes,
- "excludesall": excludesAll,
- "excludesrune": excludesRune,
- "startswith": startsWith,
- "endswith": endsWith,
- "startsnotwith": startsNotWith,
- "endsnotwith": endsNotWith,
- "isbn": isISBN,
- "isbn10": isISBN10,
- "isbn13": isISBN13,
- "eth_addr": isEthereumAddress,
- "eth_addr_checksum": isEthereumAddressChecksum,
- "btc_addr": isBitcoinAddress,
- "btc_addr_bech32": isBitcoinBech32Address,
- "uuid": isUUID,
- "uuid3": isUUID3,
- "uuid4": isUUID4,
- "uuid5": isUUID5,
- "uuid_rfc4122": isUUIDRFC4122,
- "uuid3_rfc4122": isUUID3RFC4122,
- "uuid4_rfc4122": isUUID4RFC4122,
- "uuid5_rfc4122": isUUID5RFC4122,
- "ulid": isULID,
- "md4": isMD4,
- "md5": isMD5,
- "sha256": isSHA256,
- "sha384": isSHA384,
- "sha512": isSHA512,
- "ripemd128": isRIPEMD128,
- "ripemd160": isRIPEMD160,
- "tiger128": isTIGER128,
- "tiger160": isTIGER160,
- "tiger192": isTIGER192,
- "ascii": isASCII,
- "printascii": isPrintableASCII,
- "multibyte": hasMultiByteCharacter,
- "datauri": isDataURI,
- "latitude": isLatitude,
- "longitude": isLongitude,
- "ssn": isSSN,
- "ipv4": isIPv4,
- "ipv6": isIPv6,
- "ip": isIP,
- "cidrv4": isCIDRv4,
- "cidrv6": isCIDRv6,
- "cidr": isCIDR,
- "tcp4_addr": isTCP4AddrResolvable,
- "tcp6_addr": isTCP6AddrResolvable,
- "tcp_addr": isTCPAddrResolvable,
- "udp4_addr": isUDP4AddrResolvable,
- "udp6_addr": isUDP6AddrResolvable,
- "udp_addr": isUDPAddrResolvable,
- "ip4_addr": isIP4AddrResolvable,
- "ip6_addr": isIP6AddrResolvable,
- "ip_addr": isIPAddrResolvable,
- "unix_addr": isUnixAddrResolvable,
- "mac": isMAC,
- "hostname": isHostnameRFC952, // RFC 952
- "hostname_rfc1123": isHostnameRFC1123, // RFC 1123
- "fqdn": isFQDN,
- "unique": isUnique,
- "oneof": isOneOf,
- "html": isHTML,
- "html_encoded": isHTMLEncoded,
- "url_encoded": isURLEncoded,
- "dir": isDir,
- "dirpath": isDirPath,
- "json": isJSON,
- "jwt": isJWT,
- "hostname_port": isHostnamePort,
- "lowercase": isLowercase,
- "uppercase": isUppercase,
- "datetime": isDatetime,
- "timezone": isTimeZone,
- "iso3166_1_alpha2": isIso3166Alpha2,
- "iso3166_1_alpha3": isIso3166Alpha3,
- "iso3166_1_alpha_numeric": isIso3166AlphaNumeric,
- "iso3166_2": isIso31662,
- "iso4217": isIso4217,
- "iso4217_numeric": isIso4217Numeric,
- "bcp47_language_tag": isBCP47LanguageTag,
- "postcode_iso3166_alpha2": isPostcodeByIso3166Alpha2,
- "postcode_iso3166_alpha2_field": isPostcodeByIso3166Alpha2Field,
- "bic": isIsoBicFormat,
- "semver": isSemverFormat,
- "dns_rfc1035_label": isDnsRFC1035LabelFormat,
- "credit_card": isCreditCard,
- "cve": isCveFormat,
- "luhn_checksum": hasLuhnChecksum,
- "mongodb": isMongoDB,
- "cron": isCron,
- }
-)
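-
-// Editor's note: the sketch below is illustrative only and is not part of the
-// original file. It shows how the default set above can be extended from
-// application code via the public RegisterValidation / RegisterAlias API
-// (the "is-even" and "us_postcode" tag names are hypothetical):
-//
-//	validate := validator.New()
-//
-//	_ = validate.RegisterValidation("is-even", func(fl validator.FieldLevel) bool {
-//		return fl.Field().Int()%2 == 0
-//	})
-//
-//	// Aliases compose existing tags, mirroring bakedInAliases above.
-//	validate.RegisterAlias("us_postcode", "postcode_iso3166_alpha2=US")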
-
-var (
- oneofValsCache = map[string][]string{}
- oneofValsCacheRWLock = sync.RWMutex{}
-)
-
-func parseOneOfParam2(s string) []string {
- oneofValsCacheRWLock.RLock()
- vals, ok := oneofValsCache[s]
- oneofValsCacheRWLock.RUnlock()
- if !ok {
- oneofValsCacheRWLock.Lock()
- vals = splitParamsRegex.FindAllString(s, -1)
- for i := 0; i < len(vals); i++ {
- vals[i] = strings.Replace(vals[i], "'", "", -1)
- }
- oneofValsCache[s] = vals
- oneofValsCacheRWLock.Unlock()
- }
- return vals
-}
-
-func isURLEncoded(fl FieldLevel) bool {
- return uRLEncodedRegex.MatchString(fl.Field().String())
-}
-
-func isHTMLEncoded(fl FieldLevel) bool {
- return hTMLEncodedRegex.MatchString(fl.Field().String())
-}
-
-func isHTML(fl FieldLevel) bool {
- return hTMLRegex.MatchString(fl.Field().String())
-}
-
-func isOneOf(fl FieldLevel) bool {
- vals := parseOneOfParam2(fl.Param())
-
- field := fl.Field()
-
- var v string
- switch field.Kind() {
- case reflect.String:
- v = field.String()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- v = strconv.FormatInt(field.Int(), 10)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- v = strconv.FormatUint(field.Uint(), 10)
- default:
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
- }
- for i := 0; i < len(vals); i++ {
- if vals[i] == v {
- return true
- }
- }
- return false
-}
-
-// isUnique is the validation function for validating if each array|slice|map value is unique
-func isUnique(fl FieldLevel) bool {
- field := fl.Field()
- param := fl.Param()
- v := reflect.ValueOf(struct{}{})
-
- switch field.Kind() {
- case reflect.Slice, reflect.Array:
- elem := field.Type().Elem()
- if elem.Kind() == reflect.Ptr {
- elem = elem.Elem()
- }
-
- if param == "" {
- m := reflect.MakeMap(reflect.MapOf(elem, v.Type()))
-
- for i := 0; i < field.Len(); i++ {
- m.SetMapIndex(reflect.Indirect(field.Index(i)), v)
- }
- return field.Len() == m.Len()
- }
-
- sf, ok := elem.FieldByName(param)
- if !ok {
- panic(fmt.Sprintf("Bad field name %s", param))
- }
-
- sfTyp := sf.Type
- if sfTyp.Kind() == reflect.Ptr {
- sfTyp = sfTyp.Elem()
- }
-
- m := reflect.MakeMap(reflect.MapOf(sfTyp, v.Type()))
- var fieldlen int
- for i := 0; i < field.Len(); i++ {
- key := reflect.Indirect(reflect.Indirect(field.Index(i)).FieldByName(param))
- if key.IsValid() {
- fieldlen++
- m.SetMapIndex(key, v)
- }
- }
- return fieldlen == m.Len()
- case reflect.Map:
- var m reflect.Value
- if field.Type().Elem().Kind() == reflect.Ptr {
- m = reflect.MakeMap(reflect.MapOf(field.Type().Elem().Elem(), v.Type()))
- } else {
- m = reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type()))
- }
-
- for _, k := range field.MapKeys() {
- m.SetMapIndex(reflect.Indirect(field.MapIndex(k)), v)
- }
-
- return field.Len() == m.Len()
- default:
- if parent := fl.Parent(); parent.Kind() == reflect.Struct {
- uniqueField := parent.FieldByName(param)
- if uniqueField == reflect.ValueOf(nil) {
- panic(fmt.Sprintf("Bad field name provided %s", param))
- }
-
- if uniqueField.Kind() != field.Kind() {
- panic(fmt.Sprintf("Bad field type %T:%T", field.Interface(), uniqueField.Interface()))
- }
-
- return field.Interface() != uniqueField.Interface()
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
- }
-}
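-
-// Editor's note: illustrative sketch, not part of the original file. The
-// branches above correspond to tag usages such as the following (struct and
-// field names are hypothetical):
-//
-//	type Booking struct {
-//		// Slice/array/map branch: every element must be distinct.
-//		Guests []string `validate:"unique"`
-//
-//		// Slice-of-structs branch: elements must be distinct by the named field.
-//		Rooms []Room `validate:"unique=Number"`
-//
-//		// Default branch: this field must differ from the named sibling field.
-//		Backup  string `validate:"unique=Primary"`
-//		Primary string
-//	}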
-
-// isMAC is the validation function for validating if the field's value is a valid MAC address.
-func isMAC(fl FieldLevel) bool {
- _, err := net.ParseMAC(fl.Field().String())
-
- return err == nil
-}
-
-// isCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address.
-func isCIDRv4(fl FieldLevel) bool {
- ip, _, err := net.ParseCIDR(fl.Field().String())
-
- return err == nil && ip.To4() != nil
-}
-
-// isCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address.
-func isCIDRv6(fl FieldLevel) bool {
- ip, _, err := net.ParseCIDR(fl.Field().String())
-
- return err == nil && ip.To4() == nil
-}
-
-// isCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address.
-func isCIDR(fl FieldLevel) bool {
- _, _, err := net.ParseCIDR(fl.Field().String())
-
- return err == nil
-}
-
-// isIPv4 is the validation function for validating if a value is a valid v4 IP address.
-func isIPv4(fl FieldLevel) bool {
- ip := net.ParseIP(fl.Field().String())
-
- return ip != nil && ip.To4() != nil
-}
-
-// isIPv6 is the validation function for validating if the field's value is a valid v6 IP address.
-func isIPv6(fl FieldLevel) bool {
- ip := net.ParseIP(fl.Field().String())
-
- return ip != nil && ip.To4() == nil
-}
-
-// isIP is the validation function for validating if the field's value is a valid v4 or v6 IP address.
-func isIP(fl FieldLevel) bool {
- ip := net.ParseIP(fl.Field().String())
-
- return ip != nil
-}
-
-// isSSN is the validation function for validating if the field's value is a valid SSN.
-func isSSN(fl FieldLevel) bool {
- field := fl.Field()
-
- if field.Len() != 11 {
- return false
- }
-
- return sSNRegex.MatchString(field.String())
-}
-
-// isLongitude is the validation function for validating if the field's value is a valid longitude coordinate.
-func isLongitude(fl FieldLevel) bool {
- field := fl.Field()
-
- var v string
- switch field.Kind() {
- case reflect.String:
- v = field.String()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- v = strconv.FormatInt(field.Int(), 10)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- v = strconv.FormatUint(field.Uint(), 10)
- case reflect.Float32:
- v = strconv.FormatFloat(field.Float(), 'f', -1, 32)
- case reflect.Float64:
- v = strconv.FormatFloat(field.Float(), 'f', -1, 64)
- default:
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
- }
-
- return longitudeRegex.MatchString(v)
-}
-
-// isLatitude is the validation function for validating if the field's value is a valid latitude coordinate.
-func isLatitude(fl FieldLevel) bool {
- field := fl.Field()
-
- var v string
- switch field.Kind() {
- case reflect.String:
- v = field.String()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- v = strconv.FormatInt(field.Int(), 10)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- v = strconv.FormatUint(field.Uint(), 10)
- case reflect.Float32:
- v = strconv.FormatFloat(field.Float(), 'f', -1, 32)
- case reflect.Float64:
- v = strconv.FormatFloat(field.Float(), 'f', -1, 64)
- default:
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
- }
-
- return latitudeRegex.MatchString(v)
-}
-
-// isDataURI is the validation function for validating if the field's value is a valid data URI.
-func isDataURI(fl FieldLevel) bool {
- uri := strings.SplitN(fl.Field().String(), ",", 2)
-
- if len(uri) != 2 {
- return false
- }
-
- if !dataURIRegex.MatchString(uri[0]) {
- return false
- }
-
- return base64Regex.MatchString(uri[1])
-}
-
-// hasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character.
-func hasMultiByteCharacter(fl FieldLevel) bool {
- field := fl.Field()
-
- if field.Len() == 0 {
- return true
- }
-
- return multibyteRegex.MatchString(field.String())
-}
-
-// isPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character.
-func isPrintableASCII(fl FieldLevel) bool {
- return printableASCIIRegex.MatchString(fl.Field().String())
-}
-
-// isASCII is the validation function for validating if the field's value is a valid ASCII character.
-func isASCII(fl FieldLevel) bool {
- return aSCIIRegex.MatchString(fl.Field().String())
-}
-
-// isUUID5 is the validation function for validating if the field's value is a valid v5 UUID.
-func isUUID5(fl FieldLevel) bool {
- return uUID5Regex.MatchString(fl.Field().String())
-}
-
-// isUUID4 is the validation function for validating if the field's value is a valid v4 UUID.
-func isUUID4(fl FieldLevel) bool {
- return uUID4Regex.MatchString(fl.Field().String())
-}
-
-// isUUID3 is the validation function for validating if the field's value is a valid v3 UUID.
-func isUUID3(fl FieldLevel) bool {
- return uUID3Regex.MatchString(fl.Field().String())
-}
-
-// isUUID is the validation function for validating if the field's value is a valid UUID of any version.
-func isUUID(fl FieldLevel) bool {
- return uUIDRegex.MatchString(fl.Field().String())
-}
-
-// isUUID5RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v5 UUID.
-func isUUID5RFC4122(fl FieldLevel) bool {
- return uUID5RFC4122Regex.MatchString(fl.Field().String())
-}
-
-// isUUID4RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v4 UUID.
-func isUUID4RFC4122(fl FieldLevel) bool {
- return uUID4RFC4122Regex.MatchString(fl.Field().String())
-}
-
-// isUUID3RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v3 UUID.
-func isUUID3RFC4122(fl FieldLevel) bool {
- return uUID3RFC4122Regex.MatchString(fl.Field().String())
-}
-
-// isUUIDRFC4122 is the validation function for validating if the field's value is a valid RFC4122 UUID of any version.
-func isUUIDRFC4122(fl FieldLevel) bool {
- return uUIDRFC4122Regex.MatchString(fl.Field().String())
-}
-
-// isULID is the validation function for validating if the field's value is a valid ULID.
-func isULID(fl FieldLevel) bool {
- return uLIDRegex.MatchString(fl.Field().String())
-}
-
-// isMD4 is the validation function for validating if the field's value is a valid MD4.
-func isMD4(fl FieldLevel) bool {
- return md4Regex.MatchString(fl.Field().String())
-}
-
-// isMD5 is the validation function for validating if the field's value is a valid MD5.
-func isMD5(fl FieldLevel) bool {
- return md5Regex.MatchString(fl.Field().String())
-}
-
-// isSHA256 is the validation function for validating if the field's value is a valid SHA256.
-func isSHA256(fl FieldLevel) bool {
- return sha256Regex.MatchString(fl.Field().String())
-}
-
-// isSHA384 is the validation function for validating if the field's value is a valid SHA384.
-func isSHA384(fl FieldLevel) bool {
- return sha384Regex.MatchString(fl.Field().String())
-}
-
-// isSHA512 is the validation function for validating if the field's value is a valid SHA512.
-func isSHA512(fl FieldLevel) bool {
- return sha512Regex.MatchString(fl.Field().String())
-}
-
-// isRIPEMD128 is the validation function for validating if the field's value is a valid RIPEMD128.
-func isRIPEMD128(fl FieldLevel) bool {
- return ripemd128Regex.MatchString(fl.Field().String())
-}
-
-// isRIPEMD160 is the validation function for validating if the field's value is a valid RIPEMD160.
-func isRIPEMD160(fl FieldLevel) bool {
- return ripemd160Regex.MatchString(fl.Field().String())
-}
-
-// isTIGER128 is the validation function for validating if the field's value is a valid TIGER128.
-func isTIGER128(fl FieldLevel) bool {
- return tiger128Regex.MatchString(fl.Field().String())
-}
-
-// isTIGER160 is the validation function for validating if the field's value is a valid TIGER160.
-func isTIGER160(fl FieldLevel) bool {
- return tiger160Regex.MatchString(fl.Field().String())
-}
-
-// isTIGER192 is the validation function for validating if the field's value is a valid TIGER192.
-func isTIGER192(fl FieldLevel) bool {
- return tiger192Regex.MatchString(fl.Field().String())
-}
-
-// isISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN.
-func isISBN(fl FieldLevel) bool {
- return isISBN10(fl) || isISBN13(fl)
-}
-
-// isISBN13 is the validation function for validating if the field's value is a valid v13 ISBN.
-func isISBN13(fl FieldLevel) bool {
- s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 4), " ", "", 4)
-
- if !iSBN13Regex.MatchString(s) {
- return false
- }
-
- var checksum int32
- var i int32
-
- factor := []int32{1, 3}
-
- for i = 0; i < 12; i++ {
- checksum += factor[i%2] * int32(s[i]-'0')
- }
-
- return (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0
-}
-
-// isISBN10 is the validation function for validating if the field's value is a valid v10 ISBN.
-func isISBN10(fl FieldLevel) bool {
- s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 3), " ", "", 3)
-
- if !iSBN10Regex.MatchString(s) {
- return false
- }
-
- var checksum int32
- var i int32
-
- for i = 0; i < 9; i++ {
- checksum += (i + 1) * int32(s[i]-'0')
- }
-
- if s[9] == 'X' {
- checksum += 10 * 10
- } else {
- checksum += 10 * int32(s[9]-'0')
- }
-
- return checksum%11 == 0
-}
-
-// isEthereumAddress is the validation function for validating if the field's value is a valid Ethereum address.
-func isEthereumAddress(fl FieldLevel) bool {
- address := fl.Field().String()
-
- return ethAddressRegex.MatchString(address)
-}
-
-// isEthereumAddressChecksum is the validation function for validating if the field's value is a valid checksummed Ethereum address.
-func isEthereumAddressChecksum(fl FieldLevel) bool {
- address := fl.Field().String()
-
- if !ethAddressRegex.MatchString(address) {
- return false
- }
- // Checksum validation. Reference: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md
- address = address[2:] // Skip "0x" prefix.
- h := sha3.NewLegacyKeccak256()
- // hash.Hash's io.Writer implementation says it never returns an error. https://golang.org/pkg/hash/#Hash
- _, _ = h.Write([]byte(strings.ToLower(address)))
- hash := hex.EncodeToString(h.Sum(nil))
-
- for i := 0; i < len(address); i++ {
- if address[i] <= '9' { // Skip 0-9 digits: they don't have upper/lower-case.
- continue
- }
- if hash[i] > '7' && address[i] >= 'a' || hash[i] <= '7' && address[i] <= 'F' {
- return false
- }
- }
-
- return true
-}
-
-// isBitcoinAddress is the validation function for validating if the field's value is a valid btc address
-func isBitcoinAddress(fl FieldLevel) bool {
- address := fl.Field().String()
-
- if !btcAddressRegex.MatchString(address) {
- return false
- }
-
- alphabet := []byte("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz")
-
- decode := [25]byte{}
-
- for _, n := range []byte(address) {
- d := bytes.IndexByte(alphabet, n)
-
- for i := 24; i >= 0; i-- {
- d += 58 * int(decode[i])
- decode[i] = byte(d % 256)
- d /= 256
- }
- }
-
- h := sha256.New()
- _, _ = h.Write(decode[:21])
- d := h.Sum([]byte{})
- h = sha256.New()
- _, _ = h.Write(d)
-
- validchecksum := [4]byte{}
- computedchecksum := [4]byte{}
-
- copy(computedchecksum[:], h.Sum(d[:0]))
- copy(validchecksum[:], decode[21:])
-
- return validchecksum == computedchecksum
-}
-
-// isBitcoinBech32Address is the validation function for validating if the field's value is a valid bech32 btc address
-func isBitcoinBech32Address(fl FieldLevel) bool {
- address := fl.Field().String()
-
- if !btcLowerAddressRegexBech32.MatchString(address) && !btcUpperAddressRegexBech32.MatchString(address) {
- return false
- }
-
- am := len(address) % 8
-
- if am == 0 || am == 3 || am == 5 {
- return false
- }
-
- address = strings.ToLower(address)
-
- alphabet := "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
-
- hr := []int{3, 3, 0, 2, 3} // the human readable part will always be bc
- addr := address[3:]
- dp := make([]int, 0, len(addr))
-
- for _, c := range addr {
- dp = append(dp, strings.IndexRune(alphabet, c))
- }
-
- ver := dp[0]
-
- if ver < 0 || ver > 16 {
- return false
- }
-
- if ver == 0 {
- if len(address) != 42 && len(address) != 62 {
- return false
- }
- }
-
- values := append(hr, dp...)
-
- GEN := []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3}
-
- p := 1
-
- for _, v := range values {
- b := p >> 25
- p = (p&0x1ffffff)<<5 ^ v
-
- for i := 0; i < 5; i++ {
- if (b>>uint(i))&1 == 1 {
- p ^= GEN[i]
- }
- }
- }
-
- if p != 1 {
- return false
- }
-
- b := uint(0)
- acc := 0
- mv := (1 << 5) - 1
- var sw []int
-
- for _, v := range dp[1 : len(dp)-6] {
- acc = (acc << 5) | v
- b += 5
- for b >= 8 {
- b -= 8
- sw = append(sw, (acc>>b)&mv)
- }
- }
-
- if len(sw) < 2 || len(sw) > 40 {
- return false
- }
-
- return true
-}
-
-// excludesRune is the validation function for validating that the field's value does not contain the rune specified within the param.
-func excludesRune(fl FieldLevel) bool {
- return !containsRune(fl)
-}
-
-// excludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param.
-func excludesAll(fl FieldLevel) bool {
- return !containsAny(fl)
-}
-
-// excludes is the validation function for validating that the field's value does not contain the text specified within the param.
-func excludes(fl FieldLevel) bool {
- return !contains(fl)
-}
-
-// containsRune is the validation function for validating that the field's value contains the rune specified within the param.
-func containsRune(fl FieldLevel) bool {
- r, _ := utf8.DecodeRuneInString(fl.Param())
-
- return strings.ContainsRune(fl.Field().String(), r)
-}
-
-// containsAny is the validation function for validating that the field's value contains any of the characters specified within the param.
-func containsAny(fl FieldLevel) bool {
- return strings.ContainsAny(fl.Field().String(), fl.Param())
-}
-
-// contains is the validation function for validating that the field's value contains the text specified within the param.
-func contains(fl FieldLevel) bool {
- return strings.Contains(fl.Field().String(), fl.Param())
-}
-
-// startsWith is the validation function for validating that the field's value starts with the text specified within the param.
-func startsWith(fl FieldLevel) bool {
- return strings.HasPrefix(fl.Field().String(), fl.Param())
-}
-
-// endsWith is the validation function for validating that the field's value ends with the text specified within the param.
-func endsWith(fl FieldLevel) bool {
- return strings.HasSuffix(fl.Field().String(), fl.Param())
-}
-
-// startsNotWith is the validation function for validating that the field's value does not start with the text specified within the param.
-func startsNotWith(fl FieldLevel) bool {
- return !startsWith(fl)
-}
-
-// endsNotWith is the validation function for validating that the field's value does not end with the text specified within the param.
-func endsNotWith(fl FieldLevel) bool {
- return !endsWith(fl)
-}
-
-// fieldContains is the validation function for validating if the current field's value contains the field specified by the param's value.
-func fieldContains(fl FieldLevel) bool {
- field := fl.Field()
-
- currentField, _, ok := fl.GetStructFieldOK()
-
- if !ok {
- return false
- }
-
- return strings.Contains(field.String(), currentField.String())
-}
-
-// fieldExcludes is the validation function for validating if the current field's value excludes the field specified by the param's value.
-func fieldExcludes(fl FieldLevel) bool {
- field := fl.Field()
-
- currentField, _, ok := fl.GetStructFieldOK()
- if !ok {
- return true
- }
-
- return !strings.Contains(field.String(), currentField.String())
-}
-
-// isNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value.
-func isNeField(fl FieldLevel) bool {
- field := fl.Field()
- kind := field.Kind()
-
- currentField, currentKind, ok := fl.GetStructFieldOK()
-
- if !ok || currentKind != kind {
- return true
- }
-
- switch kind {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return field.Int() != currentField.Int()
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return field.Uint() != currentField.Uint()
-
- case reflect.Float32, reflect.Float64:
- return field.Float() != currentField.Float()
-
- case reflect.Slice, reflect.Map, reflect.Array:
- return int64(field.Len()) != int64(currentField.Len())
-
- case reflect.Bool:
- return field.Bool() != currentField.Bool()
-
- case reflect.Struct:
-
- fieldType := field.Type()
-
- if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
-
- t := currentField.Interface().(time.Time)
- fieldTime := field.Interface().(time.Time)
-
- return !fieldTime.Equal(t)
- }
-
- // Not Same underlying type i.e. struct and time
- if fieldType != currentField.Type() {
- return true
- }
- }
-
- // default reflect.String:
- return field.String() != currentField.String()
-}
-
-// isNe is the validation function for validating that the field's value does not equal the provided param value.
-func isNe(fl FieldLevel) bool {
- return !isEq(fl)
-}
-
-// isNeIgnoreCase is the validation function for validating that the field's string value does not equal the
-// provided param value. The comparison is case-insensitive
-func isNeIgnoreCase(fl FieldLevel) bool {
- return !isEqIgnoreCase(fl)
-}
-
-// isLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value.
-func isLteCrossStructField(fl FieldLevel) bool {
- field := fl.Field()
- kind := field.Kind()
-
- topField, topKind, ok := fl.GetStructFieldOK()
- if !ok || topKind != kind {
- return false
- }
-
- switch kind {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return field.Int() <= topField.Int()
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return field.Uint() <= topField.Uint()
-
- case reflect.Float32, reflect.Float64:
- return field.Float() <= topField.Float()
-
- case reflect.Slice, reflect.Map, reflect.Array:
- return int64(field.Len()) <= int64(topField.Len())
-
- case reflect.Struct:
-
- fieldType := field.Type()
-
- if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
-
- fieldTime := field.Convert(timeType).Interface().(time.Time)
- topTime := topField.Convert(timeType).Interface().(time.Time)
-
- return fieldTime.Before(topTime) || fieldTime.Equal(topTime)
- }
-
- // Not Same underlying type i.e. struct and time
- if fieldType != topField.Type() {
- return false
- }
- }
-
- // default reflect.String:
- return field.String() <= topField.String()
-}
-
-// isLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value.
-// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
-func isLtCrossStructField(fl FieldLevel) bool {
- field := fl.Field()
- kind := field.Kind()
-
- topField, topKind, ok := fl.GetStructFieldOK()
- if !ok || topKind != kind {
- return false
- }
-
- switch kind {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return field.Int() < topField.Int()
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return field.Uint() < topField.Uint()
-
- case reflect.Float32, reflect.Float64:
- return field.Float() < topField.Float()
-
- case reflect.Slice, reflect.Map, reflect.Array:
- return int64(field.Len()) < int64(topField.Len())
-
- case reflect.Struct:
-
- fieldType := field.Type()
-
- if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
-
- fieldTime := field.Convert(timeType).Interface().(time.Time)
- topTime := topField.Convert(timeType).Interface().(time.Time)
-
- return fieldTime.Before(topTime)
- }
-
- // Not Same underlying type i.e. struct and time
- if fieldType != topField.Type() {
- return false
- }
- }
-
- // default reflect.String:
- return field.String() < topField.String()
-}
-
-// isGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value.
-func isGteCrossStructField(fl FieldLevel) bool {
- field := fl.Field()
- kind := field.Kind()
-
- topField, topKind, ok := fl.GetStructFieldOK()
- if !ok || topKind != kind {
- return false
- }
-
- switch kind {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return field.Int() >= topField.Int()
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return field.Uint() >= topField.Uint()
-
- case reflect.Float32, reflect.Float64:
- return field.Float() >= topField.Float()
-
- case reflect.Slice, reflect.Map, reflect.Array:
- return int64(field.Len()) >= int64(topField.Len())
-
- case reflect.Struct:
-
- fieldType := field.Type()
-
- if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
-
- fieldTime := field.Convert(timeType).Interface().(time.Time)
- topTime := topField.Convert(timeType).Interface().(time.Time)
-
- return fieldTime.After(topTime) || fieldTime.Equal(topTime)
- }
-
- // Not Same underlying type i.e. struct and time
- if fieldType != topField.Type() {
- return false
- }
- }
-
- // default reflect.String:
- return field.String() >= topField.String()
-}
-
-// isGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value.
-func isGtCrossStructField(fl FieldLevel) bool {
- field := fl.Field()
- kind := field.Kind()
-
- topField, topKind, ok := fl.GetStructFieldOK()
- if !ok || topKind != kind {
- return false
- }
-
- switch kind {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return field.Int() > topField.Int()
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return field.Uint() > topField.Uint()
-
- case reflect.Float32, reflect.Float64:
- return field.Float() > topField.Float()
-
- case reflect.Slice, reflect.Map, reflect.Array:
- return int64(field.Len()) > int64(topField.Len())
-
- case reflect.Struct:
-
- fieldType := field.Type()
-
- if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
-
- fieldTime := field.Convert(timeType).Interface().(time.Time)
- topTime := topField.Convert(timeType).Interface().(time.Time)
-
- return fieldTime.After(topTime)
- }
-
- // Not Same underlying type i.e. struct and time
- if fieldType != topField.Type() {
- return false
- }
- }
-
- // default reflect.String:
- return field.String() > topField.String()
-}
-
-// isNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value.
-func isNeCrossStructField(fl FieldLevel) bool {
- field := fl.Field()
- kind := field.Kind()
-
- topField, currentKind, ok := fl.GetStructFieldOK()
- if !ok || currentKind != kind {
- return true
- }
-
- switch kind {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return topField.Int() != field.Int()
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return topField.Uint() != field.Uint()
-
- case reflect.Float32, reflect.Float64:
- return topField.Float() != field.Float()
-
- case reflect.Slice, reflect.Map, reflect.Array:
- return int64(topField.Len()) != int64(field.Len())
-
- case reflect.Bool:
- return topField.Bool() != field.Bool()
-
- case reflect.Struct:
-
- fieldType := field.Type()
-
- if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
-
- t := field.Convert(timeType).Interface().(time.Time)
- fieldTime := topField.Convert(timeType).Interface().(time.Time)
-
- return !fieldTime.Equal(t)
- }
-
- // Not Same underlying type i.e. struct and time
- if fieldType != topField.Type() {
- return true
- }
- }
-
- // default reflect.String:
- return topField.String() != field.String()
-}
-
-// isEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value.
-func isEqCrossStructField(fl FieldLevel) bool {
- field := fl.Field()
- kind := field.Kind()
-
- topField, topKind, ok := fl.GetStructFieldOK()
- if !ok || topKind != kind {
- return false
- }
-
- switch kind {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return topField.Int() == field.Int()
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return topField.Uint() == field.Uint()
-
- case reflect.Float32, reflect.Float64:
- return topField.Float() == field.Float()
-
- case reflect.Slice, reflect.Map, reflect.Array:
- return int64(topField.Len()) == int64(field.Len())
-
- case reflect.Bool:
- return topField.Bool() == field.Bool()
-
- case reflect.Struct:
-
- fieldType := field.Type()
-
- if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
-
- t := field.Convert(timeType).Interface().(time.Time)
- fieldTime := topField.Convert(timeType).Interface().(time.Time)
-
- return fieldTime.Equal(t)
- }
-
- // Not Same underlying type i.e. struct and time
- if fieldType != topField.Type() {
- return false
- }
- }
-
- // default reflect.String:
- return topField.String() == field.String()
-}
-
-// isEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value.
-func isEqField(fl FieldLevel) bool {
- field := fl.Field()
- kind := field.Kind()
-
- currentField, currentKind, ok := fl.GetStructFieldOK()
- if !ok || currentKind != kind {
- return false
- }
-
- switch kind {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return field.Int() == currentField.Int()
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return field.Uint() == currentField.Uint()
-
- case reflect.Float32, reflect.Float64:
- return field.Float() == currentField.Float()
-
- case reflect.Slice, reflect.Map, reflect.Array:
- return int64(field.Len()) == int64(currentField.Len())
-
- case reflect.Bool:
- return field.Bool() == currentField.Bool()
-
- case reflect.Struct:
-
- fieldType := field.Type()
-
- if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
-
- t := currentField.Convert(timeType).Interface().(time.Time)
- fieldTime := field.Convert(timeType).Interface().(time.Time)
-
- return fieldTime.Equal(t)
- }
-
- // Not Same underlying type i.e. struct and time
- if fieldType != currentField.Type() {
- return false
- }
- }
-
- // default reflect.String:
- return field.String() == currentField.String()
-}
-
-// isEq is the validation function for validating if the current field's value is equal to the param's value.
-func isEq(fl FieldLevel) bool {
- field := fl.Field()
- param := fl.Param()
-
- switch field.Kind() {
-
- case reflect.String:
- return field.String() == param
-
- case reflect.Slice, reflect.Map, reflect.Array:
- p := asInt(param)
-
- return int64(field.Len()) == p
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- p := asIntFromType(field.Type(), param)
-
- return field.Int() == p
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- p := asUint(param)
-
- return field.Uint() == p
-
- case reflect.Float32, reflect.Float64:
- p := asFloat(param)
-
- return field.Float() == p
-
- case reflect.Bool:
- p := asBool(param)
-
- return field.Bool() == p
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isEqIgnoreCase is the validation function for validating if the current field's string value is
-// equal to the param's value.
-// The comparison is case-insensitive.
-func isEqIgnoreCase(fl FieldLevel) bool {
- field := fl.Field()
- param := fl.Param()
-
- switch field.Kind() {
-
- case reflect.String:
- return strings.EqualFold(field.String(), param)
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isPostcodeByIso3166Alpha2 validates a postcode against the country code given as the param (ISO 3166-1 alpha-2).
-// example: `postcode_iso3166_alpha2=US`
-func isPostcodeByIso3166Alpha2(fl FieldLevel) bool {
- field := fl.Field()
- param := fl.Param()
-
- reg, found := postCodeRegexDict[param]
- if !found {
- return false
- }
-
- return reg.MatchString(field.String())
-}
-
-// isPostcodeByIso3166Alpha2Field validates a postcode against the country code held in another field (ISO 3166-1 alpha-2).
-// example: `postcode_iso3166_alpha2_field=CountryCode`
-func isPostcodeByIso3166Alpha2Field(fl FieldLevel) bool {
- field := fl.Field()
- params := parseOneOfParam2(fl.Param())
-
- if len(params) != 1 {
- return false
- }
-
- currentField, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), params[0])
- if !found {
- return false
- }
-
- if kind != reflect.String {
- panic(fmt.Sprintf("Bad field type %T", currentField.Interface()))
- }
-
- reg, found := postCodeRegexDict[currentField.String()]
- if !found {
- return false
- }
-
- return reg.MatchString(field.String())
-}
-
-// isBase64 is the validation function for validating if the current field's value is a valid base 64.
-func isBase64(fl FieldLevel) bool {
- return base64Regex.MatchString(fl.Field().String())
-}
-
-// isBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string.
-func isBase64URL(fl FieldLevel) bool {
- return base64URLRegex.MatchString(fl.Field().String())
-}
-
-// isBase64RawURL is the validation function for validating if the current field's value is a valid base64 URL safe string without '=' padding.
-func isBase64RawURL(fl FieldLevel) bool {
- return base64RawURLRegex.MatchString(fl.Field().String())
-}
-
-// isURI is the validation function for validating if the current field's value is a valid URI.
-func isURI(fl FieldLevel) bool {
- field := fl.Field()
-
- switch field.Kind() {
- case reflect.String:
-
- s := field.String()
-
- // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
- // emulate browser and strip the '#' suffix prior to validation. see issue-#237
- if i := strings.Index(s, "#"); i > -1 {
- s = s[:i]
- }
-
- if len(s) == 0 {
- return false
- }
-
- _, err := url.ParseRequestURI(s)
-
- return err == nil
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isURL is the validation function for validating if the current field's value is a valid URL.
-func isURL(fl FieldLevel) bool {
- field := fl.Field()
-
- switch field.Kind() {
- case reflect.String:
-
- var i int
- s := field.String()
-
- // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
- // emulate browser and strip the '#' suffix prior to validation. see issue-#237
- if i = strings.Index(s, "#"); i > -1 {
- s = s[:i]
- }
-
- if len(s) == 0 {
- return false
- }
-
- url, err := url.ParseRequestURI(s)
-
- if err != nil || url.Scheme == "" {
- return false
- }
-
- return true
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isHttpURL is the validation function for validating if the current field's value is a valid HTTP(s) URL.
-func isHttpURL(fl FieldLevel) bool {
- if !isURL(fl) {
- return false
- }
-
- field := fl.Field()
- switch field.Kind() {
- case reflect.String:
-
- s := strings.ToLower(field.String())
- return strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://")
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isUrnRFC2141 is the validation function for validating if the current field's value is a valid URN as per RFC 2141.
-func isUrnRFC2141(fl FieldLevel) bool {
- field := fl.Field()
-
- switch field.Kind() {
- case reflect.String:
-
- str := field.String()
-
- _, match := urn.Parse([]byte(str))
-
- return match
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isFile is the validation function for validating if the current field's value is a valid existing file path.
-func isFile(fl FieldLevel) bool {
- field := fl.Field()
-
- switch field.Kind() {
- case reflect.String:
- fileInfo, err := os.Stat(field.String())
- if err != nil {
- return false
- }
-
- return !fileInfo.IsDir()
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isFilePath is the validation function for validating if the current field's value is a valid file path.
-func isFilePath(fl FieldLevel) bool {
-
- var exists bool
- var err error
-
- field := fl.Field()
-
- // If it exists, it obviously is valid.
- // This is done first to avoid code duplication and unnecessary additional logic.
- if exists = isFile(fl); exists {
- return true
- }
-
- // It does not exist but may still be a valid filepath.
- switch field.Kind() {
- case reflect.String:
- // Every OS allows for whitespace, but none
- // let you use a file with no filename (to my knowledge).
- // Unless you're dealing with raw inodes, but I digress.
- if strings.TrimSpace(field.String()) == "" {
- return false
- }
- // We make sure it isn't a directory.
- if strings.HasSuffix(field.String(), string(os.PathSeparator)) {
- return false
- }
- if _, err = os.Stat(field.String()); err != nil {
- switch t := err.(type) {
- case *fs.PathError:
- if t.Err == syscall.EINVAL {
- // It's definitely an invalid character in the filepath.
- return false
- }
- // It could be a permission error, a does-not-exist error, etc.
- // Out-of-scope for this validation, though.
- return true
- default:
- // Something went *seriously* wrong.
- /*
- Per https://pkg.go.dev/os#Stat:
- "If there is an error, it will be of type *PathError."
- */
- panic(err)
- }
- }
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isE164 is the validation function for validating if the current field's value is a valid e.164 formatted phone number.
-func isE164(fl FieldLevel) bool {
- return e164Regex.MatchString(fl.Field().String())
-}
-
-// isEmail is the validation function for validating if the current field's value is a valid email address.
-func isEmail(fl FieldLevel) bool {
- return emailRegex.MatchString(fl.Field().String())
-}
-
-// isHSLA is the validation function for validating if the current field's value is a valid HSLA color.
-func isHSLA(fl FieldLevel) bool {
- return hslaRegex.MatchString(fl.Field().String())
-}
-
-// isHSL is the validation function for validating if the current field's value is a valid HSL color.
-func isHSL(fl FieldLevel) bool {
- return hslRegex.MatchString(fl.Field().String())
-}
-
-// isRGBA is the validation function for validating if the current field's value is a valid RGBA color.
-func isRGBA(fl FieldLevel) bool {
- return rgbaRegex.MatchString(fl.Field().String())
-}
-
-// isRGB is the validation function for validating if the current field's value is a valid RGB color.
-func isRGB(fl FieldLevel) bool {
- return rgbRegex.MatchString(fl.Field().String())
-}
-
-// isHEXColor is the validation function for validating if the current field's value is a valid HEX color.
-func isHEXColor(fl FieldLevel) bool {
- return hexColorRegex.MatchString(fl.Field().String())
-}
-
-// isHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal.
-func isHexadecimal(fl FieldLevel) bool {
- return hexadecimalRegex.MatchString(fl.Field().String())
-}
-
-// isNumber is the validation function for validating if the current field's value is a valid number.
-func isNumber(fl FieldLevel) bool {
- switch fl.Field().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
- return true
- default:
- return numberRegex.MatchString(fl.Field().String())
- }
-}
-
-// isNumeric is the validation function for validating if the current field's value is a valid numeric value.
-func isNumeric(fl FieldLevel) bool {
- switch fl.Field().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
- return true
- default:
- return numericRegex.MatchString(fl.Field().String())
- }
-}
-
-// isAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value.
-func isAlphanum(fl FieldLevel) bool {
- return alphaNumericRegex.MatchString(fl.Field().String())
-}
-
-// isAlpha is the validation function for validating if the current field's value is a valid alpha value.
-func isAlpha(fl FieldLevel) bool {
- return alphaRegex.MatchString(fl.Field().String())
-}
-
-// isAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value.
-func isAlphanumUnicode(fl FieldLevel) bool {
- return alphaUnicodeNumericRegex.MatchString(fl.Field().String())
-}
-
-// isAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value.
-func isAlphaUnicode(fl FieldLevel) bool {
- return alphaUnicodeRegex.MatchString(fl.Field().String())
-}
-
-// isBoolean is the validation function for validating if the current field's value is a valid boolean value or can be safely converted to a boolean value.
-func isBoolean(fl FieldLevel) bool {
- switch fl.Field().Kind() {
- case reflect.Bool:
- return true
- default:
- _, err := strconv.ParseBool(fl.Field().String())
- return err == nil
- }
-}
-
-// isDefault is the opposite of required aka hasValue
-func isDefault(fl FieldLevel) bool {
- return !hasValue(fl)
-}
-
-// hasValue is the validation function for validating if the current field's value is not the default static value.
-func hasValue(fl FieldLevel) bool {
- field := fl.Field()
- switch field.Kind() {
- case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
- return !field.IsNil()
- default:
- if fl.(*validate).fldIsPointer && field.Interface() != nil {
- return true
- }
- return field.IsValid() && field.Interface() != reflect.Zero(field.Type()).Interface()
- }
-}
-
-// requireCheckFieldKind is a func for checking the field kind
-func requireCheckFieldKind(fl FieldLevel, param string, defaultNotFoundValue bool) bool {
- field := fl.Field()
- kind := field.Kind()
- var nullable, found bool
- if len(param) > 0 {
- field, kind, nullable, found = fl.GetStructFieldOKAdvanced2(fl.Parent(), param)
- if !found {
- return defaultNotFoundValue
- }
- }
- switch kind {
- case reflect.Invalid:
- return defaultNotFoundValue
- case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
- return field.IsNil()
- default:
- if nullable && field.Interface() != nil {
- return false
- }
- return field.IsValid() && field.Interface() == reflect.Zero(field.Type()).Interface()
- }
-}
-
-// requireCheckFieldValue is a func for checking the field value
-func requireCheckFieldValue(
- fl FieldLevel, param string, value string, defaultNotFoundValue bool,
-) bool {
- field, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), param)
- if !found {
- return defaultNotFoundValue
- }
-
- switch kind {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return field.Int() == asInt(value)
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return field.Uint() == asUint(value)
-
- case reflect.Float32, reflect.Float64:
- return field.Float() == asFloat(value)
-
- case reflect.Slice, reflect.Map, reflect.Array:
- return int64(field.Len()) == asInt(value)
-
- case reflect.Bool:
- return field.Bool() == asBool(value)
- }
-
- // default reflect.String:
- return field.String() == value
-}
-
-// requiredIf is the validation function
-// The field under validation must be present and not empty only if all the other specified fields are equal to the value following the specified field.
-func requiredIf(fl FieldLevel) bool {
- params := parseOneOfParam2(fl.Param())
- if len(params)%2 != 0 {
- panic(fmt.Sprintf("Bad param number for required_if %s", fl.FieldName()))
- }
- for i := 0; i < len(params); i += 2 {
- if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
- return true
- }
- }
- return hasValue(fl)
-}
-
-// excludedIf is the validation function
-// The field under validation must not be present, or must be empty, only if all the other specified fields are equal to the value following the specified field.
-func excludedIf(fl FieldLevel) bool {
- params := parseOneOfParam2(fl.Param())
- if len(params)%2 != 0 {
- panic(fmt.Sprintf("Bad param number for excluded_if %s", fl.FieldName()))
- }
-
- for i := 0; i < len(params); i += 2 {
- if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
- return true
- }
- }
- return !hasValue(fl)
-}
-
-// requiredUnless is the validation function
-// The field under validation must be present and not empty unless all the other specified fields are equal to the value following the specified field.
-func requiredUnless(fl FieldLevel) bool {
- params := parseOneOfParam2(fl.Param())
- if len(params)%2 != 0 {
- panic(fmt.Sprintf("Bad param number for required_unless %s", fl.FieldName()))
- }
-
- for i := 0; i < len(params); i += 2 {
- if requireCheckFieldValue(fl, params[i], params[i+1], false) {
- return true
- }
- }
- return hasValue(fl)
-}
-
-// skipUnless is the validation function
-// The field under validation must be present and not empty unless all the other specified fields are equal to the value following the specified field.
-func skipUnless(fl FieldLevel) bool {
- params := parseOneOfParam2(fl.Param())
- if len(params)%2 != 0 {
- panic(fmt.Sprintf("Bad param number for skip_unless %s", fl.FieldName()))
- }
- for i := 0; i < len(params); i += 2 {
- if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
- return true
- }
- }
- return hasValue(fl)
-}
-
-// excludedUnless is the validation function
-// The field under validation must not be present, or must be empty, unless all the other specified fields are equal to the value following the specified field.
-func excludedUnless(fl FieldLevel) bool {
- params := parseOneOfParam2(fl.Param())
- if len(params)%2 != 0 {
- panic(fmt.Sprintf("Bad param number for excluded_unless %s", fl.FieldName()))
- }
- for i := 0; i < len(params); i += 2 {
- if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
- return !hasValue(fl)
- }
- }
- return true
-}
-
-// excludedWith is the validation function
-// The field under validation must not be present, or must be empty, if any of the other specified fields are present.
-func excludedWith(fl FieldLevel) bool {
- params := parseOneOfParam2(fl.Param())
- for _, param := range params {
- if !requireCheckFieldKind(fl, param, true) {
- return !hasValue(fl)
- }
- }
- return true
-}
-
-// requiredWith is the validation function
-// The field under validation must be present and not empty only if any of the other specified fields are present.
-func requiredWith(fl FieldLevel) bool {
- params := parseOneOfParam2(fl.Param())
- for _, param := range params {
- if !requireCheckFieldKind(fl, param, true) {
- return hasValue(fl)
- }
- }
- return true
-}
-
-// excludedWithAll is the validation function
-// The field under validation must not be present, or must be empty, if all of the other specified fields are present.
-func excludedWithAll(fl FieldLevel) bool {
- params := parseOneOfParam2(fl.Param())
- for _, param := range params {
- if requireCheckFieldKind(fl, param, true) {
- return true
- }
- }
- return !hasValue(fl)
-}
-
-// requiredWithAll is the validation function
-// The field under validation must be present and not empty only if all of the other specified fields are present.
-func requiredWithAll(fl FieldLevel) bool {
- params := parseOneOfParam2(fl.Param())
- for _, param := range params {
- if requireCheckFieldKind(fl, param, true) {
- return true
- }
- }
- return hasValue(fl)
-}
-
-// excludedWithout is the validation function
-// The field under validation must not be present, or must be empty, when any of the other specified fields are not present.
-func excludedWithout(fl FieldLevel) bool {
- if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) {
- return !hasValue(fl)
- }
- return true
-}
-
-// requiredWithout is the validation function
-// The field under validation must be present and not empty only when any of the other specified fields are not present.
-func requiredWithout(fl FieldLevel) bool {
- if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) {
- return hasValue(fl)
- }
- return true
-}
-
-// excludedWithoutAll is the validation function
-// The field under validation must not be present, or must be empty, when all of the other specified fields are not present.
-func excludedWithoutAll(fl FieldLevel) bool {
- params := parseOneOfParam2(fl.Param())
- for _, param := range params {
- if !requireCheckFieldKind(fl, param, true) {
- return true
- }
- }
- return !hasValue(fl)
-}
-
-// requiredWithoutAll is the validation function
-// The field under validation must be present and not empty only when all of the other specified fields are not present.
-func requiredWithoutAll(fl FieldLevel) bool {
- params := parseOneOfParam2(fl.Param())
- for _, param := range params {
- if !requireCheckFieldKind(fl, param, true) {
- return true
- }
- }
- return hasValue(fl)
-}
-
-// isGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value.
-func isGteField(fl FieldLevel) bool {
- field := fl.Field()
- kind := field.Kind()
-
- currentField, currentKind, ok := fl.GetStructFieldOK()
- if !ok || currentKind != kind {
- return false
- }
-
- switch kind {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-
- return field.Int() >= currentField.Int()
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-
- return field.Uint() >= currentField.Uint()
-
- case reflect.Float32, reflect.Float64:
-
- return field.Float() >= currentField.Float()
-
- case reflect.Struct:
-
- fieldType := field.Type()
-
- if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
-
- t := currentField.Convert(timeType).Interface().(time.Time)
- fieldTime := field.Convert(timeType).Interface().(time.Time)
-
- return fieldTime.After(t) || fieldTime.Equal(t)
- }
-
- // Not Same underlying type i.e. struct and time
- if fieldType != currentField.Type() {
- return false
- }
- }
-
- // default reflect.String
- return len(field.String()) >= len(currentField.String())
-}
-
-// isGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value.
-func isGtField(fl FieldLevel) bool {
- field := fl.Field()
- kind := field.Kind()
-
- currentField, currentKind, ok := fl.GetStructFieldOK()
- if !ok || currentKind != kind {
- return false
- }
-
- switch kind {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-
- return field.Int() > currentField.Int()
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-
- return field.Uint() > currentField.Uint()
-
- case reflect.Float32, reflect.Float64:
-
- return field.Float() > currentField.Float()
-
- case reflect.Struct:
-
- fieldType := field.Type()
-
- if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
-
- t := currentField.Convert(timeType).Interface().(time.Time)
- fieldTime := field.Convert(timeType).Interface().(time.Time)
-
- return fieldTime.After(t)
- }
-
- // Not Same underlying type i.e. struct and time
- if fieldType != currentField.Type() {
- return false
- }
- }
-
- // default reflect.String
- return len(field.String()) > len(currentField.String())
-}
-
-// isGte is the validation function for validating if the current field's value is greater than or equal to the param's value.
-func isGte(fl FieldLevel) bool {
- field := fl.Field()
- param := fl.Param()
-
- switch field.Kind() {
-
- case reflect.String:
- p := asInt(param)
-
- return int64(utf8.RuneCountInString(field.String())) >= p
-
- case reflect.Slice, reflect.Map, reflect.Array:
- p := asInt(param)
-
- return int64(field.Len()) >= p
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- p := asIntFromType(field.Type(), param)
-
- return field.Int() >= p
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- p := asUint(param)
-
- return field.Uint() >= p
-
- case reflect.Float32, reflect.Float64:
- p := asFloat(param)
-
- return field.Float() >= p
-
- case reflect.Struct:
-
- if field.Type().ConvertibleTo(timeType) {
-
- now := time.Now().UTC()
- t := field.Convert(timeType).Interface().(time.Time)
-
- return t.After(now) || t.Equal(now)
- }
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isGt is the validation function for validating if the current field's value is greater than the param's value.
-func isGt(fl FieldLevel) bool {
- field := fl.Field()
- param := fl.Param()
-
- switch field.Kind() {
-
- case reflect.String:
- p := asInt(param)
-
- return int64(utf8.RuneCountInString(field.String())) > p
-
- case reflect.Slice, reflect.Map, reflect.Array:
- p := asInt(param)
-
- return int64(field.Len()) > p
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- p := asIntFromType(field.Type(), param)
-
- return field.Int() > p
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- p := asUint(param)
-
- return field.Uint() > p
-
- case reflect.Float32, reflect.Float64:
- p := asFloat(param)
-
- return field.Float() > p
- case reflect.Struct:
-
- if field.Type().ConvertibleTo(timeType) {
-
- return field.Convert(timeType).Interface().(time.Time).After(time.Now().UTC())
- }
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// hasLengthOf is the validation function for validating if the current field's value is equal to the param's value.
-func hasLengthOf(fl FieldLevel) bool {
- field := fl.Field()
- param := fl.Param()
-
- switch field.Kind() {
-
- case reflect.String:
- p := asInt(param)
-
- return int64(utf8.RuneCountInString(field.String())) == p
-
- case reflect.Slice, reflect.Map, reflect.Array:
- p := asInt(param)
-
- return int64(field.Len()) == p
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- p := asIntFromType(field.Type(), param)
-
- return field.Int() == p
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- p := asUint(param)
-
- return field.Uint() == p
-
- case reflect.Float32, reflect.Float64:
- p := asFloat(param)
-
- return field.Float() == p
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// hasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value.
-func hasMinOf(fl FieldLevel) bool {
- return isGte(fl)
-}
-
-// isLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value.
-func isLteField(fl FieldLevel) bool {
- field := fl.Field()
- kind := field.Kind()
-
- currentField, currentKind, ok := fl.GetStructFieldOK()
- if !ok || currentKind != kind {
- return false
- }
-
- switch kind {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-
- return field.Int() <= currentField.Int()
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-
- return field.Uint() <= currentField.Uint()
-
- case reflect.Float32, reflect.Float64:
-
- return field.Float() <= currentField.Float()
-
- case reflect.Struct:
-
- fieldType := field.Type()
-
- if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
-
- t := currentField.Convert(timeType).Interface().(time.Time)
- fieldTime := field.Convert(timeType).Interface().(time.Time)
-
- return fieldTime.Before(t) || fieldTime.Equal(t)
- }
-
- // Not Same underlying type i.e. struct and time
- if fieldType != currentField.Type() {
- return false
- }
- }
-
- // default reflect.String
- return len(field.String()) <= len(currentField.String())
-}
-
-// isLtField is the validation function for validating if the current field's value is less than the field specified by the param's value.
-func isLtField(fl FieldLevel) bool {
- field := fl.Field()
- kind := field.Kind()
-
- currentField, currentKind, ok := fl.GetStructFieldOK()
- if !ok || currentKind != kind {
- return false
- }
-
- switch kind {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-
- return field.Int() < currentField.Int()
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-
- return field.Uint() < currentField.Uint()
-
- case reflect.Float32, reflect.Float64:
-
- return field.Float() < currentField.Float()
-
- case reflect.Struct:
-
- fieldType := field.Type()
-
- if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
-
- t := currentField.Convert(timeType).Interface().(time.Time)
- fieldTime := field.Convert(timeType).Interface().(time.Time)
-
- return fieldTime.Before(t)
- }
-
- // Not Same underlying type i.e. struct and time
- if fieldType != currentField.Type() {
- return false
- }
- }
-
- // default reflect.String
- return len(field.String()) < len(currentField.String())
-}
-
-// isLte is the validation function for validating if the current field's value is less than or equal to the param's value.
-func isLte(fl FieldLevel) bool {
- field := fl.Field()
- param := fl.Param()
-
- switch field.Kind() {
-
- case reflect.String:
- p := asInt(param)
-
- return int64(utf8.RuneCountInString(field.String())) <= p
-
- case reflect.Slice, reflect.Map, reflect.Array:
- p := asInt(param)
-
- return int64(field.Len()) <= p
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- p := asIntFromType(field.Type(), param)
-
- return field.Int() <= p
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- p := asUint(param)
-
- return field.Uint() <= p
-
- case reflect.Float32, reflect.Float64:
- p := asFloat(param)
-
- return field.Float() <= p
-
- case reflect.Struct:
-
- if field.Type().ConvertibleTo(timeType) {
-
- now := time.Now().UTC()
- t := field.Convert(timeType).Interface().(time.Time)
-
- return t.Before(now) || t.Equal(now)
- }
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isLt is the validation function for validating if the current field's value is less than the param's value.
-func isLt(fl FieldLevel) bool {
- field := fl.Field()
- param := fl.Param()
-
- switch field.Kind() {
-
- case reflect.String:
- p := asInt(param)
-
- return int64(utf8.RuneCountInString(field.String())) < p
-
- case reflect.Slice, reflect.Map, reflect.Array:
- p := asInt(param)
-
- return int64(field.Len()) < p
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- p := asIntFromType(field.Type(), param)
-
- return field.Int() < p
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- p := asUint(param)
-
- return field.Uint() < p
-
- case reflect.Float32, reflect.Float64:
- p := asFloat(param)
-
- return field.Float() < p
-
- case reflect.Struct:
-
- if field.Type().ConvertibleTo(timeType) {
-
- return field.Convert(timeType).Interface().(time.Time).Before(time.Now().UTC())
- }
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// hasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value.
-func hasMaxOf(fl FieldLevel) bool {
- return isLte(fl)
-}
-
-// isTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address.
-func isTCP4AddrResolvable(fl FieldLevel) bool {
- if !isIP4Addr(fl) {
- return false
- }
-
- _, err := net.ResolveTCPAddr("tcp4", fl.Field().String())
- return err == nil
-}
-
-// isTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address.
-func isTCP6AddrResolvable(fl FieldLevel) bool {
- if !isIP6Addr(fl) {
- return false
- }
-
- _, err := net.ResolveTCPAddr("tcp6", fl.Field().String())
-
- return err == nil
-}
-
-// isTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address.
-func isTCPAddrResolvable(fl FieldLevel) bool {
- if !isIP4Addr(fl) && !isIP6Addr(fl) {
- return false
- }
-
- _, err := net.ResolveTCPAddr("tcp", fl.Field().String())
-
- return err == nil
-}
-
-// isUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address.
-func isUDP4AddrResolvable(fl FieldLevel) bool {
- if !isIP4Addr(fl) {
- return false
- }
-
- _, err := net.ResolveUDPAddr("udp4", fl.Field().String())
-
- return err == nil
-}
-
-// isUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address.
-func isUDP6AddrResolvable(fl FieldLevel) bool {
- if !isIP6Addr(fl) {
- return false
- }
-
- _, err := net.ResolveUDPAddr("udp6", fl.Field().String())
-
- return err == nil
-}
-
-// isUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address.
-func isUDPAddrResolvable(fl FieldLevel) bool {
- if !isIP4Addr(fl) && !isIP6Addr(fl) {
- return false
- }
-
- _, err := net.ResolveUDPAddr("udp", fl.Field().String())
-
- return err == nil
-}
-
-// isIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address.
-func isIP4AddrResolvable(fl FieldLevel) bool {
- if !isIPv4(fl) {
- return false
- }
-
- _, err := net.ResolveIPAddr("ip4", fl.Field().String())
-
- return err == nil
-}
-
-// isIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address.
-func isIP6AddrResolvable(fl FieldLevel) bool {
- if !isIPv6(fl) {
- return false
- }
-
- _, err := net.ResolveIPAddr("ip6", fl.Field().String())
-
- return err == nil
-}
-
-// isIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address.
-func isIPAddrResolvable(fl FieldLevel) bool {
- if !isIP(fl) {
- return false
- }
-
- _, err := net.ResolveIPAddr("ip", fl.Field().String())
-
- return err == nil
-}
-
-// isUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address.
-func isUnixAddrResolvable(fl FieldLevel) bool {
- _, err := net.ResolveUnixAddr("unix", fl.Field().String())
-
- return err == nil
-}
-
-func isIP4Addr(fl FieldLevel) bool {
- val := fl.Field().String()
-
- if idx := strings.LastIndex(val, ":"); idx != -1 {
- val = val[0:idx]
- }
-
- ip := net.ParseIP(val)
-
- return ip != nil && ip.To4() != nil
-}
-
-func isIP6Addr(fl FieldLevel) bool {
- val := fl.Field().String()
-
- if idx := strings.LastIndex(val, ":"); idx != -1 {
- if idx != 0 && val[idx-1:idx] == "]" {
- val = val[1 : idx-1]
- }
- }
-
- ip := net.ParseIP(val)
-
- return ip != nil && ip.To4() == nil
-}
-
-func isHostnameRFC952(fl FieldLevel) bool {
- return hostnameRegexRFC952.MatchString(fl.Field().String())
-}
-
-func isHostnameRFC1123(fl FieldLevel) bool {
- return hostnameRegexRFC1123.MatchString(fl.Field().String())
-}
-
-func isFQDN(fl FieldLevel) bool {
- val := fl.Field().String()
-
- if val == "" {
- return false
- }
-
- return fqdnRegexRFC1123.MatchString(val)
-}
-
-// isDir is the validation function for validating if the current field's value is a valid existing directory.
-func isDir(fl FieldLevel) bool {
- field := fl.Field()
-
- if field.Kind() == reflect.String {
- fileInfo, err := os.Stat(field.String())
- if err != nil {
- return false
- }
-
- return fileInfo.IsDir()
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isDirPath is the validation function for validating if the current field's value is a valid directory.
-func isDirPath(fl FieldLevel) bool {
-
- var exists bool
- var err error
-
- field := fl.Field()
-
- // If it exists, it obviously is valid.
- // This is done first to avoid code duplication and unnecessary additional logic.
- if exists = isDir(fl); exists {
- return true
- }
-
- // It does not exist but may still be a valid path.
- switch field.Kind() {
- case reflect.String:
- // Every OS allows for whitespace, but none
- // let you use a dir with no name (to my knowledge).
- // Unless you're dealing with raw inodes, but I digress.
- if strings.TrimSpace(field.String()) == "" {
- return false
- }
- if _, err = os.Stat(field.String()); err != nil {
- switch t := err.(type) {
- case *fs.PathError:
- if t.Err == syscall.EINVAL {
- // It's definitely an invalid character in the path.
- return false
- }
- // It could be a permission error, a does-not-exist error, etc.
- // Out-of-scope for this validation, though.
- // Lastly, we make sure it is a directory.
- if strings.HasSuffix(field.String(), string(os.PathSeparator)) {
- return true
- } else {
- return false
- }
- default:
- // Something went *seriously* wrong.
- /*
- Per https://pkg.go.dev/os#Stat:
- "If there is an error, it will be of type *PathError."
- */
- panic(err)
- }
- }
- // We repeat the check here to make sure it is an explicit directory in case the above os.Stat didn't trigger an error.
- if strings.HasSuffix(field.String(), string(os.PathSeparator)) {
- return true
- } else {
- return false
- }
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isJSON is the validation function for validating if the current field's value is a valid json string.
-func isJSON(fl FieldLevel) bool {
- field := fl.Field()
-
- if field.Kind() == reflect.String {
- val := field.String()
- return json.Valid([]byte(val))
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isJWT is the validation function for validating if the current field's value is a valid JWT string.
-func isJWT(fl FieldLevel) bool {
- return jWTRegex.MatchString(fl.Field().String())
-}
-
-// isHostnamePort validates a <dns>:<port> combination for fields typically used for socket address.
-func isHostnamePort(fl FieldLevel) bool {
- val := fl.Field().String()
- host, port, err := net.SplitHostPort(val)
- if err != nil {
- return false
- }
-	// Port must be an integer between 1 and 65535.
- if portNum, err := strconv.ParseInt(
- port, 10, 32,
- ); err != nil || portNum > 65535 || portNum < 1 {
- return false
- }
-
- // If host is specified, it should match a DNS name
- if host != "" {
- return hostnameRegexRFC1123.MatchString(host)
- }
- return true
-}
-
-// isLowercase is the validation function for validating if the current field's value is a lowercase string.
-func isLowercase(fl FieldLevel) bool {
- field := fl.Field()
-
- if field.Kind() == reflect.String {
- if field.String() == "" {
- return false
- }
- return field.String() == strings.ToLower(field.String())
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isUppercase is the validation function for validating if the current field's value is an uppercase string.
-func isUppercase(fl FieldLevel) bool {
- field := fl.Field()
-
- if field.Kind() == reflect.String {
- if field.String() == "" {
- return false
- }
- return field.String() == strings.ToUpper(field.String())
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isDatetime is the validation function for validating if the current field's value is a valid datetime string.
-func isDatetime(fl FieldLevel) bool {
- field := fl.Field()
- param := fl.Param()
-
- if field.Kind() == reflect.String {
- _, err := time.Parse(param, field.String())
-
- return err == nil
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isTimeZone is the validation function for validating if the current field's value is a valid time zone string.
-func isTimeZone(fl FieldLevel) bool {
- field := fl.Field()
-
- if field.Kind() == reflect.String {
- // empty value is converted to UTC by time.LoadLocation but disallow it as it is not a valid time zone name
- if field.String() == "" {
- return false
- }
-
- // Local value is converted to the current system time zone by time.LoadLocation but disallow it as it is not a valid time zone name
- if strings.ToLower(field.String()) == "local" {
- return false
- }
-
- _, err := time.LoadLocation(field.String())
- return err == nil
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-2 country code.
-func isIso3166Alpha2(fl FieldLevel) bool {
- val := fl.Field().String()
- return iso3166_1_alpha2[val]
-}
-
-// isIso3166Alpha3 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 country code.
-func isIso3166Alpha3(fl FieldLevel) bool {
- val := fl.Field().String()
- return iso3166_1_alpha3[val]
-}
-
-// isIso3166AlphaNumeric is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric country code.
-func isIso3166AlphaNumeric(fl FieldLevel) bool {
- field := fl.Field()
-
- var code int
- switch field.Kind() {
- case reflect.String:
- i, err := strconv.Atoi(field.String())
- if err != nil {
- return false
- }
- code = i % 1000
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- code = int(field.Int() % 1000)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- code = int(field.Uint() % 1000)
- default:
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
- }
- return iso3166_1_alpha_numeric[code]
-}
-
-// isIso31662 is the validation function for validating if the current field's value is a valid iso3166-2 code.
-func isIso31662(fl FieldLevel) bool {
- val := fl.Field().String()
- return iso3166_2[val]
-}
-
-// isIso4217 is the validation function for validating if the current field's value is a valid iso4217 currency code.
-func isIso4217(fl FieldLevel) bool {
- val := fl.Field().String()
- return iso4217[val]
-}
-
-// isIso4217Numeric is the validation function for validating if the current field's value is a valid iso4217 numeric currency code.
-func isIso4217Numeric(fl FieldLevel) bool {
- field := fl.Field()
-
- var code int
- switch field.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- code = int(field.Int())
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- code = int(field.Uint())
- default:
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
- }
- return iso4217_numeric[code]
-}
-
-// isBCP47LanguageTag is the validation function for validating if the current field's value is a valid BCP 47 language tag, as parsed by language.Parse
-func isBCP47LanguageTag(fl FieldLevel) bool {
- field := fl.Field()
-
- if field.Kind() == reflect.String {
- _, err := language.Parse(field.String())
- return err == nil
- }
-
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
-}
-
-// isIsoBicFormat is the validation function for validating if the current field's value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362
-func isIsoBicFormat(fl FieldLevel) bool {
- bicString := fl.Field().String()
-
- return bicRegex.MatchString(bicString)
-}
-
-// isSemverFormat is the validation function for validating if the current field's value is a valid semver version, defined in Semantic Versioning 2.0.0
-func isSemverFormat(fl FieldLevel) bool {
- semverString := fl.Field().String()
-
- return semverRegex.MatchString(semverString)
-}
-
-// isCveFormat is the validation function for validating if the current field's value is a valid CVE ID, as defined by cve.mitre.org
-func isCveFormat(fl FieldLevel) bool {
- cveString := fl.Field().String()
-
- return cveRegex.MatchString(cveString)
-}
-
-// isDnsRFC1035LabelFormat is the validation function
-// for validating if the current field's value is
-// a valid dns RFC 1035 label, defined in RFC 1035.
-func isDnsRFC1035LabelFormat(fl FieldLevel) bool {
- val := fl.Field().String()
- return dnsRegexRFC1035Label.MatchString(val)
-}
-
-// digitsHaveLuhnChecksum returns true if and only if the last element of the given digits slice is the Luhn checksum of the previous elements
-func digitsHaveLuhnChecksum(digits []string) bool {
- size := len(digits)
- sum := 0
- for i, digit := range digits {
- value, err := strconv.Atoi(digit)
- if err != nil {
- return false
- }
- if size%2 == 0 && i%2 == 0 || size%2 == 1 && i%2 == 1 {
- v := value * 2
- if v >= 10 {
- sum += 1 + (v % 10)
- } else {
- sum += v
- }
- } else {
- sum += value
- }
- }
- return (sum % 10) == 0
-}
-
-// isMongoDB is the validation function for validating if the current field's value is a valid MongoDB ObjectID
-func isMongoDB(fl FieldLevel) bool {
- val := fl.Field().String()
- return mongodbRegex.MatchString(val)
-}
-
-// isCreditCard is the validation function for validating if the current field's value is a valid credit card number
-func isCreditCard(fl FieldLevel) bool {
- val := fl.Field().String()
- var creditCard bytes.Buffer
- segments := strings.Split(val, " ")
- for _, segment := range segments {
- if len(segment) < 3 {
- return false
- }
- creditCard.WriteString(segment)
- }
-
- ccDigits := strings.Split(creditCard.String(), "")
- size := len(ccDigits)
- if size < 12 || size > 19 {
- return false
- }
-
- return digitsHaveLuhnChecksum(ccDigits)
-}
-
-// hasLuhnChecksum is the validation function for validating if the current field's value has a valid Luhn checksum
-func hasLuhnChecksum(fl FieldLevel) bool {
- field := fl.Field()
- var str string // convert to a string which will then be split into single digits; easier and more readable than shifting/extracting single digits from a number
- switch field.Kind() {
- case reflect.String:
- str = field.String()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- str = strconv.FormatInt(field.Int(), 10)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- str = strconv.FormatUint(field.Uint(), 10)
- default:
- panic(fmt.Sprintf("Bad field type %T", field.Interface()))
- }
- size := len(str)
- if size < 2 { // there has to be at least one digit that carries a meaning + the checksum
- return false
- }
- digits := strings.Split(str, "")
- return digitsHaveLuhnChecksum(digits)
-}
-
-// isCron is the validation function for validating if the current field's value is a valid cron expression
-func isCron(fl FieldLevel) bool {
- cronString := fl.Field().String()
- return cronRegex.MatchString(cronString)
-}
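For reference, the digitsHaveLuhnChecksum/hasLuhnChecksum helpers removed above implement the standard Luhn mod-10 check: every second digit counted from the right (excluding the check digit) is doubled, doubled values of 10 or more contribute the sum of their digits, and the grand total must be divisible by 10. The standalone sketch below reproduces that rule outside the validator; the luhnValid name and the sample numbers are illustrative and are not part of the removed code.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// luhnValid reports whether the trailing digit of s is a valid Luhn check
// digit for the preceding digits, mirroring the removed helper's doubling rule.
func luhnValid(s string) bool {
	digits := strings.Split(s, "")
	size := len(digits)
	if size < 2 { // need at least one payload digit plus the check digit
		return false
	}
	sum := 0
	for i, d := range digits {
		v, err := strconv.Atoi(d)
		if err != nil {
			return false // non-digit input is rejected, as in the original
		}
		// Double every second digit from the right, skipping the check digit:
		// even offsets when the length is even, odd offsets when it is odd.
		if (size%2 == 0 && i%2 == 0) || (size%2 == 1 && i%2 == 1) {
			v *= 2
			if v >= 10 {
				v = 1 + v%10 // digit sum of the doubled value
			}
		}
		sum += v
	}
	return sum%10 == 0
}

func main() {
	fmt.Println(luhnValid("79927398713")) // true: the usual Luhn test number
	fmt.Println(luhnValid("79927398710")) // false: broken check digit
}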
diff --git a/vendor/github.com/go-playground/validator/v10/cache.go b/vendor/github.com/go-playground/validator/v10/cache.go
deleted file mode 100644
index bbfd2a4af1..0000000000
--- a/vendor/github.com/go-playground/validator/v10/cache.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package validator
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
-)
-
-type tagType uint8
-
-const (
- typeDefault tagType = iota
- typeOmitEmpty
- typeIsDefault
- typeNoStructLevel
- typeStructOnly
- typeDive
- typeOr
- typeKeys
- typeEndKeys
-)
-
-const (
- invalidValidation = "Invalid validation tag on field '%s'"
- undefinedValidation = "Undefined validation function '%s' on field '%s'"
- keysTagNotDefined = "'" + endKeysTag + "' tag encountered without a corresponding '" + keysTag + "' tag"
-)
-
-type structCache struct {
- lock sync.Mutex
- m atomic.Value // map[reflect.Type]*cStruct
-}
-
-func (sc *structCache) Get(key reflect.Type) (c *cStruct, found bool) {
- c, found = sc.m.Load().(map[reflect.Type]*cStruct)[key]
- return
-}
-
-func (sc *structCache) Set(key reflect.Type, value *cStruct) {
- m := sc.m.Load().(map[reflect.Type]*cStruct)
- nm := make(map[reflect.Type]*cStruct, len(m)+1)
- for k, v := range m {
- nm[k] = v
- }
- nm[key] = value
- sc.m.Store(nm)
-}
-
-type tagCache struct {
- lock sync.Mutex
- m atomic.Value // map[string]*cTag
-}
-
-func (tc *tagCache) Get(key string) (c *cTag, found bool) {
- c, found = tc.m.Load().(map[string]*cTag)[key]
- return
-}
-
-func (tc *tagCache) Set(key string, value *cTag) {
- m := tc.m.Load().(map[string]*cTag)
- nm := make(map[string]*cTag, len(m)+1)
- for k, v := range m {
- nm[k] = v
- }
- nm[key] = value
- tc.m.Store(nm)
-}
-
-type cStruct struct {
- name string
- fields []*cField
- fn StructLevelFuncCtx
-}
-
-type cField struct {
- idx int
- name string
- altName string
- namesEqual bool
- cTags *cTag
-}
-
-type cTag struct {
- tag string
- aliasTag string
- actualAliasTag string
- param string
- keys *cTag // only populated when using tag's 'keys' and 'endkeys' for map key validation
- next *cTag
- fn FuncCtx
- typeof tagType
- hasTag bool
- hasAlias bool
- hasParam bool // true if parameter used eg. eq= where the equal sign has been set
- isBlockEnd bool // indicates the current tag represents the last validation in the block
- runValidationWhenNil bool
-}
-
-func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStruct {
- v.structCache.lock.Lock()
- defer v.structCache.lock.Unlock() // leave as defer! because if inner panics, it will never get unlocked otherwise!
-
- typ := current.Type()
-
- // could have been multiple trying to access, but once first is done this ensures struct
- // isn't parsed again.
- cs, ok := v.structCache.Get(typ)
- if ok {
- return cs
- }
-
- cs = &cStruct{name: sName, fields: make([]*cField, 0), fn: v.structLevelFuncs[typ]}
-
- numFields := current.NumField()
- rules := v.rules[typ]
-
- var ctag *cTag
- var fld reflect.StructField
- var tag string
- var customName string
-
- for i := 0; i < numFields; i++ {
-
- fld = typ.Field(i)
-
- if !fld.Anonymous && len(fld.PkgPath) > 0 {
- continue
- }
-
- if rtag, ok := rules[fld.Name]; ok {
- tag = rtag
- } else {
- tag = fld.Tag.Get(v.tagName)
- }
-
- if tag == skipValidationTag {
- continue
- }
-
- customName = fld.Name
-
- if v.hasTagNameFunc {
- name := v.tagNameFunc(fld)
- if len(name) > 0 {
- customName = name
- }
- }
-
- // NOTE: cannot use shared tag cache, because tags may be equal, but things like alias may be different
- // and so only struct level caching can be used instead of combined with Field tag caching
-
- if len(tag) > 0 {
- ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, "", false)
- } else {
- // even if field doesn't have validations need cTag for traversing to potential inner/nested
- // elements of the field.
- ctag = new(cTag)
- }
-
- cs.fields = append(cs.fields, &cField{
- idx: i,
- name: fld.Name,
- altName: customName,
- cTags: ctag,
- namesEqual: fld.Name == customName,
- })
- }
- v.structCache.Set(typ, cs)
- return cs
-}
-
-func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) {
- var t string
- noAlias := len(alias) == 0
- tags := strings.Split(tag, tagSeparator)
-
- for i := 0; i < len(tags); i++ {
- t = tags[i]
- if noAlias {
- alias = t
- }
-
- // check map for alias and process new tags, otherwise process as usual
- if tagsVal, found := v.aliases[t]; found {
- if i == 0 {
- firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true)
- } else {
- next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true)
- current.next, current = next, curr
-
- }
- continue
- }
-
- var prevTag tagType
-
- if i == 0 {
- current = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true, typeof: typeDefault}
- firstCtag = current
- } else {
- prevTag = current.typeof
- current.next = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true}
- current = current.next
- }
-
- switch t {
- case diveTag:
- current.typeof = typeDive
- continue
-
- case keysTag:
- current.typeof = typeKeys
-
- if i == 0 || prevTag != typeDive {
- panic(fmt.Sprintf("'%s' tag must be immediately preceded by the '%s' tag", keysTag, diveTag))
- }
-
- current.typeof = typeKeys
-
- // need to pass along only keys tag
- // need to increment i to skip over the keys tags
- b := make([]byte, 0, 64)
-
- i++
-
- for ; i < len(tags); i++ {
-
- b = append(b, tags[i]...)
- b = append(b, ',')
-
- if tags[i] == endKeysTag {
- break
- }
- }
-
- current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false)
- continue
-
- case endKeysTag:
- current.typeof = typeEndKeys
-
- // if there are more in tags then there was no keysTag defined
- // and an error should be thrown
- if i != len(tags)-1 {
- panic(keysTagNotDefined)
- }
- return
-
- case omitempty:
- current.typeof = typeOmitEmpty
- continue
-
- case structOnlyTag:
- current.typeof = typeStructOnly
- continue
-
- case noStructLevelTag:
- current.typeof = typeNoStructLevel
- continue
-
- default:
- if t == isdefault {
- current.typeof = typeIsDefault
- }
- // if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C"
- orVals := strings.Split(t, orSeparator)
-
- for j := 0; j < len(orVals); j++ {
- vals := strings.SplitN(orVals[j], tagKeySeparator, 2)
- if noAlias {
- alias = vals[0]
- current.aliasTag = alias
- } else {
- current.actualAliasTag = t
- }
-
- if j > 0 {
- current.next = &cTag{aliasTag: alias, actualAliasTag: current.actualAliasTag, hasAlias: hasAlias, hasTag: true}
- current = current.next
- }
- current.hasParam = len(vals) > 1
-
- current.tag = vals[0]
- if len(current.tag) == 0 {
- panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName)))
- }
-
- if wrapper, ok := v.validations[current.tag]; ok {
- current.fn = wrapper.fn
- current.runValidationWhenNil = wrapper.runValidatinOnNil
- } else {
- panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName)))
- }
-
- if len(orVals) > 1 {
- current.typeof = typeOr
- }
-
- if len(vals) > 1 {
- current.param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1)
- }
- }
- current.isBlockEnd = true
- }
- }
- return
-}
-
-func (v *Validate) fetchCacheTag(tag string) *cTag {
- // find cached tag
- ctag, found := v.tagCache.Get(tag)
- if !found {
- v.tagCache.lock.Lock()
- defer v.tagCache.lock.Unlock()
-
- // could have been multiple trying to access, but once first is done this ensures tag
- // isn't parsed again.
- ctag, found = v.tagCache.Get(tag)
- if !found {
- ctag, _ = v.parseFieldTagsRecursive(tag, "", "", false)
- v.tagCache.Set(tag, ctag)
- }
- }
- return ctag
-}
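The cache.go file removed above keeps its structCache and tagCache read paths lock-free by storing an immutable map inside an atomic.Value: readers only Load, while writers take the mutex, copy the map, add the new entry, and Store the copy (fetchCacheTag additionally re-checks the cache under the lock before parsing). The sketch below shows that copy-on-write pattern with an invented cowCache type standing in for the real caches; it is an illustration, not code from this change.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// cowCache illustrates the copy-on-write map used by the removed caches:
// Get never locks, Set serializes writers and publishes a fresh map.
type cowCache struct {
	lock sync.Mutex
	m    atomic.Value // holds a map[string]int
}

func newCowCache() *cowCache {
	c := &cowCache{}
	c.m.Store(map[string]int{})
	return c
}

func (c *cowCache) Get(key string) (int, bool) {
	v, ok := c.m.Load().(map[string]int)[key]
	return v, ok
}

func (c *cowCache) Set(key string, value int) {
	c.lock.Lock()
	defer c.lock.Unlock()
	old := c.m.Load().(map[string]int)
	next := make(map[string]int, len(old)+1)
	for k, v := range old { // copy, never mutate the published map
		next[k] = v
	}
	next[key] = value
	c.m.Store(next)
}

func main() {
	c := newCowCache()
	c.Set("answer", 42)
	if v, ok := c.Get("answer"); ok {
		fmt.Println("answer =", v)
	}
}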
diff --git a/vendor/github.com/go-playground/validator/v10/country_codes.go b/vendor/github.com/go-playground/validator/v10/country_codes.go
deleted file mode 100644
index 91b2e0b909..0000000000
--- a/vendor/github.com/go-playground/validator/v10/country_codes.go
+++ /dev/null
@@ -1,1150 +0,0 @@
-package validator
-
-var iso3166_1_alpha2 = map[string]bool{
- // see: https://www.iso.org/iso-3166-country-codes.html
- "AF": true, "AX": true, "AL": true, "DZ": true, "AS": true,
- "AD": true, "AO": true, "AI": true, "AQ": true, "AG": true,
- "AR": true, "AM": true, "AW": true, "AU": true, "AT": true,
- "AZ": true, "BS": true, "BH": true, "BD": true, "BB": true,
- "BY": true, "BE": true, "BZ": true, "BJ": true, "BM": true,
- "BT": true, "BO": true, "BQ": true, "BA": true, "BW": true,
- "BV": true, "BR": true, "IO": true, "BN": true, "BG": true,
- "BF": true, "BI": true, "KH": true, "CM": true, "CA": true,
- "CV": true, "KY": true, "CF": true, "TD": true, "CL": true,
- "CN": true, "CX": true, "CC": true, "CO": true, "KM": true,
- "CG": true, "CD": true, "CK": true, "CR": true, "CI": true,
- "HR": true, "CU": true, "CW": true, "CY": true, "CZ": true,
- "DK": true, "DJ": true, "DM": true, "DO": true, "EC": true,
- "EG": true, "SV": true, "GQ": true, "ER": true, "EE": true,
- "ET": true, "FK": true, "FO": true, "FJ": true, "FI": true,
- "FR": true, "GF": true, "PF": true, "TF": true, "GA": true,
- "GM": true, "GE": true, "DE": true, "GH": true, "GI": true,
- "GR": true, "GL": true, "GD": true, "GP": true, "GU": true,
- "GT": true, "GG": true, "GN": true, "GW": true, "GY": true,
- "HT": true, "HM": true, "VA": true, "HN": true, "HK": true,
- "HU": true, "IS": true, "IN": true, "ID": true, "IR": true,
- "IQ": true, "IE": true, "IM": true, "IL": true, "IT": true,
- "JM": true, "JP": true, "JE": true, "JO": true, "KZ": true,
- "KE": true, "KI": true, "KP": true, "KR": true, "KW": true,
- "KG": true, "LA": true, "LV": true, "LB": true, "LS": true,
- "LR": true, "LY": true, "LI": true, "LT": true, "LU": true,
- "MO": true, "MK": true, "MG": true, "MW": true, "MY": true,
- "MV": true, "ML": true, "MT": true, "MH": true, "MQ": true,
- "MR": true, "MU": true, "YT": true, "MX": true, "FM": true,
- "MD": true, "MC": true, "MN": true, "ME": true, "MS": true,
- "MA": true, "MZ": true, "MM": true, "NA": true, "NR": true,
- "NP": true, "NL": true, "NC": true, "NZ": true, "NI": true,
- "NE": true, "NG": true, "NU": true, "NF": true, "MP": true,
- "NO": true, "OM": true, "PK": true, "PW": true, "PS": true,
- "PA": true, "PG": true, "PY": true, "PE": true, "PH": true,
- "PN": true, "PL": true, "PT": true, "PR": true, "QA": true,
- "RE": true, "RO": true, "RU": true, "RW": true, "BL": true,
- "SH": true, "KN": true, "LC": true, "MF": true, "PM": true,
- "VC": true, "WS": true, "SM": true, "ST": true, "SA": true,
- "SN": true, "RS": true, "SC": true, "SL": true, "SG": true,
- "SX": true, "SK": true, "SI": true, "SB": true, "SO": true,
- "ZA": true, "GS": true, "SS": true, "ES": true, "LK": true,
- "SD": true, "SR": true, "SJ": true, "SZ": true, "SE": true,
- "CH": true, "SY": true, "TW": true, "TJ": true, "TZ": true,
- "TH": true, "TL": true, "TG": true, "TK": true, "TO": true,
- "TT": true, "TN": true, "TR": true, "TM": true, "TC": true,
- "TV": true, "UG": true, "UA": true, "AE": true, "GB": true,
- "US": true, "UM": true, "UY": true, "UZ": true, "VU": true,
- "VE": true, "VN": true, "VG": true, "VI": true, "WF": true,
- "EH": true, "YE": true, "ZM": true, "ZW": true, "XK": true,
-}
-
-var iso3166_1_alpha3 = map[string]bool{
- // see: https://www.iso.org/iso-3166-country-codes.html
- "AFG": true, "ALB": true, "DZA": true, "ASM": true, "AND": true,
- "AGO": true, "AIA": true, "ATA": true, "ATG": true, "ARG": true,
- "ARM": true, "ABW": true, "AUS": true, "AUT": true, "AZE": true,
- "BHS": true, "BHR": true, "BGD": true, "BRB": true, "BLR": true,
- "BEL": true, "BLZ": true, "BEN": true, "BMU": true, "BTN": true,
- "BOL": true, "BES": true, "BIH": true, "BWA": true, "BVT": true,
- "BRA": true, "IOT": true, "BRN": true, "BGR": true, "BFA": true,
- "BDI": true, "CPV": true, "KHM": true, "CMR": true, "CAN": true,
- "CYM": true, "CAF": true, "TCD": true, "CHL": true, "CHN": true,
- "CXR": true, "CCK": true, "COL": true, "COM": true, "COD": true,
- "COG": true, "COK": true, "CRI": true, "HRV": true, "CUB": true,
- "CUW": true, "CYP": true, "CZE": true, "CIV": true, "DNK": true,
- "DJI": true, "DMA": true, "DOM": true, "ECU": true, "EGY": true,
- "SLV": true, "GNQ": true, "ERI": true, "EST": true, "SWZ": true,
- "ETH": true, "FLK": true, "FRO": true, "FJI": true, "FIN": true,
- "FRA": true, "GUF": true, "PYF": true, "ATF": true, "GAB": true,
- "GMB": true, "GEO": true, "DEU": true, "GHA": true, "GIB": true,
- "GRC": true, "GRL": true, "GRD": true, "GLP": true, "GUM": true,
- "GTM": true, "GGY": true, "GIN": true, "GNB": true, "GUY": true,
- "HTI": true, "HMD": true, "VAT": true, "HND": true, "HKG": true,
- "HUN": true, "ISL": true, "IND": true, "IDN": true, "IRN": true,
- "IRQ": true, "IRL": true, "IMN": true, "ISR": true, "ITA": true,
- "JAM": true, "JPN": true, "JEY": true, "JOR": true, "KAZ": true,
- "KEN": true, "KIR": true, "PRK": true, "KOR": true, "KWT": true,
- "KGZ": true, "LAO": true, "LVA": true, "LBN": true, "LSO": true,
- "LBR": true, "LBY": true, "LIE": true, "LTU": true, "LUX": true,
- "MAC": true, "MDG": true, "MWI": true, "MYS": true, "MDV": true,
- "MLI": true, "MLT": true, "MHL": true, "MTQ": true, "MRT": true,
- "MUS": true, "MYT": true, "MEX": true, "FSM": true, "MDA": true,
- "MCO": true, "MNG": true, "MNE": true, "MSR": true, "MAR": true,
- "MOZ": true, "MMR": true, "NAM": true, "NRU": true, "NPL": true,
- "NLD": true, "NCL": true, "NZL": true, "NIC": true, "NER": true,
- "NGA": true, "NIU": true, "NFK": true, "MKD": true, "MNP": true,
- "NOR": true, "OMN": true, "PAK": true, "PLW": true, "PSE": true,
- "PAN": true, "PNG": true, "PRY": true, "PER": true, "PHL": true,
- "PCN": true, "POL": true, "PRT": true, "PRI": true, "QAT": true,
- "ROU": true, "RUS": true, "RWA": true, "REU": true, "BLM": true,
- "SHN": true, "KNA": true, "LCA": true, "MAF": true, "SPM": true,
- "VCT": true, "WSM": true, "SMR": true, "STP": true, "SAU": true,
- "SEN": true, "SRB": true, "SYC": true, "SLE": true, "SGP": true,
- "SXM": true, "SVK": true, "SVN": true, "SLB": true, "SOM": true,
- "ZAF": true, "SGS": true, "SSD": true, "ESP": true, "LKA": true,
- "SDN": true, "SUR": true, "SJM": true, "SWE": true, "CHE": true,
- "SYR": true, "TWN": true, "TJK": true, "TZA": true, "THA": true,
- "TLS": true, "TGO": true, "TKL": true, "TON": true, "TTO": true,
- "TUN": true, "TUR": true, "TKM": true, "TCA": true, "TUV": true,
- "UGA": true, "UKR": true, "ARE": true, "GBR": true, "UMI": true,
- "USA": true, "URY": true, "UZB": true, "VUT": true, "VEN": true,
- "VNM": true, "VGB": true, "VIR": true, "WLF": true, "ESH": true,
- "YEM": true, "ZMB": true, "ZWE": true, "ALA": true, "UNK": true,
-}
-var iso3166_1_alpha_numeric = map[int]bool{
- // see: https://www.iso.org/iso-3166-country-codes.html
- 4: true, 8: true, 12: true, 16: true, 20: true,
- 24: true, 660: true, 10: true, 28: true, 32: true,
- 51: true, 533: true, 36: true, 40: true, 31: true,
- 44: true, 48: true, 50: true, 52: true, 112: true,
- 56: true, 84: true, 204: true, 60: true, 64: true,
- 68: true, 535: true, 70: true, 72: true, 74: true,
- 76: true, 86: true, 96: true, 100: true, 854: true,
- 108: true, 132: true, 116: true, 120: true, 124: true,
- 136: true, 140: true, 148: true, 152: true, 156: true,
- 162: true, 166: true, 170: true, 174: true, 180: true,
- 178: true, 184: true, 188: true, 191: true, 192: true,
- 531: true, 196: true, 203: true, 384: true, 208: true,
- 262: true, 212: true, 214: true, 218: true, 818: true,
- 222: true, 226: true, 232: true, 233: true, 748: true,
- 231: true, 238: true, 234: true, 242: true, 246: true,
- 250: true, 254: true, 258: true, 260: true, 266: true,
- 270: true, 268: true, 276: true, 288: true, 292: true,
- 300: true, 304: true, 308: true, 312: true, 316: true,
- 320: true, 831: true, 324: true, 624: true, 328: true,
- 332: true, 334: true, 336: true, 340: true, 344: true,
- 348: true, 352: true, 356: true, 360: true, 364: true,
- 368: true, 372: true, 833: true, 376: true, 380: true,
- 388: true, 392: true, 832: true, 400: true, 398: true,
- 404: true, 296: true, 408: true, 410: true, 414: true,
- 417: true, 418: true, 428: true, 422: true, 426: true,
- 430: true, 434: true, 438: true, 440: true, 442: true,
- 446: true, 450: true, 454: true, 458: true, 462: true,
- 466: true, 470: true, 584: true, 474: true, 478: true,
- 480: true, 175: true, 484: true, 583: true, 498: true,
- 492: true, 496: true, 499: true, 500: true, 504: true,
- 508: true, 104: true, 516: true, 520: true, 524: true,
- 528: true, 540: true, 554: true, 558: true, 562: true,
- 566: true, 570: true, 574: true, 807: true, 580: true,
- 578: true, 512: true, 586: true, 585: true, 275: true,
- 591: true, 598: true, 600: true, 604: true, 608: true,
- 612: true, 616: true, 620: true, 630: true, 634: true,
- 642: true, 643: true, 646: true, 638: true, 652: true,
- 654: true, 659: true, 662: true, 663: true, 666: true,
- 670: true, 882: true, 674: true, 678: true, 682: true,
- 686: true, 688: true, 690: true, 694: true, 702: true,
- 534: true, 703: true, 705: true, 90: true, 706: true,
- 710: true, 239: true, 728: true, 724: true, 144: true,
- 729: true, 740: true, 744: true, 752: true, 756: true,
- 760: true, 158: true, 762: true, 834: true, 764: true,
- 626: true, 768: true, 772: true, 776: true, 780: true,
- 788: true, 792: true, 795: true, 796: true, 798: true,
- 800: true, 804: true, 784: true, 826: true, 581: true,
- 840: true, 858: true, 860: true, 548: true, 862: true,
- 704: true, 92: true, 850: true, 876: true, 732: true,
- 887: true, 894: true, 716: true, 248: true, 153: true,
-}
-
-var iso3166_2 = map[string]bool{
- "AD-02": true, "AD-03": true, "AD-04": true, "AD-05": true, "AD-06": true,
- "AD-07": true, "AD-08": true, "AE-AJ": true, "AE-AZ": true, "AE-DU": true,
- "AE-FU": true, "AE-RK": true, "AE-SH": true, "AE-UQ": true, "AF-BAL": true,
- "AF-BAM": true, "AF-BDG": true, "AF-BDS": true, "AF-BGL": true, "AF-DAY": true,
- "AF-FRA": true, "AF-FYB": true, "AF-GHA": true, "AF-GHO": true, "AF-HEL": true,
- "AF-HER": true, "AF-JOW": true, "AF-KAB": true, "AF-KAN": true, "AF-KAP": true,
- "AF-KDZ": true, "AF-KHO": true, "AF-KNR": true, "AF-LAG": true, "AF-LOG": true,
- "AF-NAN": true, "AF-NIM": true, "AF-NUR": true, "AF-PAN": true, "AF-PAR": true,
- "AF-PIA": true, "AF-PKA": true, "AF-SAM": true, "AF-SAR": true, "AF-TAK": true,
- "AF-URU": true, "AF-WAR": true, "AF-ZAB": true, "AG-03": true, "AG-04": true,
- "AG-05": true, "AG-06": true, "AG-07": true, "AG-08": true, "AG-10": true,
- "AG-11": true, "AL-01": true, "AL-02": true, "AL-03": true, "AL-04": true,
- "AL-05": true, "AL-06": true, "AL-07": true, "AL-08": true, "AL-09": true,
- "AL-10": true, "AL-11": true, "AL-12": true, "AL-BR": true, "AL-BU": true,
- "AL-DI": true, "AL-DL": true, "AL-DR": true, "AL-DV": true, "AL-EL": true,
- "AL-ER": true, "AL-FR": true, "AL-GJ": true, "AL-GR": true, "AL-HA": true,
- "AL-KA": true, "AL-KB": true, "AL-KC": true, "AL-KO": true, "AL-KR": true,
- "AL-KU": true, "AL-LB": true, "AL-LE": true, "AL-LU": true, "AL-MK": true,
- "AL-MM": true, "AL-MR": true, "AL-MT": true, "AL-PG": true, "AL-PQ": true,
- "AL-PR": true, "AL-PU": true, "AL-SH": true, "AL-SK": true, "AL-SR": true,
- "AL-TE": true, "AL-TP": true, "AL-TR": true, "AL-VL": true, "AM-AG": true,
- "AM-AR": true, "AM-AV": true, "AM-ER": true, "AM-GR": true, "AM-KT": true,
- "AM-LO": true, "AM-SH": true, "AM-SU": true, "AM-TV": true, "AM-VD": true,
- "AO-BGO": true, "AO-BGU": true, "AO-BIE": true, "AO-CAB": true, "AO-CCU": true,
- "AO-CNN": true, "AO-CNO": true, "AO-CUS": true, "AO-HUA": true, "AO-HUI": true,
- "AO-LNO": true, "AO-LSU": true, "AO-LUA": true, "AO-MAL": true, "AO-MOX": true,
- "AO-NAM": true, "AO-UIG": true, "AO-ZAI": true, "AR-A": true, "AR-B": true,
- "AR-C": true, "AR-D": true, "AR-E": true, "AR-F": true, "AR-G": true, "AR-H": true,
- "AR-J": true, "AR-K": true, "AR-L": true, "AR-M": true, "AR-N": true,
- "AR-P": true, "AR-Q": true, "AR-R": true, "AR-S": true, "AR-T": true,
- "AR-U": true, "AR-V": true, "AR-W": true, "AR-X": true, "AR-Y": true,
- "AR-Z": true, "AT-1": true, "AT-2": true, "AT-3": true, "AT-4": true,
- "AT-5": true, "AT-6": true, "AT-7": true, "AT-8": true, "AT-9": true,
- "AU-ACT": true, "AU-NSW": true, "AU-NT": true, "AU-QLD": true, "AU-SA": true,
- "AU-TAS": true, "AU-VIC": true, "AU-WA": true, "AZ-ABS": true, "AZ-AGA": true,
- "AZ-AGC": true, "AZ-AGM": true, "AZ-AGS": true, "AZ-AGU": true, "AZ-AST": true,
- "AZ-BA": true, "AZ-BAB": true, "AZ-BAL": true, "AZ-BAR": true, "AZ-BEY": true,
- "AZ-BIL": true, "AZ-CAB": true, "AZ-CAL": true, "AZ-CUL": true, "AZ-DAS": true,
- "AZ-FUZ": true, "AZ-GA": true, "AZ-GAD": true, "AZ-GOR": true, "AZ-GOY": true,
- "AZ-GYG": true, "AZ-HAC": true, "AZ-IMI": true, "AZ-ISM": true, "AZ-KAL": true,
- "AZ-KAN": true, "AZ-KUR": true, "AZ-LA": true, "AZ-LAC": true, "AZ-LAN": true,
- "AZ-LER": true, "AZ-MAS": true, "AZ-MI": true, "AZ-NA": true, "AZ-NEF": true,
- "AZ-NV": true, "AZ-NX": true, "AZ-OGU": true, "AZ-ORD": true, "AZ-QAB": true,
- "AZ-QAX": true, "AZ-QAZ": true, "AZ-QBA": true, "AZ-QBI": true, "AZ-QOB": true,
- "AZ-QUS": true, "AZ-SA": true, "AZ-SAB": true, "AZ-SAD": true, "AZ-SAH": true,
- "AZ-SAK": true, "AZ-SAL": true, "AZ-SAR": true, "AZ-SAT": true, "AZ-SBN": true,
- "AZ-SIY": true, "AZ-SKR": true, "AZ-SM": true, "AZ-SMI": true, "AZ-SMX": true,
- "AZ-SR": true, "AZ-SUS": true, "AZ-TAR": true, "AZ-TOV": true, "AZ-UCA": true,
- "AZ-XA": true, "AZ-XAC": true, "AZ-XCI": true, "AZ-XIZ": true, "AZ-XVD": true,
- "AZ-YAR": true, "AZ-YE": true, "AZ-YEV": true, "AZ-ZAN": true, "AZ-ZAQ": true,
- "AZ-ZAR": true, "BA-01": true, "BA-02": true, "BA-03": true, "BA-04": true,
- "BA-05": true, "BA-06": true, "BA-07": true, "BA-08": true, "BA-09": true,
- "BA-10": true, "BA-BIH": true, "BA-BRC": true, "BA-SRP": true, "BB-01": true,
- "BB-02": true, "BB-03": true, "BB-04": true, "BB-05": true, "BB-06": true,
- "BB-07": true, "BB-08": true, "BB-09": true, "BB-10": true, "BB-11": true,
- "BD-01": true, "BD-02": true, "BD-03": true, "BD-04": true, "BD-05": true,
- "BD-06": true, "BD-07": true, "BD-08": true, "BD-09": true, "BD-10": true,
- "BD-11": true, "BD-12": true, "BD-13": true, "BD-14": true, "BD-15": true,
- "BD-16": true, "BD-17": true, "BD-18": true, "BD-19": true, "BD-20": true,
- "BD-21": true, "BD-22": true, "BD-23": true, "BD-24": true, "BD-25": true,
- "BD-26": true, "BD-27": true, "BD-28": true, "BD-29": true, "BD-30": true,
- "BD-31": true, "BD-32": true, "BD-33": true, "BD-34": true, "BD-35": true,
- "BD-36": true, "BD-37": true, "BD-38": true, "BD-39": true, "BD-40": true,
- "BD-41": true, "BD-42": true, "BD-43": true, "BD-44": true, "BD-45": true,
- "BD-46": true, "BD-47": true, "BD-48": true, "BD-49": true, "BD-50": true,
- "BD-51": true, "BD-52": true, "BD-53": true, "BD-54": true, "BD-55": true,
- "BD-56": true, "BD-57": true, "BD-58": true, "BD-59": true, "BD-60": true,
- "BD-61": true, "BD-62": true, "BD-63": true, "BD-64": true, "BD-A": true,
- "BD-B": true, "BD-C": true, "BD-D": true, "BD-E": true, "BD-F": true,
- "BD-G": true, "BE-BRU": true, "BE-VAN": true, "BE-VBR": true, "BE-VLG": true,
- "BE-VLI": true, "BE-VOV": true, "BE-VWV": true, "BE-WAL": true, "BE-WBR": true,
- "BE-WHT": true, "BE-WLG": true, "BE-WLX": true, "BE-WNA": true, "BF-01": true,
- "BF-02": true, "BF-03": true, "BF-04": true, "BF-05": true, "BF-06": true,
- "BF-07": true, "BF-08": true, "BF-09": true, "BF-10": true, "BF-11": true,
- "BF-12": true, "BF-13": true, "BF-BAL": true, "BF-BAM": true, "BF-BAN": true,
- "BF-BAZ": true, "BF-BGR": true, "BF-BLG": true, "BF-BLK": true, "BF-COM": true,
- "BF-GAN": true, "BF-GNA": true, "BF-GOU": true, "BF-HOU": true, "BF-IOB": true,
- "BF-KAD": true, "BF-KEN": true, "BF-KMD": true, "BF-KMP": true, "BF-KOP": true,
- "BF-KOS": true, "BF-KOT": true, "BF-KOW": true, "BF-LER": true, "BF-LOR": true,
- "BF-MOU": true, "BF-NAM": true, "BF-NAO": true, "BF-NAY": true, "BF-NOU": true,
- "BF-OUB": true, "BF-OUD": true, "BF-PAS": true, "BF-PON": true, "BF-SEN": true,
- "BF-SIS": true, "BF-SMT": true, "BF-SNG": true, "BF-SOM": true, "BF-SOR": true,
- "BF-TAP": true, "BF-TUI": true, "BF-YAG": true, "BF-YAT": true, "BF-ZIR": true,
- "BF-ZON": true, "BF-ZOU": true, "BG-01": true, "BG-02": true, "BG-03": true,
- "BG-04": true, "BG-05": true, "BG-06": true, "BG-07": true, "BG-08": true,
- "BG-09": true, "BG-10": true, "BG-11": true, "BG-12": true, "BG-13": true,
- "BG-14": true, "BG-15": true, "BG-16": true, "BG-17": true, "BG-18": true,
- "BG-19": true, "BG-20": true, "BG-21": true, "BG-22": true, "BG-23": true,
- "BG-24": true, "BG-25": true, "BG-26": true, "BG-27": true, "BG-28": true,
- "BH-13": true, "BH-14": true, "BH-15": true, "BH-16": true, "BH-17": true,
- "BI-BB": true, "BI-BL": true, "BI-BM": true, "BI-BR": true, "BI-CA": true,
- "BI-CI": true, "BI-GI": true, "BI-KI": true, "BI-KR": true, "BI-KY": true,
- "BI-MA": true, "BI-MU": true, "BI-MW": true, "BI-NG": true, "BI-RM": true, "BI-RT": true,
- "BI-RY": true, "BJ-AK": true, "BJ-AL": true, "BJ-AQ": true, "BJ-BO": true,
- "BJ-CO": true, "BJ-DO": true, "BJ-KO": true, "BJ-LI": true, "BJ-MO": true,
- "BJ-OU": true, "BJ-PL": true, "BJ-ZO": true, "BN-BE": true, "BN-BM": true,
- "BN-TE": true, "BN-TU": true, "BO-B": true, "BO-C": true, "BO-H": true,
- "BO-L": true, "BO-N": true, "BO-O": true, "BO-P": true, "BO-S": true,
- "BO-T": true, "BQ-BO": true, "BQ-SA": true, "BQ-SE": true, "BR-AC": true,
- "BR-AL": true, "BR-AM": true, "BR-AP": true, "BR-BA": true, "BR-CE": true,
- "BR-DF": true, "BR-ES": true, "BR-FN": true, "BR-GO": true, "BR-MA": true,
- "BR-MG": true, "BR-MS": true, "BR-MT": true, "BR-PA": true, "BR-PB": true,
- "BR-PE": true, "BR-PI": true, "BR-PR": true, "BR-RJ": true, "BR-RN": true,
- "BR-RO": true, "BR-RR": true, "BR-RS": true, "BR-SC": true, "BR-SE": true,
- "BR-SP": true, "BR-TO": true, "BS-AK": true, "BS-BI": true, "BS-BP": true,
- "BS-BY": true, "BS-CE": true, "BS-CI": true, "BS-CK": true, "BS-CO": true,
- "BS-CS": true, "BS-EG": true, "BS-EX": true, "BS-FP": true, "BS-GC": true,
- "BS-HI": true, "BS-HT": true, "BS-IN": true, "BS-LI": true, "BS-MC": true,
- "BS-MG": true, "BS-MI": true, "BS-NE": true, "BS-NO": true, "BS-NP": true, "BS-NS": true,
- "BS-RC": true, "BS-RI": true, "BS-SA": true, "BS-SE": true, "BS-SO": true,
- "BS-SS": true, "BS-SW": true, "BS-WG": true, "BT-11": true, "BT-12": true,
- "BT-13": true, "BT-14": true, "BT-15": true, "BT-21": true, "BT-22": true,
- "BT-23": true, "BT-24": true, "BT-31": true, "BT-32": true, "BT-33": true,
- "BT-34": true, "BT-41": true, "BT-42": true, "BT-43": true, "BT-44": true,
- "BT-45": true, "BT-GA": true, "BT-TY": true, "BW-CE": true, "BW-CH": true, "BW-GH": true,
- "BW-KG": true, "BW-KL": true, "BW-KW": true, "BW-NE": true, "BW-NW": true,
- "BW-SE": true, "BW-SO": true, "BY-BR": true, "BY-HM": true, "BY-HO": true,
- "BY-HR": true, "BY-MA": true, "BY-MI": true, "BY-VI": true, "BZ-BZ": true,
- "BZ-CY": true, "BZ-CZL": true, "BZ-OW": true, "BZ-SC": true, "BZ-TOL": true,
- "CA-AB": true, "CA-BC": true, "CA-MB": true, "CA-NB": true, "CA-NL": true,
- "CA-NS": true, "CA-NT": true, "CA-NU": true, "CA-ON": true, "CA-PE": true,
- "CA-QC": true, "CA-SK": true, "CA-YT": true, "CD-BC": true, "CD-BN": true,
- "CD-EQ": true, "CD-HK": true, "CD-IT": true, "CD-KA": true, "CD-KC": true, "CD-KE": true, "CD-KG": true, "CD-KN": true,
- "CD-KW": true, "CD-KS": true, "CD-LU": true, "CD-MA": true, "CD-NK": true, "CD-OR": true, "CD-SA": true, "CD-SK": true,
- "CD-TA": true, "CD-TO": true, "CF-AC": true, "CF-BB": true, "CF-BGF": true, "CF-BK": true, "CF-HK": true, "CF-HM": true,
- "CF-HS": true, "CF-KB": true, "CF-KG": true, "CF-LB": true, "CF-MB": true,
- "CF-MP": true, "CF-NM": true, "CF-OP": true, "CF-SE": true, "CF-UK": true,
- "CF-VK": true, "CG-11": true, "CG-12": true, "CG-13": true, "CG-14": true,
- "CG-15": true, "CG-16": true, "CG-2": true, "CG-5": true, "CG-7": true, "CG-8": true,
- "CG-9": true, "CG-BZV": true, "CH-AG": true, "CH-AI": true, "CH-AR": true,
- "CH-BE": true, "CH-BL": true, "CH-BS": true, "CH-FR": true, "CH-GE": true,
- "CH-GL": true, "CH-GR": true, "CH-JU": true, "CH-LU": true, "CH-NE": true,
- "CH-NW": true, "CH-OW": true, "CH-SG": true, "CH-SH": true, "CH-SO": true,
- "CH-SZ": true, "CH-TG": true, "CH-TI": true, "CH-UR": true, "CH-VD": true,
- "CH-VS": true, "CH-ZG": true, "CH-ZH": true, "CI-AB": true, "CI-BS": true,
- "CI-CM": true, "CI-DN": true, "CI-GD": true, "CI-LC": true, "CI-LG": true,
- "CI-MG": true, "CI-SM": true, "CI-SV": true, "CI-VB": true, "CI-WR": true,
- "CI-YM": true, "CI-ZZ": true, "CL-AI": true, "CL-AN": true, "CL-AP": true,
- "CL-AR": true, "CL-AT": true, "CL-BI": true, "CL-CO": true, "CL-LI": true,
- "CL-LL": true, "CL-LR": true, "CL-MA": true, "CL-ML": true, "CL-NB": true, "CL-RM": true,
- "CL-TA": true, "CL-VS": true, "CM-AD": true, "CM-CE": true, "CM-EN": true,
- "CM-ES": true, "CM-LT": true, "CM-NO": true, "CM-NW": true, "CM-OU": true,
- "CM-SU": true, "CM-SW": true, "CN-AH": true, "CN-BJ": true, "CN-CQ": true,
- "CN-FJ": true, "CN-GS": true, "CN-GD": true, "CN-GX": true, "CN-GZ": true,
- "CN-HI": true, "CN-HE": true, "CN-HL": true, "CN-HA": true, "CN-HB": true,
- "CN-HN": true, "CN-JS": true, "CN-JX": true, "CN-JL": true, "CN-LN": true,
- "CN-NM": true, "CN-NX": true, "CN-QH": true, "CN-SN": true, "CN-SD": true, "CN-SH": true,
- "CN-SX": true, "CN-SC": true, "CN-TJ": true, "CN-XJ": true, "CN-XZ": true, "CN-YN": true,
- "CN-ZJ": true, "CO-AMA": true, "CO-ANT": true, "CO-ARA": true, "CO-ATL": true,
- "CO-BOL": true, "CO-BOY": true, "CO-CAL": true, "CO-CAQ": true, "CO-CAS": true,
- "CO-CAU": true, "CO-CES": true, "CO-CHO": true, "CO-COR": true, "CO-CUN": true,
- "CO-DC": true, "CO-GUA": true, "CO-GUV": true, "CO-HUI": true, "CO-LAG": true,
- "CO-MAG": true, "CO-MET": true, "CO-NAR": true, "CO-NSA": true, "CO-PUT": true,
- "CO-QUI": true, "CO-RIS": true, "CO-SAN": true, "CO-SAP": true, "CO-SUC": true,
- "CO-TOL": true, "CO-VAC": true, "CO-VAU": true, "CO-VID": true, "CR-A": true,
- "CR-C": true, "CR-G": true, "CR-H": true, "CR-L": true, "CR-P": true,
- "CR-SJ": true, "CU-01": true, "CU-02": true, "CU-03": true, "CU-04": true,
- "CU-05": true, "CU-06": true, "CU-07": true, "CU-08": true, "CU-09": true,
- "CU-10": true, "CU-11": true, "CU-12": true, "CU-13": true, "CU-14": true, "CU-15": true,
- "CU-16": true, "CU-99": true, "CV-B": true, "CV-BR": true, "CV-BV": true, "CV-CA": true,
- "CV-CF": true, "CV-CR": true, "CV-MA": true, "CV-MO": true, "CV-PA": true,
- "CV-PN": true, "CV-PR": true, "CV-RB": true, "CV-RG": true, "CV-RS": true,
- "CV-S": true, "CV-SD": true, "CV-SF": true, "CV-SL": true, "CV-SM": true,
- "CV-SO": true, "CV-SS": true, "CV-SV": true, "CV-TA": true, "CV-TS": true,
- "CY-01": true, "CY-02": true, "CY-03": true, "CY-04": true, "CY-05": true,
- "CY-06": true, "CZ-10": true, "CZ-101": true, "CZ-102": true, "CZ-103": true,
- "CZ-104": true, "CZ-105": true, "CZ-106": true, "CZ-107": true, "CZ-108": true,
- "CZ-109": true, "CZ-110": true, "CZ-111": true, "CZ-112": true, "CZ-113": true,
- "CZ-114": true, "CZ-115": true, "CZ-116": true, "CZ-117": true, "CZ-118": true,
- "CZ-119": true, "CZ-120": true, "CZ-121": true, "CZ-122": true, "CZ-20": true,
- "CZ-201": true, "CZ-202": true, "CZ-203": true, "CZ-204": true, "CZ-205": true,
- "CZ-206": true, "CZ-207": true, "CZ-208": true, "CZ-209": true, "CZ-20A": true,
- "CZ-20B": true, "CZ-20C": true, "CZ-31": true, "CZ-311": true, "CZ-312": true,
- "CZ-313": true, "CZ-314": true, "CZ-315": true, "CZ-316": true, "CZ-317": true,
- "CZ-32": true, "CZ-321": true, "CZ-322": true, "CZ-323": true, "CZ-324": true,
- "CZ-325": true, "CZ-326": true, "CZ-327": true, "CZ-41": true, "CZ-411": true,
- "CZ-412": true, "CZ-413": true, "CZ-42": true, "CZ-421": true, "CZ-422": true,
- "CZ-423": true, "CZ-424": true, "CZ-425": true, "CZ-426": true, "CZ-427": true,
- "CZ-51": true, "CZ-511": true, "CZ-512": true, "CZ-513": true, "CZ-514": true,
- "CZ-52": true, "CZ-521": true, "CZ-522": true, "CZ-523": true, "CZ-524": true,
- "CZ-525": true, "CZ-53": true, "CZ-531": true, "CZ-532": true, "CZ-533": true,
- "CZ-534": true, "CZ-63": true, "CZ-631": true, "CZ-632": true, "CZ-633": true,
- "CZ-634": true, "CZ-635": true, "CZ-64": true, "CZ-641": true, "CZ-642": true,
- "CZ-643": true, "CZ-644": true, "CZ-645": true, "CZ-646": true, "CZ-647": true,
- "CZ-71": true, "CZ-711": true, "CZ-712": true, "CZ-713": true, "CZ-714": true,
- "CZ-715": true, "CZ-72": true, "CZ-721": true, "CZ-722": true, "CZ-723": true,
- "CZ-724": true, "CZ-80": true, "CZ-801": true, "CZ-802": true, "CZ-803": true,
- "CZ-804": true, "CZ-805": true, "CZ-806": true, "DE-BB": true, "DE-BE": true,
- "DE-BW": true, "DE-BY": true, "DE-HB": true, "DE-HE": true, "DE-HH": true,
- "DE-MV": true, "DE-NI": true, "DE-NW": true, "DE-RP": true, "DE-SH": true,
- "DE-SL": true, "DE-SN": true, "DE-ST": true, "DE-TH": true, "DJ-AR": true,
- "DJ-AS": true, "DJ-DI": true, "DJ-DJ": true, "DJ-OB": true, "DJ-TA": true,
- "DK-81": true, "DK-82": true, "DK-83": true, "DK-84": true, "DK-85": true,
- "DM-01": true, "DM-02": true, "DM-03": true, "DM-04": true, "DM-05": true,
- "DM-06": true, "DM-07": true, "DM-08": true, "DM-09": true, "DM-10": true,
- "DO-01": true, "DO-02": true, "DO-03": true, "DO-04": true, "DO-05": true,
- "DO-06": true, "DO-07": true, "DO-08": true, "DO-09": true, "DO-10": true,
- "DO-11": true, "DO-12": true, "DO-13": true, "DO-14": true, "DO-15": true,
- "DO-16": true, "DO-17": true, "DO-18": true, "DO-19": true, "DO-20": true,
- "DO-21": true, "DO-22": true, "DO-23": true, "DO-24": true, "DO-25": true,
- "DO-26": true, "DO-27": true, "DO-28": true, "DO-29": true, "DO-30": true, "DO-31": true,
- "DZ-01": true, "DZ-02": true, "DZ-03": true, "DZ-04": true, "DZ-05": true,
- "DZ-06": true, "DZ-07": true, "DZ-08": true, "DZ-09": true, "DZ-10": true,
- "DZ-11": true, "DZ-12": true, "DZ-13": true, "DZ-14": true, "DZ-15": true,
- "DZ-16": true, "DZ-17": true, "DZ-18": true, "DZ-19": true, "DZ-20": true,
- "DZ-21": true, "DZ-22": true, "DZ-23": true, "DZ-24": true, "DZ-25": true,
- "DZ-26": true, "DZ-27": true, "DZ-28": true, "DZ-29": true, "DZ-30": true,
- "DZ-31": true, "DZ-32": true, "DZ-33": true, "DZ-34": true, "DZ-35": true,
- "DZ-36": true, "DZ-37": true, "DZ-38": true, "DZ-39": true, "DZ-40": true,
- "DZ-41": true, "DZ-42": true, "DZ-43": true, "DZ-44": true, "DZ-45": true,
- "DZ-46": true, "DZ-47": true, "DZ-48": true, "DZ-49": true, "DZ-51": true,
- "DZ-53": true, "DZ-55": true, "DZ-56": true, "DZ-57": true, "EC-A": true, "EC-B": true,
- "EC-C": true, "EC-D": true, "EC-E": true, "EC-F": true, "EC-G": true,
- "EC-H": true, "EC-I": true, "EC-L": true, "EC-M": true, "EC-N": true,
- "EC-O": true, "EC-P": true, "EC-R": true, "EC-S": true, "EC-SD": true,
- "EC-SE": true, "EC-T": true, "EC-U": true, "EC-W": true, "EC-X": true,
- "EC-Y": true, "EC-Z": true, "EE-37": true, "EE-39": true, "EE-44": true, "EE-45": true,
- "EE-49": true, "EE-50": true, "EE-51": true, "EE-52": true, "EE-56": true, "EE-57": true,
- "EE-59": true, "EE-60": true, "EE-64": true, "EE-65": true, "EE-67": true, "EE-68": true,
- "EE-70": true, "EE-71": true, "EE-74": true, "EE-78": true, "EE-79": true, "EE-81": true, "EE-82": true,
- "EE-84": true, "EE-86": true, "EE-87": true, "EG-ALX": true, "EG-ASN": true, "EG-AST": true,
- "EG-BA": true, "EG-BH": true, "EG-BNS": true, "EG-C": true, "EG-DK": true,
- "EG-DT": true, "EG-FYM": true, "EG-GH": true, "EG-GZ": true, "EG-HU": true,
- "EG-IS": true, "EG-JS": true, "EG-KB": true, "EG-KFS": true, "EG-KN": true,
- "EG-LX": true, "EG-MN": true, "EG-MNF": true, "EG-MT": true, "EG-PTS": true, "EG-SHG": true,
- "EG-SHR": true, "EG-SIN": true, "EG-SU": true, "EG-SUZ": true, "EG-WAD": true,
- "ER-AN": true, "ER-DK": true, "ER-DU": true, "ER-GB": true, "ER-MA": true,
- "ER-SK": true, "ES-A": true, "ES-AB": true, "ES-AL": true, "ES-AN": true,
- "ES-AR": true, "ES-AS": true, "ES-AV": true, "ES-B": true, "ES-BA": true,
- "ES-BI": true, "ES-BU": true, "ES-C": true, "ES-CA": true, "ES-CB": true,
- "ES-CC": true, "ES-CE": true, "ES-CL": true, "ES-CM": true, "ES-CN": true,
- "ES-CO": true, "ES-CR": true, "ES-CS": true, "ES-CT": true, "ES-CU": true,
- "ES-EX": true, "ES-GA": true, "ES-GC": true, "ES-GI": true, "ES-GR": true,
- "ES-GU": true, "ES-H": true, "ES-HU": true, "ES-IB": true, "ES-J": true,
- "ES-L": true, "ES-LE": true, "ES-LO": true, "ES-LU": true, "ES-M": true,
- "ES-MA": true, "ES-MC": true, "ES-MD": true, "ES-ML": true, "ES-MU": true,
- "ES-NA": true, "ES-NC": true, "ES-O": true, "ES-OR": true, "ES-P": true,
- "ES-PM": true, "ES-PO": true, "ES-PV": true, "ES-RI": true, "ES-S": true,
- "ES-SA": true, "ES-SE": true, "ES-SG": true, "ES-SO": true, "ES-SS": true,
- "ES-T": true, "ES-TE": true, "ES-TF": true, "ES-TO": true, "ES-V": true,
- "ES-VA": true, "ES-VC": true, "ES-VI": true, "ES-Z": true, "ES-ZA": true,
- "ET-AA": true, "ET-AF": true, "ET-AM": true, "ET-BE": true, "ET-DD": true,
- "ET-GA": true, "ET-HA": true, "ET-OR": true, "ET-SN": true, "ET-SO": true,
- "ET-TI": true, "FI-01": true, "FI-02": true, "FI-03": true, "FI-04": true,
- "FI-05": true, "FI-06": true, "FI-07": true, "FI-08": true, "FI-09": true,
- "FI-10": true, "FI-11": true, "FI-12": true, "FI-13": true, "FI-14": true,
- "FI-15": true, "FI-16": true, "FI-17": true, "FI-18": true, "FI-19": true,
- "FJ-C": true, "FJ-E": true, "FJ-N": true, "FJ-R": true, "FJ-W": true,
- "FM-KSA": true, "FM-PNI": true, "FM-TRK": true, "FM-YAP": true, "FR-01": true,
- "FR-02": true, "FR-03": true, "FR-04": true, "FR-05": true, "FR-06": true,
- "FR-07": true, "FR-08": true, "FR-09": true, "FR-10": true, "FR-11": true,
- "FR-12": true, "FR-13": true, "FR-14": true, "FR-15": true, "FR-16": true,
- "FR-17": true, "FR-18": true, "FR-19": true, "FR-20R": true, "FR-21": true, "FR-22": true,
- "FR-23": true, "FR-24": true, "FR-25": true, "FR-26": true, "FR-27": true,
- "FR-28": true, "FR-29": true, "FR-2A": true, "FR-2B": true, "FR-30": true,
- "FR-31": true, "FR-32": true, "FR-33": true, "FR-34": true, "FR-35": true,
- "FR-36": true, "FR-37": true, "FR-38": true, "FR-39": true, "FR-40": true,
- "FR-41": true, "FR-42": true, "FR-43": true, "FR-44": true, "FR-45": true,
- "FR-46": true, "FR-47": true, "FR-48": true, "FR-49": true, "FR-50": true,
- "FR-51": true, "FR-52": true, "FR-53": true, "FR-54": true, "FR-55": true,
- "FR-56": true, "FR-57": true, "FR-58": true, "FR-59": true, "FR-60": true,
- "FR-61": true, "FR-62": true, "FR-63": true, "FR-64": true, "FR-65": true,
- "FR-66": true, "FR-67": true, "FR-68": true, "FR-69": true, "FR-70": true,
- "FR-71": true, "FR-72": true, "FR-73": true, "FR-74": true, "FR-75": true,
- "FR-76": true, "FR-77": true, "FR-78": true, "FR-79": true, "FR-80": true,
- "FR-81": true, "FR-82": true, "FR-83": true, "FR-84": true, "FR-85": true,
- "FR-86": true, "FR-87": true, "FR-88": true, "FR-89": true, "FR-90": true,
- "FR-91": true, "FR-92": true, "FR-93": true, "FR-94": true, "FR-95": true,
- "FR-ARA": true, "FR-BFC": true, "FR-BL": true, "FR-BRE": true, "FR-COR": true,
- "FR-CP": true, "FR-CVL": true, "FR-GES": true, "FR-GF": true, "FR-GP": true,
- "FR-GUA": true, "FR-HDF": true, "FR-IDF": true, "FR-LRE": true, "FR-MAY": true,
- "FR-MF": true, "FR-MQ": true, "FR-NAQ": true, "FR-NC": true, "FR-NOR": true,
- "FR-OCC": true, "FR-PAC": true, "FR-PDL": true, "FR-PF": true, "FR-PM": true,
- "FR-RE": true, "FR-TF": true, "FR-WF": true, "FR-YT": true, "GA-1": true,
- "GA-2": true, "GA-3": true, "GA-4": true, "GA-5": true, "GA-6": true,
- "GA-7": true, "GA-8": true, "GA-9": true, "GB-ABC": true, "GB-ABD": true,
- "GB-ABE": true, "GB-AGB": true, "GB-AGY": true, "GB-AND": true, "GB-ANN": true,
- "GB-ANS": true, "GB-BAS": true, "GB-BBD": true, "GB-BDF": true, "GB-BDG": true,
- "GB-BEN": true, "GB-BEX": true, "GB-BFS": true, "GB-BGE": true, "GB-BGW": true,
- "GB-BIR": true, "GB-BKM": true, "GB-BMH": true, "GB-BNE": true, "GB-BNH": true,
- "GB-BNS": true, "GB-BOL": true, "GB-BPL": true, "GB-BRC": true, "GB-BRD": true,
- "GB-BRY": true, "GB-BST": true, "GB-BUR": true, "GB-CAM": true, "GB-CAY": true,
- "GB-CBF": true, "GB-CCG": true, "GB-CGN": true, "GB-CHE": true, "GB-CHW": true,
- "GB-CLD": true, "GB-CLK": true, "GB-CMA": true, "GB-CMD": true, "GB-CMN": true,
- "GB-CON": true, "GB-COV": true, "GB-CRF": true, "GB-CRY": true, "GB-CWY": true,
- "GB-DAL": true, "GB-DBY": true, "GB-DEN": true, "GB-DER": true, "GB-DEV": true,
- "GB-DGY": true, "GB-DNC": true, "GB-DND": true, "GB-DOR": true, "GB-DRS": true,
- "GB-DUD": true, "GB-DUR": true, "GB-EAL": true, "GB-EAW": true, "GB-EAY": true,
- "GB-EDH": true, "GB-EDU": true, "GB-ELN": true, "GB-ELS": true, "GB-ENF": true,
- "GB-ENG": true, "GB-ERW": true, "GB-ERY": true, "GB-ESS": true, "GB-ESX": true,
- "GB-FAL": true, "GB-FIF": true, "GB-FLN": true, "GB-FMO": true, "GB-GAT": true,
- "GB-GBN": true, "GB-GLG": true, "GB-GLS": true, "GB-GRE": true, "GB-GWN": true,
- "GB-HAL": true, "GB-HAM": true, "GB-HAV": true, "GB-HCK": true, "GB-HEF": true,
- "GB-HIL": true, "GB-HLD": true, "GB-HMF": true, "GB-HNS": true, "GB-HPL": true,
- "GB-HRT": true, "GB-HRW": true, "GB-HRY": true, "GB-IOS": true, "GB-IOW": true,
- "GB-ISL": true, "GB-IVC": true, "GB-KEC": true, "GB-KEN": true, "GB-KHL": true,
- "GB-KIR": true, "GB-KTT": true, "GB-KWL": true, "GB-LAN": true, "GB-LBC": true,
- "GB-LBH": true, "GB-LCE": true, "GB-LDS": true, "GB-LEC": true, "GB-LEW": true,
- "GB-LIN": true, "GB-LIV": true, "GB-LND": true, "GB-LUT": true, "GB-MAN": true,
- "GB-MDB": true, "GB-MDW": true, "GB-MEA": true, "GB-MIK": true, "GD-01": true,
- "GB-MLN": true, "GB-MON": true, "GB-MRT": true, "GB-MRY": true, "GB-MTY": true,
- "GB-MUL": true, "GB-NAY": true, "GB-NBL": true, "GB-NEL": true, "GB-NET": true,
- "GB-NFK": true, "GB-NGM": true, "GB-NIR": true, "GB-NLK": true, "GB-NLN": true,
- "GB-NMD": true, "GB-NSM": true, "GB-NTH": true, "GB-NTL": true, "GB-NTT": true,
- "GB-NTY": true, "GB-NWM": true, "GB-NWP": true, "GB-NYK": true, "GB-OLD": true,
- "GB-ORK": true, "GB-OXF": true, "GB-PEM": true, "GB-PKN": true, "GB-PLY": true,
- "GB-POL": true, "GB-POR": true, "GB-POW": true, "GB-PTE": true, "GB-RCC": true,
- "GB-RCH": true, "GB-RCT": true, "GB-RDB": true, "GB-RDG": true, "GB-RFW": true,
- "GB-RIC": true, "GB-ROT": true, "GB-RUT": true, "GB-SAW": true, "GB-SAY": true,
- "GB-SCB": true, "GB-SCT": true, "GB-SFK": true, "GB-SFT": true, "GB-SGC": true,
- "GB-SHF": true, "GB-SHN": true, "GB-SHR": true, "GB-SKP": true, "GB-SLF": true,
- "GB-SLG": true, "GB-SLK": true, "GB-SND": true, "GB-SOL": true, "GB-SOM": true,
- "GB-SOS": true, "GB-SRY": true, "GB-STE": true, "GB-STG": true, "GB-STH": true,
- "GB-STN": true, "GB-STS": true, "GB-STT": true, "GB-STY": true, "GB-SWA": true,
- "GB-SWD": true, "GB-SWK": true, "GB-TAM": true, "GB-TFW": true, "GB-THR": true,
- "GB-TOB": true, "GB-TOF": true, "GB-TRF": true, "GB-TWH": true, "GB-UKM": true,
- "GB-VGL": true, "GB-WAR": true, "GB-WBK": true, "GB-WDU": true, "GB-WFT": true,
- "GB-WGN": true, "GB-WIL": true, "GB-WKF": true, "GB-WLL": true, "GB-WLN": true,
- "GB-WLS": true, "GB-WLV": true, "GB-WND": true, "GB-WNM": true, "GB-WOK": true,
- "GB-WOR": true, "GB-WRL": true, "GB-WRT": true, "GB-WRX": true, "GB-WSM": true,
- "GB-WSX": true, "GB-YOR": true, "GB-ZET": true, "GD-02": true, "GD-03": true,
- "GD-04": true, "GD-05": true, "GD-06": true, "GD-10": true, "GE-AB": true,
- "GE-AJ": true, "GE-GU": true, "GE-IM": true, "GE-KA": true, "GE-KK": true,
- "GE-MM": true, "GE-RL": true, "GE-SJ": true, "GE-SK": true, "GE-SZ": true,
- "GE-TB": true, "GH-AA": true, "GH-AH": true, "GH-AF": true, "GH-BA": true, "GH-BO": true, "GH-BE": true, "GH-CP": true,
- "GH-EP": true, "GH-NP": true, "GH-TV": true, "GH-UE": true, "GH-UW": true,
- "GH-WP": true, "GL-AV": true, "GL-KU": true, "GL-QA": true, "GL-QT": true, "GL-QE": true, "GL-SM": true,
- "GM-B": true, "GM-L": true, "GM-M": true, "GM-N": true, "GM-U": true,
- "GM-W": true, "GN-B": true, "GN-BE": true, "GN-BF": true, "GN-BK": true,
- "GN-C": true, "GN-CO": true, "GN-D": true, "GN-DB": true, "GN-DI": true,
- "GN-DL": true, "GN-DU": true, "GN-F": true, "GN-FA": true, "GN-FO": true,
- "GN-FR": true, "GN-GA": true, "GN-GU": true, "GN-K": true, "GN-KA": true,
- "GN-KB": true, "GN-KD": true, "GN-KE": true, "GN-KN": true, "GN-KO": true,
- "GN-KS": true, "GN-L": true, "GN-LA": true, "GN-LE": true, "GN-LO": true,
- "GN-M": true, "GN-MC": true, "GN-MD": true, "GN-ML": true, "GN-MM": true,
- "GN-N": true, "GN-NZ": true, "GN-PI": true, "GN-SI": true, "GN-TE": true,
- "GN-TO": true, "GN-YO": true, "GQ-AN": true, "GQ-BN": true, "GQ-BS": true,
- "GQ-C": true, "GQ-CS": true, "GQ-I": true, "GQ-KN": true, "GQ-LI": true,
- "GQ-WN": true, "GR-01": true, "GR-03": true, "GR-04": true, "GR-05": true,
- "GR-06": true, "GR-07": true, "GR-11": true, "GR-12": true, "GR-13": true,
- "GR-14": true, "GR-15": true, "GR-16": true, "GR-17": true, "GR-21": true,
- "GR-22": true, "GR-23": true, "GR-24": true, "GR-31": true, "GR-32": true,
- "GR-33": true, "GR-34": true, "GR-41": true, "GR-42": true, "GR-43": true,
- "GR-44": true, "GR-51": true, "GR-52": true, "GR-53": true, "GR-54": true,
- "GR-55": true, "GR-56": true, "GR-57": true, "GR-58": true, "GR-59": true,
- "GR-61": true, "GR-62": true, "GR-63": true, "GR-64": true, "GR-69": true,
- "GR-71": true, "GR-72": true, "GR-73": true, "GR-81": true, "GR-82": true,
- "GR-83": true, "GR-84": true, "GR-85": true, "GR-91": true, "GR-92": true,
- "GR-93": true, "GR-94": true, "GR-A": true, "GR-A1": true, "GR-B": true,
- "GR-C": true, "GR-D": true, "GR-E": true, "GR-F": true, "GR-G": true,
- "GR-H": true, "GR-I": true, "GR-J": true, "GR-K": true, "GR-L": true,
- "GR-M": true, "GT-01": true, "GT-02": true, "GT-03": true, "GT-04": true,
- "GT-05": true, "GT-06": true, "GT-07": true, "GT-08": true, "GT-09": true,
- "GT-10": true, "GT-11": true, "GT-12": true, "GT-13": true, "GT-14": true,
- "GT-15": true, "GT-16": true, "GT-17": true, "GT-18": true, "GT-19": true,
- "GT-20": true, "GT-21": true, "GT-22": true, "GW-BA": true, "GW-BL": true,
- "GW-BM": true, "GW-BS": true, "GW-CA": true, "GW-GA": true, "GW-L": true,
- "GW-N": true, "GW-OI": true, "GW-QU": true, "GW-S": true, "GW-TO": true,
- "GY-BA": true, "GY-CU": true, "GY-DE": true, "GY-EB": true, "GY-ES": true,
- "GY-MA": true, "GY-PM": true, "GY-PT": true, "GY-UD": true, "GY-UT": true,
- "HN-AT": true, "HN-CH": true, "HN-CL": true, "HN-CM": true, "HN-CP": true,
- "HN-CR": true, "HN-EP": true, "HN-FM": true, "HN-GD": true, "HN-IB": true,
- "HN-IN": true, "HN-LE": true, "HN-LP": true, "HN-OC": true, "HN-OL": true,
- "HN-SB": true, "HN-VA": true, "HN-YO": true, "HR-01": true, "HR-02": true,
- "HR-03": true, "HR-04": true, "HR-05": true, "HR-06": true, "HR-07": true,
- "HR-08": true, "HR-09": true, "HR-10": true, "HR-11": true, "HR-12": true,
- "HR-13": true, "HR-14": true, "HR-15": true, "HR-16": true, "HR-17": true,
- "HR-18": true, "HR-19": true, "HR-20": true, "HR-21": true, "HT-AR": true,
- "HT-CE": true, "HT-GA": true, "HT-ND": true, "HT-NE": true, "HT-NO": true, "HT-NI": true,
- "HT-OU": true, "HT-SD": true, "HT-SE": true, "HU-BA": true, "HU-BC": true,
- "HU-BE": true, "HU-BK": true, "HU-BU": true, "HU-BZ": true, "HU-CS": true,
- "HU-DE": true, "HU-DU": true, "HU-EG": true, "HU-ER": true, "HU-FE": true,
- "HU-GS": true, "HU-GY": true, "HU-HB": true, "HU-HE": true, "HU-HV": true,
- "HU-JN": true, "HU-KE": true, "HU-KM": true, "HU-KV": true, "HU-MI": true,
- "HU-NK": true, "HU-NO": true, "HU-NY": true, "HU-PE": true, "HU-PS": true,
- "HU-SD": true, "HU-SF": true, "HU-SH": true, "HU-SK": true, "HU-SN": true,
- "HU-SO": true, "HU-SS": true, "HU-ST": true, "HU-SZ": true, "HU-TB": true,
- "HU-TO": true, "HU-VA": true, "HU-VE": true, "HU-VM": true, "HU-ZA": true,
- "HU-ZE": true, "ID-AC": true, "ID-BA": true, "ID-BB": true, "ID-BE": true,
- "ID-BT": true, "ID-GO": true, "ID-IJ": true, "ID-JA": true, "ID-JB": true,
- "ID-JI": true, "ID-JK": true, "ID-JT": true, "ID-JW": true, "ID-KA": true,
- "ID-KB": true, "ID-KI": true, "ID-KU": true, "ID-KR": true, "ID-KS": true,
- "ID-KT": true, "ID-LA": true, "ID-MA": true, "ID-ML": true, "ID-MU": true,
- "ID-NB": true, "ID-NT": true, "ID-NU": true, "ID-PA": true, "ID-PB": true,
- "ID-PE": true, "ID-PP": true, "ID-PS": true, "ID-PT": true, "ID-RI": true,
- "ID-SA": true, "ID-SB": true, "ID-SG": true, "ID-SL": true, "ID-SM": true,
- "ID-SN": true, "ID-SR": true, "ID-SS": true, "ID-ST": true, "ID-SU": true,
- "ID-YO": true, "IE-C": true, "IE-CE": true, "IE-CN": true, "IE-CO": true,
- "IE-CW": true, "IE-D": true, "IE-DL": true, "IE-G": true, "IE-KE": true,
- "IE-KK": true, "IE-KY": true, "IE-L": true, "IE-LD": true, "IE-LH": true,
- "IE-LK": true, "IE-LM": true, "IE-LS": true, "IE-M": true, "IE-MH": true,
- "IE-MN": true, "IE-MO": true, "IE-OY": true, "IE-RN": true, "IE-SO": true,
- "IE-TA": true, "IE-U": true, "IE-WD": true, "IE-WH": true, "IE-WW": true,
- "IE-WX": true, "IL-D": true, "IL-HA": true, "IL-JM": true, "IL-M": true,
- "IL-TA": true, "IL-Z": true, "IN-AN": true, "IN-AP": true, "IN-AR": true,
- "IN-AS": true, "IN-BR": true, "IN-CH": true, "IN-CT": true, "IN-DH": true,
- "IN-DL": true, "IN-DN": true, "IN-GA": true, "IN-GJ": true, "IN-HP": true,
- "IN-HR": true, "IN-JH": true, "IN-JK": true, "IN-KA": true, "IN-KL": true,
- "IN-LD": true, "IN-MH": true, "IN-ML": true, "IN-MN": true, "IN-MP": true,
- "IN-MZ": true, "IN-NL": true, "IN-TG": true, "IN-OR": true, "IN-PB": true, "IN-PY": true,
- "IN-RJ": true, "IN-SK": true, "IN-TN": true, "IN-TR": true, "IN-UP": true,
- "IN-UT": true, "IN-WB": true, "IQ-AN": true, "IQ-AR": true, "IQ-BA": true,
- "IQ-BB": true, "IQ-BG": true, "IQ-DA": true, "IQ-DI": true, "IQ-DQ": true,
- "IQ-KA": true, "IQ-KI": true, "IQ-MA": true, "IQ-MU": true, "IQ-NA": true, "IQ-NI": true,
- "IQ-QA": true, "IQ-SD": true, "IQ-SW": true, "IQ-SU": true, "IQ-TS": true, "IQ-WA": true,
- "IR-00": true, "IR-01": true, "IR-02": true, "IR-03": true, "IR-04": true, "IR-05": true,
- "IR-06": true, "IR-07": true, "IR-08": true, "IR-09": true, "IR-10": true, "IR-11": true,
- "IR-12": true, "IR-13": true, "IR-14": true, "IR-15": true, "IR-16": true,
- "IR-17": true, "IR-18": true, "IR-19": true, "IR-20": true, "IR-21": true,
- "IR-22": true, "IR-23": true, "IR-24": true, "IR-25": true, "IR-26": true,
- "IR-27": true, "IR-28": true, "IR-29": true, "IR-30": true, "IR-31": true,
- "IS-0": true, "IS-1": true, "IS-2": true, "IS-3": true, "IS-4": true,
- "IS-5": true, "IS-6": true, "IS-7": true, "IS-8": true, "IT-21": true,
- "IT-23": true, "IT-25": true, "IT-32": true, "IT-34": true, "IT-36": true,
- "IT-42": true, "IT-45": true, "IT-52": true, "IT-55": true, "IT-57": true,
- "IT-62": true, "IT-65": true, "IT-67": true, "IT-72": true, "IT-75": true,
- "IT-77": true, "IT-78": true, "IT-82": true, "IT-88": true, "IT-AG": true,
- "IT-AL": true, "IT-AN": true, "IT-AO": true, "IT-AP": true, "IT-AQ": true,
- "IT-AR": true, "IT-AT": true, "IT-AV": true, "IT-BA": true, "IT-BG": true,
- "IT-BI": true, "IT-BL": true, "IT-BN": true, "IT-BO": true, "IT-BR": true,
- "IT-BS": true, "IT-BT": true, "IT-BZ": true, "IT-CA": true, "IT-CB": true,
- "IT-CE": true, "IT-CH": true, "IT-CI": true, "IT-CL": true, "IT-CN": true,
- "IT-CO": true, "IT-CR": true, "IT-CS": true, "IT-CT": true, "IT-CZ": true,
- "IT-EN": true, "IT-FC": true, "IT-FE": true, "IT-FG": true, "IT-FI": true,
- "IT-FM": true, "IT-FR": true, "IT-GE": true, "IT-GO": true, "IT-GR": true,
- "IT-IM": true, "IT-IS": true, "IT-KR": true, "IT-LC": true, "IT-LE": true,
- "IT-LI": true, "IT-LO": true, "IT-LT": true, "IT-LU": true, "IT-MB": true,
- "IT-MC": true, "IT-ME": true, "IT-MI": true, "IT-MN": true, "IT-MO": true,
- "IT-MS": true, "IT-MT": true, "IT-NA": true, "IT-NO": true, "IT-NU": true,
- "IT-OG": true, "IT-OR": true, "IT-OT": true, "IT-PA": true, "IT-PC": true,
- "IT-PD": true, "IT-PE": true, "IT-PG": true, "IT-PI": true, "IT-PN": true,
- "IT-PO": true, "IT-PR": true, "IT-PT": true, "IT-PU": true, "IT-PV": true,
- "IT-PZ": true, "IT-RA": true, "IT-RC": true, "IT-RE": true, "IT-RG": true,
- "IT-RI": true, "IT-RM": true, "IT-RN": true, "IT-RO": true, "IT-SA": true,
- "IT-SI": true, "IT-SO": true, "IT-SP": true, "IT-SR": true, "IT-SS": true,
- "IT-SV": true, "IT-TA": true, "IT-TE": true, "IT-TN": true, "IT-TO": true,
- "IT-TP": true, "IT-TR": true, "IT-TS": true, "IT-TV": true, "IT-UD": true,
- "IT-VA": true, "IT-VB": true, "IT-VC": true, "IT-VE": true, "IT-VI": true,
- "IT-VR": true, "IT-VS": true, "IT-VT": true, "IT-VV": true, "JM-01": true,
- "JM-02": true, "JM-03": true, "JM-04": true, "JM-05": true, "JM-06": true,
- "JM-07": true, "JM-08": true, "JM-09": true, "JM-10": true, "JM-11": true,
- "JM-12": true, "JM-13": true, "JM-14": true, "JO-AJ": true, "JO-AM": true,
- "JO-AQ": true, "JO-AT": true, "JO-AZ": true, "JO-BA": true, "JO-IR": true,
- "JO-JA": true, "JO-KA": true, "JO-MA": true, "JO-MD": true, "JO-MN": true,
- "JP-01": true, "JP-02": true, "JP-03": true, "JP-04": true, "JP-05": true,
- "JP-06": true, "JP-07": true, "JP-08": true, "JP-09": true, "JP-10": true,
- "JP-11": true, "JP-12": true, "JP-13": true, "JP-14": true, "JP-15": true,
- "JP-16": true, "JP-17": true, "JP-18": true, "JP-19": true, "JP-20": true,
- "JP-21": true, "JP-22": true, "JP-23": true, "JP-24": true, "JP-25": true,
- "JP-26": true, "JP-27": true, "JP-28": true, "JP-29": true, "JP-30": true,
- "JP-31": true, "JP-32": true, "JP-33": true, "JP-34": true, "JP-35": true,
- "JP-36": true, "JP-37": true, "JP-38": true, "JP-39": true, "JP-40": true,
- "JP-41": true, "JP-42": true, "JP-43": true, "JP-44": true, "JP-45": true,
- "JP-46": true, "JP-47": true, "KE-01": true, "KE-02": true, "KE-03": true,
- "KE-04": true, "KE-05": true, "KE-06": true, "KE-07": true, "KE-08": true,
- "KE-09": true, "KE-10": true, "KE-11": true, "KE-12": true, "KE-13": true,
- "KE-14": true, "KE-15": true, "KE-16": true, "KE-17": true, "KE-18": true,
- "KE-19": true, "KE-20": true, "KE-21": true, "KE-22": true, "KE-23": true,
- "KE-24": true, "KE-25": true, "KE-26": true, "KE-27": true, "KE-28": true,
- "KE-29": true, "KE-30": true, "KE-31": true, "KE-32": true, "KE-33": true,
- "KE-34": true, "KE-35": true, "KE-36": true, "KE-37": true, "KE-38": true,
- "KE-39": true, "KE-40": true, "KE-41": true, "KE-42": true, "KE-43": true,
- "KE-44": true, "KE-45": true, "KE-46": true, "KE-47": true, "KG-B": true,
- "KG-C": true, "KG-GB": true, "KG-GO": true, "KG-J": true, "KG-N": true, "KG-O": true,
- "KG-T": true, "KG-Y": true, "KH-1": true, "KH-10": true, "KH-11": true,
- "KH-12": true, "KH-13": true, "KH-14": true, "KH-15": true, "KH-16": true,
- "KH-17": true, "KH-18": true, "KH-19": true, "KH-2": true, "KH-20": true,
- "KH-21": true, "KH-22": true, "KH-23": true, "KH-24": true, "KH-3": true,
- "KH-4": true, "KH-5": true, "KH-6": true, "KH-7": true, "KH-8": true,
- "KH-9": true, "KI-G": true, "KI-L": true, "KI-P": true, "KM-A": true,
- "KM-G": true, "KM-M": true, "KN-01": true, "KN-02": true, "KN-03": true,
- "KN-04": true, "KN-05": true, "KN-06": true, "KN-07": true, "KN-08": true,
- "KN-09": true, "KN-10": true, "KN-11": true, "KN-12": true, "KN-13": true,
- "KN-15": true, "KN-K": true, "KN-N": true, "KP-01": true, "KP-02": true,
- "KP-03": true, "KP-04": true, "KP-05": true, "KP-06": true, "KP-07": true,
- "KP-08": true, "KP-09": true, "KP-10": true, "KP-13": true, "KR-11": true,
- "KR-26": true, "KR-27": true, "KR-28": true, "KR-29": true, "KR-30": true,
- "KR-31": true, "KR-41": true, "KR-42": true, "KR-43": true, "KR-44": true,
- "KR-45": true, "KR-46": true, "KR-47": true, "KR-48": true, "KR-49": true,
- "KW-AH": true, "KW-FA": true, "KW-HA": true, "KW-JA": true, "KW-KU": true,
- "KW-MU": true, "KZ-10": true, "KZ-75": true, "KZ-19": true, "KZ-11": true,
- "KZ-15": true, "KZ-71": true, "KZ-23": true, "KZ-27": true, "KZ-47": true,
- "KZ-55": true, "KZ-35": true, "KZ-39": true, "KZ-43": true, "KZ-63": true,
- "KZ-79": true, "KZ-59": true, "KZ-61": true, "KZ-62": true, "KZ-31": true,
- "KZ-33": true, "LA-AT": true, "LA-BK": true, "LA-BL": true,
- "LA-CH": true, "LA-HO": true, "LA-KH": true, "LA-LM": true, "LA-LP": true,
- "LA-OU": true, "LA-PH": true, "LA-SL": true, "LA-SV": true, "LA-VI": true,
- "LA-VT": true, "LA-XA": true, "LA-XE": true, "LA-XI": true, "LA-XS": true,
- "LB-AK": true, "LB-AS": true, "LB-BA": true, "LB-BH": true, "LB-BI": true,
- "LB-JA": true, "LB-JL": true, "LB-NA": true, "LC-01": true, "LC-02": true,
- "LC-03": true, "LC-05": true, "LC-06": true, "LC-07": true, "LC-08": true,
- "LC-10": true, "LC-11": true, "LI-01": true, "LI-02": true,
- "LI-03": true, "LI-04": true, "LI-05": true, "LI-06": true, "LI-07": true,
- "LI-08": true, "LI-09": true, "LI-10": true, "LI-11": true, "LK-1": true,
- "LK-11": true, "LK-12": true, "LK-13": true, "LK-2": true, "LK-21": true,
- "LK-22": true, "LK-23": true, "LK-3": true, "LK-31": true, "LK-32": true,
- "LK-33": true, "LK-4": true, "LK-41": true, "LK-42": true, "LK-43": true,
- "LK-44": true, "LK-45": true, "LK-5": true, "LK-51": true, "LK-52": true,
- "LK-53": true, "LK-6": true, "LK-61": true, "LK-62": true, "LK-7": true,
- "LK-71": true, "LK-72": true, "LK-8": true, "LK-81": true, "LK-82": true,
- "LK-9": true, "LK-91": true, "LK-92": true, "LR-BG": true, "LR-BM": true,
- "LR-CM": true, "LR-GB": true, "LR-GG": true, "LR-GK": true, "LR-LO": true,
- "LR-MG": true, "LR-MO": true, "LR-MY": true, "LR-NI": true, "LR-RI": true,
- "LR-SI": true, "LS-A": true, "LS-B": true, "LS-C": true, "LS-D": true,
- "LS-E": true, "LS-F": true, "LS-G": true, "LS-H": true, "LS-J": true,
- "LS-K": true, "LT-AL": true, "LT-KL": true, "LT-KU": true, "LT-MR": true,
- "LT-PN": true, "LT-SA": true, "LT-TA": true, "LT-TE": true, "LT-UT": true,
- "LT-VL": true, "LU-CA": true, "LU-CL": true, "LU-DI": true, "LU-EC": true,
- "LU-ES": true, "LU-GR": true, "LU-LU": true, "LU-ME": true, "LU-RD": true,
- "LU-RM": true, "LU-VD": true, "LU-WI": true, "LU-D": true, "LU-G": true, "LU-L": true,
- "LV-001": true, "LV-111": true, "LV-112": true, "LV-113": true,
- "LV-002": true, "LV-003": true, "LV-004": true, "LV-005": true, "LV-006": true,
- "LV-007": true, "LV-008": true, "LV-009": true, "LV-010": true, "LV-011": true,
- "LV-012": true, "LV-013": true, "LV-014": true, "LV-015": true, "LV-016": true,
- "LV-017": true, "LV-018": true, "LV-019": true, "LV-020": true, "LV-021": true,
- "LV-022": true, "LV-023": true, "LV-024": true, "LV-025": true, "LV-026": true,
- "LV-027": true, "LV-028": true, "LV-029": true, "LV-030": true, "LV-031": true,
- "LV-032": true, "LV-033": true, "LV-034": true, "LV-035": true, "LV-036": true,
- "LV-037": true, "LV-038": true, "LV-039": true, "LV-040": true, "LV-041": true,
- "LV-042": true, "LV-043": true, "LV-044": true, "LV-045": true, "LV-046": true,
- "LV-047": true, "LV-048": true, "LV-049": true, "LV-050": true, "LV-051": true,
- "LV-052": true, "LV-053": true, "LV-054": true, "LV-055": true, "LV-056": true,
- "LV-057": true, "LV-058": true, "LV-059": true, "LV-060": true, "LV-061": true,
- "LV-062": true, "LV-063": true, "LV-064": true, "LV-065": true, "LV-066": true,
- "LV-067": true, "LV-068": true, "LV-069": true, "LV-070": true, "LV-071": true,
- "LV-072": true, "LV-073": true, "LV-074": true, "LV-075": true, "LV-076": true,
- "LV-077": true, "LV-078": true, "LV-079": true, "LV-080": true, "LV-081": true,
- "LV-082": true, "LV-083": true, "LV-084": true, "LV-085": true, "LV-086": true,
- "LV-087": true, "LV-088": true, "LV-089": true, "LV-090": true, "LV-091": true,
- "LV-092": true, "LV-093": true, "LV-094": true, "LV-095": true, "LV-096": true,
- "LV-097": true, "LV-098": true, "LV-099": true, "LV-100": true, "LV-101": true,
- "LV-102": true, "LV-103": true, "LV-104": true, "LV-105": true, "LV-106": true,
- "LV-107": true, "LV-108": true, "LV-109": true, "LV-110": true, "LV-DGV": true,
- "LV-JEL": true, "LV-JKB": true, "LV-JUR": true, "LV-LPX": true, "LV-REZ": true,
- "LV-RIX": true, "LV-VEN": true, "LV-VMR": true, "LY-BA": true, "LY-BU": true,
- "LY-DR": true, "LY-GT": true, "LY-JA": true, "LY-JB": true, "LY-JG": true,
- "LY-JI": true, "LY-JU": true, "LY-KF": true, "LY-MB": true, "LY-MI": true,
- "LY-MJ": true, "LY-MQ": true, "LY-NL": true, "LY-NQ": true, "LY-SB": true,
- "LY-SR": true, "LY-TB": true, "LY-WA": true, "LY-WD": true, "LY-WS": true,
- "LY-ZA": true, "MA-01": true, "MA-02": true, "MA-03": true, "MA-04": true,
- "MA-05": true, "MA-06": true, "MA-07": true, "MA-08": true, "MA-09": true,
- "MA-10": true, "MA-11": true, "MA-12": true, "MA-13": true, "MA-14": true,
- "MA-15": true, "MA-16": true, "MA-AGD": true, "MA-AOU": true, "MA-ASZ": true,
- "MA-AZI": true, "MA-BEM": true, "MA-BER": true, "MA-BES": true, "MA-BOD": true,
- "MA-BOM": true, "MA-CAS": true, "MA-CHE": true, "MA-CHI": true, "MA-CHT": true,
- "MA-ERR": true, "MA-ESI": true, "MA-ESM": true, "MA-FAH": true, "MA-FES": true,
- "MA-FIG": true, "MA-GUE": true, "MA-HAJ": true, "MA-HAO": true, "MA-HOC": true,
- "MA-IFR": true, "MA-INE": true, "MA-JDI": true, "MA-JRA": true, "MA-KEN": true,
- "MA-KES": true, "MA-KHE": true, "MA-KHN": true, "MA-KHO": true, "MA-LAA": true,
- "MA-LAR": true, "MA-MED": true, "MA-MEK": true, "MA-MMD": true, "MA-MMN": true,
- "MA-MOH": true, "MA-MOU": true, "MA-NAD": true, "MA-NOU": true, "MA-OUA": true,
- "MA-OUD": true, "MA-OUJ": true, "MA-RAB": true, "MA-SAF": true, "MA-SAL": true,
- "MA-SEF": true, "MA-SET": true, "MA-SIK": true, "MA-SKH": true, "MA-SYB": true,
- "MA-TAI": true, "MA-TAO": true, "MA-TAR": true, "MA-TAT": true, "MA-TAZ": true,
- "MA-TET": true, "MA-TIZ": true, "MA-TNG": true, "MA-TNT": true, "MA-ZAG": true,
- "MC-CL": true, "MC-CO": true, "MC-FO": true, "MC-GA": true, "MC-JE": true,
- "MC-LA": true, "MC-MA": true, "MC-MC": true, "MC-MG": true, "MC-MO": true,
- "MC-MU": true, "MC-PH": true, "MC-SD": true, "MC-SO": true, "MC-SP": true,
- "MC-SR": true, "MC-VR": true, "MD-AN": true, "MD-BA": true, "MD-BD": true,
- "MD-BR": true, "MD-BS": true, "MD-CA": true, "MD-CL": true, "MD-CM": true,
- "MD-CR": true, "MD-CS": true, "MD-CT": true, "MD-CU": true, "MD-DO": true,
- "MD-DR": true, "MD-DU": true, "MD-ED": true, "MD-FA": true, "MD-FL": true,
- "MD-GA": true, "MD-GL": true, "MD-HI": true, "MD-IA": true, "MD-LE": true,
- "MD-NI": true, "MD-OC": true, "MD-OR": true, "MD-RE": true, "MD-RI": true,
- "MD-SD": true, "MD-SI": true, "MD-SN": true, "MD-SO": true, "MD-ST": true,
- "MD-SV": true, "MD-TA": true, "MD-TE": true, "MD-UN": true, "ME-01": true,
- "ME-02": true, "ME-03": true, "ME-04": true, "ME-05": true, "ME-06": true,
- "ME-07": true, "ME-08": true, "ME-09": true, "ME-10": true, "ME-11": true,
- "ME-12": true, "ME-13": true, "ME-14": true, "ME-15": true, "ME-16": true,
- "ME-17": true, "ME-18": true, "ME-19": true, "ME-20": true, "ME-21": true, "ME-24": true,
- "MG-A": true, "MG-D": true, "MG-F": true, "MG-M": true, "MG-T": true,
- "MG-U": true, "MH-ALK": true, "MH-ALL": true, "MH-ARN": true, "MH-AUR": true,
- "MH-EBO": true, "MH-ENI": true, "MH-JAB": true, "MH-JAL": true, "MH-KIL": true,
- "MH-KWA": true, "MH-L": true, "MH-LAE": true, "MH-LIB": true, "MH-LIK": true,
- "MH-MAJ": true, "MH-MAL": true, "MH-MEJ": true, "MH-MIL": true, "MH-NMK": true,
- "MH-NMU": true, "MH-RON": true, "MH-T": true, "MH-UJA": true, "MH-UTI": true,
- "MH-WTJ": true, "MH-WTN": true, "MK-101": true, "MK-102": true, "MK-103": true,
- "MK-104": true, "MK-105": true,
- "MK-106": true, "MK-107": true, "MK-108": true, "MK-109": true, "MK-201": true,
- "MK-202": true, "MK-205": true, "MK-206": true, "MK-207": true, "MK-208": true,
- "MK-209": true, "MK-210": true, "MK-211": true, "MK-301": true, "MK-303": true,
- "MK-307": true, "MK-308": true, "MK-310": true, "MK-311": true, "MK-312": true,
- "MK-401": true, "MK-402": true, "MK-403": true, "MK-404": true, "MK-405": true,
- "MK-406": true, "MK-408": true, "MK-409": true, "MK-410": true, "MK-501": true,
- "MK-502": true, "MK-503": true, "MK-505": true, "MK-506": true, "MK-507": true,
- "MK-508": true, "MK-509": true, "MK-601": true, "MK-602": true, "MK-604": true,
- "MK-605": true, "MK-606": true, "MK-607": true, "MK-608": true, "MK-609": true,
- "MK-701": true, "MK-702": true, "MK-703": true, "MK-704": true, "MK-705": true,
- "MK-803": true, "MK-804": true, "MK-806": true, "MK-807": true, "MK-809": true,
- "MK-810": true, "MK-811": true, "MK-812": true, "MK-813": true, "MK-814": true,
- "MK-816": true, "ML-1": true, "ML-2": true, "ML-3": true, "ML-4": true,
- "ML-5": true, "ML-6": true, "ML-7": true, "ML-8": true, "ML-BKO": true,
- "MM-01": true, "MM-02": true, "MM-03": true, "MM-04": true, "MM-05": true,
- "MM-06": true, "MM-07": true, "MM-11": true, "MM-12": true, "MM-13": true,
- "MM-14": true, "MM-15": true, "MM-16": true, "MM-17": true, "MM-18": true, "MN-035": true,
- "MN-037": true, "MN-039": true, "MN-041": true, "MN-043": true, "MN-046": true,
- "MN-047": true, "MN-049": true, "MN-051": true, "MN-053": true, "MN-055": true,
- "MN-057": true, "MN-059": true, "MN-061": true, "MN-063": true, "MN-064": true,
- "MN-065": true, "MN-067": true, "MN-069": true, "MN-071": true, "MN-073": true,
- "MN-1": true, "MR-01": true, "MR-02": true, "MR-03": true, "MR-04": true,
- "MR-05": true, "MR-06": true, "MR-07": true, "MR-08": true, "MR-09": true,
- "MR-10": true, "MR-11": true, "MR-12": true, "MR-13": true, "MR-NKC": true, "MT-01": true,
- "MT-02": true, "MT-03": true, "MT-04": true, "MT-05": true, "MT-06": true,
- "MT-07": true, "MT-08": true, "MT-09": true, "MT-10": true, "MT-11": true,
- "MT-12": true, "MT-13": true, "MT-14": true, "MT-15": true, "MT-16": true,
- "MT-17": true, "MT-18": true, "MT-19": true, "MT-20": true, "MT-21": true,
- "MT-22": true, "MT-23": true, "MT-24": true, "MT-25": true, "MT-26": true,
- "MT-27": true, "MT-28": true, "MT-29": true, "MT-30": true, "MT-31": true,
- "MT-32": true, "MT-33": true, "MT-34": true, "MT-35": true, "MT-36": true,
- "MT-37": true, "MT-38": true, "MT-39": true, "MT-40": true, "MT-41": true,
- "MT-42": true, "MT-43": true, "MT-44": true, "MT-45": true, "MT-46": true,
- "MT-47": true, "MT-48": true, "MT-49": true, "MT-50": true, "MT-51": true,
- "MT-52": true, "MT-53": true, "MT-54": true, "MT-55": true, "MT-56": true,
- "MT-57": true, "MT-58": true, "MT-59": true, "MT-60": true, "MT-61": true,
- "MT-62": true, "MT-63": true, "MT-64": true, "MT-65": true, "MT-66": true,
- "MT-67": true, "MT-68": true, "MU-AG": true, "MU-BL": true, "MU-BR": true,
- "MU-CC": true, "MU-CU": true, "MU-FL": true, "MU-GP": true, "MU-MO": true,
- "MU-PA": true, "MU-PL": true, "MU-PU": true, "MU-PW": true, "MU-QB": true,
- "MU-RO": true, "MU-RP": true, "MU-RR": true, "MU-SA": true, "MU-VP": true, "MV-00": true,
- "MV-01": true, "MV-02": true, "MV-03": true, "MV-04": true, "MV-05": true,
- "MV-07": true, "MV-08": true, "MV-12": true, "MV-13": true, "MV-14": true,
- "MV-17": true, "MV-20": true, "MV-23": true, "MV-24": true, "MV-25": true,
- "MV-26": true, "MV-27": true, "MV-28": true, "MV-29": true, "MV-CE": true,
- "MV-MLE": true, "MV-NC": true, "MV-NO": true, "MV-SC": true, "MV-SU": true,
- "MV-UN": true, "MV-US": true, "MW-BA": true, "MW-BL": true, "MW-C": true,
- "MW-CK": true, "MW-CR": true, "MW-CT": true, "MW-DE": true, "MW-DO": true,
- "MW-KR": true, "MW-KS": true, "MW-LI": true, "MW-LK": true, "MW-MC": true,
- "MW-MG": true, "MW-MH": true, "MW-MU": true, "MW-MW": true, "MW-MZ": true,
- "MW-N": true, "MW-NB": true, "MW-NE": true, "MW-NI": true, "MW-NK": true,
- "MW-NS": true, "MW-NU": true, "MW-PH": true, "MW-RU": true, "MW-S": true,
- "MW-SA": true, "MW-TH": true, "MW-ZO": true, "MX-AGU": true, "MX-BCN": true,
- "MX-BCS": true, "MX-CAM": true, "MX-CHH": true, "MX-CHP": true, "MX-COA": true,
- "MX-COL": true, "MX-CMX": true, "MX-DIF": true, "MX-DUR": true, "MX-GRO": true, "MX-GUA": true,
- "MX-HID": true, "MX-JAL": true, "MX-MEX": true, "MX-MIC": true, "MX-MOR": true,
- "MX-NAY": true, "MX-NLE": true, "MX-OAX": true, "MX-PUE": true, "MX-QUE": true,
- "MX-ROO": true, "MX-SIN": true, "MX-SLP": true, "MX-SON": true, "MX-TAB": true,
- "MX-TAM": true, "MX-TLA": true, "MX-VER": true, "MX-YUC": true, "MX-ZAC": true,
- "MY-01": true, "MY-02": true, "MY-03": true, "MY-04": true, "MY-05": true,
- "MY-06": true, "MY-07": true, "MY-08": true, "MY-09": true, "MY-10": true,
- "MY-11": true, "MY-12": true, "MY-13": true, "MY-14": true, "MY-15": true,
- "MY-16": true, "MZ-A": true, "MZ-B": true, "MZ-G": true, "MZ-I": true,
- "MZ-L": true, "MZ-MPM": true, "MZ-N": true, "MZ-P": true, "MZ-Q": true,
- "MZ-S": true, "MZ-T": true, "NA-CA": true, "NA-ER": true, "NA-HA": true,
- "NA-KA": true, "NA-KE": true, "NA-KH": true, "NA-KU": true, "NA-KW": true, "NA-OD": true, "NA-OH": true,
- "NA-OK": true, "NA-ON": true, "NA-OS": true, "NA-OT": true, "NA-OW": true,
- "NE-1": true, "NE-2": true, "NE-3": true, "NE-4": true, "NE-5": true,
- "NE-6": true, "NE-7": true, "NE-8": true, "NG-AB": true, "NG-AD": true,
- "NG-AK": true, "NG-AN": true, "NG-BA": true, "NG-BE": true, "NG-BO": true,
- "NG-BY": true, "NG-CR": true, "NG-DE": true, "NG-EB": true, "NG-ED": true,
- "NG-EK": true, "NG-EN": true, "NG-FC": true, "NG-GO": true, "NG-IM": true,
- "NG-JI": true, "NG-KD": true, "NG-KE": true, "NG-KN": true, "NG-KO": true,
- "NG-KT": true, "NG-KW": true, "NG-LA": true, "NG-NA": true, "NG-NI": true,
- "NG-OG": true, "NG-ON": true, "NG-OS": true, "NG-OY": true, "NG-PL": true,
- "NG-RI": true, "NG-SO": true, "NG-TA": true, "NG-YO": true, "NG-ZA": true,
- "NI-AN": true, "NI-AS": true, "NI-BO": true, "NI-CA": true, "NI-CI": true,
- "NI-CO": true, "NI-ES": true, "NI-GR": true, "NI-JI": true, "NI-LE": true,
- "NI-MD": true, "NI-MN": true, "NI-MS": true, "NI-MT": true, "NI-NS": true,
- "NI-RI": true, "NI-SJ": true, "NL-AW": true, "NL-BQ1": true, "NL-BQ2": true,
- "NL-BQ3": true, "NL-CW": true, "NL-DR": true, "NL-FL": true, "NL-FR": true,
- "NL-GE": true, "NL-GR": true, "NL-LI": true, "NL-NB": true, "NL-NH": true,
- "NL-OV": true, "NL-SX": true, "NL-UT": true, "NL-ZE": true, "NL-ZH": true,
- "NO-03": true, "NO-11": true, "NO-15": true, "NO-16": true, "NO-17": true,
- "NO-18": true, "NO-21": true, "NO-30": true, "NO-34": true, "NO-38": true,
- "NO-42": true, "NO-46": true, "NO-50": true, "NO-54": true,
- "NO-22": true, "NP-1": true, "NP-2": true, "NP-3": true, "NP-4": true,
- "NP-5": true, "NP-BA": true, "NP-BH": true, "NP-DH": true, "NP-GA": true,
- "NP-JA": true, "NP-KA": true, "NP-KO": true, "NP-LU": true, "NP-MA": true,
- "NP-ME": true, "NP-NA": true, "NP-RA": true, "NP-SA": true, "NP-SE": true,
- "NR-01": true, "NR-02": true, "NR-03": true, "NR-04": true, "NR-05": true,
- "NR-06": true, "NR-07": true, "NR-08": true, "NR-09": true, "NR-10": true,
- "NR-11": true, "NR-12": true, "NR-13": true, "NR-14": true, "NZ-AUK": true,
- "NZ-BOP": true, "NZ-CAN": true, "NZ-CIT": true, "NZ-GIS": true, "NZ-HKB": true,
- "NZ-MBH": true, "NZ-MWT": true, "NZ-N": true, "NZ-NSN": true, "NZ-NTL": true,
- "NZ-OTA": true, "NZ-S": true, "NZ-STL": true, "NZ-TAS": true, "NZ-TKI": true,
- "NZ-WGN": true, "NZ-WKO": true, "NZ-WTC": true, "OM-BA": true, "OM-BS": true, "OM-BU": true, "OM-BJ": true,
- "OM-DA": true, "OM-MA": true, "OM-MU": true, "OM-SH": true, "OM-SJ": true, "OM-SS": true, "OM-WU": true,
- "OM-ZA": true, "OM-ZU": true, "PA-1": true, "PA-2": true, "PA-3": true,
- "PA-4": true, "PA-5": true, "PA-6": true, "PA-7": true, "PA-8": true,
- "PA-9": true, "PA-EM": true, "PA-KY": true, "PA-NB": true, "PE-AMA": true,
- "PE-ANC": true, "PE-APU": true, "PE-ARE": true, "PE-AYA": true, "PE-CAJ": true,
- "PE-CAL": true, "PE-CUS": true, "PE-HUC": true, "PE-HUV": true, "PE-ICA": true,
- "PE-JUN": true, "PE-LAL": true, "PE-LAM": true, "PE-LIM": true, "PE-LMA": true,
- "PE-LOR": true, "PE-MDD": true, "PE-MOQ": true, "PE-PAS": true, "PE-PIU": true,
- "PE-PUN": true, "PE-SAM": true, "PE-TAC": true, "PE-TUM": true, "PE-UCA": true,
- "PG-CPK": true, "PG-CPM": true, "PG-EBR": true, "PG-EHG": true, "PG-EPW": true,
- "PG-ESW": true, "PG-GPK": true, "PG-MBA": true, "PG-MPL": true, "PG-MPM": true,
- "PG-MRL": true, "PG-NCD": true, "PG-NIK": true, "PG-NPP": true, "PG-NSB": true,
- "PG-SAN": true, "PG-SHM": true, "PG-WBK": true, "PG-WHM": true, "PG-WPD": true,
- "PH-00": true, "PH-01": true, "PH-02": true, "PH-03": true, "PH-05": true,
- "PH-06": true, "PH-07": true, "PH-08": true, "PH-09": true, "PH-10": true,
- "PH-11": true, "PH-12": true, "PH-13": true, "PH-14": true, "PH-15": true,
- "PH-40": true, "PH-41": true, "PH-ABR": true, "PH-AGN": true, "PH-AGS": true,
- "PH-AKL": true, "PH-ALB": true, "PH-ANT": true, "PH-APA": true, "PH-AUR": true,
- "PH-BAN": true, "PH-BAS": true, "PH-BEN": true, "PH-BIL": true, "PH-BOH": true,
- "PH-BTG": true, "PH-BTN": true, "PH-BUK": true, "PH-BUL": true, "PH-CAG": true,
- "PH-CAM": true, "PH-CAN": true, "PH-CAP": true, "PH-CAS": true, "PH-CAT": true,
- "PH-CAV": true, "PH-CEB": true, "PH-COM": true, "PH-DAO": true, "PH-DAS": true,
- "PH-DAV": true, "PH-DIN": true, "PH-EAS": true, "PH-GUI": true, "PH-IFU": true,
- "PH-ILI": true, "PH-ILN": true, "PH-ILS": true, "PH-ISA": true, "PH-KAL": true,
- "PH-LAG": true, "PH-LAN": true, "PH-LAS": true, "PH-LEY": true, "PH-LUN": true,
- "PH-MAD": true, "PH-MAG": true, "PH-MAS": true, "PH-MDC": true, "PH-MDR": true,
- "PH-MOU": true, "PH-MSC": true, "PH-MSR": true, "PH-NCO": true, "PH-NEC": true,
- "PH-NER": true, "PH-NSA": true, "PH-NUE": true, "PH-NUV": true, "PH-PAM": true,
- "PH-PAN": true, "PH-PLW": true, "PH-QUE": true, "PH-QUI": true, "PH-RIZ": true,
- "PH-ROM": true, "PH-SAR": true, "PH-SCO": true, "PH-SIG": true, "PH-SLE": true,
- "PH-SLU": true, "PH-SOR": true, "PH-SUK": true, "PH-SUN": true, "PH-SUR": true,
- "PH-TAR": true, "PH-TAW": true, "PH-WSA": true, "PH-ZAN": true, "PH-ZAS": true,
- "PH-ZMB": true, "PH-ZSI": true, "PK-BA": true, "PK-GB": true, "PK-IS": true,
- "PK-JK": true, "PK-KP": true, "PK-PB": true, "PK-SD": true, "PK-TA": true,
- "PL-02": true, "PL-04": true, "PL-06": true, "PL-08": true, "PL-10": true,
- "PL-12": true, "PL-14": true, "PL-16": true, "PL-18": true, "PL-20": true,
- "PL-22": true, "PL-24": true, "PL-26": true, "PL-28": true, "PL-30": true, "PL-32": true,
- "PS-BTH": true, "PS-DEB": true, "PS-GZA": true, "PS-HBN": true,
- "PS-JEM": true, "PS-JEN": true, "PS-JRH": true, "PS-KYS": true, "PS-NBS": true,
- "PS-NGZ": true, "PS-QQA": true, "PS-RBH": true, "PS-RFH": true, "PS-SLT": true,
- "PS-TBS": true, "PS-TKM": true, "PT-01": true, "PT-02": true, "PT-03": true,
- "PT-04": true, "PT-05": true, "PT-06": true, "PT-07": true, "PT-08": true,
- "PT-09": true, "PT-10": true, "PT-11": true, "PT-12": true, "PT-13": true,
- "PT-14": true, "PT-15": true, "PT-16": true, "PT-17": true, "PT-18": true,
- "PT-20": true, "PT-30": true, "PW-002": true, "PW-004": true, "PW-010": true,
- "PW-050": true, "PW-100": true, "PW-150": true, "PW-212": true, "PW-214": true,
- "PW-218": true, "PW-222": true, "PW-224": true, "PW-226": true, "PW-227": true,
- "PW-228": true, "PW-350": true, "PW-370": true, "PY-1": true, "PY-10": true,
- "PY-11": true, "PY-12": true, "PY-13": true, "PY-14": true, "PY-15": true,
- "PY-16": true, "PY-19": true, "PY-2": true, "PY-3": true, "PY-4": true,
- "PY-5": true, "PY-6": true, "PY-7": true, "PY-8": true, "PY-9": true,
- "PY-ASU": true, "QA-DA": true, "QA-KH": true, "QA-MS": true, "QA-RA": true,
- "QA-US": true, "QA-WA": true, "QA-ZA": true, "RO-AB": true, "RO-AG": true,
- "RO-AR": true, "RO-B": true, "RO-BC": true, "RO-BH": true, "RO-BN": true,
- "RO-BR": true, "RO-BT": true, "RO-BV": true, "RO-BZ": true, "RO-CJ": true,
- "RO-CL": true, "RO-CS": true, "RO-CT": true, "RO-CV": true, "RO-DB": true,
- "RO-DJ": true, "RO-GJ": true, "RO-GL": true, "RO-GR": true, "RO-HD": true,
- "RO-HR": true, "RO-IF": true, "RO-IL": true, "RO-IS": true, "RO-MH": true,
- "RO-MM": true, "RO-MS": true, "RO-NT": true, "RO-OT": true, "RO-PH": true,
- "RO-SB": true, "RO-SJ": true, "RO-SM": true, "RO-SV": true, "RO-TL": true,
- "RO-TM": true, "RO-TR": true, "RO-VL": true, "RO-VN": true, "RO-VS": true,
- "RS-00": true, "RS-01": true, "RS-02": true, "RS-03": true, "RS-04": true,
- "RS-05": true, "RS-06": true, "RS-07": true, "RS-08": true, "RS-09": true,
- "RS-10": true, "RS-11": true, "RS-12": true, "RS-13": true, "RS-14": true,
- "RS-15": true, "RS-16": true, "RS-17": true, "RS-18": true, "RS-19": true,
- "RS-20": true, "RS-21": true, "RS-22": true, "RS-23": true, "RS-24": true,
- "RS-25": true, "RS-26": true, "RS-27": true, "RS-28": true, "RS-29": true,
- "RS-KM": true, "RS-VO": true, "RU-AD": true, "RU-AL": true, "RU-ALT": true,
- "RU-AMU": true, "RU-ARK": true, "RU-AST": true, "RU-BA": true, "RU-BEL": true,
- "RU-BRY": true, "RU-BU": true, "RU-CE": true, "RU-CHE": true, "RU-CHU": true,
- "RU-CU": true, "RU-DA": true, "RU-IN": true, "RU-IRK": true, "RU-IVA": true,
- "RU-KAM": true, "RU-KB": true, "RU-KC": true, "RU-KDA": true, "RU-KEM": true,
- "RU-KGD": true, "RU-KGN": true, "RU-KHA": true, "RU-KHM": true, "RU-KIR": true,
- "RU-KK": true, "RU-KL": true, "RU-KLU": true, "RU-KO": true, "RU-KOS": true,
- "RU-KR": true, "RU-KRS": true, "RU-KYA": true, "RU-LEN": true, "RU-LIP": true,
- "RU-MAG": true, "RU-ME": true, "RU-MO": true, "RU-MOS": true, "RU-MOW": true,
- "RU-MUR": true, "RU-NEN": true, "RU-NGR": true, "RU-NIZ": true, "RU-NVS": true,
- "RU-OMS": true, "RU-ORE": true, "RU-ORL": true, "RU-PER": true, "RU-PNZ": true,
- "RU-PRI": true, "RU-PSK": true, "RU-ROS": true, "RU-RYA": true, "RU-SA": true,
- "RU-SAK": true, "RU-SAM": true, "RU-SAR": true, "RU-SE": true, "RU-SMO": true,
- "RU-SPE": true, "RU-STA": true, "RU-SVE": true, "RU-TA": true, "RU-TAM": true,
- "RU-TOM": true, "RU-TUL": true, "RU-TVE": true, "RU-TY": true, "RU-TYU": true,
- "RU-UD": true, "RU-ULY": true, "RU-VGG": true, "RU-VLA": true, "RU-VLG": true,
- "RU-VOR": true, "RU-YAN": true, "RU-YAR": true, "RU-YEV": true, "RU-ZAB": true,
- "RW-01": true, "RW-02": true, "RW-03": true, "RW-04": true, "RW-05": true,
- "SA-01": true, "SA-02": true, "SA-03": true, "SA-04": true, "SA-05": true,
- "SA-06": true, "SA-07": true, "SA-08": true, "SA-09": true, "SA-10": true,
- "SA-11": true, "SA-12": true, "SA-14": true, "SB-CE": true, "SB-CH": true,
- "SB-CT": true, "SB-GU": true, "SB-IS": true, "SB-MK": true, "SB-ML": true,
- "SB-RB": true, "SB-TE": true, "SB-WE": true, "SC-01": true, "SC-02": true,
- "SC-03": true, "SC-04": true, "SC-05": true, "SC-06": true, "SC-07": true,
- "SC-08": true, "SC-09": true, "SC-10": true, "SC-11": true, "SC-12": true,
- "SC-13": true, "SC-14": true, "SC-15": true, "SC-16": true, "SC-17": true,
- "SC-18": true, "SC-19": true, "SC-20": true, "SC-21": true, "SC-22": true,
- "SC-23": true, "SC-24": true, "SC-25": true, "SD-DC": true, "SD-DE": true,
- "SD-DN": true, "SD-DS": true, "SD-DW": true, "SD-GD": true, "SD-GK": true, "SD-GZ": true,
- "SD-KA": true, "SD-KH": true, "SD-KN": true, "SD-KS": true, "SD-NB": true,
- "SD-NO": true, "SD-NR": true, "SD-NW": true, "SD-RS": true, "SD-SI": true,
- "SE-AB": true, "SE-AC": true, "SE-BD": true, "SE-C": true, "SE-D": true,
- "SE-E": true, "SE-F": true, "SE-G": true, "SE-H": true, "SE-I": true,
- "SE-K": true, "SE-M": true, "SE-N": true, "SE-O": true, "SE-S": true,
- "SE-T": true, "SE-U": true, "SE-W": true, "SE-X": true, "SE-Y": true,
- "SE-Z": true, "SG-01": true, "SG-02": true, "SG-03": true, "SG-04": true,
- "SG-05": true, "SH-AC": true, "SH-HL": true, "SH-TA": true, "SI-001": true,
- "SI-002": true, "SI-003": true, "SI-004": true, "SI-005": true, "SI-006": true,
- "SI-007": true, "SI-008": true, "SI-009": true, "SI-010": true, "SI-011": true,
- "SI-012": true, "SI-013": true, "SI-014": true, "SI-015": true, "SI-016": true,
- "SI-017": true, "SI-018": true, "SI-019": true, "SI-020": true, "SI-021": true,
- "SI-022": true, "SI-023": true, "SI-024": true, "SI-025": true, "SI-026": true,
- "SI-027": true, "SI-028": true, "SI-029": true, "SI-030": true, "SI-031": true,
- "SI-032": true, "SI-033": true, "SI-034": true, "SI-035": true, "SI-036": true,
- "SI-037": true, "SI-038": true, "SI-039": true, "SI-040": true, "SI-041": true,
- "SI-042": true, "SI-043": true, "SI-044": true, "SI-045": true, "SI-046": true,
- "SI-047": true, "SI-048": true, "SI-049": true, "SI-050": true, "SI-051": true,
- "SI-052": true, "SI-053": true, "SI-054": true, "SI-055": true, "SI-056": true,
- "SI-057": true, "SI-058": true, "SI-059": true, "SI-060": true, "SI-061": true,
- "SI-062": true, "SI-063": true, "SI-064": true, "SI-065": true, "SI-066": true,
- "SI-067": true, "SI-068": true, "SI-069": true, "SI-070": true, "SI-071": true,
- "SI-072": true, "SI-073": true, "SI-074": true, "SI-075": true, "SI-076": true,
- "SI-077": true, "SI-078": true, "SI-079": true, "SI-080": true, "SI-081": true,
- "SI-082": true, "SI-083": true, "SI-084": true, "SI-085": true, "SI-086": true,
- "SI-087": true, "SI-088": true, "SI-089": true, "SI-090": true, "SI-091": true,
- "SI-092": true, "SI-093": true, "SI-094": true, "SI-095": true, "SI-096": true,
- "SI-097": true, "SI-098": true, "SI-099": true, "SI-100": true, "SI-101": true,
- "SI-102": true, "SI-103": true, "SI-104": true, "SI-105": true, "SI-106": true,
- "SI-107": true, "SI-108": true, "SI-109": true, "SI-110": true, "SI-111": true,
- "SI-112": true, "SI-113": true, "SI-114": true, "SI-115": true, "SI-116": true,
- "SI-117": true, "SI-118": true, "SI-119": true, "SI-120": true, "SI-121": true,
- "SI-122": true, "SI-123": true, "SI-124": true, "SI-125": true, "SI-126": true,
- "SI-127": true, "SI-128": true, "SI-129": true, "SI-130": true, "SI-131": true,
- "SI-132": true, "SI-133": true, "SI-134": true, "SI-135": true, "SI-136": true,
- "SI-137": true, "SI-138": true, "SI-139": true, "SI-140": true, "SI-141": true,
- "SI-142": true, "SI-143": true, "SI-144": true, "SI-146": true, "SI-147": true,
- "SI-148": true, "SI-149": true, "SI-150": true, "SI-151": true, "SI-152": true,
- "SI-153": true, "SI-154": true, "SI-155": true, "SI-156": true, "SI-157": true,
- "SI-158": true, "SI-159": true, "SI-160": true, "SI-161": true, "SI-162": true,
- "SI-163": true, "SI-164": true, "SI-165": true, "SI-166": true, "SI-167": true,
- "SI-168": true, "SI-169": true, "SI-170": true, "SI-171": true, "SI-172": true,
- "SI-173": true, "SI-174": true, "SI-175": true, "SI-176": true, "SI-177": true,
- "SI-178": true, "SI-179": true, "SI-180": true, "SI-181": true, "SI-182": true,
- "SI-183": true, "SI-184": true, "SI-185": true, "SI-186": true, "SI-187": true,
- "SI-188": true, "SI-189": true, "SI-190": true, "SI-191": true, "SI-192": true,
- "SI-193": true, "SI-194": true, "SI-195": true, "SI-196": true, "SI-197": true,
- "SI-198": true, "SI-199": true, "SI-200": true, "SI-201": true, "SI-202": true,
- "SI-203": true, "SI-204": true, "SI-205": true, "SI-206": true, "SI-207": true,
- "SI-208": true, "SI-209": true, "SI-210": true, "SI-211": true, "SI-212": true, "SI-213": true, "SK-BC": true,
- "SK-BL": true, "SK-KI": true, "SK-NI": true, "SK-PV": true, "SK-TA": true,
- "SK-TC": true, "SK-ZI": true, "SL-E": true, "SL-N": true, "SL-S": true,
- "SL-W": true, "SM-01": true, "SM-02": true, "SM-03": true, "SM-04": true,
- "SM-05": true, "SM-06": true, "SM-07": true, "SM-08": true, "SM-09": true,
- "SN-DB": true, "SN-DK": true, "SN-FK": true, "SN-KA": true, "SN-KD": true,
- "SN-KE": true, "SN-KL": true, "SN-LG": true, "SN-MT": true, "SN-SE": true,
- "SN-SL": true, "SN-TC": true, "SN-TH": true, "SN-ZG": true, "SO-AW": true,
- "SO-BK": true, "SO-BN": true, "SO-BR": true, "SO-BY": true, "SO-GA": true,
- "SO-GE": true, "SO-HI": true, "SO-JD": true, "SO-JH": true, "SO-MU": true,
- "SO-NU": true, "SO-SA": true, "SO-SD": true, "SO-SH": true, "SO-SO": true,
- "SO-TO": true, "SO-WO": true, "SR-BR": true, "SR-CM": true, "SR-CR": true,
- "SR-MA": true, "SR-NI": true, "SR-PM": true, "SR-PR": true, "SR-SA": true,
- "SR-SI": true, "SR-WA": true, "SS-BN": true, "SS-BW": true, "SS-EC": true,
- "SS-EE8": true, "SS-EE": true, "SS-EW": true, "SS-JG": true, "SS-LK": true, "SS-NU": true,
- "SS-UY": true, "SS-WR": true, "ST-01": true, "ST-P": true, "ST-S": true, "SV-AH": true,
- "SV-CA": true, "SV-CH": true, "SV-CU": true, "SV-LI": true, "SV-MO": true,
- "SV-PA": true, "SV-SA": true, "SV-SM": true, "SV-SO": true, "SV-SS": true,
- "SV-SV": true, "SV-UN": true, "SV-US": true, "SY-DI": true, "SY-DR": true,
- "SY-DY": true, "SY-HA": true, "SY-HI": true, "SY-HL": true, "SY-HM": true,
- "SY-ID": true, "SY-LA": true, "SY-QU": true, "SY-RA": true, "SY-RD": true,
- "SY-SU": true, "SY-TA": true, "SZ-HH": true, "SZ-LU": true, "SZ-MA": true,
- "SZ-SH": true, "TD-BA": true, "TD-BG": true, "TD-BO": true, "TD-CB": true,
- "TD-EN": true, "TD-GR": true, "TD-HL": true, "TD-KA": true, "TD-LC": true,
- "TD-LO": true, "TD-LR": true, "TD-MA": true, "TD-MC": true, "TD-ME": true,
- "TD-MO": true, "TD-ND": true, "TD-OD": true, "TD-SA": true, "TD-SI": true,
- "TD-TA": true, "TD-TI": true, "TD-WF": true, "TG-C": true, "TG-K": true,
- "TG-M": true, "TG-P": true, "TG-S": true, "TH-10": true, "TH-11": true,
- "TH-12": true, "TH-13": true, "TH-14": true, "TH-15": true, "TH-16": true,
- "TH-17": true, "TH-18": true, "TH-19": true, "TH-20": true, "TH-21": true,
- "TH-22": true, "TH-23": true, "TH-24": true, "TH-25": true, "TH-26": true,
- "TH-27": true, "TH-30": true, "TH-31": true, "TH-32": true, "TH-33": true,
- "TH-34": true, "TH-35": true, "TH-36": true, "TH-37": true, "TH-38": true, "TH-39": true,
- "TH-40": true, "TH-41": true, "TH-42": true, "TH-43": true, "TH-44": true,
- "TH-45": true, "TH-46": true, "TH-47": true, "TH-48": true, "TH-49": true,
- "TH-50": true, "TH-51": true, "TH-52": true, "TH-53": true, "TH-54": true,
- "TH-55": true, "TH-56": true, "TH-57": true, "TH-58": true, "TH-60": true,
- "TH-61": true, "TH-62": true, "TH-63": true, "TH-64": true, "TH-65": true,
- "TH-66": true, "TH-67": true, "TH-70": true, "TH-71": true, "TH-72": true,
- "TH-73": true, "TH-74": true, "TH-75": true, "TH-76": true, "TH-77": true,
- "TH-80": true, "TH-81": true, "TH-82": true, "TH-83": true, "TH-84": true,
- "TH-85": true, "TH-86": true, "TH-90": true, "TH-91": true, "TH-92": true,
- "TH-93": true, "TH-94": true, "TH-95": true, "TH-96": true, "TH-S": true,
- "TJ-GB": true, "TJ-KT": true, "TJ-SU": true, "TJ-DU": true, "TJ-RA": true, "TL-AL": true, "TL-AN": true,
- "TL-BA": true, "TL-BO": true, "TL-CO": true, "TL-DI": true, "TL-ER": true,
- "TL-LA": true, "TL-LI": true, "TL-MF": true, "TL-MT": true, "TL-OE": true,
- "TL-VI": true, "TM-A": true, "TM-B": true, "TM-D": true, "TM-L": true,
- "TM-M": true, "TM-S": true, "TN-11": true, "TN-12": true, "TN-13": true,
- "TN-14": true, "TN-21": true, "TN-22": true, "TN-23": true, "TN-31": true,
- "TN-32": true, "TN-33": true, "TN-34": true, "TN-41": true, "TN-42": true,
- "TN-43": true, "TN-51": true, "TN-52": true, "TN-53": true, "TN-61": true,
- "TN-71": true, "TN-72": true, "TN-73": true, "TN-81": true, "TN-82": true,
- "TN-83": true, "TO-01": true, "TO-02": true, "TO-03": true, "TO-04": true,
- "TO-05": true, "TR-01": true, "TR-02": true, "TR-03": true, "TR-04": true,
- "TR-05": true, "TR-06": true, "TR-07": true, "TR-08": true, "TR-09": true,
- "TR-10": true, "TR-11": true, "TR-12": true, "TR-13": true, "TR-14": true,
- "TR-15": true, "TR-16": true, "TR-17": true, "TR-18": true, "TR-19": true,
- "TR-20": true, "TR-21": true, "TR-22": true, "TR-23": true, "TR-24": true,
- "TR-25": true, "TR-26": true, "TR-27": true, "TR-28": true, "TR-29": true,
- "TR-30": true, "TR-31": true, "TR-32": true, "TR-33": true, "TR-34": true,
- "TR-35": true, "TR-36": true, "TR-37": true, "TR-38": true, "TR-39": true,
- "TR-40": true, "TR-41": true, "TR-42": true, "TR-43": true, "TR-44": true,
- "TR-45": true, "TR-46": true, "TR-47": true, "TR-48": true, "TR-49": true,
- "TR-50": true, "TR-51": true, "TR-52": true, "TR-53": true, "TR-54": true,
- "TR-55": true, "TR-56": true, "TR-57": true, "TR-58": true, "TR-59": true,
- "TR-60": true, "TR-61": true, "TR-62": true, "TR-63": true, "TR-64": true,
- "TR-65": true, "TR-66": true, "TR-67": true, "TR-68": true, "TR-69": true,
- "TR-70": true, "TR-71": true, "TR-72": true, "TR-73": true, "TR-74": true,
- "TR-75": true, "TR-76": true, "TR-77": true, "TR-78": true, "TR-79": true,
- "TR-80": true, "TR-81": true, "TT-ARI": true, "TT-CHA": true, "TT-CTT": true,
- "TT-DMN": true, "TT-ETO": true, "TT-MRC": true, "TT-TOB": true, "TT-PED": true, "TT-POS": true, "TT-PRT": true,
- "TT-PTF": true, "TT-RCM": true, "TT-SFO": true, "TT-SGE": true, "TT-SIP": true,
- "TT-SJL": true, "TT-TUP": true, "TT-WTO": true, "TV-FUN": true, "TV-NIT": true,
- "TV-NKF": true, "TV-NKL": true, "TV-NMA": true, "TV-NMG": true, "TV-NUI": true,
- "TV-VAI": true, "TW-CHA": true, "TW-CYI": true, "TW-CYQ": true, "TW-KIN": true, "TW-HSQ": true,
- "TW-HSZ": true, "TW-HUA": true, "TW-LIE": true, "TW-ILA": true, "TW-KEE": true, "TW-KHH": true,
- "TW-KHQ": true, "TW-MIA": true, "TW-NAN": true, "TW-NWT": true, "TW-PEN": true, "TW-PIF": true,
- "TW-TAO": true, "TW-TNN": true, "TW-TNQ": true, "TW-TPE": true, "TW-TPQ": true,
- "TW-TTT": true, "TW-TXG": true, "TW-TXQ": true, "TW-YUN": true, "TZ-01": true,
- "TZ-02": true, "TZ-03": true, "TZ-04": true, "TZ-05": true, "TZ-06": true,
- "TZ-07": true, "TZ-08": true, "TZ-09": true, "TZ-10": true, "TZ-11": true,
- "TZ-12": true, "TZ-13": true, "TZ-14": true, "TZ-15": true, "TZ-16": true,
- "TZ-17": true, "TZ-18": true, "TZ-19": true, "TZ-20": true, "TZ-21": true,
- "TZ-22": true, "TZ-23": true, "TZ-24": true, "TZ-25": true, "TZ-26": true, "TZ-27": true, "TZ-28": true, "TZ-29": true, "TZ-30": true, "TZ-31": true,
- "UA-05": true, "UA-07": true, "UA-09": true, "UA-12": true, "UA-14": true,
- "UA-18": true, "UA-21": true, "UA-23": true, "UA-26": true, "UA-30": true,
- "UA-32": true, "UA-35": true, "UA-40": true, "UA-43": true, "UA-46": true,
- "UA-48": true, "UA-51": true, "UA-53": true, "UA-56": true, "UA-59": true,
- "UA-61": true, "UA-63": true, "UA-65": true, "UA-68": true, "UA-71": true,
- "UA-74": true, "UA-77": true, "UG-101": true, "UG-102": true, "UG-103": true,
- "UG-104": true, "UG-105": true, "UG-106": true, "UG-107": true, "UG-108": true,
- "UG-109": true, "UG-110": true, "UG-111": true, "UG-112": true, "UG-113": true,
- "UG-114": true, "UG-115": true, "UG-116": true, "UG-201": true, "UG-202": true,
- "UG-203": true, "UG-204": true, "UG-205": true, "UG-206": true, "UG-207": true,
- "UG-208": true, "UG-209": true, "UG-210": true, "UG-211": true, "UG-212": true,
- "UG-213": true, "UG-214": true, "UG-215": true, "UG-216": true, "UG-217": true,
- "UG-218": true, "UG-219": true, "UG-220": true, "UG-221": true, "UG-222": true,
- "UG-223": true, "UG-224": true, "UG-301": true, "UG-302": true, "UG-303": true,
- "UG-304": true, "UG-305": true, "UG-306": true, "UG-307": true, "UG-308": true,
- "UG-309": true, "UG-310": true, "UG-311": true, "UG-312": true, "UG-313": true,
- "UG-314": true, "UG-315": true, "UG-316": true, "UG-317": true, "UG-318": true,
- "UG-319": true, "UG-320": true, "UG-321": true, "UG-401": true, "UG-402": true,
- "UG-403": true, "UG-404": true, "UG-405": true, "UG-406": true, "UG-407": true,
- "UG-408": true, "UG-409": true, "UG-410": true, "UG-411": true, "UG-412": true,
- "UG-413": true, "UG-414": true, "UG-415": true, "UG-416": true, "UG-417": true,
- "UG-418": true, "UG-419": true, "UG-C": true, "UG-E": true, "UG-N": true,
- "UG-W": true, "UG-322": true, "UG-323": true, "UG-420": true, "UG-117": true,
- "UG-118": true, "UG-225": true, "UG-120": true, "UG-226": true,
- "UG-121": true, "UG-122": true, "UG-227": true, "UG-421": true,
- "UG-325": true, "UG-228": true, "UG-123": true, "UG-422": true,
- "UG-326": true, "UG-229": true, "UG-124": true, "UG-423": true,
- "UG-230": true, "UG-327": true, "UG-424": true, "UG-328": true,
- "UG-425": true, "UG-426": true, "UG-330": true,
- "UM-67": true, "UM-71": true, "UM-76": true, "UM-79": true,
- "UM-81": true, "UM-84": true, "UM-86": true, "UM-89": true, "UM-95": true,
- "US-AK": true, "US-AL": true, "US-AR": true, "US-AS": true, "US-AZ": true,
- "US-CA": true, "US-CO": true, "US-CT": true, "US-DC": true, "US-DE": true,
- "US-FL": true, "US-GA": true, "US-GU": true, "US-HI": true, "US-IA": true,
- "US-ID": true, "US-IL": true, "US-IN": true, "US-KS": true, "US-KY": true,
- "US-LA": true, "US-MA": true, "US-MD": true, "US-ME": true, "US-MI": true,
- "US-MN": true, "US-MO": true, "US-MP": true, "US-MS": true, "US-MT": true,
- "US-NC": true, "US-ND": true, "US-NE": true, "US-NH": true, "US-NJ": true,
- "US-NM": true, "US-NV": true, "US-NY": true, "US-OH": true, "US-OK": true,
- "US-OR": true, "US-PA": true, "US-PR": true, "US-RI": true, "US-SC": true,
- "US-SD": true, "US-TN": true, "US-TX": true, "US-UM": true, "US-UT": true,
- "US-VA": true, "US-VI": true, "US-VT": true, "US-WA": true, "US-WI": true,
- "US-WV": true, "US-WY": true, "UY-AR": true, "UY-CA": true, "UY-CL": true,
- "UY-CO": true, "UY-DU": true, "UY-FD": true, "UY-FS": true, "UY-LA": true,
- "UY-MA": true, "UY-MO": true, "UY-PA": true, "UY-RN": true, "UY-RO": true,
- "UY-RV": true, "UY-SA": true, "UY-SJ": true, "UY-SO": true, "UY-TA": true,
- "UY-TT": true, "UZ-AN": true, "UZ-BU": true, "UZ-FA": true, "UZ-JI": true,
- "UZ-NG": true, "UZ-NW": true, "UZ-QA": true, "UZ-QR": true, "UZ-SA": true,
- "UZ-SI": true, "UZ-SU": true, "UZ-TK": true, "UZ-TO": true, "UZ-XO": true,
- "VC-01": true, "VC-02": true, "VC-03": true, "VC-04": true, "VC-05": true,
- "VC-06": true, "VE-A": true, "VE-B": true, "VE-C": true, "VE-D": true,
- "VE-E": true, "VE-F": true, "VE-G": true, "VE-H": true, "VE-I": true,
- "VE-J": true, "VE-K": true, "VE-L": true, "VE-M": true, "VE-N": true,
- "VE-O": true, "VE-P": true, "VE-R": true, "VE-S": true, "VE-T": true,
- "VE-U": true, "VE-V": true, "VE-W": true, "VE-X": true, "VE-Y": true,
- "VE-Z": true, "VN-01": true, "VN-02": true, "VN-03": true, "VN-04": true,
- "VN-05": true, "VN-06": true, "VN-07": true, "VN-09": true, "VN-13": true,
- "VN-14": true, "VN-15": true, "VN-18": true, "VN-20": true, "VN-21": true,
- "VN-22": true, "VN-23": true, "VN-24": true, "VN-25": true, "VN-26": true,
- "VN-27": true, "VN-28": true, "VN-29": true, "VN-30": true, "VN-31": true,
- "VN-32": true, "VN-33": true, "VN-34": true, "VN-35": true, "VN-36": true,
- "VN-37": true, "VN-39": true, "VN-40": true, "VN-41": true, "VN-43": true,
- "VN-44": true, "VN-45": true, "VN-46": true, "VN-47": true, "VN-49": true,
- "VN-50": true, "VN-51": true, "VN-52": true, "VN-53": true, "VN-54": true,
- "VN-55": true, "VN-56": true, "VN-57": true, "VN-58": true, "VN-59": true,
- "VN-61": true, "VN-63": true, "VN-66": true, "VN-67": true, "VN-68": true,
- "VN-69": true, "VN-70": true, "VN-71": true, "VN-72": true, "VN-73": true,
- "VN-CT": true, "VN-DN": true, "VN-HN": true, "VN-HP": true, "VN-SG": true,
- "VU-MAP": true, "VU-PAM": true, "VU-SAM": true, "VU-SEE": true, "VU-TAE": true,
- "VU-TOB": true, "WF-SG": true,"WF-UV": true, "WS-AA": true, "WS-AL": true, "WS-AT": true, "WS-FA": true,
- "WS-GE": true, "WS-GI": true, "WS-PA": true, "WS-SA": true, "WS-TU": true,
- "WS-VF": true, "WS-VS": true, "YE-AB": true, "YE-AD": true, "YE-AM": true,
- "YE-BA": true, "YE-DA": true, "YE-DH": true, "YE-HD": true, "YE-HJ": true, "YE-HU": true,
- "YE-IB": true, "YE-JA": true, "YE-LA": true, "YE-MA": true, "YE-MR": true,
- "YE-MU": true, "YE-MW": true, "YE-RA": true, "YE-SA": true, "YE-SD": true, "YE-SH": true,
- "YE-SN": true, "YE-TA": true, "ZA-EC": true, "ZA-FS": true, "ZA-GP": true,
- "ZA-LP": true, "ZA-MP": true, "ZA-NC": true, "ZA-NW": true, "ZA-WC": true,
- "ZA-ZN": true, "ZA-KZN": true, "ZM-01": true, "ZM-02": true, "ZM-03": true, "ZM-04": true,
- "ZM-05": true, "ZM-06": true, "ZM-07": true, "ZM-08": true, "ZM-09": true, "ZM-10": true,
- "ZW-BU": true, "ZW-HA": true, "ZW-MA": true, "ZW-MC": true, "ZW-ME": true,
- "ZW-MI": true, "ZW-MN": true, "ZW-MS": true, "ZW-MV": true, "ZW-MW": true,
-}
diff --git a/vendor/github.com/go-playground/validator/v10/currency_codes.go b/vendor/github.com/go-playground/validator/v10/currency_codes.go
deleted file mode 100644
index a5cd9b18a0..0000000000
--- a/vendor/github.com/go-playground/validator/v10/currency_codes.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package validator
-
-var iso4217 = map[string]bool{
- "AFN": true, "EUR": true, "ALL": true, "DZD": true, "USD": true,
- "AOA": true, "XCD": true, "ARS": true, "AMD": true, "AWG": true,
- "AUD": true, "AZN": true, "BSD": true, "BHD": true, "BDT": true,
- "BBD": true, "BYN": true, "BZD": true, "XOF": true, "BMD": true,
- "INR": true, "BTN": true, "BOB": true, "BOV": true, "BAM": true,
- "BWP": true, "NOK": true, "BRL": true, "BND": true, "BGN": true,
- "BIF": true, "CVE": true, "KHR": true, "XAF": true, "CAD": true,
- "KYD": true, "CLP": true, "CLF": true, "CNY": true, "COP": true,
- "COU": true, "KMF": true, "CDF": true, "NZD": true, "CRC": true,
- "HRK": true, "CUP": true, "CUC": true, "ANG": true, "CZK": true,
- "DKK": true, "DJF": true, "DOP": true, "EGP": true, "SVC": true,
- "ERN": true, "SZL": true, "ETB": true, "FKP": true, "FJD": true,
- "XPF": true, "GMD": true, "GEL": true, "GHS": true, "GIP": true,
- "GTQ": true, "GBP": true, "GNF": true, "GYD": true, "HTG": true,
- "HNL": true, "HKD": true, "HUF": true, "ISK": true, "IDR": true,
- "XDR": true, "IRR": true, "IQD": true, "ILS": true, "JMD": true,
- "JPY": true, "JOD": true, "KZT": true, "KES": true, "KPW": true,
- "KRW": true, "KWD": true, "KGS": true, "LAK": true, "LBP": true,
- "LSL": true, "ZAR": true, "LRD": true, "LYD": true, "CHF": true,
- "MOP": true, "MKD": true, "MGA": true, "MWK": true, "MYR": true,
- "MVR": true, "MRU": true, "MUR": true, "XUA": true, "MXN": true,
- "MXV": true, "MDL": true, "MNT": true, "MAD": true, "MZN": true,
- "MMK": true, "NAD": true, "NPR": true, "NIO": true, "NGN": true,
- "OMR": true, "PKR": true, "PAB": true, "PGK": true, "PYG": true,
- "PEN": true, "PHP": true, "PLN": true, "QAR": true, "RON": true,
- "RUB": true, "RWF": true, "SHP": true, "WST": true, "STN": true,
- "SAR": true, "RSD": true, "SCR": true, "SLL": true, "SGD": true,
- "XSU": true, "SBD": true, "SOS": true, "SSP": true, "LKR": true,
- "SDG": true, "SRD": true, "SEK": true, "CHE": true, "CHW": true,
- "SYP": true, "TWD": true, "TJS": true, "TZS": true, "THB": true,
- "TOP": true, "TTD": true, "TND": true, "TRY": true, "TMT": true,
- "UGX": true, "UAH": true, "AED": true, "USN": true, "UYU": true,
- "UYI": true, "UYW": true, "UZS": true, "VUV": true, "VES": true,
- "VND": true, "YER": true, "ZMW": true, "ZWL": true, "XBA": true,
- "XBB": true, "XBC": true, "XBD": true, "XTS": true, "XXX": true,
- "XAU": true, "XPD": true, "XPT": true, "XAG": true,
-}
-
-var iso4217_numeric = map[int]bool{
- 8: true, 12: true, 32: true, 36: true, 44: true,
- 48: true, 50: true, 51: true, 52: true, 60: true,
- 64: true, 68: true, 72: true, 84: true, 90: true,
- 96: true, 104: true, 108: true, 116: true, 124: true,
- 132: true, 136: true, 144: true, 152: true, 156: true,
- 170: true, 174: true, 188: true, 191: true, 192: true,
- 203: true, 208: true, 214: true, 222: true, 230: true,
- 232: true, 238: true, 242: true, 262: true, 270: true,
- 292: true, 320: true, 324: true, 328: true, 332: true,
- 340: true, 344: true, 348: true, 352: true, 356: true,
- 360: true, 364: true, 368: true, 376: true, 388: true,
- 392: true, 398: true, 400: true, 404: true, 408: true,
- 410: true, 414: true, 417: true, 418: true, 422: true,
- 426: true, 430: true, 434: true, 446: true, 454: true,
- 458: true, 462: true, 480: true, 484: true, 496: true,
- 498: true, 504: true, 512: true, 516: true, 524: true,
- 532: true, 533: true, 548: true, 554: true, 558: true,
- 566: true, 578: true, 586: true, 590: true, 598: true,
- 600: true, 604: true, 608: true, 634: true, 643: true,
- 646: true, 654: true, 682: true, 690: true, 694: true,
- 702: true, 704: true, 706: true, 710: true, 728: true,
- 748: true, 752: true, 756: true, 760: true, 764: true,
- 776: true, 780: true, 784: true, 788: true, 800: true,
- 807: true, 818: true, 826: true, 834: true, 840: true,
- 858: true, 860: true, 882: true, 886: true, 901: true,
- 927: true, 928: true, 929: true, 930: true, 931: true,
- 932: true, 933: true, 934: true, 936: true, 938: true,
- 940: true, 941: true, 943: true, 944: true, 946: true,
- 947: true, 948: true, 949: true, 950: true, 951: true,
- 952: true, 953: true, 955: true, 956: true, 957: true,
- 958: true, 959: true, 960: true, 961: true, 962: true,
- 963: true, 964: true, 965: true, 967: true, 968: true,
- 969: true, 970: true, 971: true, 972: true, 973: true,
- 975: true, 976: true, 977: true, 978: true, 979: true,
- 980: true, 981: true, 984: true, 985: true, 986: true,
- 990: true, 994: true, 997: true, 999: true,
-}
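These iso4217 and iso4217_numeric maps are what the library's currency-code checks looked up against. A minimal sketch, assuming the corresponding iso4217 struct tag and using an invented Payment struct, of how that lookup was exercised:

	package main

	import (
		"fmt"

		"github.com/go-playground/validator/v10"
	)

	// Payment is an illustrative struct; only the tag matters here.
	type Payment struct {
		Currency string `validate:"iso4217"` // must be a code present in the map above, e.g. "USD"
	}

	func main() {
		validate := validator.New()
		fmt.Println(validate.Struct(Payment{Currency: "USD"})) // expected: <nil>
		fmt.Println(validate.Struct(Payment{Currency: "XYZ"})) // expected: a validation error on Currency
	}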
diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go
deleted file mode 100644
index f31a7d538a..0000000000
--- a/vendor/github.com/go-playground/validator/v10/doc.go
+++ /dev/null
@@ -1,1464 +0,0 @@
-/*
-Package validator implements value validations for structs and individual fields
-based on tags.
-
-It can also handle Cross-Field and Cross-Struct validation for nested structs
-and has the ability to dive into arrays and maps of any type.
-
-see more examples https://github.com/go-playground/validator/tree/master/_examples
-
-# Singleton
-
-Validator is designed to be thread-safe and used as a singleton instance.
-It caches information about your struct and validations,
-in essence only parsing your validation tags once per struct type.
-Using multiple instances neglects the benefit of caching.
-Functions that are not thread-safe are explicitly marked as such in the documentation.
-
-# Validation Functions Return Type error
-
-Doing things this way is actually how the standard library does it; see the
-os.Open function here:
-
- https://golang.org/pkg/os/#Open.
-
-The authors return the type "error" to avoid the issue discussed in the
-following, where err is always != nil:
-
- http://stackoverflow.com/a/29138676/3158232
- https://github.com/go-playground/validator/issues/134
-
-Validator returns only InvalidValidationError for bad validation input, nil, or
-ValidationErrors as type error; so in your code all you need to do is check
-whether the returned error is not nil, and if it is not, check whether it is an
-InvalidValidationError (if necessary; most of the time it isn't) and otherwise
-type-assert it to ValidationErrors like so: err.(validator.ValidationErrors).
-
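As a concrete, hedged sketch of that contract (the User struct and its tags are invented for illustration):

	package main

	import (
		"fmt"

		"github.com/go-playground/validator/v10"
	)

	type User struct {
		Email string `validate:"required,email"`
	}

	func main() {
		validate := validator.New()

		err := validate.Struct(User{Email: "not-an-email"})
		if err == nil {
			return
		}
		// Only relevant when the value passed in was itself unusable.
		if _, ok := err.(*validator.InvalidValidationError); ok {
			fmt.Println("bad input to Struct:", err)
			return
		}
		// Otherwise the error is a ValidationErrors value.
		for _, fe := range err.(validator.ValidationErrors) {
			fmt.Println(fe.Namespace(), fe.Tag())
		}
	}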
-# Custom Validation Functions
-
-Custom Validation functions can be added. Example:
-
- // Structure
- func customFunc(fl validator.FieldLevel) bool {
-
- if fl.Field().String() == "invalid" {
- return false
- }
-
- return true
- }
-
- validate.RegisterValidation("custom tag name", customFunc)
- // NOTES: using the same tag name as an existing function
- // will overwrite the existing one
-
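A complete, runnable version of the registration pattern above (the tag name "notinvalid" and the Form struct are illustrative):

	package main

	import (
		"fmt"

		"github.com/go-playground/validator/v10"
	)

	type Form struct {
		Name string `validate:"notinvalid"`
	}

	func main() {
		validate := validator.New()

		// Same logic as customFunc above: reject the literal string "invalid".
		_ = validate.RegisterValidation("notinvalid", func(fl validator.FieldLevel) bool {
			return fl.Field().String() != "invalid"
		})

		fmt.Println(validate.Struct(Form{Name: "ok"}))      // expected: <nil>
		fmt.Println(validate.Struct(Form{Name: "invalid"})) // expected: failure on the notinvalid tag
	}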
-# Cross-Field Validation
-
-Cross-Field Validation can be done via the following tags:
- - eqfield
- - nefield
- - gtfield
- - gtefield
- - ltfield
- - ltefield
- - eqcsfield
- - necsfield
- - gtcsfield
- - gtecsfield
- - ltcsfield
- - ltecsfield
-
-If, however, some custom cross-field validation is required, it can be done
-using a custom validation.
-
-Why not just have cross-struct validation tags (i.e. only eqcsfield and not
-eqfield)?
-
-The reason is efficiency. If you want to check a field within the same struct
-"eqfield" only has to find the field on the same struct (1 level). But, if we
-used "eqcsfield" it could be multiple levels down. Example:
-
- type Inner struct {
- StartDate time.Time
- }
-
- type Outer struct {
- InnerStructField *Inner
- CreatedAt time.Time `validate:"ltecsfield=InnerStructField.StartDate"`
- }
-
- now := time.Now()
-
- inner := &Inner{
- StartDate: now,
- }
-
- outer := &Outer{
- InnerStructField: inner,
- CreatedAt: now,
- }
-
- errs := validate.Struct(outer)
-
- // NOTE: when calling validate.Struct(val) topStruct will be the top level struct passed
- // into the function
- // when calling validate.VarWithValue(val, field, tag) val will be
- // whatever you pass, struct, field...
- // when calling validate.Field(field, tag) val will be nil
-
-# Multiple Validators
-
-Multiple validators on a field will process in the order defined. Example:
-
- type Test struct {
- Field `validate:"max=10,min=1"`
- }
-
- // max will be checked then min
-
-Bad Validator definitions are not handled by the library. Example:
-
- type Test struct {
- Field `validate:"min=10,max=0"`
- }
-
- // this definition of min max will never succeed
-
-# Using Validator Tags
-
-Baked In Cross-Field validation only compares fields on the same struct.
-If Cross-Field + Cross-Struct validation is needed you should implement your
-own custom validator.
-
-Comma (",") is the default separator of validation tags. If you wish to
-have a comma included within the parameter (i.e. excludesall=,) you will need to
-use the UTF-8 hex representation 0x2C, which is replaced in the code as a comma,
-so the above will become excludesall=0x2C.
-
- type Test struct {
- Field `validate:"excludesall=,"` // BAD! Do not include a comma.
- Field `validate:"excludesall=0x2C"` // GOOD! Use the UTF-8 hex representation.
- }
-
-Pipe ("|") is the 'or' validation tag separator. If you wish to
-have a pipe included within the parameter (i.e. excludesall=|) you will need to
-use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe,
-so the above will become excludesall=0x7C.
-
- type Test struct {
- Field `validate:"excludesall=|"` // BAD! Do not include a pipe!
- Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation.
- }
-
-# Baked In Validators and Tags
-
-Here is a list of the current built in validators:
-
-# Skip Field
-
-Tells the validation to skip this struct field; this is particularly
-handy in ignoring embedded structs from being validated. (Usage: -)
-
- Usage: -
-
-# Or Operator
-
-This is the 'or' operator allowing multiple validators to be used and
-accepted. (Usage: rgb|rgba) <-- this would allow either rgb or rgba
-colors to be accepted. This can also be combined with 'and' for example
-( Usage: omitempty,rgb|rgba)
-
- Usage: |
-
-# StructOnly
-
-When a field that is a nested struct is encountered, and contains this flag
-any validation on the nested struct will be run, but none of the nested
-struct fields will be validated. This is useful if inside of your program
-you know the struct will be valid, but need to verify it has been assigned.
-NOTE: only "required" and "omitempty" can be used on a struct itself.
-
- Usage: structonly
-
-# NoStructLevel
-
-Same as structonly tag except that any struct level validations will not run.
-
- Usage: nostructlevel
-
-# Omit Empty
-
-Allows conditional validation, for example if a field is not set with
-a value (Determined by the "required" validator) then other validation
-such as min or max won't run, but if a value is set validation will run.
-
- Usage: omitempty
-
-# Dive
-
-This tells the validator to dive into a slice, array or map and validate that
-level of the slice, array or map with the validation tags that follow.
-Multidimensional nesting is also supported, each level you wish to dive will
-require another dive tag. dive has some sub-tags, 'keys' & 'endkeys', please see
-the Keys & EndKeys section just below.
-
- Usage: dive
-
-Example #1
-
- [][]string with validation tag "gt=0,dive,len=1,dive,required"
- // gt=0 will be applied to []
- // len=1 will be applied to []string
- // required will be applied to string
-
-Example #2
-
- [][]string with validation tag "gt=0,dive,dive,required"
- // gt=0 will be applied to []
- // []string will be spared validation
- // required will be applied to string
-
-Keys & EndKeys
-
-These are to be used together directly after the dive tag and tell the validator
-that anything between 'keys' and 'endkeys' applies to the keys of a map and not the
-values; think of it like the 'dive' tag, but for map keys instead of values.
-Multidimensional nesting is also supported, each level you wish to validate will
-require another 'keys' and 'endkeys' tag. These tags are only valid for maps.
-
- Usage: dive,keys,othertagvalidation(s),endkeys,valuevalidationtags
-
-Example #1
-
- map[string]string with validation tag "gt=0,dive,keys,eq=1|eq=2,endkeys,required"
- // gt=0 will be applied to the map itself
- // eq=1|eq=2 will be applied to the map keys
- // required will be applied to map values
-
-Example #2
-
- map[[2]string]string with validation tag "gt=0,dive,keys,dive,eq=1|eq=2,endkeys,required"
- // gt=0 will be applied to the map itself
- // eq=1|eq=2 will be applied to each array element in the map keys
- // required will be applied to map values
-
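A hedged, runnable sketch of the map form described above (the Config struct and the specific key constraints are invented):

	package main

	import (
		"fmt"

		"github.com/go-playground/validator/v10"
	)

	type Config struct {
		// gt=0: the map itself must be non-empty.
		// keys ... endkeys: every key must be 1..16 characters.
		// required: every value must be non-empty.
		Labels map[string]string `validate:"gt=0,dive,keys,min=1,max=16,endkeys,required"`
	}

	func main() {
		validate := validator.New()
		fmt.Println(validate.Struct(Config{Labels: map[string]string{"env": "prod"}})) // expected: <nil>
		fmt.Println(validate.Struct(Config{Labels: map[string]string{"": "prod"}}))    // expected: the empty key fails min=1
	}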
-# Required
-
-This validates that the value is not the data type's default zero value.
-For numbers ensures value is not zero. For strings ensures value is
-not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
-
- Usage: required
-
-# Required If
-
-The field under validation must be present and not empty only if all
-the other specified fields are equal to the value following the specified
-field. For strings ensures value is not "". For slices, maps, pointers,
-interfaces, channels and functions ensures the value is not nil.
-
- Usage: required_if
-
-Examples:
-
- // require the field if the Field1 is equal to the parameter given:
- Usage: required_if=Field1 foobar
-
- // require the field if the Field1 and Field2 is equal to the value respectively:
- Usage: required_if=Field1 foo Field2 bar
-
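A small, hedged example of required_if in a struct tag (the Order struct and its values are illustrative):

	package main

	import (
		"fmt"

		"github.com/go-playground/validator/v10"
	)

	type Order struct {
		PaymentType string `validate:"oneof=card cash"`
		// CardNumber is required only when PaymentType equals "card".
		CardNumber string `validate:"required_if=PaymentType card"`
	}

	func main() {
		validate := validator.New()
		fmt.Println(validate.Struct(Order{PaymentType: "cash"}))                                 // expected: <nil>
		fmt.Println(validate.Struct(Order{PaymentType: "card"}))                                 // expected: CardNumber fails required_if
		fmt.Println(validate.Struct(Order{PaymentType: "card", CardNumber: "4111111111111111"})) // expected: <nil>
	}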
-# Required Unless
-
-The field under validation must be present and not empty unless all
-the other specified fields are equal to the value following the specified
-field. For strings ensures value is not "". For slices, maps, pointers,
-interfaces, channels and functions ensures the value is not nil.
-
- Usage: required_unless
-
-Examples:
-
- // require the field unless the Field1 is equal to the parameter given:
- Usage: required_unless=Field1 foobar
-
- // require the field unless the Field1 and Field2 is equal to the value respectively:
- Usage: required_unless=Field1 foo Field2 bar
-
-# Required With
-
-The field under validation must be present and not empty only if any
-of the other specified fields are present. For strings ensures value is
-not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
-
- Usage: required_with
-
-Examples:
-
- // require the field if the Field1 is present:
- Usage: required_with=Field1
-
- // require the field if the Field1 or Field2 is present:
- Usage: required_with=Field1 Field2
-
-# Required With All
-
-The field under validation must be present and not empty only if all
-of the other specified fields are present. For strings ensures value is
-not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
-
- Usage: required_with_all
-
-Example:
-
- // require the field if the Field1 and Field2 is present:
- Usage: required_with_all=Field1 Field2
-
-# Required Without
-
-The field under validation must be present and not empty only when any
-of the other specified fields are not present. For strings ensures value is
-not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
-
- Usage: required_without
-
-Examples:
-
- // require the field if the Field1 is not present:
- Usage: required_without=Field1
-
- // require the field if the Field1 or Field2 is not present:
- Usage: required_without=Field1 Field2
-
-# Required Without All
-
-The field under validation must be present and not empty only when all
-of the other specified fields are not present. For strings ensures value is
-not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
-
- Usage: required_without_all
-
-Example:
-
- // require the field if the Field1 and Field2 is not present:
- Usage: required_without_all=Field1 Field2
-
-# Excluded If
-
-The field under validation must not be present or not empty only if all
-the other specified fields are equal to the value following the specified
-field. For strings ensures value is not "". For slices, maps, pointers,
-interfaces, channels and functions ensures the value is not nil.
-
- Usage: excluded_if
-
-Examples:
-
- // exclude the field if the Field1 is equal to the parameter given:
- Usage: excluded_if=Field1 foobar
-
- // exclude the field if the Field1 and Field2 is equal to the value respectively:
- Usage: excluded_if=Field1 foo Field2 bar
-
-# Excluded Unless
-
-The field under validation must not be present or empty unless all
-the other specified fields are equal to the value following the specified
-field. For strings ensures value is not "". For slices, maps, pointers,
-interfaces, channels and functions ensures the value is not nil.
-
- Usage: excluded_unless
-
-Examples:
-
- // exclude the field unless the Field1 is equal to the parameter given:
- Usage: excluded_unless=Field1 foobar
-
- // exclude the field unless the Field1 and Field2 is equal to the value respectively:
- Usage: excluded_unless=Field1 foo Field2 bar
-
-# Is Default
-
-This validates that the value is the default value and is almost the
-opposite of required.
-
- Usage: isdefault
-
-# Length
-
-For numbers, length will ensure that the value is
-equal to the parameter given. For strings, it checks that
-the string length is exactly that number of characters. For slices,
-arrays, and maps, validates the number of items.
-
-Example #1
-
- Usage: len=10
-
-Example #2 (time.Duration)
-
-For time.Duration, len will ensure that the value is equal to the duration given
-in the parameter.
-
- Usage: len=1h30m
-
-# Maximum
-
-For numbers, max will ensure that the value is
-less than or equal to the parameter given. For strings, it checks
-that the string length is at most that number of characters. For
-slices, arrays, and maps, validates the number of items.
-
-Example #1
-
- Usage: max=10
-
-Example #2 (time.Duration)
-
-For time.Duration, max will ensure that the value is less than or equal to the
-duration given in the parameter.
-
- Usage: max=1h30m
-
-# Minimum
-
-For numbers, min will ensure that the value is
-greater or equal to the parameter given. For strings, it checks that
-the string length is at least that number of characters. For slices,
-arrays, and maps, validates the number of items.
-
-Example #1
-
- Usage: min=10
-
-Example #2 (time.Duration)
-
-For time.Duration, min will ensure that the value is greater than or equal to
-the duration given in the parameter.
-
- Usage: min=1h30m
-
-# Equals
-
-For strings & numbers, eq will ensure that the value is
-equal to the parameter given. For slices, arrays, and maps,
-validates the number of items.
-
-Example #1
-
- Usage: eq=10
-
-Example #2 (time.Duration)
-
-For time.Duration, eq will ensure that the value is equal to the duration given
-in the parameter.
-
- Usage: eq=1h30m
-
-# Not Equal
-
-For strings & numbers, ne will ensure that the value is not
-equal to the parameter given. For slices, arrays, and maps,
-validates the number of items.
-
-Example #1
-
- Usage: ne=10
-
-Example #2 (time.Duration)
-
-For time.Duration, ne will ensure that the value is not equal to the duration
-given in the parameter.
-
- Usage: ne=1h30m
-
-# One Of
-
-For strings, ints, and uints, oneof will ensure that the value
-is one of the values in the parameter. The parameter should be
-a list of values separated by whitespace. Values may be
-strings or numbers. To match strings with spaces in them, include
-the target string between single quotes.
-
- Usage: oneof=red green
- oneof='red green' 'blue yellow'
- oneof=5 7 9
-
-# Greater Than
-
-For numbers, this will ensure that the value is greater than the
-parameter given. For strings, it checks that the string length
-is greater than that number of characters. For slices, arrays
-and maps it validates the number of items.
-
-Example #1
-
- Usage: gt=10
-
-Example #2 (time.Time)
-
-For time.Time ensures the time value is greater than time.Now.UTC().
-
- Usage: gt
-
-Example #3 (time.Duration)
-
-For time.Duration, gt will ensure that the value is greater than the duration
-given in the parameter.
-
- Usage: gt=1h30m
-
-# Greater Than or Equal
-
-Same as 'min' above. Kept both to make terminology with 'len' easier.
-
-Example #1
-
- Usage: gte=10
-
-Example #2 (time.Time)
-
-For time.Time ensures the time value is greater than or equal to time.Now.UTC().
-
- Usage: gte
-
-Example #3 (time.Duration)
-
-For time.Duration, gte will ensure that the value is greater than or equal to
-the duration given in the parameter.
-
- Usage: gte=1h30m
-
-# Less Than
-
-For numbers, this will ensure that the value is less than the parameter given.
-For strings, it checks that the string length is less than that number of
-characters. For slices, arrays, and maps it validates the number of items.
-
-Example #1
-
- Usage: lt=10
-
-Example #2 (time.Time)
-
-For time.Time ensures the time value is less than time.Now.UTC().
-
- Usage: lt
-
-Example #3 (time.Duration)
-
-For time.Duration, lt will ensure that the value is less than the duration given
-in the parameter.
-
- Usage: lt=1h30m
-
-# Less Than or Equal
-
-Same as 'max' above. Kept both to make terminology with 'len' easier.
-
-Example #1
-
- Usage: lte=10
-
-Example #2 (time.Time)
-
-For time.Time ensures the time value is less than or equal to time.Now.UTC().
-
- Usage: lte
-
-Example #3 (time.Duration)
-
-For time.Duration, lte will ensure that the value is less than or equal to the
-duration given in the parameter.
-
- Usage: lte=1h30m
-
-# Field Equals Another Field
-
-This will validate the field value against another fields value either within
-a struct or passed in field.
-
-Example #1:
-
- // Validation on Password field using:
- Usage: eqfield=ConfirmPassword
-
-Example #2:
-
- // Validating by field:
- validate.VarWithValue(password, confirmpassword, "eqfield")
-
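A hedged, runnable version of the password/confirm-password case above (the Signup struct is illustrative):

	package main

	import (
		"fmt"

		"github.com/go-playground/validator/v10"
	)

	type Signup struct {
		Password        string `validate:"required,min=8"`
		ConfirmPassword string `validate:"eqfield=Password"`
	}

	func main() {
		validate := validator.New()
		fmt.Println(validate.Struct(Signup{Password: "s3cretpw", ConfirmPassword: "s3cretpw"})) // expected: <nil>
		fmt.Println(validate.Struct(Signup{Password: "s3cretpw", ConfirmPassword: "oops"}))     // expected: eqfield failure on ConfirmPassword
	}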
-Field Equals Another Field (relative)
-
-This does the same as eqfield except that it validates the field provided relative
-to the top level struct.
-
-	Usage: eqcsfield=InnerStructField.Field
-
-# Field Does Not Equal Another Field
-
-This will validate the field value against another fields value either within
-a struct or passed in field.
-
-Examples:
-
- // Confirm two colors are not the same:
- //
- // Validation on Color field:
- Usage: nefield=Color2
-
- // Validating by field:
- validate.VarWithValue(color1, color2, "nefield")
-
-Field Does Not Equal Another Field (relative)
-
-This does the same as nefield except that it validates the field provided
-relative to the top level struct.
-
- Usage: necsfield=InnerStructField.Field
-
-# Field Greater Than Another Field
-
-Only valid for Numbers, time.Duration and time.Time types, this will validate
-the field value against another fields value either within a struct or passed in
-field. Usage examples are for validation of a Start and End date:
-
-Example #1:
-
- // Validation on End field using:
- validate.Struct Usage(gtfield=Start)
-
-Example #2:
-
- // Validating by field:
- validate.VarWithValue(start, end, "gtfield")
-
-# Field Greater Than Another Relative Field
-
-This does the same as gtfield except that it validates the field provided
-relative to the top level struct.
-
- Usage: gtcsfield=InnerStructField.Field
-
-# Field Greater Than or Equal To Another Field
-
-Only valid for Numbers, time.Duration and time.Time types, this will validate
-the field value against another fields value either within a struct or passed in
-field. Usage examples are for validation of a Start and End date:
-
-Example #1:
-
- // Validation on End field using:
- validate.Struct Usage(gtefield=Start)
-
-Example #2:
-
- // Validating by field:
- validate.VarWithValue(start, end, "gtefield")
-
-# Field Greater Than or Equal To Another Relative Field
-
-This does the same as gtefield except that it validates the field provided relative
-to the top level struct.
-
- Usage: gtecsfield=InnerStructField.Field
-
-# Less Than Another Field
-
-Only valid for Numbers, time.Duration and time.Time types, this will validate
-the field value against another fields value either within a struct or passed in
-field. Usage examples are for validation of a Start and End date:
-
-Example #1:
-
- // Validation on End field using:
- validate.Struct Usage(ltfield=Start)
-
-Example #2:
-
- // Validating by field:
- validate.VarWithValue(start, end, "ltfield")
-
-# Less Than Another Relative Field
-
-This does the same as ltfield except that it validates the field provided relative
-to the top level struct.
-
- Usage: ltcsfield=InnerStructField.Field
-
-# Less Than or Equal To Another Field
-
-Only valid for Numbers, time.Duration and time.Time types, this will validate
-the field value against another fields value either within a struct or passed in
-field. Usage examples are for validation of a Start and End date:
-
-Example #1:
-
- // Validation on End field using:
- validate.Struct Usage(ltefield=Start)
-
-Example #2:
-
- // Validating by field:
- validate.VarWithValue(start, end, "ltefield")
-
-# Less Than or Equal To Another Relative Field
-
-This does the same as ltefield except that it validates the field provided relative
-to the top level struct.
-
- Usage: ltecsfield=InnerStructField.Field
-
-# Field Contains Another Field
-
-This does the same as contains except for struct fields. It should only be used
-with string types. See the behavior of reflect.Value.String() for behavior on
-other types.
-
- Usage: containsfield=InnerStructField.Field
-
-# Field Excludes Another Field
-
-This does the same as excludes except for struct fields. It should only be used
-with string types. See the behavior of reflect.Value.String() for behavior on
-other types.
-
- Usage: excludesfield=InnerStructField.Field
-
-# Unique
-
-For arrays & slices, unique will ensure that there are no duplicates.
-For maps, unique will ensure that there are no duplicate values.
-For slices of struct, unique will ensure that there are no duplicate values
-in a field of the struct specified via a parameter.
-
- // For arrays, slices, and maps:
- Usage: unique
-
- // For slices of struct:
- Usage: unique=field
-
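A hedged sketch of the slice-of-struct form, unique=field (the Team and User types are invented):

	package main

	import (
		"fmt"

		"github.com/go-playground/validator/v10"
	)

	type User struct {
		Name string
	}

	type Team struct {
		// No two members may share the same Name.
		Members []User `validate:"unique=Name"`
	}

	func main() {
		validate := validator.New()
		fmt.Println(validate.Struct(Team{Members: []User{{Name: "ana"}, {Name: "bo"}}}))  // expected: <nil>
		fmt.Println(validate.Struct(Team{Members: []User{{Name: "ana"}, {Name: "ana"}}})) // expected: unique failure on Members
	}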
-# Alpha Only
-
-This validates that a string value contains ASCII alpha characters only
-
- Usage: alpha
-
-# Alphanumeric
-
-This validates that a string value contains ASCII alphanumeric characters only
-
- Usage: alphanum
-
-# Alpha Unicode
-
-This validates that a string value contains unicode alpha characters only
-
- Usage: alphaunicode
-
-# Alphanumeric Unicode
-
-This validates that a string value contains unicode alphanumeric characters only
-
- Usage: alphanumunicode
-
-# Boolean
-
-This validates that a string value can successfully be parsed into a boolean with strconv.ParseBool
-
- Usage: boolean
-
-# Number
-
-This validates that a string value contains number values only.
-For integers or floats it returns true.
-
- Usage: number
-
-# Numeric
-
-This validates that a string value contains a basic numeric value.
-Basic excludes exponents, etc.
-For integers or floats it returns true.
-
- Usage: numeric
-
-# Hexadecimal String
-
-This validates that a string value contains a valid hexadecimal.
-
- Usage: hexadecimal
-
-# Hexcolor String
-
-This validates that a string value contains a valid hex color including
-hashtag (#)
-
- Usage: hexcolor
-
-# Lowercase String
-
-This validates that a string value contains only lowercase characters. An empty string is not a valid lowercase string.
-
- Usage: lowercase
-
-# Uppercase String
-
-This validates that a string value contains only uppercase characters. An empty string is not a valid uppercase string.
-
- Usage: uppercase
-
-# RGB String
-
-This validates that a string value contains a valid rgb color
-
- Usage: rgb
-
-# RGBA String
-
-This validates that a string value contains a valid rgba color
-
- Usage: rgba
-
-# HSL String
-
-This validates that a string value contains a valid hsl color
-
- Usage: hsl
-
-# HSLA String
-
-This validates that a string value contains a valid hsla color
-
- Usage: hsla
-
-# E.164 Phone Number String
-
-This validates that a string value contains a valid E.164 Phone number
-https://en.wikipedia.org/wiki/E.164 (ex. +1123456789)
-
- Usage: e164
-
-# E-mail String
-
-This validates that a string value contains a valid email
-This may not conform to all possibilities of any rfc standard, but neither
-does any email provider accept all possibilities.
-
- Usage: email
-
-# JSON String
-
-This validates that a string value is valid JSON
-
- Usage: json
-
-# JWT String
-
-This validates that a string value is a valid JWT
-
- Usage: jwt
-
-
-# File
-
-This validates that a string value contains a valid file path and that
-the file exists on the machine.
-This is done using os.Stat, which is a platform independent function.
-
- Usage: file
-
-
-# File Path
-
-This validates that a string value contains a valid file path but does not
-validate the existence of that file.
-This is done using os.Stat, which is a platform independent function.
-
- Usage: filepath
-
-
-# URL String
-
-This validates that a string value contains a valid url
-This will accept any url that Go's request URI parsing accepts, but it must
-contain a scheme, for example http:// or rtmp://.
-
- Usage: url
-
-# URI String
-
-This validates that a string value contains a valid uri
-This will accept any uri that Go's request URI parsing accepts.
-
- Usage: uri
-
-# Urn RFC 2141 String
-
-This validates that a string value contains a valid URN
-according to the RFC 2141 spec.
-
- Usage: urn_rfc2141
-
-# Base64 String
-
-This validates that a string value contains a valid base64 value.
-Although an empty string is valid base64 this will report an empty string
-as an error, if you wish to accept an empty string as valid you can use
-this with the omitempty tag.
-
- Usage: base64
-
-# Base64URL String
-
-This validates that a string value contains a valid base64 URL safe value
-according the RFC4648 spec.
-Although an empty string is a valid base64 URL safe value, this will report
-an empty string as an error, if you wish to accept an empty string as valid
-you can use this with the omitempty tag.
-
- Usage: base64url
-
-
-# Base64RawURL String
-
-This validates that a string value contains a valid base64 URL safe value,
-but without = padding, according the RFC4648 spec, section 3.2.
-Although an empty string is a valid base64 URL safe value, this will report
-an empty string as an error, if you wish to accept an empty string as valid
-you can use this with the omitempty tag.
-
-	Usage: base64rawurl
-
-
-# Bitcoin Address
-
-This validates that a string value contains a valid bitcoin address.
-The format of the string is checked to ensure it matches one of the formats
-P2PKH or P2SH, and checksum validation is performed.
-
- Usage: btc_addr
-
-Bitcoin Bech32 Address (segwit)
-
-This validates that a string value contains a valid bitcoin Bech32 address as defined
-by bip-0173 (https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki)
-Special thanks to Pieter Wuille for providing reference implementations.
-
- Usage: btc_addr_bech32
-
-# Ethereum Address
-
-This validates that a string value contains a valid ethereum address.
-The format of the string is checked to ensure it matches the standard Ethereum address format.
-
- Usage: eth_addr
-
-# Contains
-
-This validates that a string value contains the substring value.
-
- Usage: contains=@
-
-# Contains Any
-
-This validates that a string value contains any Unicode code points
-in the substring value.
-
- Usage: containsany=!@#?
-
-# Contains Rune
-
-This validates that a string value contains the supplied rune value.
-
- Usage: containsrune=@
-
-# Excludes
-
-This validates that a string value does not contain the substring value.
-
- Usage: excludes=@
-
-# Excludes All
-
-This validates that a string value does not contain any Unicode code
-points in the substring value.
-
- Usage: excludesall=!@#?
-
-# Excludes Rune
-
-This validates that a string value does not contain the supplied rune value.
-
- Usage: excludesrune=@
-
-# Starts With
-
-This validates that a string value starts with the supplied string value
-
- Usage: startswith=hello
-
-# Ends With
-
-This validates that a string value ends with the supplied string value
-
- Usage: endswith=goodbye
-
-# Does Not Start With
-
-This validates that a string value does not start with the supplied string value
-
- Usage: startsnotwith=hello
-
-# Does Not End With
-
-This validates that a string value does not end with the supplied string value
-
- Usage: endsnotwith=goodbye
-
-# International Standard Book Number
-
-This validates that a string value contains a valid isbn10 or isbn13 value.
-
- Usage: isbn
-
-# International Standard Book Number 10
-
-This validates that a string value contains a valid isbn10 value.
-
- Usage: isbn10
-
-# International Standard Book Number 13
-
-This validates that a string value contains a valid isbn13 value.
-
- Usage: isbn13
-
-# Universally Unique Identifier UUID
-
-This validates that a string value contains a valid UUID. Uppercase UUID values will not pass - use `uuid_rfc4122` instead.
-
- Usage: uuid
-
-# Universally Unique Identifier UUID v3
-
-This validates that a string value contains a valid version 3 UUID. Uppercase UUID values will not pass - use `uuid3_rfc4122` instead.
-
- Usage: uuid3
-
-# Universally Unique Identifier UUID v4
-
-This validates that a string value contains a valid version 4 UUID. Uppercase UUID values will not pass - use `uuid4_rfc4122` instead.
-
- Usage: uuid4
-
-# Universally Unique Identifier UUID v5
-
-This validates that a string value contains a valid version 5 UUID. Uppercase UUID values will not pass - use `uuid5_rfc4122` instead.
-
- Usage: uuid5
-
-# Universally Unique Lexicographically Sortable Identifier ULID
-
-This validates that a string value contains a valid ULID value.
-
- Usage: ulid
-
-# ASCII
-
-This validates that a string value contains only ASCII characters.
-NOTE: if the string is blank, this validates as true.
-
- Usage: ascii
-
-# Printable ASCII
-
-This validates that a string value contains only printable ASCII characters.
-NOTE: if the string is blank, this validates as true.
-
- Usage: printascii
-
-# Multi-Byte Characters
-
-This validates that a string value contains one or more multibyte characters.
-NOTE: if the string is blank, this validates as true.
-
- Usage: multibyte
-
-# Data URL
-
-This validates that a string value contains a valid DataURI.
-NOTE: this will also validate that the data portion is valid base64
-
- Usage: datauri
-
-# Latitude
-
-This validates that a string value contains a valid latitude.
-
- Usage: latitude
-
-# Longitude
-
-This validates that a string value contains a valid longitude.
-
- Usage: longitude
-
-# Social Security Number SSN
-
-This validates that a string value contains a valid U.S. Social Security Number.
-
- Usage: ssn
-
-# Internet Protocol Address IP
-
-This validates that a string value contains a valid IP Address.
-
- Usage: ip
-
-# Internet Protocol Address IPv4
-
-This validates that a string value contains a valid v4 IP Address.
-
- Usage: ipv4
-
-# Internet Protocol Address IPv6
-
-This validates that a string value contains a valid v6 IP Address.
-
- Usage: ipv6
-
-# Classless Inter-Domain Routing CIDR
-
-This validates that a string value contains a valid CIDR Address.
-
- Usage: cidr
-
-# Classless Inter-Domain Routing CIDRv4
-
-This validates that a string value contains a valid v4 CIDR Address.
-
- Usage: cidrv4
-
-# Classless Inter-Domain Routing CIDRv6
-
-This validates that a string value contains a valid v6 CIDR Address.
-
- Usage: cidrv6
-
-# Transmission Control Protocol Address TCP
-
-This validates that a string value contains a valid resolvable TCP Address.
-
- Usage: tcp_addr
-
-# Transmission Control Protocol Address TCPv4
-
-This validates that a string value contains a valid resolvable v4 TCP Address.
-
- Usage: tcp4_addr
-
-# Transmission Control Protocol Address TCPv6
-
-This validates that a string value contains a valid resolvable v6 TCP Address.
-
- Usage: tcp6_addr
-
-# User Datagram Protocol Address UDP
-
-This validates that a string value contains a valid resolvable UDP Address.
-
- Usage: udp_addr
-
-# User Datagram Protocol Address UDPv4
-
-This validates that a string value contains a valid resolvable v4 UDP Address.
-
- Usage: udp4_addr
-
-# User Datagram Protocol Address UDPv6
-
-This validates that a string value contains a valid resolvable v6 UDP Address.
-
- Usage: udp6_addr
-
-# Internet Protocol Address IP
-
-This validates that a string value contains a valid resolvable IP Address.
-
- Usage: ip_addr
-
-# Internet Protocol Address IPv4
-
-This validates that a string value contains a valid resolvable v4 IP Address.
-
- Usage: ip4_addr
-
-# Internet Protocol Address IPv6
-
-This validates that a string value contains a valid resolvable v6 IP Address.
-
- Usage: ip6_addr
-
-# Unix domain socket end point Address
-
-This validates that a string value contains a valid Unix Address.
-
- Usage: unix_addr
-
-# Media Access Control Address MAC
-
-This validates that a string value contains a valid MAC Address.
-
- Usage: mac
-
-Note: See Go's ParseMAC for accepted formats and types:
-
- http://golang.org/src/net/mac.go?s=866:918#L29
-
-# Hostname RFC 952
-
-This validates that a string value is a valid Hostname according to RFC 952 https://tools.ietf.org/html/rfc952
-
- Usage: hostname
-
-# Hostname RFC 1123
-
-This validates that a string value is a valid Hostname according to RFC 1123 https://tools.ietf.org/html/rfc1123
-
- Usage: hostname_rfc1123 or if you want to continue to use 'hostname' in your tags, create an alias.
-
-Fully Qualified Domain Name (FQDN)
-
-This validates that a string value contains a valid FQDN.
-
- Usage: fqdn
-
-# HTML Tags
-
-This validates that a string value appears to be an HTML element tag
-including those described at https://developer.mozilla.org/en-US/docs/Web/HTML/Element
-
- Usage: html
-
-# HTML Encoded
-
-This validates that a string value is a proper character reference in decimal
-or hexadecimal format
-
- Usage: html_encoded
-
-# URL Encoded
-
-This validates that a string value is percent-encoded (URL encoded) according
-to https://tools.ietf.org/html/rfc3986#section-2.1
-
- Usage: url_encoded
-
-# Directory
-
-This validates that a string value contains a valid directory and that
-it exists on the machine.
-This is done using os.Stat, which is a platform independent function.
-
- Usage: dir
-
-
-# Directory Path
-
-This validates that a string value contains a valid directory but does
-not validate the existence of that directory.
-This is done using os.Stat, which is a platform independent function.
-It is safest to suffix the string with os.PathSeparator if the directory
-may not exist at the time of validation.
-
- Usage: dirpath
-
-
-# HostPort
-
-This validates that a string value contains a valid DNS hostname and port that
-can be used to validate fields typically passed to sockets and connections.
-
- Usage: hostname_port
-
-# Datetime
-
-This validates that a string value is a valid datetime based on the supplied datetime format.
-Supplied format must match the official Go time format layout as documented in https://golang.org/pkg/time/
-
- Usage: datetime=2006-01-02
-
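A hedged example of the datetime tag with the 2006-01-02 layout shown below (the Report struct is illustrative):

	package main

	import (
		"fmt"

		"github.com/go-playground/validator/v10"
	)

	type Report struct {
		// Day must parse with the Go reference layout 2006-01-02, e.g. "2023-05-30".
		Day string `validate:"datetime=2006-01-02"`
	}

	func main() {
		validate := validator.New()
		fmt.Println(validate.Struct(Report{Day: "2023-05-30"})) // expected: <nil>
		fmt.Println(validate.Struct(Report{Day: "30/05/2023"})) // expected: datetime failure on Day
	}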
-# Iso3166-1 alpha-2
-
-This validates that a string value is a valid country code based on iso3166-1 alpha-2 standard.
-see: https://www.iso.org/iso-3166-country-codes.html
-
- Usage: iso3166_1_alpha2
-
-# Iso3166-1 alpha-3
-
-This validates that a string value is a valid country code based on iso3166-1 alpha-3 standard.
-see: https://www.iso.org/iso-3166-country-codes.html
-
- Usage: iso3166_1_alpha3
-
-# Iso3166-1 alpha-numeric
-
-This validates that a string value is a valid country code based on iso3166-1 alpha-numeric standard.
-see: https://www.iso.org/iso-3166-country-codes.html
-
-	Usage: iso3166_1_alpha_numeric
-
-# BCP 47 Language Tag
-
-This validates that a string value is a valid BCP 47 language tag, as parsed by language.Parse.
-More information on https://pkg.go.dev/golang.org/x/text/language
-
- Usage: bcp47_language_tag
-
-BIC (SWIFT code)
-
-This validates that a string value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362.
-More information on https://www.iso.org/standard/60390.html
-
- Usage: bic
-
-# RFC 1035 label
-
-This validates that a string value is a valid dns RFC 1035 label, defined in RFC 1035.
-More information on https://datatracker.ietf.org/doc/html/rfc1035
-
- Usage: dns_rfc1035_label
-
-# TimeZone
-
-This validates that a string value is a valid time zone based on the time zone database present on the system.
-Although the empty value and the Local value are accepted by Go's time.LoadLocation function, they are not allowed by this validator.
-More information on https://golang.org/pkg/time/#LoadLocation
-
- Usage: timezone
-
-# Semantic Version
-
-This validates that a string value is a valid semantic version, as defined by Semantic Versioning 2.0.0.
-More information on https://semver.org/
-
- Usage: semver
-
-
-# CVE Identifier
-
-This validates that a string value is a valid CVE identifier, as defined by MITRE.
-More information on https://cve.mitre.org/
-
- Usage: cve
-
-
-# Credit Card
-
-This validates that a string value contains a valid credit card number using the Luhn algorithm.
-
- Usage: credit_card
-
-
-# Luhn Checksum
-
-This validates that a string or (u)int value contains a valid checksum using the Luhn algorithm.
-
-	Usage: luhn_checksum
-
-# MongoDB ObjectID
-
-This validates that a string value is a valid 24-character hexadecimal string (a MongoDB ObjectID).
-
- Usage: mongodb
-
-
-# Cron
-
-This validates that a string value contains a valid cron expression.
-
- Usage: cron
-
-# Alias Validators and Tags
-
-NOTE: When returning an error, the tag returned in "FieldError" will be
-the alias tag unless the dive tag is part of the alias. Everything after the
-dive tag is not reported as the alias tag. Also, in the above case, "ActualTag"
-will be the actual tag within the alias that failed.
-
-Here is a list of the current built-in alias tags:
-
- "iscolor"
- alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor)
- "country_code"
- alias is "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric" (Usage: country_code)
-
-Validator notes:
-
-	regex
-		a regex validator won't be added because commas and = signs can be part
-		of a regex, which conflicts with the validation tag syntax. Although
-		workarounds can be made, they take away from using pure regexes.
-		Furthermore, while quick and dirty, such regexes become harder to
-		maintain and are not reusable, so this is as much a programming
-		philosophy as anything.
-
-		In place of this, new validator functions should be created; a regex
-		can be used within the validator function and even be precompiled for
-		better efficiency, as is done in regexes.go (see the sketch below).
-
-		And the best reason: you can submit a pull request and we can keep on
-		adding to the validation library of this package!
-
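To make the note above concrete, here is a minimal sketch of the recommended pattern: precompile the regexp once and wrap it in a registered validation function. The tag name phone_e164_strict and the pattern are illustrative assumptions, not part of this package.

	package main

	import (
		"fmt"
		"regexp"

		"github.com/go-playground/validator/v10"
	)

	// Precompiled once, as the note above recommends; the pattern itself is illustrative.
	var strictE164Regex = regexp.MustCompile(`^\+[1-9][0-9]{7,14}$`)

	type Contact struct {
		Phone string `validate:"phone_e164_strict"`
	}

	func main() {
		validate := validator.New()

		// Register the regex-backed rule under a custom tag name.
		_ = validate.RegisterValidation("phone_e164_strict", func(fl validator.FieldLevel) bool {
			return strictE164Regex.MatchString(fl.Field().String())
		})

		fmt.Println(validate.Struct(Contact{Phone: "+15551234567"})) // <nil>
		fmt.Println(validate.Struct(Contact{Phone: "not-a-phone"}))  // reports a FieldError
	}
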
-# Non-standard validators
-
-A collection of validation rules that are frequently needed but are more
-complex than the baked-in validators.
-A non-standard validator must be registered manually, in the same way as
-your own custom validation functions.
-
-Example of registration and use:
-
- type Test struct {
- TestField string `validate:"yourtag"`
- }
-
-	t := &Test{
-		TestField: "Test",
-	}
-
- validate := validator.New()
- validate.RegisterValidation("yourtag", validators.NotBlank)
-
-Here is a list of the current non-standard validators:
-
- NotBlank
-		This validates that the value is not blank and does not have zero length.
-		For strings, it ensures they do not consist only of spaces. For channels, maps, slices and arrays,
-		it ensures they do not have zero length. For all other types, a non-empty value is required.
-
- Usage: notblank
-
-# Panics
-
-This package panics when bad input is provided; this is by design, since bad code like
-that should not make it into production.
-
- type Test struct {
- TestField string `validate:"nonexistantfunction=1"`
- }
-
-	t := &Test{
-		TestField: "Test",
-	}
-
- validate.Struct(t) // this will panic
-*/
-package validator
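For context on the Panics and FieldError notes above, a minimal sketch (assuming standard go-playground/validator v10 behaviour) of how callers distinguish the error types returned by Struct; the User type here is illustrative.

	package main

	import (
		"errors"
		"fmt"

		"github.com/go-playground/validator/v10"
	)

	type User struct {
		Email string `validate:"required,email"`
		Age   uint8  `validate:"gte=18,lte=130"`
	}

	func main() {
		validate := validator.New()

		err := validate.Struct(User{Email: "not-an-email", Age: 12})

		// A nil or non-struct argument yields *InvalidValidationError rather than field errors.
		var invalid *validator.InvalidValidationError
		if errors.As(err, &invalid) {
			fmt.Println("bad input to Struct:", invalid)
			return
		}

		// Otherwise any error is a ValidationErrors slice of FieldError.
		var verrs validator.ValidationErrors
		if errors.As(err, &verrs) {
			for _, fe := range verrs {
				fmt.Printf("%s failed on %q (param %q)\n", fe.Namespace(), fe.Tag(), fe.Param())
			}
		}
	}
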
diff --git a/vendor/github.com/go-playground/validator/v10/errors.go b/vendor/github.com/go-playground/validator/v10/errors.go
deleted file mode 100644
index 5856d57c8c..0000000000
--- a/vendor/github.com/go-playground/validator/v10/errors.go
+++ /dev/null
@@ -1,272 +0,0 @@
-package validator
-
-import (
- "bytes"
- "fmt"
- "reflect"
- "strings"
-
- ut "github.com/go-playground/universal-translator"
-)
-
-const (
- fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag"
-)
-
-// ValidationErrorsTranslations is the translation return type
-type ValidationErrorsTranslations map[string]string
-
-// InvalidValidationError describes an invalid argument passed to
-// `Struct`, `StructExcept`, `StructPartial` or `Field`
-type InvalidValidationError struct {
- Type reflect.Type
-}
-
-// Error returns InvalidValidationError message
-func (e *InvalidValidationError) Error() string {
-
- if e.Type == nil {
- return "validator: (nil)"
- }
-
- return "validator: (nil " + e.Type.String() + ")"
-}
-
-// ValidationErrors is an array of FieldError's
-// for use in custom error messages post validation.
-type ValidationErrors []FieldError
-
-// Error is intended for use in development + debugging and not intended to be a production error message.
-// It allows ValidationErrors to satisfy the error interface.
-// All information needed to create an error message specific to your application is contained within
-// the FieldError found within the ValidationErrors array.
-func (ve ValidationErrors) Error() string {
-
- buff := bytes.NewBufferString("")
-
- for i := 0; i < len(ve); i++ {
-
- buff.WriteString(ve[i].Error())
- buff.WriteString("\n")
- }
-
- return strings.TrimSpace(buff.String())
-}
-
-// Translate translates all of the ValidationErrors
-func (ve ValidationErrors) Translate(ut ut.Translator) ValidationErrorsTranslations {
-
- trans := make(ValidationErrorsTranslations)
-
- var fe *fieldError
-
- for i := 0; i < len(ve); i++ {
- fe = ve[i].(*fieldError)
-
- // // in case an Anonymous struct was used, ensure that the key
- // // would be 'Username' instead of ".Username"
- // if len(fe.ns) > 0 && fe.ns[:1] == "." {
- // trans[fe.ns[1:]] = fe.Translate(ut)
- // continue
- // }
-
- trans[fe.ns] = fe.Translate(ut)
- }
-
- return trans
-}
-
-// FieldError contains all functions to get error details
-type FieldError interface {
-
-	// Tag returns the validation tag that failed. If the
-	// validation was an alias, this will return the
-	// alias name and not the underlying tag that failed.
- //
- // eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla"
- // will return "iscolor"
- Tag() string
-
-	// ActualTag returns the validation tag that failed; even if an
-	// alias was used, the actual tag within the alias will be returned.
-	// If an 'or' validation fails, the entire 'or' expression will be returned.
- //
- // eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla"
- // will return "hexcolor|rgb|rgba|hsl|hsla"
- ActualTag() string
-
- // Namespace returns the namespace for the field error, with the tag
- // name taking precedence over the field's actual name.
- //
- // eg. JSON name "User.fname"
- //
- // See StructNamespace() for a version that returns actual names.
- //
-	// NOTE: this field can be blank when validating a single primitive field
-	// using validate.Field(...) as there is no way to extract its name
- Namespace() string
-
- // StructNamespace returns the namespace for the field error, with the field's
- // actual name.
- //
-	// eg. "User.FirstName" see Namespace for comparison
- //
- // NOTE: this field can be blank when validating a single primitive field
- // using validate.Field(...) as there is no way to extract its name
- StructNamespace() string
-
- // Field returns the fields name with the tag name taking precedence over the
- // field's actual name.
- //
-	// eg. JSON name "fname"
- // see StructField for comparison
- Field() string
-
- // StructField returns the field's actual name from the struct, when able to determine.
- //
-	// eg. "FirstName"
- // see Field for comparison
- StructField() string
-
- // Value returns the actual field's value in case needed for creating the error
- // message
- Value() interface{}
-
- // Param returns the param value, in string form for comparison; this will also
- // help with generating an error message
- Param() string
-
- // Kind returns the Field's reflect Kind
- //
- // eg. time.Time's kind is a struct
- Kind() reflect.Kind
-
- // Type returns the Field's reflect Type
- //
- // eg. time.Time's type is time.Time
- Type() reflect.Type
-
- // Translate returns the FieldError's translated error
- // from the provided 'ut.Translator' and registered 'TranslationFunc'
- //
- // NOTE: if no registered translator can be found it returns the same as
- // calling fe.Error()
- Translate(ut ut.Translator) string
-
- // Error returns the FieldError's message
- Error() string
-}
-
-// compile time interface checks
-var _ FieldError = new(fieldError)
-var _ error = new(fieldError)
-
-// fieldError contains a single field's validation error along
-// with other properties that may be needed for error message creation.
-// It complies with the FieldError interface.
-type fieldError struct {
- v *Validate
- tag string
- actualTag string
- ns string
- structNs string
- fieldLen uint8
- structfieldLen uint8
- value interface{}
- param string
- kind reflect.Kind
- typ reflect.Type
-}
-
-// Tag returns the validation tag that failed.
-func (fe *fieldError) Tag() string {
- return fe.tag
-}
-
-// ActualTag returns the validation tag that failed; even if an alias
-// was used, the actual tag within the alias will be returned.
-func (fe *fieldError) ActualTag() string {
- return fe.actualTag
-}
-
-// Namespace returns the namespace for the field error, with the tag
-// name taking precedence over the field's actual name.
-func (fe *fieldError) Namespace() string {
- return fe.ns
-}
-
-// StructNamespace returns the namespace for the field error, with the field's
-// actual name.
-func (fe *fieldError) StructNamespace() string {
- return fe.structNs
-}
-
-// Field returns the field's name with the tag name taking precedence over the
-// field's actual name.
-func (fe *fieldError) Field() string {
-
- return fe.ns[len(fe.ns)-int(fe.fieldLen):]
- // // return fe.field
- // fld := fe.ns[len(fe.ns)-int(fe.fieldLen):]
-
- // log.Println("FLD:", fld)
-
- // if len(fld) > 0 && fld[:1] == "." {
- // return fld[1:]
- // }
-
- // return fld
-}
-
-// StructField returns the field's actual name from the struct, when able to determine.
-func (fe *fieldError) StructField() string {
- // return fe.structField
- return fe.structNs[len(fe.structNs)-int(fe.structfieldLen):]
-}
-
-// Value returns the actual field's value in case needed for creating the error
-// message
-func (fe *fieldError) Value() interface{} {
- return fe.value
-}
-
-// Param returns the param value, in string form for comparison; this will
-// also help with generating an error message
-func (fe *fieldError) Param() string {
- return fe.param
-}
-
-// Kind returns the Field's reflect Kind
-func (fe *fieldError) Kind() reflect.Kind {
- return fe.kind
-}
-
-// Type returns the Field's reflect Type
-func (fe *fieldError) Type() reflect.Type {
- return fe.typ
-}
-
-// Error returns the fieldError's error message
-func (fe *fieldError) Error() string {
- return fmt.Sprintf(fieldErrMsg, fe.ns, fe.Field(), fe.tag)
-}
-
-// Translate returns the FieldError's translated error
-// from the provided 'ut.Translator' and registered 'TranslationFunc'
-//
-// NOTE: if no registered translation can be found, it returns the original
-// untranslated error message.
-func (fe *fieldError) Translate(ut ut.Translator) string {
-
- m, ok := fe.v.transTagFunc[ut]
- if !ok {
- return fe.Error()
- }
-
- fn, ok := m[fe.tag]
- if !ok {
- return fe.Error()
- }
-
- return fn(ut, fe)
-}
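A minimal sketch of how the FieldError getters above are typically used to build application-specific messages; the Signup type and the message strings are illustrative assumptions.

	package main

	import (
		"fmt"

		"github.com/go-playground/validator/v10"
	)

	type Signup struct {
		Name string `validate:"required"`
		Age  int    `validate:"gte=18"`
	}

	// messagesFor flattens ValidationErrors into per-field strings using the
	// FieldError getters (Field, Tag, Param) defined above.
	func messagesFor(err error) map[string]string {
		out := map[string]string{}
		verrs, ok := err.(validator.ValidationErrors)
		if !ok {
			return out
		}
		for _, fe := range verrs {
			switch fe.Tag() {
			case "required":
				out[fe.Field()] = "this field is required"
			case "gte":
				out[fe.Field()] = fmt.Sprintf("must be at least %s", fe.Param())
			default:
				out[fe.Field()] = fmt.Sprintf("failed %q validation", fe.Tag())
			}
		}
		return out
	}

	func main() {
		validate := validator.New()
		fmt.Println(messagesFor(validate.Struct(Signup{Age: 7})))
	}
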
diff --git a/vendor/github.com/go-playground/validator/v10/field_level.go b/vendor/github.com/go-playground/validator/v10/field_level.go
deleted file mode 100644
index ef35826ee6..0000000000
--- a/vendor/github.com/go-playground/validator/v10/field_level.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package validator
-
-import "reflect"
-
-// FieldLevel contains all the information and helper functions
-// to validate a field
-type FieldLevel interface {
-
- // Top returns the top level struct, if any
- Top() reflect.Value
-
-	// Parent returns the current field's parent struct, if any, or
-	// the comparison value if called via 'VarWithValue'
- Parent() reflect.Value
-
- // Field returns current field for validation
- Field() reflect.Value
-
- // FieldName returns the field's name with the tag
-	// name taking precedence over the field's actual name.
- FieldName() string
-
- // StructFieldName returns the struct field's name
- StructFieldName() string
-
- // Param returns param for validation against current field
- Param() string
-
- // GetTag returns the current validations tag name
- GetTag() string
-
- // ExtractType gets the actual underlying type of field value.
- // It will dive into pointers, customTypes and return you the
-	// underlying value and its kind.
- ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool)
-
- // GetStructFieldOK traverses the parent struct to retrieve a specific field denoted by the provided namespace
-	// in the param and returns the field, field kind and whether it was successful in retrieving
- // the field at all.
- //
- // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field
- // could not be retrieved because it didn't exist.
- //
-	// Deprecated: Use GetStructFieldOK2() instead, which also returns whether the value is nullable.
- GetStructFieldOK() (reflect.Value, reflect.Kind, bool)
-
- // GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
- // the field and namespace allowing more extensibility for validators.
- //
-	// Deprecated: Use GetStructFieldOKAdvanced2() instead, which also returns whether the value is nullable.
- GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool)
-
- // GetStructFieldOK2 traverses the parent struct to retrieve a specific field denoted by the provided namespace
-	// in the param and returns the field, field kind, if it's a nullable type and whether it was successful in retrieving
- // the field at all.
- //
- // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field
- // could not be retrieved because it didn't exist.
- GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool)
-
- // GetStructFieldOKAdvanced2 is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
- // the field and namespace allowing more extensibility for validators.
- GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool)
-}
-
-var _ FieldLevel = new(validate)
-
-// Field returns current field for validation
-func (v *validate) Field() reflect.Value {
- return v.flField
-}
-
-// FieldName returns the field's name with the tag
-// name taking precedence over the field's actual name.
-func (v *validate) FieldName() string {
- return v.cf.altName
-}
-
-// GetTag returns the current validations tag name
-func (v *validate) GetTag() string {
- return v.ct.tag
-}
-
-// StructFieldName returns the struct field's name
-func (v *validate) StructFieldName() string {
- return v.cf.name
-}
-
-// Param returns param for validation against current field
-func (v *validate) Param() string {
- return v.ct.param
-}
-
-// GetStructFieldOK traverses the parent struct to retrieve the field denoted by the
-// current validation's param and returns the field, its kind and whether it was found.
-//
-// Deprecated: Use GetStructFieldOK2() instead, which also returns whether the value is nullable.
-func (v *validate) GetStructFieldOK() (reflect.Value, reflect.Kind, bool) {
- current, kind, _, found := v.getStructFieldOKInternal(v.slflParent, v.ct.param)
- return current, kind, found
-}
-
-// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
-// the field and namespace allowing more extensibility for validators.
-//
-// Deprecated: Use GetStructFieldOKAdvanced2() instead, which also returns whether the value is nullable.
-func (v *validate) GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) {
- current, kind, _, found := v.GetStructFieldOKAdvanced2(val, namespace)
- return current, kind, found
-}
-
-// GetStructFieldOK2 traverses the parent struct to retrieve the field denoted by the current validation's param and returns the field, its kind, whether it is nullable and whether it was found.
-func (v *validate) GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool) {
- return v.getStructFieldOKInternal(v.slflParent, v.ct.param)
-}
-
-// GetStructFieldOKAdvanced2 is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
-// the field and namespace allowing more extensibility for validators.
-func (v *validate) GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool) {
- return v.getStructFieldOKInternal(val, namespace)
-}
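A hedged sketch of how the FieldLevel helpers above support cross-field rules; the gtparam tag and the Range type are illustrative assumptions, not part of the library.

	package main

	import (
		"fmt"

		"github.com/go-playground/validator/v10"
	)

	type Range struct {
		Min int `validate:"required"`
		Max int `validate:"required,gtparam=Min"` // the tag parameter names the sibling field
	}

	func main() {
		validate := validator.New()

		// gtparam: the field must be greater than the sibling field named by the
		// tag parameter, which is resolved via GetStructFieldOK2.
		_ = validate.RegisterValidation("gtparam", func(fl validator.FieldLevel) bool {
			other, kind, _, found := fl.GetStructFieldOK2()
			if !found || kind != fl.Field().Kind() {
				return false
			}
			return fl.Field().Int() > other.Int()
		})

		fmt.Println(validate.Struct(Range{Min: 1, Max: 10})) // <nil>
		fmt.Println(validate.Struct(Range{Min: 10, Max: 2})) // validation error
	}
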
diff --git a/vendor/github.com/go-playground/validator/v10/logo.png b/vendor/github.com/go-playground/validator/v10/logo.png
deleted file mode 100644
index 355000f524..0000000000
Binary files a/vendor/github.com/go-playground/validator/v10/logo.png and /dev/null differ
diff --git a/vendor/github.com/go-playground/validator/v10/postcode_regexes.go b/vendor/github.com/go-playground/validator/v10/postcode_regexes.go
deleted file mode 100644
index e7e7b687f4..0000000000
--- a/vendor/github.com/go-playground/validator/v10/postcode_regexes.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package validator
-
-import "regexp"
-
-var postCodePatternDict = map[string]string{
- "GB": `^GIR[ ]?0AA|((AB|AL|B|BA|BB|BD|BH|BL|BN|BR|BS|BT|CA|CB|CF|CH|CM|CO|CR|CT|CV|CW|DA|DD|DE|DG|DH|DL|DN|DT|DY|E|EC|EH|EN|EX|FK|FY|G|GL|GY|GU|HA|HD|HG|HP|HR|HS|HU|HX|IG|IM|IP|IV|JE|KA|KT|KW|KY|L|LA|LD|LE|LL|LN|LS|LU|M|ME|MK|ML|N|NE|NG|NN|NP|NR|NW|OL|OX|PA|PE|PH|PL|PO|PR|RG|RH|RM|S|SA|SE|SG|SK|SL|SM|SN|SO|SP|SR|SS|ST|SW|SY|TA|TD|TF|TN|TQ|TR|TS|TW|UB|W|WA|WC|WD|WF|WN|WR|WS|WV|YO|ZE)(\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}))|BFPO[ ]?\d{1,4}$`,
- "JE": `^JE\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
- "GG": `^GY\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
- "IM": `^IM\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
- "US": `^\d{5}([ \-]\d{4})?$`,
- "CA": `^[ABCEGHJKLMNPRSTVXY]\d[ABCEGHJ-NPRSTV-Z][ ]?\d[ABCEGHJ-NPRSTV-Z]\d$`,
- "DE": `^\d{5}$`,
- "JP": `^\d{3}-\d{4}$`,
- "FR": `^\d{2}[ ]?\d{3}$`,
- "AU": `^\d{4}$`,
- "IT": `^\d{5}$`,
- "CH": `^\d{4}$`,
- "AT": `^\d{4}$`,
- "ES": `^\d{5}$`,
- "NL": `^\d{4}[ ]?[A-Z]{2}$`,
- "BE": `^\d{4}$`,
- "DK": `^\d{4}$`,
- "SE": `^\d{3}[ ]?\d{2}$`,
- "NO": `^\d{4}$`,
- "BR": `^\d{5}[\-]?\d{3}$`,
- "PT": `^\d{4}([\-]\d{3})?$`,
- "FI": `^\d{5}$`,
- "AX": `^22\d{3}$`,
- "KR": `^\d{3}[\-]\d{3}$`,
- "CN": `^\d{6}$`,
- "TW": `^\d{3}(\d{2})?$`,
- "SG": `^\d{6}$`,
- "DZ": `^\d{5}$`,
- "AD": `^AD\d{3}$`,
- "AR": `^([A-HJ-NP-Z])?\d{4}([A-Z]{3})?$`,
- "AM": `^(37)?\d{4}$`,
- "AZ": `^\d{4}$`,
- "BH": `^((1[0-2]|[2-9])\d{2})?$`,
- "BD": `^\d{4}$`,
- "BB": `^(BB\d{5})?$`,
- "BY": `^\d{6}$`,
- "BM": `^[A-Z]{2}[ ]?[A-Z0-9]{2}$`,
- "BA": `^\d{5}$`,
- "IO": `^BBND 1ZZ$`,
- "BN": `^[A-Z]{2}[ ]?\d{4}$`,
- "BG": `^\d{4}$`,
- "KH": `^\d{5}$`,
- "CV": `^\d{4}$`,
- "CL": `^\d{7}$`,
- "CR": `^\d{4,5}|\d{3}-\d{4}$`,
- "HR": `^\d{5}$`,
- "CY": `^\d{4}$`,
- "CZ": `^\d{3}[ ]?\d{2}$`,
- "DO": `^\d{5}$`,
- "EC": `^([A-Z]\d{4}[A-Z]|(?:[A-Z]{2})?\d{6})?$`,
- "EG": `^\d{5}$`,
- "EE": `^\d{5}$`,
- "FO": `^\d{3}$`,
- "GE": `^\d{4}$`,
- "GR": `^\d{3}[ ]?\d{2}$`,
- "GL": `^39\d{2}$`,
- "GT": `^\d{5}$`,
- "HT": `^\d{4}$`,
- "HN": `^(?:\d{5})?$`,
- "HU": `^\d{4}$`,
- "IS": `^\d{3}$`,
- "IN": `^\d{6}$`,
- "ID": `^\d{5}$`,
- "IL": `^\d{5}$`,
- "JO": `^\d{5}$`,
- "KZ": `^\d{6}$`,
- "KE": `^\d{5}$`,
- "KW": `^\d{5}$`,
- "LA": `^\d{5}$`,
- "LV": `^\d{4}$`,
- "LB": `^(\d{4}([ ]?\d{4})?)?$`,
- "LI": `^(948[5-9])|(949[0-7])$`,
- "LT": `^\d{5}$`,
- "LU": `^\d{4}$`,
- "MK": `^\d{4}$`,
- "MY": `^\d{5}$`,
- "MV": `^\d{5}$`,
- "MT": `^[A-Z]{3}[ ]?\d{2,4}$`,
- "MU": `^(\d{3}[A-Z]{2}\d{3})?$`,
- "MX": `^\d{5}$`,
- "MD": `^\d{4}$`,
- "MC": `^980\d{2}$`,
- "MA": `^\d{5}$`,
- "NP": `^\d{5}$`,
- "NZ": `^\d{4}$`,
- "NI": `^((\d{4}-)?\d{3}-\d{3}(-\d{1})?)?$`,
- "NG": `^(\d{6})?$`,
- "OM": `^(PC )?\d{3}$`,
- "PK": `^\d{5}$`,
- "PY": `^\d{4}$`,
- "PH": `^\d{4}$`,
- "PL": `^\d{2}-\d{3}$`,
- "PR": `^00[679]\d{2}([ \-]\d{4})?$`,
- "RO": `^\d{6}$`,
- "RU": `^\d{6}$`,
- "SM": `^4789\d$`,
- "SA": `^\d{5}$`,
- "SN": `^\d{5}$`,
- "SK": `^\d{3}[ ]?\d{2}$`,
- "SI": `^\d{4}$`,
- "ZA": `^\d{4}$`,
- "LK": `^\d{5}$`,
- "TJ": `^\d{6}$`,
- "TH": `^\d{5}$`,
- "TN": `^\d{4}$`,
- "TR": `^\d{5}$`,
- "TM": `^\d{6}$`,
- "UA": `^\d{5}$`,
- "UY": `^\d{5}$`,
- "UZ": `^\d{6}$`,
- "VA": `^00120$`,
- "VE": `^\d{4}$`,
- "ZM": `^\d{5}$`,
- "AS": `^96799$`,
- "CC": `^6799$`,
- "CK": `^\d{4}$`,
- "RS": `^\d{6}$`,
- "ME": `^8\d{4}$`,
- "CS": `^\d{5}$`,
- "YU": `^\d{5}$`,
- "CX": `^6798$`,
- "ET": `^\d{4}$`,
- "FK": `^FIQQ 1ZZ$`,
- "NF": `^2899$`,
- "FM": `^(9694[1-4])([ \-]\d{4})?$`,
- "GF": `^9[78]3\d{2}$`,
- "GN": `^\d{3}$`,
- "GP": `^9[78][01]\d{2}$`,
- "GS": `^SIQQ 1ZZ$`,
- "GU": `^969[123]\d([ \-]\d{4})?$`,
- "GW": `^\d{4}$`,
- "HM": `^\d{4}$`,
- "IQ": `^\d{5}$`,
- "KG": `^\d{6}$`,
- "LR": `^\d{4}$`,
- "LS": `^\d{3}$`,
- "MG": `^\d{3}$`,
- "MH": `^969[67]\d([ \-]\d{4})?$`,
- "MN": `^\d{6}$`,
- "MP": `^9695[012]([ \-]\d{4})?$`,
- "MQ": `^9[78]2\d{2}$`,
- "NC": `^988\d{2}$`,
- "NE": `^\d{4}$`,
- "VI": `^008(([0-4]\d)|(5[01]))([ \-]\d{4})?$`,
- "VN": `^[0-9]{1,6}$`,
- "PF": `^987\d{2}$`,
- "PG": `^\d{3}$`,
- "PM": `^9[78]5\d{2}$`,
- "PN": `^PCRN 1ZZ$`,
- "PW": `^96940$`,
- "RE": `^9[78]4\d{2}$`,
- "SH": `^(ASCN|STHL) 1ZZ$`,
- "SJ": `^\d{4}$`,
- "SO": `^\d{5}$`,
- "SZ": `^[HLMS]\d{3}$`,
- "TC": `^TKCA 1ZZ$`,
- "WF": `^986\d{2}$`,
- "XK": `^\d{5}$`,
- "YT": `^976\d{2}$`,
-}
-
-var postCodeRegexDict = map[string]*regexp.Regexp{}
-
-func init() {
- for countryCode, pattern := range postCodePatternDict {
- postCodeRegexDict[countryCode] = regexp.MustCompile(pattern)
- }
-}
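These compiled patterns back the postcode_iso3166_alpha2 and postcode_iso3166_alpha2_field tags; a minimal usage sketch, assuming standard v10 tag behaviour (the Address type is illustrative).

	package main

	import (
		"fmt"

		"github.com/go-playground/validator/v10"
	)

	type Address struct {
		Country string `validate:"iso3166_1_alpha2"`
		// Fixed country: always check against the GB pattern above.
		UKPost string `validate:"postcode_iso3166_alpha2=GB"`
		// Dynamic country: look up the pattern keyed by the Country field's value.
		Postcode string `validate:"postcode_iso3166_alpha2_field=Country"`
	}

	func main() {
		validate := validator.New()
		fmt.Println(validate.Struct(Address{Country: "DE", UKPost: "SW1A 1AA", Postcode: "10115"})) // <nil>
		fmt.Println(validate.Struct(Address{Country: "US", UKPost: "12345", Postcode: "ABCDE"}))    // postcode errors
	}
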
diff --git a/vendor/github.com/go-playground/validator/v10/regexes.go b/vendor/github.com/go-playground/validator/v10/regexes.go
deleted file mode 100644
index ba450b3d05..0000000000
--- a/vendor/github.com/go-playground/validator/v10/regexes.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package validator
-
-import "regexp"
-
-const (
- alphaRegexString = "^[a-zA-Z]+$"
- alphaNumericRegexString = "^[a-zA-Z0-9]+$"
- alphaUnicodeRegexString = "^[\\p{L}]+$"
- alphaUnicodeNumericRegexString = "^[\\p{L}\\p{N}]+$"
- numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$"
- numberRegexString = "^[0-9]+$"
- hexadecimalRegexString = "^(0[xX])?[0-9a-fA-F]+$"
- hexColorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{4}|[0-9a-fA-F]{6}|[0-9a-fA-F]{8})$"
- rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$"
- rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
- hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$"
- hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
- emailRegexString = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
- e164RegexString = "^\\+[1-9]?[0-9]{7,14}$"
- base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
- base64URLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2}==|[A-Za-z0-9-_]{3}=|[A-Za-z0-9-_]{4})$"
- base64RawURLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2,4})$"
- iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$"
- iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$"
- uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
- uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
- uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
- uUIDRegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
- uUID3RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-3[0-9a-fA-F]{3}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
- uUID4RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$"
- uUID5RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-5[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$"
- uUIDRFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
- uLIDRegexString = "^[A-HJKMNP-TV-Z0-9]{26}$"
- md4RegexString = "^[0-9a-f]{32}$"
- md5RegexString = "^[0-9a-f]{32}$"
- sha256RegexString = "^[0-9a-f]{64}$"
- sha384RegexString = "^[0-9a-f]{96}$"
- sha512RegexString = "^[0-9a-f]{128}$"
- ripemd128RegexString = "^[0-9a-f]{32}$"
- ripemd160RegexString = "^[0-9a-f]{40}$"
- tiger128RegexString = "^[0-9a-f]{32}$"
- tiger160RegexString = "^[0-9a-f]{40}$"
- tiger192RegexString = "^[0-9a-f]{48}$"
- aSCIIRegexString = "^[\x00-\x7F]*$"
- printableASCIIRegexString = "^[\x20-\x7E]*$"
- multibyteRegexString = "[^\x00-\x7F]"
- dataURIRegexString = `^data:((?:\w+\/(?:([^;]|;[^;]).)+)?)`
- latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
- longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
- sSNRegexString = `^[0-9]{3}[ -]?(0[1-9]|[1-9][0-9])[ -]?([1-9][0-9]{3}|[0-9][1-9][0-9]{2}|[0-9]{2}[1-9][0-9]|[0-9]{3}[1-9])$`
- hostnameRegexStringRFC952 = `^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952
- hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62}){1}(\.[a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123
- fqdnRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})(\.[a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})*?(\.[a-zA-Z]{1}[a-zA-Z0-9]{0,62})\.?$` // same as hostnameRegexStringRFC1123 but must contain a non numerical TLD (possibly ending with '.')
- btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address
- btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
- btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
- ethAddressRegexString = `^0x[0-9a-fA-F]{40}$`
- ethAddressUpperRegexString = `^0x[0-9A-F]{40}$`
- ethAddressLowerRegexString = `^0x[0-9a-f]{40}$`
- uRLEncodedRegexString = `^(?:[^%]|%[0-9A-Fa-f]{2})*$`
- hTMLEncodedRegexString = `[x]?([0-9a-fA-F]{2})|(>)|(<)|(")|(&)+[;]?`
- hTMLRegexString = `<[/]?([a-zA-Z]+).*?>`
- jWTRegexString = "^[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]*$"
- splitParamsRegexString = `'[^']*'|\S+`
- bicRegexString = `^[A-Za-z]{6}[A-Za-z0-9]{2}([A-Za-z0-9]{3})?$`
- semverRegexString = `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$` // numbered capture groups https://semver.org/
- dnsRegexStringRFC1035Label = "^[a-z]([-a-z0-9]*[a-z0-9]){0,62}$"
- cveRegexString = `^CVE-(1999|2\d{3})-(0[^0]\d{2}|0\d[^0]\d{1}|0\d{2}[^0]|[1-9]{1}\d{3,})$` // CVE Format Id https://cve.mitre.org/cve/identifiers/syntaxchange.html
- mongodbRegexString = "^[a-f\\d]{24}$"
- cronRegexString = `(@(annually|yearly|monthly|weekly|daily|hourly|reboot))|(@every (\d+(ns|us|µs|ms|s|m|h))+)|((((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|\*) ?){5,7})`
-)
-
-var (
- alphaRegex = regexp.MustCompile(alphaRegexString)
- alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString)
- alphaUnicodeRegex = regexp.MustCompile(alphaUnicodeRegexString)
- alphaUnicodeNumericRegex = regexp.MustCompile(alphaUnicodeNumericRegexString)
- numericRegex = regexp.MustCompile(numericRegexString)
- numberRegex = regexp.MustCompile(numberRegexString)
- hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString)
- hexColorRegex = regexp.MustCompile(hexColorRegexString)
- rgbRegex = regexp.MustCompile(rgbRegexString)
- rgbaRegex = regexp.MustCompile(rgbaRegexString)
- hslRegex = regexp.MustCompile(hslRegexString)
- hslaRegex = regexp.MustCompile(hslaRegexString)
- e164Regex = regexp.MustCompile(e164RegexString)
- emailRegex = regexp.MustCompile(emailRegexString)
- base64Regex = regexp.MustCompile(base64RegexString)
- base64URLRegex = regexp.MustCompile(base64URLRegexString)
- base64RawURLRegex = regexp.MustCompile(base64RawURLRegexString)
- iSBN10Regex = regexp.MustCompile(iSBN10RegexString)
- iSBN13Regex = regexp.MustCompile(iSBN13RegexString)
- uUID3Regex = regexp.MustCompile(uUID3RegexString)
- uUID4Regex = regexp.MustCompile(uUID4RegexString)
- uUID5Regex = regexp.MustCompile(uUID5RegexString)
- uUIDRegex = regexp.MustCompile(uUIDRegexString)
- uUID3RFC4122Regex = regexp.MustCompile(uUID3RFC4122RegexString)
- uUID4RFC4122Regex = regexp.MustCompile(uUID4RFC4122RegexString)
- uUID5RFC4122Regex = regexp.MustCompile(uUID5RFC4122RegexString)
- uUIDRFC4122Regex = regexp.MustCompile(uUIDRFC4122RegexString)
- uLIDRegex = regexp.MustCompile(uLIDRegexString)
- md4Regex = regexp.MustCompile(md4RegexString)
- md5Regex = regexp.MustCompile(md5RegexString)
- sha256Regex = regexp.MustCompile(sha256RegexString)
- sha384Regex = regexp.MustCompile(sha384RegexString)
- sha512Regex = regexp.MustCompile(sha512RegexString)
- ripemd128Regex = regexp.MustCompile(ripemd128RegexString)
- ripemd160Regex = regexp.MustCompile(ripemd160RegexString)
- tiger128Regex = regexp.MustCompile(tiger128RegexString)
- tiger160Regex = regexp.MustCompile(tiger160RegexString)
- tiger192Regex = regexp.MustCompile(tiger192RegexString)
- aSCIIRegex = regexp.MustCompile(aSCIIRegexString)
- printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString)
- multibyteRegex = regexp.MustCompile(multibyteRegexString)
- dataURIRegex = regexp.MustCompile(dataURIRegexString)
- latitudeRegex = regexp.MustCompile(latitudeRegexString)
- longitudeRegex = regexp.MustCompile(longitudeRegexString)
- sSNRegex = regexp.MustCompile(sSNRegexString)
- hostnameRegexRFC952 = regexp.MustCompile(hostnameRegexStringRFC952)
- hostnameRegexRFC1123 = regexp.MustCompile(hostnameRegexStringRFC1123)
- fqdnRegexRFC1123 = regexp.MustCompile(fqdnRegexStringRFC1123)
- btcAddressRegex = regexp.MustCompile(btcAddressRegexString)
- btcUpperAddressRegexBech32 = regexp.MustCompile(btcAddressUpperRegexStringBech32)
- btcLowerAddressRegexBech32 = regexp.MustCompile(btcAddressLowerRegexStringBech32)
- ethAddressRegex = regexp.MustCompile(ethAddressRegexString)
- uRLEncodedRegex = regexp.MustCompile(uRLEncodedRegexString)
- hTMLEncodedRegex = regexp.MustCompile(hTMLEncodedRegexString)
- hTMLRegex = regexp.MustCompile(hTMLRegexString)
- jWTRegex = regexp.MustCompile(jWTRegexString)
- splitParamsRegex = regexp.MustCompile(splitParamsRegexString)
- bicRegex = regexp.MustCompile(bicRegexString)
- semverRegex = regexp.MustCompile(semverRegexString)
- dnsRegexRFC1035Label = regexp.MustCompile(dnsRegexStringRFC1035Label)
- cveRegex = regexp.MustCompile(cveRegexString)
- mongodbRegex = regexp.MustCompile(mongodbRegexString)
- cronRegex = regexp.MustCompile(cronRegexString)
-)
diff --git a/vendor/github.com/go-playground/validator/v10/struct_level.go b/vendor/github.com/go-playground/validator/v10/struct_level.go
deleted file mode 100644
index 271328f710..0000000000
--- a/vendor/github.com/go-playground/validator/v10/struct_level.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package validator
-
-import (
- "context"
- "reflect"
-)
-
-// StructLevelFunc accepts all values needed for struct level validation
-type StructLevelFunc func(sl StructLevel)
-
-// StructLevelFuncCtx accepts all values needed for struct level validation
-// but also allows passing of contextual validation information via context.Context.
-type StructLevelFuncCtx func(ctx context.Context, sl StructLevel)
-
-// wrapStructLevelFunc wraps a normal StructLevelFunc and makes it compatible with StructLevelFuncCtx
-func wrapStructLevelFunc(fn StructLevelFunc) StructLevelFuncCtx {
- return func(ctx context.Context, sl StructLevel) {
- fn(sl)
- }
-}
-
-// StructLevel contains all the information and helper functions
-// to validate a struct
-type StructLevel interface {
-
- // Validator returns the main validation object, in case one wants to call validations internally.
-	// This is so you don't have to use anonymous functions to get access to the validate
- // instance.
- Validator() *Validate
-
- // Top returns the top level struct, if any
- Top() reflect.Value
-
- // Parent returns the current fields parent struct, if any
- Parent() reflect.Value
-
- // Current returns the current struct.
- Current() reflect.Value
-
- // ExtractType gets the actual underlying type of field value.
- // It will dive into pointers, customTypes and return you the
- // underlying value and its kind.
- ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool)
-
- // ReportError reports an error just by passing the field and tag information
- //
- // NOTES:
- //
- // fieldName and altName get appended to the existing namespace that
- // validator is on. e.g. pass 'FirstName' or 'Names[0]' depending
- // on the nesting
- //
- // tag can be an existing validation tag or just something you make up
-	// and process on the flip side; it's up to you.
- ReportError(field interface{}, fieldName, structFieldName string, tag, param string)
-
- // ReportValidationErrors reports an error just by passing ValidationErrors
- //
- // NOTES:
- //
- // relativeNamespace and relativeActualNamespace get appended to the
- // existing namespace that validator is on.
- // e.g. pass 'User.FirstName' or 'Users[0].FirstName' depending
- // on the nesting. most of the time they will be blank, unless you validate
-	// at a level lower than the current field depth
- ReportValidationErrors(relativeNamespace, relativeActualNamespace string, errs ValidationErrors)
-}
-
-var _ StructLevel = new(validate)
-
-// Top returns the top level struct
-//
-// NOTE: this can be the same as the current struct being validated
-// if it is not a nested struct.
-//
-// this is only called when within Struct and Field Level validation and
-// should not be relied upon for an accurate value otherwise.
-func (v *validate) Top() reflect.Value {
- return v.top
-}
-
-// Parent returns the current structs parent
-//
-// NOTE: this can be the same as the current struct being validated
-// if it is not a nested struct.
-//
-// this is only called when within Struct and Field Level validation and
-// should not be relied upon for an accurate value otherwise.
-func (v *validate) Parent() reflect.Value {
- return v.slflParent
-}
-
-// Current returns the current struct.
-func (v *validate) Current() reflect.Value {
- return v.slCurrent
-}
-
-// Validator returns the main validation object, in case one wants to call validations internally.
-func (v *validate) Validator() *Validate {
- return v.v
-}
-
-// ExtractType gets the actual underlying type of field value.
-func (v *validate) ExtractType(field reflect.Value) (reflect.Value, reflect.Kind, bool) {
- return v.extractTypeInternal(field, false)
-}
-
-// ReportError reports an error just by passing the field and tag information
-func (v *validate) ReportError(field interface{}, fieldName, structFieldName, tag, param string) {
-
- fv, kind, _ := v.extractTypeInternal(reflect.ValueOf(field), false)
-
- if len(structFieldName) == 0 {
- structFieldName = fieldName
- }
-
- v.str1 = string(append(v.ns, fieldName...))
-
- if v.v.hasTagNameFunc || fieldName != structFieldName {
- v.str2 = string(append(v.actualNs, structFieldName...))
- } else {
- v.str2 = v.str1
- }
-
- if kind == reflect.Invalid {
-
- v.errs = append(v.errs,
- &fieldError{
- v: v.v,
- tag: tag,
- actualTag: tag,
- ns: v.str1,
- structNs: v.str2,
- fieldLen: uint8(len(fieldName)),
- structfieldLen: uint8(len(structFieldName)),
- param: param,
- kind: kind,
- },
- )
- return
- }
-
- v.errs = append(v.errs,
- &fieldError{
- v: v.v,
- tag: tag,
- actualTag: tag,
- ns: v.str1,
- structNs: v.str2,
- fieldLen: uint8(len(fieldName)),
- structfieldLen: uint8(len(structFieldName)),
- value: fv.Interface(),
- param: param,
- kind: kind,
- typ: fv.Type(),
- },
- )
-}
-
-// ReportValidationErrors reports ValidationErrors obtained from running validations within the Struct Level validation.
-//
-// NOTE: this function prepends the current namespace to the relative ones.
-func (v *validate) ReportValidationErrors(relativeNamespace, relativeStructNamespace string, errs ValidationErrors) {
-
- var err *fieldError
-
- for i := 0; i < len(errs); i++ {
-
- err = errs[i].(*fieldError)
- err.ns = string(append(append(v.ns, relativeNamespace...), err.ns...))
- err.structNs = string(append(append(v.actualNs, relativeStructNamespace...), err.structNs...))
-
- v.errs = append(v.errs, err)
- }
-}
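A minimal sketch of how the StructLevel API above is typically wired up via RegisterStructValidation; the User type and the fnameorlname tag are illustrative assumptions.

	package main

	import (
		"fmt"

		"github.com/go-playground/validator/v10"
	)

	type User struct {
		FirstName string
		LastName  string
		Email     string `validate:"omitempty,email"`
	}

	// userStructLevel reports an error unless at least one name is set, using
	// the ReportError helper shown above.
	func userStructLevel(sl validator.StructLevel) {
		user := sl.Current().Interface().(User)
		if len(user.FirstName) == 0 && len(user.LastName) == 0 {
			sl.ReportError(user.FirstName, "FirstName", "FirstName", "fnameorlname", "")
			sl.ReportError(user.LastName, "LastName", "LastName", "fnameorlname", "")
		}
	}

	func main() {
		validate := validator.New()
		validate.RegisterStructValidation(userStructLevel, User{})

		fmt.Println(validate.Struct(User{Email: "a@b.co"}))  // fnameorlname errors
		fmt.Println(validate.Struct(User{FirstName: "Ada"})) // <nil>
	}
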
diff --git a/vendor/github.com/go-playground/validator/v10/translations.go b/vendor/github.com/go-playground/validator/v10/translations.go
deleted file mode 100644
index 4d9d75c13a..0000000000
--- a/vendor/github.com/go-playground/validator/v10/translations.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package validator
-
-import ut "github.com/go-playground/universal-translator"
-
-// TranslationFunc is the function type used to register or override
-// custom translations
-type TranslationFunc func(ut ut.Translator, fe FieldError) string
-
-// RegisterTranslationsFunc allows for registering of translations
-// for a 'ut.Translator' for use within the 'TranslationFunc'
-type RegisterTranslationsFunc func(ut ut.Translator) error
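A hedged sketch of how these two function types are typically used together with RegisterTranslation and ValidationErrors.Translate, assuming the go-playground/locales and universal-translator packages; the Login type and message text are illustrative.

	package main

	import (
		"fmt"

		"github.com/go-playground/locales/en"
		ut "github.com/go-playground/universal-translator"
		"github.com/go-playground/validator/v10"
	)

	type Login struct {
		Email string `validate:"required"`
	}

	func main() {
		enLocale := en.New()
		uni := ut.New(enLocale, enLocale)
		trans, _ := uni.GetTranslator("en")

		validate := validator.New()

		// The RegisterTranslationsFunc seeds the translator; the TranslationFunc
		// renders a FieldError using that translation.
		_ = validate.RegisterTranslation("required", trans,
			func(ut ut.Translator) error {
				return ut.Add("required", "{0} is a required field", true)
			},
			func(ut ut.Translator, fe validator.FieldError) string {
				t, _ := ut.T("required", fe.Field())
				return t
			},
		)

		err := validate.Struct(Login{})
		for _, fe := range err.(validator.ValidationErrors) {
			fmt.Println(fe.Translate(trans)) // "Email is a required field"
		}
	}
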
diff --git a/vendor/github.com/go-playground/validator/v10/util.go b/vendor/github.com/go-playground/validator/v10/util.go
deleted file mode 100644
index 3925cfe1cd..0000000000
--- a/vendor/github.com/go-playground/validator/v10/util.go
+++ /dev/null
@@ -1,288 +0,0 @@
-package validator
-
-import (
- "reflect"
- "strconv"
- "strings"
- "time"
-)
-
-// extractTypeInternal gets the actual underlying type of field value.
-// It will dive into pointers, customTypes and return you the
-// underlying value and its kind.
-func (v *validate) extractTypeInternal(current reflect.Value, nullable bool) (reflect.Value, reflect.Kind, bool) {
-
-BEGIN:
- switch current.Kind() {
- case reflect.Ptr:
-
- nullable = true
-
- if current.IsNil() {
- return current, reflect.Ptr, nullable
- }
-
- current = current.Elem()
- goto BEGIN
-
- case reflect.Interface:
-
- nullable = true
-
- if current.IsNil() {
- return current, reflect.Interface, nullable
- }
-
- current = current.Elem()
- goto BEGIN
-
- case reflect.Invalid:
- return current, reflect.Invalid, nullable
-
- default:
-
- if v.v.hasCustomFuncs {
-
- if fn, ok := v.v.customFuncs[current.Type()]; ok {
- current = reflect.ValueOf(fn(current))
- goto BEGIN
- }
- }
-
- return current, current.Kind(), nullable
- }
-}
-
-// getStructFieldOKInternal traverses a struct to retrieve a specific field denoted by the provided namespace and
-// returns the field, field kind and whether it was successful in retrieving the field at all.
-//
-// NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field
-// could not be retrieved because it didn't exist.
-func (v *validate) getStructFieldOKInternal(val reflect.Value, namespace string) (current reflect.Value, kind reflect.Kind, nullable bool, found bool) {
-
-BEGIN:
- current, kind, nullable = v.ExtractType(val)
- if kind == reflect.Invalid {
- return
- }
-
- if namespace == "" {
- found = true
- return
- }
-
- switch kind {
-
- case reflect.Ptr, reflect.Interface:
- return
-
- case reflect.Struct:
-
- typ := current.Type()
- fld := namespace
- var ns string
-
- if !typ.ConvertibleTo(timeType) {
-
- idx := strings.Index(namespace, namespaceSeparator)
-
- if idx != -1 {
- fld = namespace[:idx]
- ns = namespace[idx+1:]
- } else {
- ns = ""
- }
-
- bracketIdx := strings.Index(fld, leftBracket)
- if bracketIdx != -1 {
- fld = fld[:bracketIdx]
-
- ns = namespace[bracketIdx:]
- }
-
- val = current.FieldByName(fld)
- namespace = ns
- goto BEGIN
- }
-
- case reflect.Array, reflect.Slice:
- idx := strings.Index(namespace, leftBracket)
- idx2 := strings.Index(namespace, rightBracket)
-
- arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2])
-
- if arrIdx >= current.Len() {
- return
- }
-
- startIdx := idx2 + 1
-
- if startIdx < len(namespace) {
- if namespace[startIdx:startIdx+1] == namespaceSeparator {
- startIdx++
- }
- }
-
- val = current.Index(arrIdx)
- namespace = namespace[startIdx:]
- goto BEGIN
-
- case reflect.Map:
- idx := strings.Index(namespace, leftBracket) + 1
- idx2 := strings.Index(namespace, rightBracket)
-
- endIdx := idx2
-
- if endIdx+1 < len(namespace) {
- if namespace[endIdx+1:endIdx+2] == namespaceSeparator {
- endIdx++
- }
- }
-
- key := namespace[idx:idx2]
-
- switch current.Type().Key().Kind() {
- case reflect.Int:
- i, _ := strconv.Atoi(key)
- val = current.MapIndex(reflect.ValueOf(i))
- namespace = namespace[endIdx+1:]
-
- case reflect.Int8:
- i, _ := strconv.ParseInt(key, 10, 8)
- val = current.MapIndex(reflect.ValueOf(int8(i)))
- namespace = namespace[endIdx+1:]
-
- case reflect.Int16:
- i, _ := strconv.ParseInt(key, 10, 16)
- val = current.MapIndex(reflect.ValueOf(int16(i)))
- namespace = namespace[endIdx+1:]
-
- case reflect.Int32:
- i, _ := strconv.ParseInt(key, 10, 32)
- val = current.MapIndex(reflect.ValueOf(int32(i)))
- namespace = namespace[endIdx+1:]
-
- case reflect.Int64:
- i, _ := strconv.ParseInt(key, 10, 64)
- val = current.MapIndex(reflect.ValueOf(i))
- namespace = namespace[endIdx+1:]
-
- case reflect.Uint:
- i, _ := strconv.ParseUint(key, 10, 0)
- val = current.MapIndex(reflect.ValueOf(uint(i)))
- namespace = namespace[endIdx+1:]
-
- case reflect.Uint8:
- i, _ := strconv.ParseUint(key, 10, 8)
- val = current.MapIndex(reflect.ValueOf(uint8(i)))
- namespace = namespace[endIdx+1:]
-
- case reflect.Uint16:
- i, _ := strconv.ParseUint(key, 10, 16)
- val = current.MapIndex(reflect.ValueOf(uint16(i)))
- namespace = namespace[endIdx+1:]
-
- case reflect.Uint32:
- i, _ := strconv.ParseUint(key, 10, 32)
- val = current.MapIndex(reflect.ValueOf(uint32(i)))
- namespace = namespace[endIdx+1:]
-
- case reflect.Uint64:
- i, _ := strconv.ParseUint(key, 10, 64)
- val = current.MapIndex(reflect.ValueOf(i))
- namespace = namespace[endIdx+1:]
-
- case reflect.Float32:
- f, _ := strconv.ParseFloat(key, 32)
- val = current.MapIndex(reflect.ValueOf(float32(f)))
- namespace = namespace[endIdx+1:]
-
- case reflect.Float64:
- f, _ := strconv.ParseFloat(key, 64)
- val = current.MapIndex(reflect.ValueOf(f))
- namespace = namespace[endIdx+1:]
-
- case reflect.Bool:
- b, _ := strconv.ParseBool(key)
- val = current.MapIndex(reflect.ValueOf(b))
- namespace = namespace[endIdx+1:]
-
- // reflect.Type = string
- default:
- val = current.MapIndex(reflect.ValueOf(key))
- namespace = namespace[endIdx+1:]
- }
-
- goto BEGIN
- }
-
-	// if we got here there was more namespace left, but we cannot go any deeper
- panic("Invalid field namespace")
-}
-
-// asInt returns the parameter as an int64
-// or panics if it can't convert
-func asInt(param string) int64 {
- i, err := strconv.ParseInt(param, 0, 64)
- panicIf(err)
-
- return i
-}
-
-// asIntFromTimeDuration parses param as time.Duration and returns it as int64
-// or panics on error.
-func asIntFromTimeDuration(param string) int64 {
- d, err := time.ParseDuration(param)
- if err != nil {
- // attempt parsing as an integer assuming nanosecond precision
- return asInt(param)
- }
- return int64(d)
-}
-
-// asIntFromType calls the proper function to parse param as int64,
-// given a field's Type t.
-func asIntFromType(t reflect.Type, param string) int64 {
- switch t {
- case timeDurationType:
- return asIntFromTimeDuration(param)
- default:
- return asInt(param)
- }
-}
-
-// asUint returns the parameter as a uint64
-// or panics if it can't convert
-func asUint(param string) uint64 {
-
- i, err := strconv.ParseUint(param, 0, 64)
- panicIf(err)
-
- return i
-}
-
-// asFloat returns the parameter as a float64
-// or panics if it can't convert
-func asFloat(param string) float64 {
-
- i, err := strconv.ParseFloat(param, 64)
- panicIf(err)
-
- return i
-}
-
-// asBool returns the parameter as a bool
-// or panics if it can't convert
-func asBool(param string) bool {
-
- i, err := strconv.ParseBool(param)
- panicIf(err)
-
- return i
-}
-
-func panicIf(err error) {
- if err != nil {
- panic(err.Error())
- }
-}
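The time.Duration branch above (asIntFromTimeDuration) means comparison parameters can be written as durations; a minimal sketch, assuming standard v10 behaviour, with an illustrative Job type.

	package main

	import (
		"fmt"
		"time"

		"github.com/go-playground/validator/v10"
	)

	type Job struct {
		// Because the field is a time.Duration, the gte/lte parameters are parsed
		// with time.ParseDuration rather than as plain integers.
		Timeout time.Duration `validate:"gte=1s,lte=1h"`
	}

	func main() {
		validate := validator.New()
		fmt.Println(validate.Struct(Job{Timeout: 30 * time.Second})) // <nil>
		fmt.Println(validate.Struct(Job{Timeout: 2 * time.Hour}))    // fails lte=1h
	}
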
diff --git a/vendor/github.com/go-playground/validator/v10/validator.go b/vendor/github.com/go-playground/validator/v10/validator.go
deleted file mode 100644
index 6f6d53ada7..0000000000
--- a/vendor/github.com/go-playground/validator/v10/validator.go
+++ /dev/null
@@ -1,485 +0,0 @@
-package validator
-
-import (
- "context"
- "fmt"
- "reflect"
- "strconv"
-)
-
-// per validate construct
-type validate struct {
- v *Validate
- top reflect.Value
- ns []byte
- actualNs []byte
- errs ValidationErrors
- includeExclude map[string]struct{} // reset only if StructPartial or StructExcept are called, no need otherwise
- ffn FilterFunc
- slflParent reflect.Value // StructLevel & FieldLevel
- slCurrent reflect.Value // StructLevel & FieldLevel
- flField reflect.Value // StructLevel & FieldLevel
- cf *cField // StructLevel & FieldLevel
- ct *cTag // StructLevel & FieldLevel
- misc []byte // misc reusable
- str1 string // misc reusable
- str2 string // misc reusable
- fldIsPointer bool // StructLevel & FieldLevel
- isPartial bool
- hasExcludes bool
-}
-
-// parent and current will be the same on the first run of validateStruct
-func (v *validate) validateStruct(ctx context.Context, parent reflect.Value, current reflect.Value, typ reflect.Type, ns []byte, structNs []byte, ct *cTag) {
-
- cs, ok := v.v.structCache.Get(typ)
- if !ok {
- cs = v.v.extractStructCache(current, typ.Name())
- }
-
- if len(ns) == 0 && len(cs.name) != 0 {
-
- ns = append(ns, cs.name...)
- ns = append(ns, '.')
-
- structNs = append(structNs, cs.name...)
- structNs = append(structNs, '.')
- }
-
- // ct is nil on top level struct, and structs as fields that have no tag info
- // so if nil or if not nil and the structonly tag isn't present
- if ct == nil || ct.typeof != typeStructOnly {
-
- var f *cField
-
- for i := 0; i < len(cs.fields); i++ {
-
- f = cs.fields[i]
-
- if v.isPartial {
-
- if v.ffn != nil {
- // used with StructFiltered
- if v.ffn(append(structNs, f.name...)) {
- continue
- }
-
- } else {
- // used with StructPartial & StructExcept
- _, ok = v.includeExclude[string(append(structNs, f.name...))]
-
- if (ok && v.hasExcludes) || (!ok && !v.hasExcludes) {
- continue
- }
- }
- }
-
- v.traverseField(ctx, current, current.Field(f.idx), ns, structNs, f, f.cTags)
- }
- }
-
- // check if any struct level validations, after all field validations already checked.
- // first iteration will have no info about nostructlevel tag, and is checked prior to
- // calling the next iteration of validateStruct called from traverseField.
- if cs.fn != nil {
-
- v.slflParent = parent
- v.slCurrent = current
- v.ns = ns
- v.actualNs = structNs
-
- cs.fn(ctx, v)
- }
-}
-
-// traverseField validates any field, be it a struct or single field, ensures its validity and passes it along to be validated via its tag options
-func (v *validate) traverseField(ctx context.Context, parent reflect.Value, current reflect.Value, ns []byte, structNs []byte, cf *cField, ct *cTag) {
- var typ reflect.Type
- var kind reflect.Kind
-
- current, kind, v.fldIsPointer = v.extractTypeInternal(current, false)
-
- switch kind {
- case reflect.Ptr, reflect.Interface, reflect.Invalid:
-
- if ct == nil {
- return
- }
-
- if ct.typeof == typeOmitEmpty || ct.typeof == typeIsDefault {
- return
- }
-
- if ct.hasTag {
- if kind == reflect.Invalid {
- v.str1 = string(append(ns, cf.altName...))
- if v.v.hasTagNameFunc {
- v.str2 = string(append(structNs, cf.name...))
- } else {
- v.str2 = v.str1
- }
- v.errs = append(v.errs,
- &fieldError{
- v: v.v,
- tag: ct.aliasTag,
- actualTag: ct.tag,
- ns: v.str1,
- structNs: v.str2,
- fieldLen: uint8(len(cf.altName)),
- structfieldLen: uint8(len(cf.name)),
- param: ct.param,
- kind: kind,
- },
- )
- return
- }
-
- v.str1 = string(append(ns, cf.altName...))
- if v.v.hasTagNameFunc {
- v.str2 = string(append(structNs, cf.name...))
- } else {
- v.str2 = v.str1
- }
- if !ct.runValidationWhenNil {
- v.errs = append(v.errs,
- &fieldError{
- v: v.v,
- tag: ct.aliasTag,
- actualTag: ct.tag,
- ns: v.str1,
- structNs: v.str2,
- fieldLen: uint8(len(cf.altName)),
- structfieldLen: uint8(len(cf.name)),
- value: current.Interface(),
- param: ct.param,
- kind: kind,
- typ: current.Type(),
- },
- )
- return
- }
- }
-
- case reflect.Struct:
-
- typ = current.Type()
-
- if !typ.ConvertibleTo(timeType) {
-
- if ct != nil {
-
- if ct.typeof == typeStructOnly {
- goto CONTINUE
- } else if ct.typeof == typeIsDefault {
- // set Field Level fields
- v.slflParent = parent
- v.flField = current
- v.cf = cf
- v.ct = ct
-
- if !ct.fn(ctx, v) {
- v.str1 = string(append(ns, cf.altName...))
-
- if v.v.hasTagNameFunc {
- v.str2 = string(append(structNs, cf.name...))
- } else {
- v.str2 = v.str1
- }
-
- v.errs = append(v.errs,
- &fieldError{
- v: v.v,
- tag: ct.aliasTag,
- actualTag: ct.tag,
- ns: v.str1,
- structNs: v.str2,
- fieldLen: uint8(len(cf.altName)),
- structfieldLen: uint8(len(cf.name)),
- value: current.Interface(),
- param: ct.param,
- kind: kind,
- typ: typ,
- },
- )
- return
- }
- }
-
- ct = ct.next
- }
-
- if ct != nil && ct.typeof == typeNoStructLevel {
- return
- }
-
- CONTINUE:
- // if len == 0 then validating using 'Var' or 'VarWithValue'
- // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm...
- // VarWithField - this allows for validating against each field within the struct against a specific value
- // pretty handy in certain situations
- if len(cf.name) > 0 {
- ns = append(append(ns, cf.altName...), '.')
- structNs = append(append(structNs, cf.name...), '.')
- }
-
- v.validateStruct(ctx, parent, current, typ, ns, structNs, ct)
- return
- }
- }
-
- if ct == nil || !ct.hasTag {
- return
- }
-
- typ = current.Type()
-
-OUTER:
- for {
- if ct == nil {
- return
- }
-
- switch ct.typeof {
-
- case typeOmitEmpty:
-
- // set Field Level fields
- v.slflParent = parent
- v.flField = current
- v.cf = cf
- v.ct = ct
-
- if !hasValue(v) {
- return
- }
-
- ct = ct.next
- continue
-
- case typeEndKeys:
- return
-
- case typeDive:
-
- ct = ct.next
-
- // traverse slice or map here
- // or panic ;)
- switch kind {
- case reflect.Slice, reflect.Array:
-
- var i64 int64
- reusableCF := &cField{}
-
- for i := 0; i < current.Len(); i++ {
-
- i64 = int64(i)
-
- v.misc = append(v.misc[0:0], cf.name...)
- v.misc = append(v.misc, '[')
- v.misc = strconv.AppendInt(v.misc, i64, 10)
- v.misc = append(v.misc, ']')
-
- reusableCF.name = string(v.misc)
-
- if cf.namesEqual {
- reusableCF.altName = reusableCF.name
- } else {
-
- v.misc = append(v.misc[0:0], cf.altName...)
- v.misc = append(v.misc, '[')
- v.misc = strconv.AppendInt(v.misc, i64, 10)
- v.misc = append(v.misc, ']')
-
- reusableCF.altName = string(v.misc)
- }
- v.traverseField(ctx, parent, current.Index(i), ns, structNs, reusableCF, ct)
- }
-
- case reflect.Map:
-
- var pv string
- reusableCF := &cField{}
-
- for _, key := range current.MapKeys() {
-
- pv = fmt.Sprintf("%v", key.Interface())
-
- v.misc = append(v.misc[0:0], cf.name...)
- v.misc = append(v.misc, '[')
- v.misc = append(v.misc, pv...)
- v.misc = append(v.misc, ']')
-
- reusableCF.name = string(v.misc)
-
- if cf.namesEqual {
- reusableCF.altName = reusableCF.name
- } else {
- v.misc = append(v.misc[0:0], cf.altName...)
- v.misc = append(v.misc, '[')
- v.misc = append(v.misc, pv...)
- v.misc = append(v.misc, ']')
-
- reusableCF.altName = string(v.misc)
- }
-
- if ct != nil && ct.typeof == typeKeys && ct.keys != nil {
- v.traverseField(ctx, parent, key, ns, structNs, reusableCF, ct.keys)
- // can be nil when just keys being validated
- if ct.next != nil {
- v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct.next)
- }
- } else {
- v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct)
- }
- }
-
- default:
- // throw error, if not a slice or map then should not have gotten here
- // bad dive tag
- panic("dive error! can't dive on a non slice or map")
- }
-
- return
-
- case typeOr:
-
- v.misc = v.misc[0:0]
-
- for {
-
- // set Field Level fields
- v.slflParent = parent
- v.flField = current
- v.cf = cf
- v.ct = ct
-
- if ct.fn(ctx, v) {
- if ct.isBlockEnd {
- ct = ct.next
- continue OUTER
- }
-
- // drain rest of the 'or' values, then continue or leave
- for {
-
- ct = ct.next
-
- if ct == nil {
- return
- }
-
- if ct.typeof != typeOr {
- continue OUTER
- }
-
- if ct.isBlockEnd {
- ct = ct.next
- continue OUTER
- }
- }
- }
-
- v.misc = append(v.misc, '|')
- v.misc = append(v.misc, ct.tag...)
-
- if ct.hasParam {
- v.misc = append(v.misc, '=')
- v.misc = append(v.misc, ct.param...)
- }
-
- if ct.isBlockEnd || ct.next == nil {
- // if we get here, no valid 'or' value and no more tags
- v.str1 = string(append(ns, cf.altName...))
-
- if v.v.hasTagNameFunc {
- v.str2 = string(append(structNs, cf.name...))
- } else {
- v.str2 = v.str1
- }
-
- if ct.hasAlias {
-
- v.errs = append(v.errs,
- &fieldError{
- v: v.v,
- tag: ct.aliasTag,
- actualTag: ct.actualAliasTag,
- ns: v.str1,
- structNs: v.str2,
- fieldLen: uint8(len(cf.altName)),
- structfieldLen: uint8(len(cf.name)),
- value: current.Interface(),
- param: ct.param,
- kind: kind,
- typ: typ,
- },
- )
-
- } else {
-
- tVal := string(v.misc)[1:]
-
- v.errs = append(v.errs,
- &fieldError{
- v: v.v,
- tag: tVal,
- actualTag: tVal,
- ns: v.str1,
- structNs: v.str2,
- fieldLen: uint8(len(cf.altName)),
- structfieldLen: uint8(len(cf.name)),
- value: current.Interface(),
- param: ct.param,
- kind: kind,
- typ: typ,
- },
- )
- }
-
- return
- }
-
- ct = ct.next
- }
-
- default:
-
- // set Field Level fields
- v.slflParent = parent
- v.flField = current
- v.cf = cf
- v.ct = ct
-
- if !ct.fn(ctx, v) {
- v.str1 = string(append(ns, cf.altName...))
-
- if v.v.hasTagNameFunc {
- v.str2 = string(append(structNs, cf.name...))
- } else {
- v.str2 = v.str1
- }
-
- v.errs = append(v.errs,
- &fieldError{
- v: v.v,
- tag: ct.aliasTag,
- actualTag: ct.tag,
- ns: v.str1,
- structNs: v.str2,
- fieldLen: uint8(len(cf.altName)),
- structfieldLen: uint8(len(cf.name)),
- value: current.Interface(),
- param: ct.param,
- kind: kind,
- typ: typ,
- },
- )
-
- return
- }
- ct = ct.next
- }
- }
-
-}
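A minimal sketch of the dive/keys traversal implemented above, assuming standard v10 tag syntax; the Service type is illustrative.

	package main

	import (
		"fmt"

		"github.com/go-playground/validator/v10"
	)

	type Service struct {
		// dive validates each element; the error namespace gains an index, e.g. Tags[1].
		Tags []string `validate:"max=8,dive,required,alphanum"`

		// keys/endkeys rules apply to map keys, the trailing rules to the values.
		Env map[string]string `validate:"dive,keys,alphanum,endkeys,required"`
	}

	func main() {
		validate := validator.New()

		err := validate.Struct(Service{
			Tags: []string{"api", ""},
			Env:  map[string]string{"HOME!": ""},
		})
		for _, fe := range err.(validator.ValidationErrors) {
			fmt.Println(fe.Namespace(), fe.Tag())
		}
	}
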
diff --git a/vendor/github.com/go-playground/validator/v10/validator_instance.go b/vendor/github.com/go-playground/validator/v10/validator_instance.go
deleted file mode 100644
index d2ee8fe38b..0000000000
--- a/vendor/github.com/go-playground/validator/v10/validator_instance.go
+++ /dev/null
@@ -1,702 +0,0 @@
-package validator
-
-import (
- "context"
- "errors"
- "fmt"
- "reflect"
- "strings"
- "sync"
- "time"
-
- ut "github.com/go-playground/universal-translator"
-)
-
-const (
- defaultTagName = "validate"
- utf8HexComma = "0x2C"
- utf8Pipe = "0x7C"
- tagSeparator = ","
- orSeparator = "|"
- tagKeySeparator = "="
- structOnlyTag = "structonly"
- noStructLevelTag = "nostructlevel"
- omitempty = "omitempty"
- isdefault = "isdefault"
- requiredWithoutAllTag = "required_without_all"
- requiredWithoutTag = "required_without"
- requiredWithTag = "required_with"
- requiredWithAllTag = "required_with_all"
- requiredIfTag = "required_if"
- requiredUnlessTag = "required_unless"
- skipUnlessTag = "skip_unless"
- excludedWithoutAllTag = "excluded_without_all"
- excludedWithoutTag = "excluded_without"
- excludedWithTag = "excluded_with"
- excludedWithAllTag = "excluded_with_all"
- excludedIfTag = "excluded_if"
- excludedUnlessTag = "excluded_unless"
- skipValidationTag = "-"
- diveTag = "dive"
- keysTag = "keys"
- endKeysTag = "endkeys"
- requiredTag = "required"
- namespaceSeparator = "."
- leftBracket = "["
- rightBracket = "]"
- restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}"
- restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
- restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
-)
-
-var (
- timeDurationType = reflect.TypeOf(time.Duration(0))
- timeType = reflect.TypeOf(time.Time{})
-
- defaultCField = &cField{namesEqual: true}
-)
-
-// FilterFunc is the type used to filter fields using
-// StructFiltered(...) function.
-// returning true results in the field being filtered/skipped from
-// validation
-type FilterFunc func(ns []byte) bool
-
-// CustomTypeFunc allows for overriding or adding custom field type handler functions
-// field = field value of the type to return a value to be validated
-// example Valuer from sql driver see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29
-type CustomTypeFunc func(field reflect.Value) interface{}
-
-// TagNameFunc allows for adding of a custom tag name parser
-type TagNameFunc func(field reflect.StructField) string
-
-type internalValidationFuncWrapper struct {
- fn FuncCtx
- runValidatinOnNil bool
-}
-
-// Validate contains the validator settings and cache
-type Validate struct {
- tagName string
- pool *sync.Pool
- hasCustomFuncs bool
- hasTagNameFunc bool
- tagNameFunc TagNameFunc
- structLevelFuncs map[reflect.Type]StructLevelFuncCtx
- customFuncs map[reflect.Type]CustomTypeFunc
- aliases map[string]string
- validations map[string]internalValidationFuncWrapper
- transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc
- rules map[reflect.Type]map[string]string
- tagCache *tagCache
- structCache *structCache
-}
-
-// New returns a new instance of 'validate' with sane defaults.
-// Validate is designed to be thread-safe and used as a singleton instance.
-// It caches information about your struct and validations,
-// in essence only parsing your validation tags once per struct type.
-// Using multiple instances neglects the benefit of caching.
-func New() *Validate {
-
- tc := new(tagCache)
- tc.m.Store(make(map[string]*cTag))
-
- sc := new(structCache)
- sc.m.Store(make(map[reflect.Type]*cStruct))
-
- v := &Validate{
- tagName: defaultTagName,
- aliases: make(map[string]string, len(bakedInAliases)),
- validations: make(map[string]internalValidationFuncWrapper, len(bakedInValidators)),
- tagCache: tc,
- structCache: sc,
- }
-
- // must copy alias validators for separate validations to be used in each validator instance
- for k, val := range bakedInAliases {
- v.RegisterAlias(k, val)
- }
-
- // must copy validators for separate validations to be used in each instance
- for k, val := range bakedInValidators {
-
- switch k {
- // these require that even if the value is nil that the validation should run, omitempty still overrides this behaviour
- case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag,
- excludedIfTag, excludedUnlessTag, excludedWithTag, excludedWithAllTag, excludedWithoutTag, excludedWithoutAllTag,
- skipUnlessTag:
- _ = v.registerValidation(k, wrapFunc(val), true, true)
- default:
- // no need to error check here, baked in will always be valid
- _ = v.registerValidation(k, wrapFunc(val), true, false)
- }
- }
-
- v.pool = &sync.Pool{
- New: func() interface{} {
- return &validate{
- v: v,
- ns: make([]byte, 0, 64),
- actualNs: make([]byte, 0, 64),
- misc: make([]byte, 32),
- }
- },
- }
-
- return v
-}
-
-// SetTagName allows for changing of the default tag name of 'validate'
-func (v *Validate) SetTagName(name string) {
- v.tagName = name
-}
-
-// ValidateMapCtx validates a map using a map of validation rules and allows passing of contextual
-// validation information via context.Context.
-func (v Validate) ValidateMapCtx(ctx context.Context, data map[string]interface{}, rules map[string]interface{}) map[string]interface{} {
- errs := make(map[string]interface{})
- for field, rule := range rules {
- if ruleObj, ok := rule.(map[string]interface{}); ok {
- if dataObj, ok := data[field].(map[string]interface{}); ok {
- err := v.ValidateMapCtx(ctx, dataObj, ruleObj)
- if len(err) > 0 {
- errs[field] = err
- }
- } else if dataObjs, ok := data[field].([]map[string]interface{}); ok {
- for _, obj := range dataObjs {
- err := v.ValidateMapCtx(ctx, obj, ruleObj)
- if len(err) > 0 {
- errs[field] = err
- }
- }
- } else {
- errs[field] = errors.New("The field: '" + field + "' is not a map to dive")
- }
- } else if ruleStr, ok := rule.(string); ok {
- err := v.VarCtx(ctx, data[field], ruleStr)
- if err != nil {
- errs[field] = err
- }
- }
- }
- return errs
-}
-
-// ValidateMap validates map data from a map of tags
-func (v *Validate) ValidateMap(data map[string]interface{}, rules map[string]interface{}) map[string]interface{} {
- return v.ValidateMapCtx(context.Background(), data, rules)
-}
-
-// RegisterTagNameFunc registers a function to get alternate names for StructFields.
-//
-// eg. to use the names which have been specified for JSON representations of structs, rather than normal Go field names:
-//
-// validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
-// name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0]
-// // skip if tag key says it should be ignored
-// if name == "-" {
-// return ""
-// }
-// return name
-// })
-func (v *Validate) RegisterTagNameFunc(fn TagNameFunc) {
- v.tagNameFunc = fn
- v.hasTagNameFunc = true
-}
-
-// RegisterValidation adds a validation with the given tag
-//
-// NOTES:
-// - if the key already exists, the previous validation function will be replaced.
-// - this method is not thread-safe it is intended that these all be registered prior to any validation
-func (v *Validate) RegisterValidation(tag string, fn Func, callValidationEvenIfNull ...bool) error {
- return v.RegisterValidationCtx(tag, wrapFunc(fn), callValidationEvenIfNull...)
-}
-
-// RegisterValidationCtx does the same as RegisterValidation but accepts a FuncCtx validation
-// allowing context.Context validation support.
-func (v *Validate) RegisterValidationCtx(tag string, fn FuncCtx, callValidationEvenIfNull ...bool) error {
- var nilCheckable bool
- if len(callValidationEvenIfNull) > 0 {
- nilCheckable = callValidationEvenIfNull[0]
- }
- return v.registerValidation(tag, fn, false, nilCheckable)
-}
-
-func (v *Validate) registerValidation(tag string, fn FuncCtx, bakedIn bool, nilCheckable bool) error {
- if len(tag) == 0 {
- return errors.New("function Key cannot be empty")
- }
-
- if fn == nil {
- return errors.New("function cannot be empty")
- }
-
- _, ok := restrictedTags[tag]
- if !bakedIn && (ok || strings.ContainsAny(tag, restrictedTagChars)) {
- panic(fmt.Sprintf(restrictedTagErr, tag))
- }
- v.validations[tag] = internalValidationFuncWrapper{fn: fn, runValidatinOnNil: nilCheckable}
- return nil
-}
-
-// RegisterAlias registers a mapping of a single validation tag that
-// defines a common or complex set of validation(s) to simplify adding validation
-// to structs.
-//
-// NOTE: this function is not thread-safe it is intended that these all be registered prior to any validation
-func (v *Validate) RegisterAlias(alias, tags string) {
-
- _, ok := restrictedTags[alias]
-
- if ok || strings.ContainsAny(alias, restrictedTagChars) {
- panic(fmt.Sprintf(restrictedAliasErr, alias))
- }
-
- v.aliases[alias] = tags
-}
-
-// RegisterStructValidation registers a StructLevelFunc against a number of types.
-//
-// NOTE:
-// - this method is not thread-safe it is intended that these all be registered prior to any validation
-func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) {
- v.RegisterStructValidationCtx(wrapStructLevelFunc(fn), types...)
-}
-
-// RegisterStructValidationCtx registers a StructLevelFuncCtx against a number of types and allows passing
-// of contextual validation information via context.Context.
-//
-// NOTE:
-// - this method is not thread-safe it is intended that these all be registered prior to any validation
-func (v *Validate) RegisterStructValidationCtx(fn StructLevelFuncCtx, types ...interface{}) {
-
- if v.structLevelFuncs == nil {
- v.structLevelFuncs = make(map[reflect.Type]StructLevelFuncCtx)
- }
-
- for _, t := range types {
- tv := reflect.ValueOf(t)
- if tv.Kind() == reflect.Ptr {
- t = reflect.Indirect(tv).Interface()
- }
-
- v.structLevelFuncs[reflect.TypeOf(t)] = fn
- }
-}
-
-// RegisterStructValidationMapRules registers validate map rules.
-// Be aware that map validation rules supersede those defined on a/the struct if present.
-//
-// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation
-func (v *Validate) RegisterStructValidationMapRules(rules map[string]string, types ...interface{}) {
- if v.rules == nil {
- v.rules = make(map[reflect.Type]map[string]string)
- }
-
- deepCopyRules := make(map[string]string)
- for i, rule := range rules {
- deepCopyRules[i] = rule
- }
-
- for _, t := range types {
- typ := reflect.TypeOf(t)
-
- if typ.Kind() == reflect.Ptr {
- typ = typ.Elem()
- }
-
- if typ.Kind() != reflect.Struct {
- continue
- }
- v.rules[typ] = deepCopyRules
- }
-}
-
-// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types
-//
-// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation
-func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) {
-
- if v.customFuncs == nil {
- v.customFuncs = make(map[reflect.Type]CustomTypeFunc)
- }
-
- for _, t := range types {
- v.customFuncs[reflect.TypeOf(t)] = fn
- }
-
- v.hasCustomFuncs = true
-}
-
-// RegisterTranslation registers translations against the provided tag.
-func (v *Validate) RegisterTranslation(tag string, trans ut.Translator, registerFn RegisterTranslationsFunc, translationFn TranslationFunc) (err error) {
-
- if v.transTagFunc == nil {
- v.transTagFunc = make(map[ut.Translator]map[string]TranslationFunc)
- }
-
- if err = registerFn(trans); err != nil {
- return
- }
-
- m, ok := v.transTagFunc[trans]
- if !ok {
- m = make(map[string]TranslationFunc)
- v.transTagFunc[trans] = m
- }
-
- m[tag] = translationFn
-
- return
-}
-
-// Struct validates a structs exposed fields, and automatically validates nested structs, unless otherwise specified.
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) Struct(s interface{}) error {
- return v.StructCtx(context.Background(), s)
-}
-
-// StructCtx validates a structs exposed fields, and automatically validates nested structs, unless otherwise specified
-// and also allows passing of context.Context for contextual validation information.
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) StructCtx(ctx context.Context, s interface{}) (err error) {
-
- val := reflect.ValueOf(s)
- top := val
-
- if val.Kind() == reflect.Ptr && !val.IsNil() {
- val = val.Elem()
- }
-
- if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) {
- return &InvalidValidationError{Type: reflect.TypeOf(s)}
- }
-
- // good to validate
- vd := v.pool.Get().(*validate)
- vd.top = top
- vd.isPartial = false
- // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept
-
- vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil)
-
- if len(vd.errs) > 0 {
- err = vd.errs
- vd.errs = nil
- }
-
- v.pool.Put(vd)
-
- return
-}
-
-// StructFiltered validates a structs exposed fields, that pass the FilterFunc check and automatically validates
-// nested structs, unless otherwise specified.
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) StructFiltered(s interface{}, fn FilterFunc) error {
- return v.StructFilteredCtx(context.Background(), s, fn)
-}
-
-// StructFilteredCtx validates a structs exposed fields, that pass the FilterFunc check and automatically validates
-// nested structs, unless otherwise specified and also allows passing of contextual validation information via
-// context.Context
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) StructFilteredCtx(ctx context.Context, s interface{}, fn FilterFunc) (err error) {
- val := reflect.ValueOf(s)
- top := val
-
- if val.Kind() == reflect.Ptr && !val.IsNil() {
- val = val.Elem()
- }
-
- if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) {
- return &InvalidValidationError{Type: reflect.TypeOf(s)}
- }
-
- // good to validate
- vd := v.pool.Get().(*validate)
- vd.top = top
- vd.isPartial = true
- vd.ffn = fn
- // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept
-
- vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil)
-
- if len(vd.errs) > 0 {
- err = vd.errs
- vd.errs = nil
- }
-
- v.pool.Put(vd)
-
- return
-}
-
-// StructPartial validates the fields passed in only, ignoring all others.
-// Fields may be provided in a namespaced fashion relative to the struct provided
-// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) StructPartial(s interface{}, fields ...string) error {
- return v.StructPartialCtx(context.Background(), s, fields...)
-}
-
-// StructPartialCtx validates the fields passed in only, ignoring all others and allows passing of contextual
-// validation information via context.Context
-// Fields may be provided in a namespaced fashion relative to the struct provided
-// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields ...string) (err error) {
- val := reflect.ValueOf(s)
- top := val
-
- if val.Kind() == reflect.Ptr && !val.IsNil() {
- val = val.Elem()
- }
-
- if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) {
- return &InvalidValidationError{Type: reflect.TypeOf(s)}
- }
-
- // good to validate
- vd := v.pool.Get().(*validate)
- vd.top = top
- vd.isPartial = true
- vd.ffn = nil
- vd.hasExcludes = false
- vd.includeExclude = make(map[string]struct{})
-
- typ := val.Type()
- name := typ.Name()
-
- for _, k := range fields {
-
- flds := strings.Split(k, namespaceSeparator)
- if len(flds) > 0 {
-
- vd.misc = append(vd.misc[0:0], name...)
- // Don't append empty name for unnamed structs
- if len(vd.misc) != 0 {
- vd.misc = append(vd.misc, '.')
- }
-
- for _, s := range flds {
-
- idx := strings.Index(s, leftBracket)
-
- if idx != -1 {
- for idx != -1 {
- vd.misc = append(vd.misc, s[:idx]...)
- vd.includeExclude[string(vd.misc)] = struct{}{}
-
- idx2 := strings.Index(s, rightBracket)
- idx2++
- vd.misc = append(vd.misc, s[idx:idx2]...)
- vd.includeExclude[string(vd.misc)] = struct{}{}
- s = s[idx2:]
- idx = strings.Index(s, leftBracket)
- }
- } else {
-
- vd.misc = append(vd.misc, s...)
- vd.includeExclude[string(vd.misc)] = struct{}{}
- }
-
- vd.misc = append(vd.misc, '.')
- }
- }
- }
-
- vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil)
-
- if len(vd.errs) > 0 {
- err = vd.errs
- vd.errs = nil
- }
-
- v.pool.Put(vd)
-
- return
-}
-
-// StructExcept validates all fields except the ones passed in.
-// Fields may be provided in a namespaced fashion relative to the struct provided
-// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) StructExcept(s interface{}, fields ...string) error {
- return v.StructExceptCtx(context.Background(), s, fields...)
-}
-
-// StructExceptCtx validates all fields except the ones passed in and allows passing of contextual
-// validation information via context.Context
-// Fields may be provided in a namespaced fashion relative to the struct provided
-// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-func (v *Validate) StructExceptCtx(ctx context.Context, s interface{}, fields ...string) (err error) {
- val := reflect.ValueOf(s)
- top := val
-
- if val.Kind() == reflect.Ptr && !val.IsNil() {
- val = val.Elem()
- }
-
- if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) {
- return &InvalidValidationError{Type: reflect.TypeOf(s)}
- }
-
- // good to validate
- vd := v.pool.Get().(*validate)
- vd.top = top
- vd.isPartial = true
- vd.ffn = nil
- vd.hasExcludes = true
- vd.includeExclude = make(map[string]struct{})
-
- typ := val.Type()
- name := typ.Name()
-
- for _, key := range fields {
-
- vd.misc = vd.misc[0:0]
-
- if len(name) > 0 {
- vd.misc = append(vd.misc, name...)
- vd.misc = append(vd.misc, '.')
- }
-
- vd.misc = append(vd.misc, key...)
- vd.includeExclude[string(vd.misc)] = struct{}{}
- }
-
- vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil)
-
- if len(vd.errs) > 0 {
- err = vd.errs
- vd.errs = nil
- }
-
- v.pool.Put(vd)
-
- return
-}
-
-// Var validates a single variable using tag style validation.
-// eg.
-// var i int
-// validate.Var(i, "gt=1,lt=10")
-//
-// WARNING: a struct can be passed for validation eg. time.Time is a struct or
-// if you have a custom type and have registered a custom type handler, so must
-// allow it; however unforeseen validations will occur if trying to validate a
-// struct that is meant to be passed to 'validate.Struct'
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-// validate Array, Slice and maps fields which may contain more than one error
-func (v *Validate) Var(field interface{}, tag string) error {
- return v.VarCtx(context.Background(), field, tag)
-}
-
-// VarCtx validates a single variable using tag style validation and allows passing of contextual
-// validation information via context.Context.
-// eg.
-// var i int
-// validate.Var(i, "gt=1,lt=10")
-//
-// WARNING: a struct can be passed for validation eg. time.Time is a struct or
-// if you have a custom type and have registered a custom type handler, so must
-// allow it; however unforeseen validations will occur if trying to validate a
-// struct that is meant to be passed to 'validate.Struct'
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-// validate Array, Slice and maps fields which may contain more than one error
-func (v *Validate) VarCtx(ctx context.Context, field interface{}, tag string) (err error) {
- if len(tag) == 0 || tag == skipValidationTag {
- return nil
- }
-
- ctag := v.fetchCacheTag(tag)
-
- val := reflect.ValueOf(field)
- vd := v.pool.Get().(*validate)
- vd.top = val
- vd.isPartial = false
- vd.traverseField(ctx, val, val, vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag)
-
- if len(vd.errs) > 0 {
- err = vd.errs
- vd.errs = nil
- }
- v.pool.Put(vd)
- return
-}
-
-// VarWithValue validates a single variable, against another variable/field's value using tag style validation
-// eg.
-// s1 := "abcd"
-// s2 := "abcd"
-// validate.VarWithValue(s1, s2, "eqcsfield") // returns true
-//
-// WARNING: a struct can be passed for validation eg. time.Time is a struct or
-// if you have a custom type and have registered a custom type handler, so must
-// allow it; however unforeseen validations will occur if trying to validate a
-// struct that is meant to be passed to 'validate.Struct'
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-// validate Array, Slice and maps fields which may contain more than one error
-func (v *Validate) VarWithValue(field interface{}, other interface{}, tag string) error {
- return v.VarWithValueCtx(context.Background(), field, other, tag)
-}
-
-// VarWithValueCtx validates a single variable, against another variable/field's value using tag style validation and
-// allows passing of contextual validation information via context.Context.
-// eg.
-// s1 := "abcd"
-// s2 := "abcd"
-// validate.VarWithValue(s1, s2, "eqcsfield") // returns true
-//
-// WARNING: a struct can be passed for validation eg. time.Time is a struct or
-// if you have a custom type and have registered a custom type handler, so must
-// allow it; however unforeseen validations will occur if trying to validate a
-// struct that is meant to be passed to 'validate.Struct'
-//
-// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
-// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
-// validate Array, Slice and maps fields which may contain more than one error
-func (v *Validate) VarWithValueCtx(ctx context.Context, field interface{}, other interface{}, tag string) (err error) {
- if len(tag) == 0 || tag == skipValidationTag {
- return nil
- }
- ctag := v.fetchCacheTag(tag)
- otherVal := reflect.ValueOf(other)
- vd := v.pool.Get().(*validate)
- vd.top = otherVal
- vd.isPartial = false
- vd.traverseField(ctx, otherVal, reflect.ValueOf(field), vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag)
-
- if len(vd.errs) > 0 {
- err = vd.errs
- vd.errs = nil
- }
- v.pool.Put(vd)
- return
-}
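For reference, the `Validate` type removed from the vendor tree above is the entry point of go-playground/validator: `New()` parses and caches tags once per struct type, `Struct` walks tagged fields, and `Var` validates a single value against the same tag syntax. A minimal usage sketch, assuming an illustrative `User` struct and standard baked-in rules (none of this is taken from this repository):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/go-playground/validator/v10"
)

// User is an illustrative struct; the tags below are standard baked-in rules.
type User struct {
	Email string `validate:"required,email"`
	Age   int    `validate:"gte=0,lte=130"`
}

func main() {
	// New() parses and caches validation tags once per struct type,
	// so a single instance is meant to be reused.
	validate := validator.New()

	if err := validate.Struct(User{Email: "not-an-email", Age: -1}); err != nil {
		var verrs validator.ValidationErrors
		if errors.As(err, &verrs) {
			for _, fe := range verrs {
				fmt.Println(fe.Namespace(), fe.Tag()) // e.g. "User.Email email"
			}
		}
	}

	// Single-value validation uses the same tag syntax.
	if err := validate.Var("", "required"); err != nil {
		fmt.Println("value is required")
	}
}
```

Reusing one instance matters because the tag and struct caches are per-instance, as the `New()` documentation above notes.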
diff --git a/vendor/github.com/google/trillian/.golangci.yaml b/vendor/github.com/google/trillian/.golangci.yaml
index 4784f8fde4..0c50dcbda2 100644
--- a/vendor/github.com/google/trillian/.golangci.yaml
+++ b/vendor/github.com/google/trillian/.golangci.yaml
@@ -15,22 +15,6 @@ linters-settings:
- golang.org/x/net/context
- github.com/gogo/protobuf/proto
-linters:
- disable-all: true
- enable:
- - depguard
- - gocyclo
- - gofmt
- - goimports
- - govet
- - ineffassign
- - megacheck
- - misspell
- - revive
- - unused
- # TODO(gbelvin): write license linter and commit to upstream.
- # ./scripts/check_license.sh is run by ./scripts/presubmit.sh
-
issues:
# Don't turn off any checks by default. We can do this explicitly if needed.
exclude-use-default: false
diff --git a/vendor/github.com/google/trillian/BUILD.bazel b/vendor/github.com/google/trillian/BUILD.bazel
deleted file mode 100644
index bbee3e0cb7..0000000000
--- a/vendor/github.com/google/trillian/BUILD.bazel
+++ /dev/null
@@ -1,55 +0,0 @@
-# This BUILD file contains Bazel build targets for clients of the Trillian API.
-# Bazel can be obtained from www.bazel.build
-#
-# Even where Bazel is not being used by client builds, these targets provide
-# a mechanism to determine which proto files are required for the API. For
-# example, the following command will list the proto files required to use
-# the Trillian Admin gRPC interface:
-#
-# bazel query --notool_deps --noimplicit_deps \
-# 'kind("source file", deps(:trillian_admin_api_proto))'
-package(default_visibility = ["//visibility:public"])
-
-# A proto library for the Trillian Admin gRPC API.
-proto_library(
- name = "trillian_admin_api_proto",
- srcs = [
- "trillian_admin_api.proto",
- ],
- deps = [
- ":trillian_proto",
- "@com_google_googleapis//google/api:annotations_proto",
- "@com_google_googleapis//google/rpc:status_proto",
- "@com_google_protobuf//:field_mask_proto",
- ],
-)
-
-# A proto library for the Trillian Log gRPC API.
-proto_library(
- name = "trillian_log_api_proto",
- srcs = [
- "trillian_log_api.proto",
- ],
- deps = [
- ":trillian_proto",
- "@com_google_googleapis//google/api:annotations_proto",
- "@com_google_googleapis//google/rpc:status_proto",
- "@com_google_protobuf//:api_proto",
- "@com_google_protobuf//:timestamp_proto",
- ],
-)
-
-# Common proto definitions used within the Trillian gRPC APIs.
-proto_library(
- name = "trillian_proto",
- srcs = [
- "crypto/keyspb/keyspb.proto",
- "trillian.proto",
- ],
- deps = [
- "@com_google_protobuf//:any_proto",
- "@com_google_protobuf//:api_proto",
- "@com_google_protobuf//:duration_proto",
- "@com_google_protobuf//:timestamp_proto",
- ],
-)
diff --git a/vendor/github.com/google/trillian/CHANGELOG.md b/vendor/github.com/google/trillian/CHANGELOG.md
index 7a072252d2..b7f2393d1a 100644
--- a/vendor/github.com/google/trillian/CHANGELOG.md
+++ b/vendor/github.com/google/trillian/CHANGELOG.md
@@ -2,6 +2,32 @@
## HEAD
+## v1.5.2
+
+* Recommended go version for development: 1.19
+ * This is the version used by the cloudbuild presubmits. Using a
+ different version can lead to presubmits failing due to unexpected
+ diffs.
+
+### Storage
+
+#### CloudSpanner
+
+* Removed use of the `--cloudspanner_write_sessions` flag.
+ This was related to preparing some fraction of CloudSpanner sessionpool entries with
+ Read/Write transactions, however this functionality is no longer supported by the client
+ library.
+
+### Repo config
+* Enable all lint checks in trillian repo by @mhutchinson in https://github.com/google/trillian/pull/2979
+
+### Dependency updates
+
+* Bump contrib.go.opencensus.io/exporter/stackdriver from 0.13.12 to 0.13.14 by @samuelattwood in https://github.com/google/trillian/pull/2950
+* Bump Go version from 1.17 to 1.19.
+* Updated golangci-lint to v1.51.1 (developers should update to this version)
+* Update transparency-dev/merkle to v0.0.2
+
## v1.5.1
### Storage
@@ -10,6 +36,7 @@
with support provided by Equinix Metal.
### Misc
+
* Fix log server not exiting properly on SIGINT
### Dependency updates
diff --git a/vendor/github.com/google/trillian/README.md b/vendor/github.com/google/trillian/README.md
index 9ebb3a2dee..76f51fe951 100644
--- a/vendor/github.com/google/trillian/README.md
+++ b/vendor/github.com/google/trillian/README.md
@@ -73,7 +73,7 @@ The current state of feature implementation is recorded in the
To build and test Trillian you need:
- - Go 1.17 or later (go 1.17 matches cloudbuild, and is preferred for developers
+ - Go 1.19 or later (go 1.19 matches cloudbuild, and is preferred for developers
that will be submitting PRs to this project).
To run many of the tests (and production deployment) you need:
@@ -193,7 +193,6 @@ go generate -x ./... # hunts for //go:generate comments and runs them
The Trillian codebase uses go.mod to declare fixed versions of its dependencies.
With Go modules, updating a dependency simply involves running `go get`:
```
-export GO111MODULE=on
go get package/path # Fetch the latest published version
go get package/path@X.Y.Z # Fetch a specific published version
go get package/path@HEAD # Fetch the latest commit
@@ -215,7 +214,7 @@ and tests over the codebase.
#### Install [golangci-lint](https://github.com/golangci/golangci-lint#local-installation).
```bash
-go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.47.3
+go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.1
```
#### Run code generation, build, test and linters
diff --git a/vendor/github.com/google/trillian/client/admin.go b/vendor/github.com/google/trillian/client/admin.go
deleted file mode 100644
index e48416e5ef..0000000000
--- a/vendor/github.com/google/trillian/client/admin.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2018 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "context"
- "fmt"
- "time"
-
- "github.com/google/trillian"
- "github.com/google/trillian/client/backoff"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
- "k8s.io/klog/v2"
-)
-
-// CreateAndInitTree uses the adminClient and logClient to create the tree
-// described by req.
-// If req describes a LOG tree, then this function will also call the InitLog
-// function using logClient.
-// Internally, the function will continue to retry failed requests until either
-// the tree is created (and if necessary, initialised) successfully, or ctx is
-// cancelled.
-func CreateAndInitTree(
- ctx context.Context,
- req *trillian.CreateTreeRequest,
- adminClient trillian.TrillianAdminClient,
- logClient trillian.TrillianLogClient) (*trillian.Tree, error) {
- b := &backoff.Backoff{
- Min: 100 * time.Millisecond,
- Max: 10 * time.Second,
- Factor: 2,
- Jitter: true,
- }
-
- var tree *trillian.Tree
- err := b.Retry(ctx, func() error {
- klog.Info("CreateTree...")
- var err error
- tree, err = adminClient.CreateTree(ctx, req)
- switch code := status.Code(err); code {
- case codes.Unavailable:
- klog.Errorf("Admin server unavailable: %v", err)
- return err
- case codes.OK:
- return nil
- default:
- klog.Errorf("failed to CreateTree(%+v): %T %v", req, err, err)
- return err
- }
- })
- if err != nil {
- return nil, err
- }
-
- switch tree.TreeType {
- case trillian.TreeType_LOG, trillian.TreeType_PREORDERED_LOG:
- if err := InitLog(ctx, tree, logClient); err != nil {
- return nil, err
- }
- default:
- return nil, fmt.Errorf("don't know how or whether to initialise tree type %v", tree.TreeType)
- }
-
- return tree, nil
-}
-
-// InitLog initialises a freshly created Log tree.
-func InitLog(ctx context.Context, tree *trillian.Tree, logClient trillian.TrillianLogClient) error {
- if tree.TreeType != trillian.TreeType_LOG &&
- tree.TreeType != trillian.TreeType_PREORDERED_LOG {
- return fmt.Errorf("InitLog called with tree of type %v", tree.TreeType)
- }
-
- b := &backoff.Backoff{
- Min: 100 * time.Millisecond,
- Max: 10 * time.Second,
- Factor: 2,
- Jitter: true,
- }
-
- err := b.Retry(ctx, func() error {
- klog.Infof("Initialising Log %v...", tree.TreeId)
- req := &trillian.InitLogRequest{LogId: tree.TreeId}
- resp, err := logClient.InitLog(ctx, req)
- switch code := status.Code(err); code {
- case codes.Unavailable:
- klog.Errorf("Log server unavailable: %v", err)
- return err
- case codes.AlreadyExists:
- klog.Warningf("Bizarrely, the just-created Log (%v) is already initialised!: %v", tree.TreeId, err)
- return err
- case codes.OK:
- klog.Infof("Initialised Log (%v) with new SignedTreeHead:\n%+v",
- tree.TreeId, resp.Created)
- return nil
- default:
- klog.Errorf("failed to InitLog(%+v): %T %v", req, err, err)
- return err
- }
- })
- if err != nil {
- return err
- }
-
- // Wait for log root to become available.
- return b.Retry(ctx, func() error {
- _, err := logClient.GetLatestSignedLogRoot(ctx,
- &trillian.GetLatestSignedLogRootRequest{LogId: tree.TreeId})
- return err
- }, codes.FailedPrecondition)
-}
diff --git a/vendor/github.com/google/trillian/client/backoff/backoff.go b/vendor/github.com/google/trillian/client/backoff/backoff.go
deleted file mode 100644
index c2b4acb281..0000000000
--- a/vendor/github.com/google/trillian/client/backoff/backoff.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2017 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package backoff allows retrying an operation with backoff.
-package backoff
-
-import (
- "context"
- "fmt"
- "math/rand"
- "time"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-// RetriableError explicitly instructs Backoff to retry.
-type RetriableError string
-
-// Error returns string representation of the retriable error.
-func (re RetriableError) Error() string {
- return string(re)
-}
-
-// RetriableErrorf wraps a formatted string into a RetriableError.
-func RetriableErrorf(format string, a ...interface{}) error {
- return RetriableError(fmt.Sprintf(format, a...))
-}
-
-// Backoff specifies the parameters of the backoff algorithm. Works correctly
-// if 0 < Min <= Max <= 2^62 (nanosec), and Factor >= 1.
-type Backoff struct {
- Min time.Duration // Duration of the first pause.
- Max time.Duration // Max duration of a pause.
- Factor float64 // The factor of duration increase between iterations.
- Jitter bool // Add random noise to pauses.
-
- delta time.Duration // Current pause duration relative to Min, no jitter.
-}
-
-// Duration returns the time to wait on current retry iteration. Every time
-// Duration is called, the returned value will exponentially increase by Factor
-// until Backoff.Max. If Jitter is enabled, will add an additional random value
-// between 0 and the duration, so the result can at most double.
-func (b *Backoff) Duration() time.Duration {
- base := b.Min + b.delta
- pause := base
- if b.Jitter { // Add a number in the range [0, pause).
- pause += time.Duration(rand.Int63n(int64(pause)))
- }
-
- nextPause := time.Duration(float64(base) * b.Factor)
- if nextPause > b.Max || nextPause < b.Min { // Multiplication could overflow.
- nextPause = b.Max
- }
- b.delta = nextPause - b.Min
-
- return pause
-}
-
-// Reset sets the internal state back to first retry iteration.
-func (b *Backoff) Reset() {
- b.delta = 0
-}
-
-// Retry calls a function until it succeeds or the context is done.
-// It will backoff if the function returns a retryable error.
-// Once the context is done, retries will end and the most recent error will be returned.
-// Backoff is not reset by this function.
-func (b *Backoff) Retry(ctx context.Context, f func() error, retry ...codes.Code) error {
- // If the context is already done, don't make any attempts to call f.
- if ctx.Err() != nil {
- return ctx.Err()
- }
-
- // Try calling f while the error is retryable and ctx is not done.
- for {
- if err := f(); !IsRetryable(err, retry...) {
- return err
- }
- select {
- case <-time.After(b.Duration()):
- case <-ctx.Done():
- return ctx.Err()
- }
- }
-}
-
-// IsRetryable returns false unless the error is explicitly retriable per
-// https://godoc.org/google.golang.org/grpc/codes,
-// or if the error codes is in retry. codes.OK is not retryable.
-func IsRetryable(err error, retry ...codes.Code) bool {
- code := status.Code(err)
- switch code {
- // Fast path.
- case codes.OK:
- return false
-
- // Debatable cases:
- case codes.DeadlineExceeded,
- codes.ResourceExhausted: // Retry with backoff.
- return true
-
- // Errors that are explicitly retryable:
- case codes.Unavailable, // Client can just retry the call.
- codes.Aborted: // Client can retry the read-modify-write function.
- return true
- }
-
- for _, c := range retry {
- if code == c {
- return true
- }
- }
-
- // Don't retry for all other errors, unless it is a RetriableError.
- _, ok := err.(RetriableError)
- return ok
-}
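The deleted backoff package above implements capped exponential backoff with optional jitter, where `Retry` keeps calling a closure while the error is retryable and the context is not done. A small sketch of driving it, using only the API shown above; the retry budget, simulated failures, and extra gRPC code are illustrative assumptions:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/google/trillian/client/backoff"
	"google.golang.org/grpc/codes"
)

func main() {
	// Pauses grow as 100ms, 200ms, 400ms, ... capped at 5s, with random jitter.
	b := &backoff.Backoff{
		Min:    100 * time.Millisecond,
		Max:    5 * time.Second,
		Factor: 2,
		Jitter: true,
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	attempts := 0
	err := b.Retry(ctx, func() error {
		attempts++
		if attempts < 3 {
			// A RetriableError (or a retryable gRPC code) triggers another attempt.
			return backoff.RetriableErrorf("attempt %d failed", attempts)
		}
		return nil
	}, codes.FailedPrecondition) // additional codes may be listed as retryable

	log.Printf("finished after %d attempts, err=%v", attempts, err)
}
```

Between attempts, `Retry` sleeps for `Duration()`, which doubles (by `Factor`) up to `Max` and returns the most recent error once the context is done.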
diff --git a/vendor/github.com/google/trillian/client/log_client.go b/vendor/github.com/google/trillian/client/log_client.go
deleted file mode 100644
index a13a916de4..0000000000
--- a/vendor/github.com/google/trillian/client/log_client.go
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright 2017 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package client verifies responses from the Trillian log.
-package client
-
-import (
- "bytes"
- "context"
- "fmt"
- "sort"
- "sync"
- "time"
-
- "github.com/google/trillian"
- "github.com/google/trillian/client/backoff"
- "github.com/google/trillian/types"
- "github.com/transparency-dev/merkle"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-// LogClient represents a client for a given Trillian log instance.
-type LogClient struct {
- *LogVerifier
- LogID int64
- MinMergeDelay time.Duration
- client trillian.TrillianLogClient
- root types.LogRootV1
- rootLock sync.Mutex
- updateLock sync.Mutex
-}
-
-// New returns a new LogClient.
-func New(logID int64, client trillian.TrillianLogClient, verifier *LogVerifier, root types.LogRootV1) *LogClient {
- return &LogClient{
- LogVerifier: verifier,
- LogID: logID,
- client: client,
- root: root,
- }
-}
-
-// NewFromTree creates a new LogClient given a tree config.
-func NewFromTree(client trillian.TrillianLogClient, config *trillian.Tree, root types.LogRootV1) (*LogClient, error) {
- verifier, err := NewLogVerifierFromTree(config)
- if err != nil {
- return nil, err
- }
-
- return New(config.GetTreeId(), client, verifier, root), nil
-}
-
-// AddLeaf adds leaf to the append only log.
-// Blocks and continuously updates the trusted root until a successful inclusion proof
-// can be retrieved.
-func (c *LogClient) AddLeaf(ctx context.Context, data []byte) error {
- if err := c.QueueLeaf(ctx, data); err != nil {
- return fmt.Errorf("QueueLeaf(): %v", err)
- }
- if err := c.WaitForInclusion(ctx, data); err != nil {
- return fmt.Errorf("WaitForInclusion(): %v", err)
- }
- return nil
-}
-
-// ListByIndex returns the requested leaves by index.
-func (c *LogClient) ListByIndex(ctx context.Context, start, count int64) ([]*trillian.LogLeaf, error) {
- resp, err := c.client.GetLeavesByRange(ctx,
- &trillian.GetLeavesByRangeRequest{
- LogId: c.LogID,
- StartIndex: start,
- Count: count,
- })
- if err != nil {
- return nil, err
- }
- // Verify that we got back the requested leaves.
- if len(resp.Leaves) < int(count) {
- return nil, fmt.Errorf("len(Leaves)=%d, want %d", len(resp.Leaves), count)
- }
- for i, l := range resp.Leaves {
- if want := start + int64(i); l.LeafIndex != want {
- return nil, fmt.Errorf("Leaves[%d].LeafIndex=%d, want %d", i, l.LeafIndex, want)
- }
- }
-
- return resp.Leaves, nil
-}
-
-// WaitForRootUpdate repeatedly fetches the latest root until there is an
-// update, which it then applies, or until ctx times out.
-func (c *LogClient) WaitForRootUpdate(ctx context.Context) (*types.LogRootV1, error) {
- b := &backoff.Backoff{
- Min: 100 * time.Millisecond,
- Max: 10 * time.Second,
- Factor: 2,
- Jitter: true,
- }
-
- for {
- newTrusted, err := c.UpdateRoot(ctx)
- switch status.Code(err) {
- case codes.OK:
- if newTrusted != nil {
- return newTrusted, nil
- }
- case codes.Unavailable, codes.NotFound, codes.FailedPrecondition:
- // Retry.
- default:
- return nil, err
- }
-
- select {
- case <-ctx.Done():
- return nil, status.Errorf(codes.DeadlineExceeded, "%v", ctx.Err())
- case <-time.After(b.Duration()):
- }
- }
-}
-
-// getAndVerifyLatestRoot fetches and verifies the latest root against a trusted root, seen in the past.
-// Pass nil for trusted if this is the first time querying this log.
-func (c *LogClient) getAndVerifyLatestRoot(ctx context.Context, trusted *types.LogRootV1) (*types.LogRootV1, error) {
- resp, err := c.client.GetLatestSignedLogRoot(ctx,
- &trillian.GetLatestSignedLogRootRequest{
- LogId: c.LogID,
- FirstTreeSize: int64(trusted.TreeSize),
- })
- if err != nil {
- return nil, err
- }
-
- // TODO(gbelvin): Turn on root verification.
- /*
- logRoot, err := c.VerifyRoot(&types.LogRootV1{}, resp.GetSignedLogRoot(), nil)
- if err != nil {
- return nil, err
- }
- */
- // TODO(gbelvin): Remove this hack when all implementations store digital signatures.
- var logRoot types.LogRootV1
- if err := logRoot.UnmarshalBinary(resp.GetSignedLogRoot().LogRoot); err != nil {
- return nil, err
- }
-
- if trusted.TreeSize > 0 &&
- logRoot.TreeSize == trusted.TreeSize &&
- bytes.Equal(logRoot.RootHash, trusted.RootHash) {
- // Tree has not been updated.
- return &logRoot, nil
- }
-
- // Verify root update if the tree / the latest signed log root isn't empty.
- if logRoot.TreeSize > 0 {
- if _, err := c.VerifyRoot(trusted, resp.GetSignedLogRoot(), resp.GetProof().GetHashes()); err != nil {
- return nil, err
- }
- }
- return &logRoot, nil
-}
-
-// GetRoot returns a copy of the latest trusted root.
-func (c *LogClient) GetRoot() *types.LogRootV1 {
- c.rootLock.Lock()
- defer c.rootLock.Unlock()
-
- // Copy the internal trusted root in order to prevent clients from modifying it.
- ret := c.root
- return &ret
-}
-
-// UpdateRoot retrieves the current SignedLogRoot, verifying it against roots this client has
-// seen in the past, and updating the currently trusted root if the new root verifies, and is
-// newer than the currently trusted root.
-func (c *LogClient) UpdateRoot(ctx context.Context) (*types.LogRootV1, error) {
- // Only one root update should be running at any point in time, because
- // the update involves a consistency proof from the old value, and if the
- // old value could change along the way (in another goroutine) then the
- // result could be inconsistent.
- //
- // For example, if the current root is A and two root updates A->B and A->C
- // happen in parallel, then we might end up with the transitions A->B->C:
- // cur := A cur := A
- // getRoot() => B getRoot() => C
- // proof(A->B) ok proof(A->C) ok
- // c.root = B
- // c.root = C
- // and the last step (B->C) has no proof and so could hide a forked tree.
- c.updateLock.Lock()
- defer c.updateLock.Unlock()
-
- currentlyTrusted := c.GetRoot()
- newTrusted, err := c.getAndVerifyLatestRoot(ctx, currentlyTrusted)
- if err != nil {
- return nil, err
- }
-
- // Lock "rootLock" for the "root" update.
- c.rootLock.Lock()
- defer c.rootLock.Unlock()
-
- if newTrusted.TimestampNanos > currentlyTrusted.TimestampNanos &&
- newTrusted.TreeSize >= currentlyTrusted.TreeSize {
-
- // Take a copy of the new trusted root in order to prevent clients from modifying it.
- c.root = *newTrusted
-
- return newTrusted, nil
- }
-
- return nil, nil
-}
-
-// WaitForInclusion blocks until the requested data has been verified with an
-// inclusion proof.
-//
-// It will continuously update the root to the latest one available until the
-// data is found, or an error is returned.
-//
-// It is best to call this method with a context that will timeout to avoid
-// waiting forever.
-func (c *LogClient) WaitForInclusion(ctx context.Context, data []byte) error {
- leaf := prepareLeaf(c.hasher, data)
-
- // If a minimum merge delay has been configured, wait at least that long before
- // starting to poll
- if c.MinMergeDelay > 0 {
- select {
- case <-ctx.Done():
- return status.Errorf(codes.DeadlineExceeded, "%v", ctx.Err())
- case <-time.After(c.MinMergeDelay):
- }
- }
-
- var root *types.LogRootV1
- for {
- root = c.GetRoot()
-
- // It is illegal to ask for an inclusion proof with TreeSize = 0.
- if root.TreeSize >= 1 {
- ok, err := c.getAndVerifyInclusionProof(ctx, leaf.MerkleLeafHash, root)
- if err != nil && status.Code(err) != codes.NotFound {
- return err
- } else if ok {
- return nil
- }
- }
-
- // If not found or tree is empty, wait for a root update before retrying again.
- if _, err := c.WaitForRootUpdate(ctx); err != nil {
- return err
- }
-
- // Retry
- }
-}
-
-func (c *LogClient) getAndVerifyInclusionProof(ctx context.Context, leafHash []byte, sth *types.LogRootV1) (bool, error) {
- resp, err := c.client.GetInclusionProofByHash(ctx,
- &trillian.GetInclusionProofByHashRequest{
- LogId: c.LogID,
- LeafHash: leafHash,
- TreeSize: int64(sth.TreeSize),
- })
- if err != nil {
- return false, err
- }
- if len(resp.Proof) < 1 {
- return false, nil
- }
- for _, proof := range resp.Proof {
- if err := c.VerifyInclusionByHash(sth, leafHash, proof); err != nil {
- return false, fmt.Errorf("VerifyInclusionByHash(): %v", err)
- }
- }
- return true, nil
-}
-
-// AddSequencedLeaves adds any number of pre-sequenced leaves to the log.
-// Indexes must be contiguous.
-func (c *LogClient) AddSequencedLeaves(ctx context.Context, dataByIndex map[int64][]byte) error {
- if len(dataByIndex) == 0 {
- return nil
- }
- leaves := make([]*trillian.LogLeaf, 0, len(dataByIndex))
- indexes := make([]int64, 0, len(dataByIndex))
- for index := range dataByIndex {
- indexes = append(indexes, index)
- }
- sort.Slice(indexes, func(a, b int) bool { return indexes[a] < indexes[b] })
-
- for i, index := range indexes {
- // Check index continuity.
- if want := indexes[0] + int64(i); index != want {
- return fmt.Errorf("missing index in contiugous index range. got: %v, want: %v", index, want)
- }
- leaf := prepareLeaf(c.hasher, dataByIndex[index])
- leaf.LeafIndex = index
- leaves = append(leaves, leaf)
- }
- resp, err := c.client.AddSequencedLeaves(ctx, &trillian.AddSequencedLeavesRequest{
- LogId: c.LogID,
- Leaves: leaves,
- })
- for _, leaf := range resp.GetResults() {
- if s := status.FromProto(leaf.GetStatus()); s.Code() != codes.OK && s.Code() != codes.AlreadyExists {
- return status.Errorf(s.Code(), "unexpected fail status in AddSequencedLeaves: %+v, err: %v", leaf, s.Message())
- }
- }
- return err
-}
-
-// QueueLeaf adds a leaf to a Trillian log without blocking.
-// AlreadyExists is considered a success case by this function.
-func (c *LogClient) QueueLeaf(ctx context.Context, data []byte) error {
- leaf := prepareLeaf(c.hasher, data)
- _, err := c.client.QueueLeaf(ctx, &trillian.QueueLeafRequest{
- LogId: c.LogID,
- Leaf: leaf,
- })
- return err
-}
-
-// prepareLeaf returns a trillian.LogLeaf prepopulated with leaf data and hash.
-func prepareLeaf(hasher merkle.LogHasher, data []byte) *trillian.LogLeaf {
- leafHash := hasher.HashLeaf(data)
- return &trillian.LogLeaf{
- LeafValue: data,
- MerkleLeafHash: leafHash,
- }
-}
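The removed `LogClient` above pairs `QueueLeaf` with `WaitForInclusion`, so `AddLeaf` only returns once an inclusion proof against an updated trusted root verifies. A hedged sketch of wiring it up against a log server, using the constructors shown above; the endpoint, tree ID, and payload are placeholders:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/google/trillian"
	"github.com/google/trillian/client"
	"github.com/google/trillian/types"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Placeholder endpoint and tree; a real deployment supplies its own
	// log server address, tree config, and previously trusted root.
	conn, err := grpc.Dial("localhost:8090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	tree := &trillian.Tree{TreeId: 12345, TreeType: trillian.TreeType_LOG}
	c, err := client.NewFromTree(trillian.NewTrillianLogClient(conn), tree, types.LogRootV1{})
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// AddLeaf queues the entry, then blocks until an inclusion proof
	// against an updated trusted root verifies (or ctx expires).
	if err := c.AddLeaf(ctx, []byte("hello transparency log")); err != nil {
		log.Fatal(err)
	}
}
```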
diff --git a/vendor/github.com/google/trillian/client/log_verifier.go b/vendor/github.com/google/trillian/client/log_verifier.go
deleted file mode 100644
index 3e8ecfff11..0000000000
--- a/vendor/github.com/google/trillian/client/log_verifier.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2017 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "errors"
- "fmt"
-
- "github.com/google/trillian"
- "github.com/google/trillian/types"
- "github.com/transparency-dev/merkle"
- "github.com/transparency-dev/merkle/proof"
- "github.com/transparency-dev/merkle/rfc6962"
-)
-
-// LogVerifier allows verification of output from Trillian Logs, both regular
-// and pre-ordered; it is safe for concurrent use (as its contents are fixed
-// after construction).
-type LogVerifier struct {
- // hasher is the hash strategy used to compute nodes in the Merkle tree.
- hasher merkle.LogHasher
-}
-
-// NewLogVerifier returns an object that can verify output from Trillian Logs.
-func NewLogVerifier(hasher merkle.LogHasher) *LogVerifier {
- return &LogVerifier{hasher: hasher}
-}
-
-// NewLogVerifierFromTree creates a new LogVerifier using the algorithms
-// specified by a Trillian Tree object.
-func NewLogVerifierFromTree(config *trillian.Tree) (*LogVerifier, error) {
- if config == nil {
- return nil, errors.New("client: NewLogVerifierFromTree(): nil config")
- }
- log, pLog := trillian.TreeType_LOG, trillian.TreeType_PREORDERED_LOG
- if got := config.TreeType; got != log && got != pLog {
- return nil, fmt.Errorf("client: NewLogVerifierFromTree(): TreeType: %v, want %v or %v", got, log, pLog)
- }
-
- return NewLogVerifier(rfc6962.DefaultHasher), nil
-}
-
-// VerifyRoot verifies that newRoot is a valid append-only operation from
-// trusted. If trusted.TreeSize is zero, a consistency proof is not needed.
-func (c *LogVerifier) VerifyRoot(trusted *types.LogRootV1, newRoot *trillian.SignedLogRoot, consistency [][]byte) (*types.LogRootV1, error) {
- if trusted == nil {
- return nil, fmt.Errorf("VerifyRoot() error: trusted == nil")
- }
- if newRoot == nil {
- return nil, fmt.Errorf("VerifyRoot() error: newRoot == nil")
- }
-
- var r types.LogRootV1
- if err := r.UnmarshalBinary(newRoot.LogRoot); err != nil {
- return nil, err
- }
-
- // Implicitly trust the first root we get.
- if trusted.TreeSize != 0 {
- // Verify consistency proof.
- if err := proof.VerifyConsistency(c.hasher, trusted.TreeSize, r.TreeSize, consistency, trusted.RootHash, r.RootHash); err != nil {
- return nil, fmt.Errorf("failed to verify consistency proof from %d->%d %x->%x: %v", trusted.TreeSize, r.TreeSize, trusted.RootHash, r.RootHash, err)
- }
- }
- return &r, nil
-}
-
-// VerifyInclusionByHash verifies that the inclusion proof for the given Merkle leafHash
-// matches the given trusted root.
-func (c *LogVerifier) VerifyInclusionByHash(trusted *types.LogRootV1, leafHash []byte, pf *trillian.Proof) error {
- if trusted == nil {
- return fmt.Errorf("VerifyInclusionByHash() error: trusted == nil")
- }
- if pf == nil {
- return fmt.Errorf("VerifyInclusionByHash() error: proof == nil")
- }
-
- return proof.VerifyInclusion(c.hasher, uint64(pf.LeafIndex), trusted.TreeSize, leafHash, pf.Hashes, trusted.RootHash)
-}
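The `LogVerifier` deleted above is a thin wrapper over the transparency-dev/merkle primitives it imports. A compact sketch of the same inclusion check done directly with those primitives; the helper name is ours, and the index, tree size, and proof hashes would come from a `GetInclusionProofByHash` response:

```go
package merkleexample

import (
	"fmt"

	"github.com/transparency-dev/merkle/proof"
	"github.com/transparency-dev/merkle/rfc6962"
)

// verifyInclusion checks that leafData is committed to by rootHash at the
// given index within a tree of the given size, using the same hasher and
// proof primitives the deleted LogVerifier wraps.
func verifyInclusion(index, size uint64, leafData, rootHash []byte, hashes [][]byte) error {
	hasher := rfc6962.DefaultHasher
	leafHash := hasher.HashLeaf(leafData)
	if err := proof.VerifyInclusion(hasher, index, size, leafHash, hashes, rootHash); err != nil {
		return fmt.Errorf("inclusion proof failed: %v", err)
	}
	return nil
}
```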
diff --git a/vendor/github.com/google/trillian/cloudbuild.yaml b/vendor/github.com/google/trillian/cloudbuild.yaml
index b1ee8f780f..3e38dbed2a 100644
--- a/vendor/github.com/google/trillian/cloudbuild.yaml
+++ b/vendor/github.com/google/trillian/cloudbuild.yaml
@@ -11,7 +11,6 @@ options:
- name: go-modules
path: /go
env:
- - GO111MODULE=on
- GOPATH=/go
- GOLANG_PROTOBUF_REGISTRATION_CONFLICT=ignore # Temporary work-around v1.proto already registered error.
- DOCKER_CLIENT_TIMEOUT=120
diff --git a/vendor/github.com/google/trillian/trillian.pb.go b/vendor/github.com/google/trillian/trillian.pb.go
index 6855aca394..74d84d97e7 100644
--- a/vendor/github.com/google/trillian/trillian.pb.go
+++ b/vendor/github.com/google/trillian/trillian.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.30.0
// protoc v3.20.1
// source: trillian.proto
@@ -172,11 +172,11 @@ const (
TreeState_FROZEN TreeState = 2
// Deprecated: now tracked in Tree.deleted.
//
- // Deprecated: Do not use.
+ // Deprecated: Marked as deprecated in trillian.proto.
TreeState_DEPRECATED_SOFT_DELETED TreeState = 3
// Deprecated: now tracked in Tree.deleted.
//
- // Deprecated: Do not use.
+ // Deprecated: Marked as deprecated in trillian.proto.
TreeState_DEPRECATED_HARD_DELETED TreeState = 4
// A tree that is draining will continue to integrate queued entries.
// No new entries should be accepted.
@@ -461,19 +461,21 @@ type SignedLogRoot struct {
// in RFC5246 notation):
//
// enum { v1(1), (65535)} Version;
- // struct {
- // uint64 tree_size;
- // opaque root_hash<0..128>;
- // uint64 timestamp_nanos;
- // uint64 revision;
- // opaque metadata<0..65535>;
- // } LogRootV1;
- // struct {
- // Version version;
- // select(version) {
- // case v1: LogRootV1;
- // }
- // } LogRoot;
+ //
+ // struct {
+ // uint64 tree_size;
+ // opaque root_hash<0..128>;
+ // uint64 timestamp_nanos;
+ // uint64 revision;
+ // opaque metadata<0..65535>;
+ // } LogRootV1;
+ //
+ // struct {
+ // Version version;
+ // select(version) {
+ // case v1: LogRootV1;
+ // }
+ // } LogRoot;
//
// A serialized v1 log root will therefore be laid out as:
//
diff --git a/vendor/github.com/google/trillian/trillian_admin_api.pb.go b/vendor/github.com/google/trillian/trillian_admin_api.pb.go
index 2d97bbf1cb..a4123cdd49 100644
--- a/vendor/github.com/google/trillian/trillian_admin_api.pb.go
+++ b/vendor/github.com/google/trillian/trillian_admin_api.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.30.0
// protoc v3.20.1
// source: trillian_admin_api.proto
diff --git a/vendor/github.com/google/trillian/trillian_admin_api_grpc.pb.go b/vendor/github.com/google/trillian/trillian_admin_api_grpc.pb.go
index 6253c03093..d01ca0759d 100644
--- a/vendor/github.com/google/trillian/trillian_admin_api_grpc.pb.go
+++ b/vendor/github.com/google/trillian/trillian_admin_api_grpc.pb.go
@@ -1,6 +1,20 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.2.0
+// - protoc-gen-go-grpc v1.3.0
// - protoc v3.20.1
// source: trillian_admin_api.proto
@@ -18,6 +32,15 @@ import (
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
+const (
+ TrillianAdmin_ListTrees_FullMethodName = "/trillian.TrillianAdmin/ListTrees"
+ TrillianAdmin_GetTree_FullMethodName = "/trillian.TrillianAdmin/GetTree"
+ TrillianAdmin_CreateTree_FullMethodName = "/trillian.TrillianAdmin/CreateTree"
+ TrillianAdmin_UpdateTree_FullMethodName = "/trillian.TrillianAdmin/UpdateTree"
+ TrillianAdmin_DeleteTree_FullMethodName = "/trillian.TrillianAdmin/DeleteTree"
+ TrillianAdmin_UndeleteTree_FullMethodName = "/trillian.TrillianAdmin/UndeleteTree"
+)
+
// TrillianAdminClient is the client API for TrillianAdmin service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
@@ -54,7 +77,7 @@ func NewTrillianAdminClient(cc grpc.ClientConnInterface) TrillianAdminClient {
func (c *trillianAdminClient) ListTrees(ctx context.Context, in *ListTreesRequest, opts ...grpc.CallOption) (*ListTreesResponse, error) {
out := new(ListTreesResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianAdmin/ListTrees", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianAdmin_ListTrees_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -63,7 +86,7 @@ func (c *trillianAdminClient) ListTrees(ctx context.Context, in *ListTreesReques
func (c *trillianAdminClient) GetTree(ctx context.Context, in *GetTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
out := new(Tree)
- err := c.cc.Invoke(ctx, "/trillian.TrillianAdmin/GetTree", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianAdmin_GetTree_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -72,7 +95,7 @@ func (c *trillianAdminClient) GetTree(ctx context.Context, in *GetTreeRequest, o
func (c *trillianAdminClient) CreateTree(ctx context.Context, in *CreateTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
out := new(Tree)
- err := c.cc.Invoke(ctx, "/trillian.TrillianAdmin/CreateTree", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianAdmin_CreateTree_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -81,7 +104,7 @@ func (c *trillianAdminClient) CreateTree(ctx context.Context, in *CreateTreeRequ
func (c *trillianAdminClient) UpdateTree(ctx context.Context, in *UpdateTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
out := new(Tree)
- err := c.cc.Invoke(ctx, "/trillian.TrillianAdmin/UpdateTree", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianAdmin_UpdateTree_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -90,7 +113,7 @@ func (c *trillianAdminClient) UpdateTree(ctx context.Context, in *UpdateTreeRequ
func (c *trillianAdminClient) DeleteTree(ctx context.Context, in *DeleteTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
out := new(Tree)
- err := c.cc.Invoke(ctx, "/trillian.TrillianAdmin/DeleteTree", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianAdmin_DeleteTree_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -99,7 +122,7 @@ func (c *trillianAdminClient) DeleteTree(ctx context.Context, in *DeleteTreeRequ
func (c *trillianAdminClient) UndeleteTree(ctx context.Context, in *UndeleteTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
out := new(Tree)
- err := c.cc.Invoke(ctx, "/trillian.TrillianAdmin/UndeleteTree", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianAdmin_UndeleteTree_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -176,7 +199,7 @@ func _TrillianAdmin_ListTrees_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianAdmin/ListTrees",
+ FullMethod: TrillianAdmin_ListTrees_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianAdminServer).ListTrees(ctx, req.(*ListTreesRequest))
@@ -194,7 +217,7 @@ func _TrillianAdmin_GetTree_Handler(srv interface{}, ctx context.Context, dec fu
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianAdmin/GetTree",
+ FullMethod: TrillianAdmin_GetTree_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianAdminServer).GetTree(ctx, req.(*GetTreeRequest))
@@ -212,7 +235,7 @@ func _TrillianAdmin_CreateTree_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianAdmin/CreateTree",
+ FullMethod: TrillianAdmin_CreateTree_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianAdminServer).CreateTree(ctx, req.(*CreateTreeRequest))
@@ -230,7 +253,7 @@ func _TrillianAdmin_UpdateTree_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianAdmin/UpdateTree",
+ FullMethod: TrillianAdmin_UpdateTree_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianAdminServer).UpdateTree(ctx, req.(*UpdateTreeRequest))
@@ -248,7 +271,7 @@ func _TrillianAdmin_DeleteTree_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianAdmin/DeleteTree",
+ FullMethod: TrillianAdmin_DeleteTree_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianAdminServer).DeleteTree(ctx, req.(*DeleteTreeRequest))
@@ -266,7 +289,7 @@ func _TrillianAdmin_UndeleteTree_Handler(srv interface{}, ctx context.Context, d
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianAdmin/UndeleteTree",
+ FullMethod: TrillianAdmin_UndeleteTree_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianAdminServer).UndeleteTree(ctx, req.(*UndeleteTreeRequest))
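Note on the hunks above: protoc-gen-go-grpc v1.3.0 emits exported `*_FullMethodName` constants and the generated client and handlers now reference them instead of repeating the `"/trillian.TrillianAdmin/..."` string literals. A small, hypothetical usage sketch — only the `TrillianAdmin_*_FullMethodName` constant comes from the generated code; the interceptor itself is an illustration:

```go
// Sketch only: match RPCs via the generated constants instead of string literals.
package sketch

import (
	"context"
	"log"

	"github.com/google/trillian"
	"google.golang.org/grpc"
)

// adminAuditInterceptor logs DeleteTree calls; it would be registered with
// grpc.WithUnaryInterceptor when dialing the admin server.
func adminAuditInterceptor(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	if method == trillian.TrillianAdmin_DeleteTree_FullMethodName {
		log.Printf("audit: DeleteTree request: %v", req)
	}
	return invoker(ctx, method, req, reply, cc, opts...)
}
```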
diff --git a/vendor/github.com/google/trillian/trillian_log_api.pb.go b/vendor/github.com/google/trillian/trillian_log_api.pb.go
index 738e46e437..c8cb663f21 100644
--- a/vendor/github.com/google/trillian/trillian_log_api.pb.go
+++ b/vendor/github.com/google/trillian/trillian_log_api.pb.go
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.30.0
// protoc v3.20.1
// source: trillian_log_api.proto
@@ -1225,17 +1225,17 @@ type QueuedLogLeaf struct {
unknownFields protoimpl.UnknownFields
// The leaf as it was stored by Trillian. Empty unless `status.code` is:
- // - `google.rpc.OK`: the `leaf` data is the same as in the request.
- // - `google.rpc.ALREADY_EXISTS` or 'google.rpc.FAILED_PRECONDITION`: the
- // `leaf` is the conflicting one already in the log.
+ // - `google.rpc.OK`: the `leaf` data is the same as in the request.
+ // - `google.rpc.ALREADY_EXISTS` or 'google.rpc.FAILED_PRECONDITION`: the
+ // `leaf` is the conflicting one already in the log.
Leaf *LogLeaf `protobuf:"bytes,1,opt,name=leaf,proto3" json:"leaf,omitempty"`
// The status of adding the leaf.
- // - `google.rpc.OK`: successfully added.
- // - `google.rpc.ALREADY_EXISTS`: the leaf is a duplicate of an already
- // existing one. Either `leaf_identity_hash` is the same in the `LOG`
- // mode, or `leaf_index` in the `PREORDERED_LOG`.
- // - `google.rpc.FAILED_PRECONDITION`: A conflicting entry is already
- // present in the log, e.g., same `leaf_index` but different `leaf_data`.
+ // - `google.rpc.OK`: successfully added.
+ // - `google.rpc.ALREADY_EXISTS`: the leaf is a duplicate of an already
+ // existing one. Either `leaf_identity_hash` is the same in the `LOG`
+ // mode, or `leaf_index` in the `PREORDERED_LOG`.
+ // - `google.rpc.FAILED_PRECONDITION`: A conflicting entry is already
+ // present in the log, e.g., same `leaf_index` but different `leaf_data`.
Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
}
@@ -1336,7 +1336,6 @@ type LogLeaf struct {
// whereas the Merkle leaf hash encompasses both the certificate and its
// submission time -- allowing duplicate certificates to be detected.
//
- //
// Continuing the CT example, for a CT mirror personality (which must allow
// dupes since the source log could contain them), the part of the
// personality which fetches and submits the entries might set
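Note on the retabbed comments above: they spell out the `QueuedLogLeaf` status semantics — `OK` means the leaf was stored as submitted, while `ALREADY_EXISTS` and `FAILED_PRECONDITION` mean `leaf` carries the conflicting entry already in the log. A hedged sketch of how a caller might act on those codes (the helper is an assumption, not code from this change):

```go
// Sketch only: act on the QueuedLogLeaf status codes documented above.
package sketch

import (
	"fmt"

	"github.com/google/trillian"
	"google.golang.org/grpc/codes"
)

func checkQueuedLeaf(q *trillian.QueuedLogLeaf) error {
	switch codes.Code(q.GetStatus().GetCode()) {
	case codes.OK:
		return nil // stored; leaf data matches the request
	case codes.AlreadyExists:
		return nil // duplicate; q.Leaf is the entry already in the log
	case codes.FailedPrecondition:
		return fmt.Errorf("conflicting entry already in the log: %v", q.GetLeaf())
	default:
		return fmt.Errorf("queueing failed: %s", q.GetStatus().GetMessage())
	}
}
```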
diff --git a/vendor/github.com/google/trillian/trillian_log_api_grpc.pb.go b/vendor/github.com/google/trillian/trillian_log_api_grpc.pb.go
index 32e2ff8b39..679b9c525c 100644
--- a/vendor/github.com/google/trillian/trillian_log_api_grpc.pb.go
+++ b/vendor/github.com/google/trillian/trillian_log_api_grpc.pb.go
@@ -1,6 +1,20 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.2.0
+// - protoc-gen-go-grpc v1.3.0
// - protoc v3.20.1
// source: trillian_log_api.proto
@@ -18,6 +32,18 @@ import (
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
+const (
+ TrillianLog_QueueLeaf_FullMethodName = "/trillian.TrillianLog/QueueLeaf"
+ TrillianLog_GetInclusionProof_FullMethodName = "/trillian.TrillianLog/GetInclusionProof"
+ TrillianLog_GetInclusionProofByHash_FullMethodName = "/trillian.TrillianLog/GetInclusionProofByHash"
+ TrillianLog_GetConsistencyProof_FullMethodName = "/trillian.TrillianLog/GetConsistencyProof"
+ TrillianLog_GetLatestSignedLogRoot_FullMethodName = "/trillian.TrillianLog/GetLatestSignedLogRoot"
+ TrillianLog_GetEntryAndProof_FullMethodName = "/trillian.TrillianLog/GetEntryAndProof"
+ TrillianLog_InitLog_FullMethodName = "/trillian.TrillianLog/InitLog"
+ TrillianLog_AddSequencedLeaves_FullMethodName = "/trillian.TrillianLog/AddSequencedLeaves"
+ TrillianLog_GetLeavesByRange_FullMethodName = "/trillian.TrillianLog/GetLeavesByRange"
+)
+
// TrillianLogClient is the client API for TrillianLog service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
@@ -78,7 +104,7 @@ func NewTrillianLogClient(cc grpc.ClientConnInterface) TrillianLogClient {
func (c *trillianLogClient) QueueLeaf(ctx context.Context, in *QueueLeafRequest, opts ...grpc.CallOption) (*QueueLeafResponse, error) {
out := new(QueueLeafResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/QueueLeaf", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianLog_QueueLeaf_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -87,7 +113,7 @@ func (c *trillianLogClient) QueueLeaf(ctx context.Context, in *QueueLeafRequest,
func (c *trillianLogClient) GetInclusionProof(ctx context.Context, in *GetInclusionProofRequest, opts ...grpc.CallOption) (*GetInclusionProofResponse, error) {
out := new(GetInclusionProofResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/GetInclusionProof", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianLog_GetInclusionProof_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -96,7 +122,7 @@ func (c *trillianLogClient) GetInclusionProof(ctx context.Context, in *GetInclus
func (c *trillianLogClient) GetInclusionProofByHash(ctx context.Context, in *GetInclusionProofByHashRequest, opts ...grpc.CallOption) (*GetInclusionProofByHashResponse, error) {
out := new(GetInclusionProofByHashResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/GetInclusionProofByHash", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianLog_GetInclusionProofByHash_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -105,7 +131,7 @@ func (c *trillianLogClient) GetInclusionProofByHash(ctx context.Context, in *Get
func (c *trillianLogClient) GetConsistencyProof(ctx context.Context, in *GetConsistencyProofRequest, opts ...grpc.CallOption) (*GetConsistencyProofResponse, error) {
out := new(GetConsistencyProofResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/GetConsistencyProof", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianLog_GetConsistencyProof_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -114,7 +140,7 @@ func (c *trillianLogClient) GetConsistencyProof(ctx context.Context, in *GetCons
func (c *trillianLogClient) GetLatestSignedLogRoot(ctx context.Context, in *GetLatestSignedLogRootRequest, opts ...grpc.CallOption) (*GetLatestSignedLogRootResponse, error) {
out := new(GetLatestSignedLogRootResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/GetLatestSignedLogRoot", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianLog_GetLatestSignedLogRoot_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -123,7 +149,7 @@ func (c *trillianLogClient) GetLatestSignedLogRoot(ctx context.Context, in *GetL
func (c *trillianLogClient) GetEntryAndProof(ctx context.Context, in *GetEntryAndProofRequest, opts ...grpc.CallOption) (*GetEntryAndProofResponse, error) {
out := new(GetEntryAndProofResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/GetEntryAndProof", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianLog_GetEntryAndProof_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -132,7 +158,7 @@ func (c *trillianLogClient) GetEntryAndProof(ctx context.Context, in *GetEntryAn
func (c *trillianLogClient) InitLog(ctx context.Context, in *InitLogRequest, opts ...grpc.CallOption) (*InitLogResponse, error) {
out := new(InitLogResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/InitLog", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianLog_InitLog_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -141,7 +167,7 @@ func (c *trillianLogClient) InitLog(ctx context.Context, in *InitLogRequest, opt
func (c *trillianLogClient) AddSequencedLeaves(ctx context.Context, in *AddSequencedLeavesRequest, opts ...grpc.CallOption) (*AddSequencedLeavesResponse, error) {
out := new(AddSequencedLeavesResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/AddSequencedLeaves", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianLog_AddSequencedLeaves_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -150,7 +176,7 @@ func (c *trillianLogClient) AddSequencedLeaves(ctx context.Context, in *AddSeque
func (c *trillianLogClient) GetLeavesByRange(ctx context.Context, in *GetLeavesByRangeRequest, opts ...grpc.CallOption) (*GetLeavesByRangeResponse, error) {
out := new(GetLeavesByRangeResponse)
- err := c.cc.Invoke(ctx, "/trillian.TrillianLog/GetLeavesByRange", in, out, opts...)
+ err := c.cc.Invoke(ctx, TrillianLog_GetLeavesByRange_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -260,7 +286,7 @@ func _TrillianLog_QueueLeaf_Handler(srv interface{}, ctx context.Context, dec fu
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianLog/QueueLeaf",
+ FullMethod: TrillianLog_QueueLeaf_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).QueueLeaf(ctx, req.(*QueueLeafRequest))
@@ -278,7 +304,7 @@ func _TrillianLog_GetInclusionProof_Handler(srv interface{}, ctx context.Context
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianLog/GetInclusionProof",
+ FullMethod: TrillianLog_GetInclusionProof_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).GetInclusionProof(ctx, req.(*GetInclusionProofRequest))
@@ -296,7 +322,7 @@ func _TrillianLog_GetInclusionProofByHash_Handler(srv interface{}, ctx context.C
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianLog/GetInclusionProofByHash",
+ FullMethod: TrillianLog_GetInclusionProofByHash_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).GetInclusionProofByHash(ctx, req.(*GetInclusionProofByHashRequest))
@@ -314,7 +340,7 @@ func _TrillianLog_GetConsistencyProof_Handler(srv interface{}, ctx context.Conte
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianLog/GetConsistencyProof",
+ FullMethod: TrillianLog_GetConsistencyProof_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).GetConsistencyProof(ctx, req.(*GetConsistencyProofRequest))
@@ -332,7 +358,7 @@ func _TrillianLog_GetLatestSignedLogRoot_Handler(srv interface{}, ctx context.Co
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianLog/GetLatestSignedLogRoot",
+ FullMethod: TrillianLog_GetLatestSignedLogRoot_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).GetLatestSignedLogRoot(ctx, req.(*GetLatestSignedLogRootRequest))
@@ -350,7 +376,7 @@ func _TrillianLog_GetEntryAndProof_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianLog/GetEntryAndProof",
+ FullMethod: TrillianLog_GetEntryAndProof_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).GetEntryAndProof(ctx, req.(*GetEntryAndProofRequest))
@@ -368,7 +394,7 @@ func _TrillianLog_InitLog_Handler(srv interface{}, ctx context.Context, dec func
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianLog/InitLog",
+ FullMethod: TrillianLog_InitLog_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).InitLog(ctx, req.(*InitLogRequest))
@@ -386,7 +412,7 @@ func _TrillianLog_AddSequencedLeaves_Handler(srv interface{}, ctx context.Contex
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianLog/AddSequencedLeaves",
+ FullMethod: TrillianLog_AddSequencedLeaves_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).AddSequencedLeaves(ctx, req.(*AddSequencedLeavesRequest))
@@ -404,7 +430,7 @@ func _TrillianLog_GetLeavesByRange_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/trillian.TrillianLog/GetLeavesByRange",
+ FullMethod: TrillianLog_GetLeavesByRange_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).GetLeavesByRange(ctx, req.(*GetLeavesByRangeRequest))
diff --git a/vendor/github.com/leodido/go-urn/.gitignore b/vendor/github.com/leodido/go-urn/.gitignore
deleted file mode 100644
index 89d4bc55dc..0000000000
--- a/vendor/github.com/leodido/go-urn/.gitignore
+++ /dev/null
@@ -1,12 +0,0 @@
-*.exe
-*.dll
-*.so
-*.dylib
-
-*.test
-
-*.out
-*.txt
-
-vendor/
-/removecomments
\ No newline at end of file
diff --git a/vendor/github.com/leodido/go-urn/LICENSE b/vendor/github.com/leodido/go-urn/LICENSE
deleted file mode 100644
index 8c3504a5a9..0000000000
--- a/vendor/github.com/leodido/go-urn/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2018 Leonardo Di Donato
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/leodido/go-urn/README.md b/vendor/github.com/leodido/go-urn/README.md
deleted file mode 100644
index 731eecbb5f..0000000000
--- a/vendor/github.com/leodido/go-urn/README.md
+++ /dev/null
@@ -1,81 +0,0 @@
-[![Build](https://img.shields.io/circleci/build/github/leodido/go-urn?style=for-the-badge)](https://app.circleci.com/pipelines/github/leodido/go-urn) [![Coverage](https://img.shields.io/codecov/c/github/leodido/go-urn.svg?style=for-the-badge)](https://codecov.io/gh/leodido/go-urn) [![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg?style=for-the-badge)](https://godoc.org/github.com/leodido/go-urn)
-
-**A parser for URNs**.
-
-> As seen on [RFC 2141](https://tools.ietf.org/html/rfc2141#ref-1).
-
-[API documentation](https://godoc.org/github.com/leodido/go-urn).
-
-## Installation
-
-```
-go get github.com/leodido/go-urn
-```
-
-## Performances
-
-This implementation results to be really fast.
-
-Usually below ½ microsecond on my machine[1](#mymachine).
-
-Notice it also performs, while parsing:
-
-1. fine-grained and informative erroring
-2. specific-string normalization
-
-```
-ok/00/urn:a:b______________________________________/-4 20000000 265 ns/op 182 B/op 6 allocs/op
-ok/01/URN:foo:a123,456_____________________________/-4 30000000 296 ns/op 200 B/op 6 allocs/op
-ok/02/urn:foo:a123%2c456___________________________/-4 20000000 331 ns/op 208 B/op 6 allocs/op
-ok/03/urn:ietf:params:scim:schemas:core:2.0:User___/-4 20000000 430 ns/op 280 B/op 6 allocs/op
-ok/04/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 411 ns/op 312 B/op 6 allocs/op
-ok/05/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 472 ns/op 344 B/op 6 allocs/op
-ok/06/urn:burnout:nss______________________________/-4 30000000 257 ns/op 192 B/op 6 allocs/op
-ok/07/urn:abcdefghilmnopqrstuvzabcdefghilm:x_______/-4 20000000 375 ns/op 213 B/op 6 allocs/op
-ok/08/urn:urnurnurn:urn____________________________/-4 30000000 265 ns/op 197 B/op 6 allocs/op
-ok/09/urn:ciao:@!=%2c(xyz)+a,b.*@g=$_'_____________/-4 20000000 307 ns/op 248 B/op 6 allocs/op
-ok/10/URN:x:abc%1dz%2f%3az_________________________/-4 30000000 259 ns/op 212 B/op 6 allocs/op
-no/11/URN:-xxx:x___________________________________/-4 20000000 445 ns/op 320 B/op 6 allocs/op
-no/12/urn::colon:nss_______________________________/-4 20000000 461 ns/op 320 B/op 6 allocs/op
-no/13/urn:abcdefghilmnopqrstuvzabcdefghilmn:specifi/-4 10000000 660 ns/op 320 B/op 6 allocs/op
-no/14/URN:a!?:x____________________________________/-4 20000000 507 ns/op 320 B/op 6 allocs/op
-no/15/urn:urn:NSS__________________________________/-4 20000000 429 ns/op 288 B/op 6 allocs/op
-no/16/urn:white_space:NSS__________________________/-4 20000000 482 ns/op 320 B/op 6 allocs/op
-no/17/urn:concat:no_spaces_________________________/-4 20000000 539 ns/op 328 B/op 7 allocs/op
-no/18/urn:a:/______________________________________/-4 20000000 470 ns/op 320 B/op 7 allocs/op
-no/19/urn:UrN:NSS__________________________________/-4 20000000 399 ns/op 288 B/op 6 allocs/op
-```
-
----
-
-* [1]: Intel Core i7-7600U CPU @ 2.80GHz
-
----
-
-## Example
-```go
-package main
-
-import (
- "fmt"
- "github.com/leodido/go-urn"
-)
-
-func main() {
- var uid = "URN:foo:a123,456"
-
- u, ok := urn.Parse([]byte(uid))
- if !ok {
- panic("error parsing urn")
- }
-
- fmt.Println(u.ID)
- fmt.Println(u.SS)
-
- // Output:
- // foo
- // a123,456
-}
-```
-
-[![Analytics](https://ga-beacon.appspot.com/UA-49657176-1/go-urn?flat)](https://github.com/igrigorik/ga-beacon)
\ No newline at end of file
diff --git a/vendor/github.com/leodido/go-urn/machine.go b/vendor/github.com/leodido/go-urn/machine.go
deleted file mode 100644
index fe5a0cc861..0000000000
--- a/vendor/github.com/leodido/go-urn/machine.go
+++ /dev/null
@@ -1,1691 +0,0 @@
-package urn
-
-import (
- "fmt"
-)
-
-var (
- errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]"
- errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its start) [col %d]"
- errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]"
- errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]"
- errHex = "expecting the specific string hex chars to be well-formed (%%alnum{2}) [col %d]"
- errParse = "parsing error [col %d]"
-)
-
-const start int = 1
-const firstFinal int = 44
-
-const enFail int = 46
-const enMain int = 1
-
-// Machine is the interface representing the FSM
-type Machine interface {
- Error() error
- Parse(input []byte) (*URN, error)
-}
-
-type machine struct {
- data []byte
- cs int
- p, pe, eof, pb int
- err error
- tolower []int
-}
-
-// NewMachine creates a new FSM able to parse RFC 2141 strings.
-func NewMachine() Machine {
- m := &machine{}
-
- return m
-}
-
-// Err returns the error that occurred on the last call to Parse.
-//
-// If the result is nil, then the line was parsed successfully.
-func (m *machine) Error() error {
- return m.err
-}
-
-func (m *machine) text() []byte {
- return m.data[m.pb:m.p]
-}
-
-// Parse parses the input byte array as a RFC 2141 string.
-func (m *machine) Parse(input []byte) (*URN, error) {
- m.data = input
- m.p = 0
- m.pb = 0
- m.pe = len(input)
- m.eof = len(input)
- m.err = nil
- m.tolower = []int{}
- output := &URN{}
-
- {
- m.cs = start
- }
-
- {
- if (m.p) == (m.pe) {
- goto _testEof
- }
- switch m.cs {
- case 1:
- goto stCase1
- case 0:
- goto stCase0
- case 2:
- goto stCase2
- case 3:
- goto stCase3
- case 4:
- goto stCase4
- case 5:
- goto stCase5
- case 6:
- goto stCase6
- case 7:
- goto stCase7
- case 8:
- goto stCase8
- case 9:
- goto stCase9
- case 10:
- goto stCase10
- case 11:
- goto stCase11
- case 12:
- goto stCase12
- case 13:
- goto stCase13
- case 14:
- goto stCase14
- case 15:
- goto stCase15
- case 16:
- goto stCase16
- case 17:
- goto stCase17
- case 18:
- goto stCase18
- case 19:
- goto stCase19
- case 20:
- goto stCase20
- case 21:
- goto stCase21
- case 22:
- goto stCase22
- case 23:
- goto stCase23
- case 24:
- goto stCase24
- case 25:
- goto stCase25
- case 26:
- goto stCase26
- case 27:
- goto stCase27
- case 28:
- goto stCase28
- case 29:
- goto stCase29
- case 30:
- goto stCase30
- case 31:
- goto stCase31
- case 32:
- goto stCase32
- case 33:
- goto stCase33
- case 34:
- goto stCase34
- case 35:
- goto stCase35
- case 36:
- goto stCase36
- case 37:
- goto stCase37
- case 38:
- goto stCase38
- case 44:
- goto stCase44
- case 39:
- goto stCase39
- case 40:
- goto stCase40
- case 45:
- goto stCase45
- case 41:
- goto stCase41
- case 42:
- goto stCase42
- case 43:
- goto stCase43
- case 46:
- goto stCase46
- }
- goto stOut
- stCase1:
- switch (m.data)[(m.p)] {
- case 85:
- goto tr1
- case 117:
- goto tr1
- }
- goto tr0
- tr0:
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- goto st0
- tr3:
-
- m.err = fmt.Errorf(errPrefix, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- goto st0
- tr6:
-
- m.err = fmt.Errorf(errIdentifier, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- goto st0
- tr41:
-
- m.err = fmt.Errorf(errSpecificString, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- goto st0
- tr44:
-
- m.err = fmt.Errorf(errHex, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errSpecificString, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- goto st0
- tr50:
-
- m.err = fmt.Errorf(errPrefix, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errIdentifier, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- goto st0
- tr52:
-
- m.err = fmt.Errorf(errNoUrnWithinID, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errIdentifier, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- goto st0
- stCase0:
- st0:
- m.cs = 0
- goto _out
- tr1:
-
- m.pb = m.p
-
- goto st2
- st2:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof2
- }
- stCase2:
- switch (m.data)[(m.p)] {
- case 82:
- goto st3
- case 114:
- goto st3
- }
- goto tr0
- st3:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof3
- }
- stCase3:
- switch (m.data)[(m.p)] {
- case 78:
- goto st4
- case 110:
- goto st4
- }
- goto tr3
- st4:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof4
- }
- stCase4:
- if (m.data)[(m.p)] == 58 {
- goto tr5
- }
- goto tr0
- tr5:
-
- output.prefix = string(m.text())
-
- goto st5
- st5:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof5
- }
- stCase5:
- switch (m.data)[(m.p)] {
- case 85:
- goto tr8
- case 117:
- goto tr8
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto tr7
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto tr7
- }
- default:
- goto tr7
- }
- goto tr6
- tr7:
-
- m.pb = m.p
-
- goto st6
- st6:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof6
- }
- stCase6:
- switch (m.data)[(m.p)] {
- case 45:
- goto st7
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st7
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st7
- }
- default:
- goto st7
- }
- goto tr6
- st7:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof7
- }
- stCase7:
- switch (m.data)[(m.p)] {
- case 45:
- goto st8
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st8
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st8
- }
- default:
- goto st8
- }
- goto tr6
- st8:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof8
- }
- stCase8:
- switch (m.data)[(m.p)] {
- case 45:
- goto st9
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st9
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st9
- }
- default:
- goto st9
- }
- goto tr6
- st9:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof9
- }
- stCase9:
- switch (m.data)[(m.p)] {
- case 45:
- goto st10
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st10
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st10
- }
- default:
- goto st10
- }
- goto tr6
- st10:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof10
- }
- stCase10:
- switch (m.data)[(m.p)] {
- case 45:
- goto st11
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st11
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st11
- }
- default:
- goto st11
- }
- goto tr6
- st11:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof11
- }
- stCase11:
- switch (m.data)[(m.p)] {
- case 45:
- goto st12
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st12
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st12
- }
- default:
- goto st12
- }
- goto tr6
- st12:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof12
- }
- stCase12:
- switch (m.data)[(m.p)] {
- case 45:
- goto st13
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st13
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st13
- }
- default:
- goto st13
- }
- goto tr6
- st13:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof13
- }
- stCase13:
- switch (m.data)[(m.p)] {
- case 45:
- goto st14
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st14
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st14
- }
- default:
- goto st14
- }
- goto tr6
- st14:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof14
- }
- stCase14:
- switch (m.data)[(m.p)] {
- case 45:
- goto st15
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st15
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st15
- }
- default:
- goto st15
- }
- goto tr6
- st15:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof15
- }
- stCase15:
- switch (m.data)[(m.p)] {
- case 45:
- goto st16
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st16
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st16
- }
- default:
- goto st16
- }
- goto tr6
- st16:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof16
- }
- stCase16:
- switch (m.data)[(m.p)] {
- case 45:
- goto st17
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st17
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st17
- }
- default:
- goto st17
- }
- goto tr6
- st17:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof17
- }
- stCase17:
- switch (m.data)[(m.p)] {
- case 45:
- goto st18
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st18
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st18
- }
- default:
- goto st18
- }
- goto tr6
- st18:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof18
- }
- stCase18:
- switch (m.data)[(m.p)] {
- case 45:
- goto st19
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st19
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st19
- }
- default:
- goto st19
- }
- goto tr6
- st19:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof19
- }
- stCase19:
- switch (m.data)[(m.p)] {
- case 45:
- goto st20
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st20
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st20
- }
- default:
- goto st20
- }
- goto tr6
- st20:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof20
- }
- stCase20:
- switch (m.data)[(m.p)] {
- case 45:
- goto st21
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st21
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st21
- }
- default:
- goto st21
- }
- goto tr6
- st21:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof21
- }
- stCase21:
- switch (m.data)[(m.p)] {
- case 45:
- goto st22
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st22
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st22
- }
- default:
- goto st22
- }
- goto tr6
- st22:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof22
- }
- stCase22:
- switch (m.data)[(m.p)] {
- case 45:
- goto st23
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st23
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st23
- }
- default:
- goto st23
- }
- goto tr6
- st23:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof23
- }
- stCase23:
- switch (m.data)[(m.p)] {
- case 45:
- goto st24
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st24
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st24
- }
- default:
- goto st24
- }
- goto tr6
- st24:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof24
- }
- stCase24:
- switch (m.data)[(m.p)] {
- case 45:
- goto st25
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st25
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st25
- }
- default:
- goto st25
- }
- goto tr6
- st25:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof25
- }
- stCase25:
- switch (m.data)[(m.p)] {
- case 45:
- goto st26
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st26
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st26
- }
- default:
- goto st26
- }
- goto tr6
- st26:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof26
- }
- stCase26:
- switch (m.data)[(m.p)] {
- case 45:
- goto st27
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st27
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st27
- }
- default:
- goto st27
- }
- goto tr6
- st27:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof27
- }
- stCase27:
- switch (m.data)[(m.p)] {
- case 45:
- goto st28
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st28
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st28
- }
- default:
- goto st28
- }
- goto tr6
- st28:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof28
- }
- stCase28:
- switch (m.data)[(m.p)] {
- case 45:
- goto st29
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st29
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st29
- }
- default:
- goto st29
- }
- goto tr6
- st29:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof29
- }
- stCase29:
- switch (m.data)[(m.p)] {
- case 45:
- goto st30
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st30
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st30
- }
- default:
- goto st30
- }
- goto tr6
- st30:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof30
- }
- stCase30:
- switch (m.data)[(m.p)] {
- case 45:
- goto st31
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st31
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st31
- }
- default:
- goto st31
- }
- goto tr6
- st31:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof31
- }
- stCase31:
- switch (m.data)[(m.p)] {
- case 45:
- goto st32
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st32
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st32
- }
- default:
- goto st32
- }
- goto tr6
- st32:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof32
- }
- stCase32:
- switch (m.data)[(m.p)] {
- case 45:
- goto st33
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st33
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st33
- }
- default:
- goto st33
- }
- goto tr6
- st33:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof33
- }
- stCase33:
- switch (m.data)[(m.p)] {
- case 45:
- goto st34
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st34
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st34
- }
- default:
- goto st34
- }
- goto tr6
- st34:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof34
- }
- stCase34:
- switch (m.data)[(m.p)] {
- case 45:
- goto st35
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st35
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st35
- }
- default:
- goto st35
- }
- goto tr6
- st35:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof35
- }
- stCase35:
- switch (m.data)[(m.p)] {
- case 45:
- goto st36
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st36
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st36
- }
- default:
- goto st36
- }
- goto tr6
- st36:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof36
- }
- stCase36:
- switch (m.data)[(m.p)] {
- case 45:
- goto st37
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st37
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st37
- }
- default:
- goto st37
- }
- goto tr6
- st37:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof37
- }
- stCase37:
- if (m.data)[(m.p)] == 58 {
- goto tr10
- }
- goto tr6
- tr10:
-
- output.ID = string(m.text())
-
- goto st38
- st38:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof38
- }
- stCase38:
- switch (m.data)[(m.p)] {
- case 33:
- goto tr42
- case 36:
- goto tr42
- case 37:
- goto tr43
- case 61:
- goto tr42
- case 95:
- goto tr42
- }
- switch {
- case (m.data)[(m.p)] < 48:
- if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
- goto tr42
- }
- case (m.data)[(m.p)] > 59:
- switch {
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto tr42
- }
- case (m.data)[(m.p)] >= 64:
- goto tr42
- }
- default:
- goto tr42
- }
- goto tr41
- tr42:
-
- m.pb = m.p
-
- goto st44
- st44:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof44
- }
- stCase44:
- switch (m.data)[(m.p)] {
- case 33:
- goto st44
- case 36:
- goto st44
- case 37:
- goto st39
- case 61:
- goto st44
- case 95:
- goto st44
- }
- switch {
- case (m.data)[(m.p)] < 48:
- if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
- goto st44
- }
- case (m.data)[(m.p)] > 59:
- switch {
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st44
- }
- case (m.data)[(m.p)] >= 64:
- goto st44
- }
- default:
- goto st44
- }
- goto tr41
- tr43:
-
- m.pb = m.p
-
- goto st39
- st39:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof39
- }
- stCase39:
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st40
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st40
- }
- default:
- goto tr46
- }
- goto tr44
- tr46:
-
- m.tolower = append(m.tolower, m.p-m.pb)
-
- goto st40
- st40:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof40
- }
- stCase40:
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st45
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st45
- }
- default:
- goto tr48
- }
- goto tr44
- tr48:
-
- m.tolower = append(m.tolower, m.p-m.pb)
-
- goto st45
- st45:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof45
- }
- stCase45:
- switch (m.data)[(m.p)] {
- case 33:
- goto st44
- case 36:
- goto st44
- case 37:
- goto st39
- case 61:
- goto st44
- case 95:
- goto st44
- }
- switch {
- case (m.data)[(m.p)] < 48:
- if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
- goto st44
- }
- case (m.data)[(m.p)] > 59:
- switch {
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st44
- }
- case (m.data)[(m.p)] >= 64:
- goto st44
- }
- default:
- goto st44
- }
- goto tr44
- tr8:
-
- m.pb = m.p
-
- goto st41
- st41:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof41
- }
- stCase41:
- switch (m.data)[(m.p)] {
- case 45:
- goto st7
- case 58:
- goto tr10
- case 82:
- goto st42
- case 114:
- goto st42
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st7
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st7
- }
- default:
- goto st7
- }
- goto tr6
- st42:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof42
- }
- stCase42:
- switch (m.data)[(m.p)] {
- case 45:
- goto st8
- case 58:
- goto tr10
- case 78:
- goto st43
- case 110:
- goto st43
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st8
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st8
- }
- default:
- goto st8
- }
- goto tr50
- st43:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof43
- }
- stCase43:
- if (m.data)[(m.p)] == 45 {
- goto st9
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st9
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st9
- }
- default:
- goto st9
- }
- goto tr52
- st46:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof46
- }
- stCase46:
- switch (m.data)[(m.p)] {
- case 10:
- goto st0
- case 13:
- goto st0
- }
- goto st46
- stOut:
- _testEof2:
- m.cs = 2
- goto _testEof
- _testEof3:
- m.cs = 3
- goto _testEof
- _testEof4:
- m.cs = 4
- goto _testEof
- _testEof5:
- m.cs = 5
- goto _testEof
- _testEof6:
- m.cs = 6
- goto _testEof
- _testEof7:
- m.cs = 7
- goto _testEof
- _testEof8:
- m.cs = 8
- goto _testEof
- _testEof9:
- m.cs = 9
- goto _testEof
- _testEof10:
- m.cs = 10
- goto _testEof
- _testEof11:
- m.cs = 11
- goto _testEof
- _testEof12:
- m.cs = 12
- goto _testEof
- _testEof13:
- m.cs = 13
- goto _testEof
- _testEof14:
- m.cs = 14
- goto _testEof
- _testEof15:
- m.cs = 15
- goto _testEof
- _testEof16:
- m.cs = 16
- goto _testEof
- _testEof17:
- m.cs = 17
- goto _testEof
- _testEof18:
- m.cs = 18
- goto _testEof
- _testEof19:
- m.cs = 19
- goto _testEof
- _testEof20:
- m.cs = 20
- goto _testEof
- _testEof21:
- m.cs = 21
- goto _testEof
- _testEof22:
- m.cs = 22
- goto _testEof
- _testEof23:
- m.cs = 23
- goto _testEof
- _testEof24:
- m.cs = 24
- goto _testEof
- _testEof25:
- m.cs = 25
- goto _testEof
- _testEof26:
- m.cs = 26
- goto _testEof
- _testEof27:
- m.cs = 27
- goto _testEof
- _testEof28:
- m.cs = 28
- goto _testEof
- _testEof29:
- m.cs = 29
- goto _testEof
- _testEof30:
- m.cs = 30
- goto _testEof
- _testEof31:
- m.cs = 31
- goto _testEof
- _testEof32:
- m.cs = 32
- goto _testEof
- _testEof33:
- m.cs = 33
- goto _testEof
- _testEof34:
- m.cs = 34
- goto _testEof
- _testEof35:
- m.cs = 35
- goto _testEof
- _testEof36:
- m.cs = 36
- goto _testEof
- _testEof37:
- m.cs = 37
- goto _testEof
- _testEof38:
- m.cs = 38
- goto _testEof
- _testEof44:
- m.cs = 44
- goto _testEof
- _testEof39:
- m.cs = 39
- goto _testEof
- _testEof40:
- m.cs = 40
- goto _testEof
- _testEof45:
- m.cs = 45
- goto _testEof
- _testEof41:
- m.cs = 41
- goto _testEof
- _testEof42:
- m.cs = 42
- goto _testEof
- _testEof43:
- m.cs = 43
- goto _testEof
- _testEof46:
- m.cs = 46
- goto _testEof
-
- _testEof:
- {
- }
- if (m.p) == (m.eof) {
- switch m.cs {
- case 44, 45:
-
- raw := m.text()
- output.SS = string(raw)
- // Iterate upper letters lowering them
- for _, i := range m.tolower {
- raw[i] = raw[i] + 32
- }
- output.norm = string(raw)
-
- case 1, 2, 4:
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- case 3:
-
- m.err = fmt.Errorf(errPrefix, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- case 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 41:
-
- m.err = fmt.Errorf(errIdentifier, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- case 38:
-
- m.err = fmt.Errorf(errSpecificString, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- case 42:
-
- m.err = fmt.Errorf(errPrefix, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errIdentifier, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- case 43:
-
- m.err = fmt.Errorf(errNoUrnWithinID, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errIdentifier, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- case 39, 40:
-
- m.err = fmt.Errorf(errHex, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errSpecificString, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- }
- }
-
- _out:
- {
- }
- }
-
- if m.cs < firstFinal || m.cs == enFail {
- return nil, m.err
- }
-
- return output, nil
-}
diff --git a/vendor/github.com/leodido/go-urn/machine.go.rl b/vendor/github.com/leodido/go-urn/machine.go.rl
deleted file mode 100644
index 3bc05a651a..0000000000
--- a/vendor/github.com/leodido/go-urn/machine.go.rl
+++ /dev/null
@@ -1,159 +0,0 @@
-package urn
-
-import (
- "fmt"
-)
-
-var (
- errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]"
- errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its start) [col %d]"
- errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]"
- errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]"
- errHex = "expecting the specific string hex chars to be well-formed (%%alnum{2}) [col %d]"
- errParse = "parsing error [col %d]"
-)
-
-%%{
-machine urn;
-
-# unsigned alphabet
-alphtype uint8;
-
-action mark {
- m.pb = m.p
-}
-
-action tolower {
- m.tolower = append(m.tolower, m.p - m.pb)
-}
-
-action set_pre {
- output.prefix = string(m.text())
-}
-
-action set_nid {
- output.ID = string(m.text())
-}
-
-action set_nss {
- raw := m.text()
- output.SS = string(raw)
- // Iterate upper letters lowering them
- for _, i := range m.tolower {
- raw[i] = raw[i] + 32
- }
- output.norm = string(raw)
-}
-
-action err_pre {
- m.err = fmt.Errorf(errPrefix, m.p)
- fhold;
- fgoto fail;
-}
-
-action err_nid {
- m.err = fmt.Errorf(errIdentifier, m.p)
- fhold;
- fgoto fail;
-}
-
-action err_nss {
- m.err = fmt.Errorf(errSpecificString, m.p)
- fhold;
- fgoto fail;
-}
-
-action err_urn {
- m.err = fmt.Errorf(errNoUrnWithinID, m.p)
- fhold;
- fgoto fail;
-}
-
-action err_hex {
- m.err = fmt.Errorf(errHex, m.p)
- fhold;
- fgoto fail;
-}
-
-action err_parse {
- m.err = fmt.Errorf(errParse, m.p)
- fhold;
- fgoto fail;
-}
-
-pre = ([uU][rR][nN] @err(err_pre)) >mark %set_pre;
-
-nid = (alnum >mark (alnum | '-'){0,31}) %set_nid;
-
-hex = '%' (digit | lower | upper >tolower){2} $err(err_hex);
-
-sss = (alnum | [()+,\-.:=@;$_!*']);
-
-nss = (sss | hex)+ $err(err_nss);
-
-fail := (any - [\n\r])* @err{ fgoto main; };
-
-main := (pre ':' (nid - pre %err(err_urn)) $err(err_nid) ':' nss >mark %set_nss) $err(err_parse);
-
-}%%
-
-%% write data noerror noprefix;
-
-// Machine is the interface representing the FSM
-type Machine interface {
- Error() error
- Parse(input []byte) (*URN, error)
-}
-
-type machine struct {
- data []byte
- cs int
- p, pe, eof, pb int
- err error
- tolower []int
-}
-
-// NewMachine creates a new FSM able to parse RFC 2141 strings.
-func NewMachine() Machine {
- m := &machine{}
-
- %% access m.;
- %% variable p m.p;
- %% variable pe m.pe;
- %% variable eof m.eof;
- %% variable data m.data;
-
- return m
-}
-
-// Err returns the error that occurred on the last call to Parse.
-//
-// If the result is nil, then the line was parsed successfully.
-func (m *machine) Error() error {
- return m.err
-}
-
-func (m *machine) text() []byte {
- return m.data[m.pb:m.p]
-}
-
-// Parse parses the input byte array as a RFC 2141 string.
-func (m *machine) Parse(input []byte) (*URN, error) {
- m.data = input
- m.p = 0
- m.pb = 0
- m.pe = len(input)
- m.eof = len(input)
- m.err = nil
- m.tolower = []int{}
- output := &URN{}
-
- %% write init;
- %% write exec;
-
- if m.cs < first_final || m.cs == en_fail {
- return nil, m.err
- }
-
- return output, nil
-}
diff --git a/vendor/github.com/leodido/go-urn/makefile b/vendor/github.com/leodido/go-urn/makefile
deleted file mode 100644
index d088c044e6..0000000000
--- a/vendor/github.com/leodido/go-urn/makefile
+++ /dev/null
@@ -1,53 +0,0 @@
-SHELL := /bin/bash
-RAGEL := ragel
-GOFMT := go fmt
-
-export GO_TEST=env GOTRACEBACK=all go test $(GO_ARGS)
-
-.PHONY: build
-build: machine.go
-
-.PHONY: clean
-clean:
- @rm -rf docs
- @rm -f machine.go
-
-.PHONY: images
-images: docs/urn.png
-
-.PHONY: removecomments
-removecomments:
- @cd ./tools/removecomments; go build -o ../../removecomments ./main.go
-
-machine.go: machine.go.rl
-
-machine.go: removecomments
-
-machine.go:
- $(RAGEL) -Z -G2 -e -o $@ $<
- @./removecomments $@
- $(MAKE) -s file=$@ snake2camel
- $(GOFMT) $@
-
-docs/urn.dot: machine.go.rl
- @mkdir -p docs
- $(RAGEL) -Z -e -Vp $< -o $@
-
-docs/urn.png: docs/urn.dot
- dot $< -Tpng -o $@
-
-.PHONY: bench
-bench: *_test.go machine.go
- go test -bench=. -benchmem -benchtime=5s ./...
-
-.PHONY: tests
-tests: *_test.go
- $(GO_TEST) ./...
-
-.PHONY: snake2camel
-snake2camel:
- @awk -i inplace '{ \
- while ( match($$0, /(.*)([a-z]+[0-9]*)_([a-zA-Z0-9])(.*)/, cap) ) \
- $$0 = cap[1] cap[2] toupper(cap[3]) cap[4]; \
- print \
- }' $(file)
\ No newline at end of file
diff --git a/vendor/github.com/leodido/go-urn/urn.go b/vendor/github.com/leodido/go-urn/urn.go
deleted file mode 100644
index d51a6c915b..0000000000
--- a/vendor/github.com/leodido/go-urn/urn.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package urn
-
-import (
- "encoding/json"
- "fmt"
- "strings"
-)
-
-const errInvalidURN = "invalid URN: %s"
-
-// URN represents an Uniform Resource Name.
-//
-// The general form represented is:
-//
- // urn:<id>:<ss>
-//
-// Details at https://tools.ietf.org/html/rfc2141.
-type URN struct {
- prefix string // Static prefix. Equal to "urn" when empty.
- ID string // Namespace identifier
- SS string // Namespace specific string
- norm string // Normalized namespace specific string
-}
-
-// Normalize turns the receiving URN into its norm version.
-//
-// Which means: lowercase prefix, lowercase namespace identifier, and immutate namespace specific string chars (except tokens which are lowercased).
-func (u *URN) Normalize() *URN {
- return &URN{
- prefix: "urn",
- ID: strings.ToLower(u.ID),
- SS: u.norm,
- }
-}
-
-// Equal checks the lexical equivalence of the current URN with another one.
-func (u *URN) Equal(x *URN) bool {
- return *u.Normalize() == *x.Normalize()
-}
-
-// String reassembles the URN into a valid URN string.
-//
-// This requires both ID and SS fields to be non-empty.
-// Otherwise it returns an empty string.
-//
-// Default URN prefix is "urn".
-func (u *URN) String() string {
- var res string
- if u.ID != "" && u.SS != "" {
- if u.prefix == "" {
- res += "urn"
- }
- res += u.prefix + ":" + u.ID + ":" + u.SS
- }
-
- return res
-}
-
-// Parse is responsible to create an URN instance from a byte array matching the correct URN syntax.
-func Parse(u []byte) (*URN, bool) {
- urn, err := NewMachine().Parse(u)
- if err != nil {
- return nil, false
- }
-
- return urn, true
-}
-
-// MarshalJSON marshals the URN to JSON string form (e.g. `"urn:oid:1.2.3.4"`).
-func (u URN) MarshalJSON() ([]byte, error) {
- return json.Marshal(u.String())
-}
-
-// MarshalJSON unmarshals a URN from JSON string form (e.g. `"urn:oid:1.2.3.4"`).
-func (u *URN) UnmarshalJSON(bytes []byte) error {
- var str string
- if err := json.Unmarshal(bytes, &str); err != nil {
- return err
- }
- if value, ok := Parse([]byte(str)); !ok {
- return fmt.Errorf(errInvalidURN, str)
- } else {
- *u = *value
- }
- return nil
-}
\ No newline at end of file
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go
index e0ae2cdd31..b2e329427c 100644
--- a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_parameters.go
@@ -30,6 +30,7 @@ import (
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
)
// NewGetLogInfoParams creates a new GetLogInfoParams object,
@@ -76,6 +77,13 @@ GetLogInfoParams contains all the parameters to send to the API endpoint
Typically these are written to a http.Request.
*/
type GetLogInfoParams struct {
+
+ /* Stable.
+
+ Whether to return a stable checkpoint for the active shard
+ */
+ Stable *bool
+
timeout time.Duration
Context context.Context
HTTPClient *http.Client
@@ -93,7 +101,18 @@ func (o *GetLogInfoParams) WithDefaults() *GetLogInfoParams {
//
// All values with no default are reset to their zero value.
func (o *GetLogInfoParams) SetDefaults() {
- // no default values defined for this parameter
+ var (
+ stableDefault = bool(false)
+ )
+
+ val := GetLogInfoParams{
+ Stable: &stableDefault,
+ }
+
+ val.timeout = o.timeout
+ val.Context = o.Context
+ val.HTTPClient = o.HTTPClient
+ *o = val
}
// WithTimeout adds the timeout to the get log info params
@@ -129,6 +148,17 @@ func (o *GetLogInfoParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
+// WithStable adds the stable to the get log info params
+func (o *GetLogInfoParams) WithStable(stable *bool) *GetLogInfoParams {
+ o.SetStable(stable)
+ return o
+}
+
+// SetStable adds the stable to the get log info params
+func (o *GetLogInfoParams) SetStable(stable *bool) {
+ o.Stable = stable
+}
+
// WriteToRequest writes these params to a swagger request
func (o *GetLogInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
@@ -137,6 +167,23 @@ func (o *GetLogInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Re
}
var res []error
+ if o.Stable != nil {
+
+ // query param stable
+ var qrStable bool
+
+ if o.Stable != nil {
+ qrStable = *o.Stable
+ }
+ qStable := swag.FormatBool(qrStable)
+ if qStable != "" {
+
+ if err := r.SetQueryParam("stable", qStable); err != nil {
+ return err
+ }
+ }
+ }
+
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
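The regenerated tlog client adds an optional "stable" query parameter and defaults it to false. A hedged sketch of how a caller might ask the active shard for a stable checkpoint; only NewGetLogInfoParams and WithStable come from this diff, while the Default client facade, the Tlog.GetLogInfo service method and the LogInfo payload fields are assumed from the rest of the generated package:

package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/swag"
	client "github.com/sigstore/rekor/pkg/generated/client"
	"github.com/sigstore/rekor/pkg/generated/client/tlog"
)

func main() {
	// Request a stable checkpoint for the active shard; omitting WithStable
	// keeps the generated default of false.
	params := tlog.NewGetLogInfoParams().WithStable(swag.Bool(true))

	// Default client and GetLogInfo method are assumed from the generated
	// package, not shown in this diff.
	resp, err := client.Default.Tlog.GetLogInfo(params)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("tree size:", swag.Int64Value(resp.GetPayload().TreeSize))
}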
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go
new file mode 100644
index 0000000000..dde562054c
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go
@@ -0,0 +1,210 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// DSSE DSSE envelope
+//
+// swagger:model dsse
+type DSSE struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec DSSESchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *DSSE) Kind() string {
+ return "dsse"
+}
+
+// SetKind sets the kind of this subtype
+func (m *DSSE) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *DSSE) UnmarshalJSON(raw []byte) error {
+ var data struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec DSSESchema `json:"spec"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ var base struct {
+ /* Just the base type fields. Used for unmashalling polymorphic types.*/
+
+ Kind string `json:"kind"`
+ }
+ buf = bytes.NewBuffer(raw)
+ dec = json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&base); err != nil {
+ return err
+ }
+
+ var result DSSE
+
+ if base.Kind != result.Kind() {
+ /* Not the type we're looking for. */
+ return errors.New(422, "invalid kind value: %q", base.Kind)
+ }
+
+ result.APIVersion = data.APIVersion
+ result.Spec = data.Spec
+
+ *m = result
+
+ return nil
+}
+
+// MarshalJSON marshals this object with a polymorphic type to a JSON structure
+func (m DSSE) MarshalJSON() ([]byte, error) {
+ var b1, b2, b3 []byte
+ var err error
+ b1, err = json.Marshal(struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec DSSESchema `json:"spec"`
+ }{
+
+ APIVersion: m.APIVersion,
+
+ Spec: m.Spec,
+ })
+ if err != nil {
+ return nil, err
+ }
+ b2, err = json.Marshal(struct {
+ Kind string `json:"kind"`
+ }{
+
+ Kind: m.Kind(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// Validate validates this dsse
+func (m *DSSE) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAPIVersion(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSpec(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSE) validateAPIVersion(formats strfmt.Registry) error {
+
+ if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSE) validateSpec(formats strfmt.Registry) error {
+
+ if m.Spec == nil {
+ return errors.Required("spec", "body", nil)
+ }
+
+ return nil
+}
+
+// ContextValidate validate this dsse based on the context it is used
+func (m *DSSE) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSE) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSE) UnmarshalBinary(b []byte) error {
+ var res DSSE
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
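The new DSSE model is the polymorphic ProposedEntry subtype for kind "dsse": MarshalJSON injects the discriminator and UnmarshalJSON rejects any other kind. A small round-trip sketch through encoding/json; the spec content is a placeholder, since DSSESchema is an open interface{}:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/swag"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	entry := models.DSSE{
		APIVersion: swag.String("0.0.1"),
		// DSSESchema is interface{}, so any JSON-serializable value works here.
		Spec: map[string]interface{}{"proposedContent": map[string]interface{}{}},
	}

	// MarshalJSON emits apiVersion, spec and the "kind":"dsse" discriminator.
	raw, err := json.Marshal(entry)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(raw))

	// UnmarshalJSON checks the discriminator and fails on any kind other than "dsse".
	var decoded models.DSSE
	if err := json.Unmarshal(raw, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.Kind(), swag.StringValue(decoded.APIVersion))
}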
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go
new file mode 100644
index 0000000000..7795626438
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go
@@ -0,0 +1,29 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// DSSESchema DSSE Schema
+//
+// log entry schema for dsse envelopes
+//
+// swagger:model dsseSchema
+type DSSESchema interface{}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go
new file mode 100644
index 0000000000..a28dd52446
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go
@@ -0,0 +1,665 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "encoding/json"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// DSSEV001Schema DSSE v0.0.1 Schema
+//
+// # Schema for DSSE envelopes
+//
+// swagger:model dsseV001Schema
+type DSSEV001Schema struct {
+
+ // envelope hash
+ EnvelopeHash *DSSEV001SchemaEnvelopeHash `json:"envelopeHash,omitempty"`
+
+ // payload hash
+ PayloadHash *DSSEV001SchemaPayloadHash `json:"payloadHash,omitempty"`
+
+ // proposed content
+ ProposedContent *DSSEV001SchemaProposedContent `json:"proposedContent,omitempty"`
+
+ // extracted collection of all signatures of the envelope's payload; elements will be sorted by lexicographical order of the base64 encoded signature strings
+ // Read Only: true
+ // Min Items: 1
+ Signatures []*DSSEV001SchemaSignaturesItems0 `json:"signatures"`
+}
+
+// Validate validates this dsse v001 schema
+func (m *DSSEV001Schema) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateEnvelopeHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePayloadHash(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateProposedContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSignatures(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSEV001Schema) validateEnvelopeHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.EnvelopeHash) { // not required
+ return nil
+ }
+
+ if m.EnvelopeHash != nil {
+ if err := m.EnvelopeHash.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("envelopeHash")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("envelopeHash")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) validatePayloadHash(formats strfmt.Registry) error {
+ if swag.IsZero(m.PayloadHash) { // not required
+ return nil
+ }
+
+ if m.PayloadHash != nil {
+ if err := m.PayloadHash.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("payloadHash")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("payloadHash")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) validateProposedContent(formats strfmt.Registry) error {
+ if swag.IsZero(m.ProposedContent) { // not required
+ return nil
+ }
+
+ if m.ProposedContent != nil {
+ if err := m.ProposedContent.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("proposedContent")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("proposedContent")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) validateSignatures(formats strfmt.Registry) error {
+ if swag.IsZero(m.Signatures) { // not required
+ return nil
+ }
+
+ iSignaturesSize := int64(len(m.Signatures))
+
+ if err := validate.MinItems("signatures", "body", iSignaturesSize, 1); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(m.Signatures); i++ {
+ if swag.IsZero(m.Signatures[i]) { // not required
+ continue
+ }
+
+ if m.Signatures[i] != nil {
+ if err := m.Signatures[i].Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("signatures" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("signatures" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// ContextValidate validate this dsse v001 schema based on the context it is used
+func (m *DSSEV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidatePayloadHash(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateProposedContent(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateSignatures(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSEV001Schema) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.EnvelopeHash != nil {
+ if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("envelopeHash")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("envelopeHash")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PayloadHash != nil {
+ if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("payloadHash")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("payloadHash")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) contextValidateProposedContent(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.ProposedContent != nil {
+ if err := m.ProposedContent.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("proposedContent")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("proposedContent")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *DSSEV001Schema) contextValidateSignatures(ctx context.Context, formats strfmt.Registry) error {
+
+ if err := validate.ReadOnly(ctx, "signatures", "body", []*DSSEV001SchemaSignaturesItems0(m.Signatures)); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(m.Signatures); i++ {
+
+ if m.Signatures[i] != nil {
+ if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("signatures" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("signatures" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001Schema) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001Schema) UnmarshalBinary(b []byte) error {
+ var res DSSEV001Schema
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// DSSEV001SchemaEnvelopeHash Specifies the hash algorithm and value encompassing the entire envelope sent to Rekor
+//
+// swagger:model DSSEV001SchemaEnvelopeHash
+type DSSEV001SchemaEnvelopeHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: [sha256]
+ Algorithm *string `json:"algorithm"`
+
+ // The value of the computed digest over the entire envelope
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this DSSE v001 schema envelope hash
+func (m *DSSEV001SchemaEnvelopeHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum []interface{}
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum = append(dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // DSSEV001SchemaEnvelopeHashAlgorithmSha256 captures enum value "sha256"
+ DSSEV001SchemaEnvelopeHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSEV001SchemaEnvelopeHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("envelopeHash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validate this DSSE v001 schema envelope hash based on the context it is used
+func (m *DSSEV001SchemaEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001SchemaEnvelopeHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001SchemaEnvelopeHash) UnmarshalBinary(b []byte) error {
+ var res DSSEV001SchemaEnvelopeHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// DSSEV001SchemaPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope
+//
+// swagger:model DSSEV001SchemaPayloadHash
+type DSSEV001SchemaPayloadHash struct {
+
+ // The hashing function used to compute the hash value
+ // Required: true
+ // Enum: [sha256]
+ Algorithm *string `json:"algorithm"`
+
+ // The value of the computed digest over the payload within the envelope
+ // Required: true
+ Value *string `json:"value"`
+}
+
+// Validate validates this DSSE v001 schema payload hash
+func (m *DSSEV001SchemaPayloadHash) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateAlgorithm(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateValue(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+var dsseV001SchemaPayloadHashTypeAlgorithmPropEnum []interface{}
+
+func init() {
+ var res []string
+ if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
+ panic(err)
+ }
+ for _, v := range res {
+ dsseV001SchemaPayloadHashTypeAlgorithmPropEnum = append(dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, v)
+ }
+}
+
+const (
+
+ // DSSEV001SchemaPayloadHashAlgorithmSha256 captures enum value "sha256"
+ DSSEV001SchemaPayloadHashAlgorithmSha256 string = "sha256"
+)
+
+// prop value enum
+func (m *DSSEV001SchemaPayloadHash) validateAlgorithmEnum(path, location string, value string) error {
+ if err := validate.EnumCase(path, location, value, dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *DSSEV001SchemaPayloadHash) validateAlgorithm(formats strfmt.Registry) error {
+
+ if err := validate.Required("payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil {
+ return err
+ }
+
+ // value enum
+ if err := m.validateAlgorithmEnum("payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSEV001SchemaPayloadHash) validateValue(formats strfmt.Registry) error {
+
+ if err := validate.Required("payloadHash"+"."+"value", "body", m.Value); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validate this DSSE v001 schema payload hash based on the context it is used
+func (m *DSSEV001SchemaPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001SchemaPayloadHash) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001SchemaPayloadHash) UnmarshalBinary(b []byte) error {
+ var res DSSEV001SchemaPayloadHash
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// DSSEV001SchemaProposedContent DSSE v001 schema proposed content
+//
+// swagger:model DSSEV001SchemaProposedContent
+type DSSEV001SchemaProposedContent struct {
+
+ // DSSE envelope specified as a stringified JSON object
+ // Required: true
+ Envelope *string `json:"envelope"`
+
+ // collection of all verification material (e.g. public keys or certificates) used to verify signatures over envelope's payload, specified as base64-encoded strings
+ // Required: true
+ // Min Items: 1
+ Verifiers []strfmt.Base64 `json:"verifiers"`
+}
+
+// Validate validates this DSSE v001 schema proposed content
+func (m *DSSEV001SchemaProposedContent) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateEnvelope(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateVerifiers(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSEV001SchemaProposedContent) validateEnvelope(formats strfmt.Registry) error {
+
+ if err := validate.Required("proposedContent"+"."+"envelope", "body", m.Envelope); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSEV001SchemaProposedContent) validateVerifiers(formats strfmt.Registry) error {
+
+ if err := validate.Required("proposedContent"+"."+"verifiers", "body", m.Verifiers); err != nil {
+ return err
+ }
+
+ iVerifiersSize := int64(len(m.Verifiers))
+
+ if err := validate.MinItems("proposedContent"+"."+"verifiers", "body", iVerifiersSize, 1); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this DSSE v001 schema proposed content based on context it is used
+func (m *DSSEV001SchemaProposedContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001SchemaProposedContent) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001SchemaProposedContent) UnmarshalBinary(b []byte) error {
+ var res DSSEV001SchemaProposedContent
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
+
+// DSSEV001SchemaSignaturesItems0 a signature of the envelope's payload along with the verification material for the signature
+//
+// swagger:model DSSEV001SchemaSignaturesItems0
+type DSSEV001SchemaSignaturesItems0 struct {
+
+ // base64 encoded signature of the payload
+ // Required: true
+ // Pattern: ^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$
+ Signature *string `json:"signature"`
+
+ // verification material that was used to verify the corresponding signature, specified as a base64 encoded string
+ // Required: true
+ // Format: byte
+ Verifier *strfmt.Base64 `json:"verifier"`
+}
+
+// Validate validates this DSSE v001 schema signatures items0
+func (m *DSSEV001SchemaSignaturesItems0) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateSignature(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateVerifier(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *DSSEV001SchemaSignaturesItems0) validateSignature(formats strfmt.Registry) error {
+
+ if err := validate.Required("signature", "body", m.Signature); err != nil {
+ return err
+ }
+
+ if err := validate.Pattern("signature", "body", *m.Signature, `^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$`); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *DSSEV001SchemaSignaturesItems0) validateVerifier(formats strfmt.Registry) error {
+
+ if err := validate.Required("verifier", "body", m.Verifier); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ContextValidate validates this DSSE v001 schema signatures items0 based on context it is used
+func (m *DSSEV001SchemaSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *DSSEV001SchemaSignaturesItems0) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *DSSEV001SchemaSignaturesItems0) UnmarshalBinary(b []byte) error {
+ var res DSSEV001SchemaSignaturesItems0
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
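For the v0.0.1 spec the client-supplied part is proposedContent (the raw envelope plus at least one verifier); the hash fields and signatures are filled in on the server side (signatures is marked read-only above). A hedged construction-and-validation sketch using only types from the file above; the envelope and key strings are placeholders:

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	// Placeholder values: a real envelope is a DSSE JSON document and a real
	// verifier is a PEM-encoded public key or certificate.
	proposed := &models.DSSEV001SchemaProposedContent{
		Envelope:  swag.String(`{"payloadType":"application/vnd.in-toto+json","payload":"e30=","signatures":[]}`),
		Verifiers: []strfmt.Base64{strfmt.Base64("-----BEGIN PUBLIC KEY-----\nplaceholder\n-----END PUBLIC KEY-----\n")},
	}

	spec := &models.DSSEV001Schema{ProposedContent: proposed}

	// Validate enforces the required fields and the minItems:1 constraint on verifiers.
	if err := spec.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid:", err)
		return
	}
	fmt.Println("proposed DSSE entry passes schema validation")
}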
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
index 3297e5a91d..816435cb24 100644
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
@@ -450,25 +450,25 @@ type IntotoV002SchemaContentEnvelopeSignaturesItems0 struct {
Keyid string `json:"keyid,omitempty"`
// public key that corresponds to this signature
- // Read Only: true
+ // Required: true
// Format: byte
- PublicKey strfmt.Base64 `json:"publicKey,omitempty"`
+ PublicKey *strfmt.Base64 `json:"publicKey"`
// signature of the payload
+ // Required: true
// Format: byte
- Sig strfmt.Base64 `json:"sig,omitempty"`
+ Sig *strfmt.Base64 `json:"sig"`
}
// Validate validates this intoto v002 schema content envelope signatures items0
func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) Validate(formats strfmt.Registry) error {
- return nil
-}
-
-// ContextValidate validate this intoto v002 schema content envelope signatures items0 based on the context it is used
-func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
- if err := m.contextValidatePublicKey(ctx, formats); err != nil {
+ if err := m.validatePublicKey(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSig(formats); err != nil {
res = append(res, err)
}
@@ -478,15 +478,29 @@ func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) ContextValidate(ctx co
return nil
}
-func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
+func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validatePublicKey(formats strfmt.Registry) error {
+
+ if err := validate.Required("publicKey", "body", m.PublicKey); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validateSig(formats strfmt.Registry) error {
- if err := validate.ReadOnly(ctx, "publicKey", "body", strfmt.Base64(m.PublicKey)); err != nil {
+ if err := validate.Required("sig", "body", m.Sig); err != nil {
return err
}
return nil
}
+// ContextValidate validates this intoto v002 schema content envelope signatures items0 based on context it is used
+func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
// MarshalBinary interface implementation
func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) MarshalBinary() ([]byte, error) {
if m == nil {
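This regeneration turns publicKey from a read-only scalar into a required pointer and makes sig required as well, so the checks move from ContextValidate into Validate. A short sketch of the effect; the field values are placeholders:

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	// An empty item no longer validates: publicKey and sig are both required now.
	empty := &models.IntotoV002SchemaContentEnvelopeSignaturesItems0{}
	fmt.Println(empty.Validate(strfmt.Default)) // composite error: publicKey and sig missing

	pub := strfmt.Base64("public-key-bytes placeholder")
	sig := strfmt.Base64("signature-bytes placeholder")
	item := &models.IntotoV002SchemaContentEnvelopeSignaturesItems0{
		Keyid:     "placeholder-key-id",
		PublicKey: &pub,
		Sig:       &sig,
	}
	fmt.Println(item.Validate(strfmt.Default)) // <nil>
}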
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go
index 76b28019cb..5b734a5fff 100644
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go
@@ -126,6 +126,12 @@ func unmarshalProposedEntry(data []byte, consumer runtime.Consumer) (ProposedEnt
return nil, err
}
return &result, nil
+ case "dsse":
+ var result DSSE
+ if err := consumer.Consume(buf2, &result); err != nil {
+ return nil, err
+ }
+ return &result, nil
case "hashedrekord":
var result Hashedrekord
if err := consumer.Consume(buf2, &result); err != nil {
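With the new case, the ProposedEntry discriminator can route kind "dsse" to the model added above. A sketch using the exported wrapper that go-swagger generates around unmarshalProposedEntry (assumed to be named UnmarshalProposedEntry, as elsewhere in this generated package) together with the go-openapi JSON consumer:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/go-openapi/runtime"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	body := `{"kind":"dsse","apiVersion":"0.0.1","spec":{}}`

	// Dispatches on "kind" and, with this change, should hand back a *models.DSSE.
	entry, err := models.UnmarshalProposedEntry(strings.NewReader(body), runtime.JSONConsumer())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T %s\n", entry, entry.Kind())
}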
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go
index f8bf4b020d..db5d8a3a92 100644
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go
@@ -195,11 +195,30 @@ func (m *TUFV001Schema) UnmarshalBinary(b []byte) error {
type TUFV001SchemaMetadata struct {
// Specifies the metadata inline within the document
- Content interface{} `json:"content,omitempty"`
+ // Required: true
+ Content interface{} `json:"content"`
}
// Validate validates this TUF v001 schema metadata
func (m *TUFV001SchemaMetadata) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateContent(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *TUFV001SchemaMetadata) validateContent(formats strfmt.Registry) error {
+
+ if m.Content == nil {
+ return errors.Required("metadata"+"."+"content", "body", nil)
+ }
+
return nil
}
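The TUF v0.0.1 metadata content moves from optional to required, so an empty metadata object is now rejected at validation time instead of being accepted silently. A minimal illustration:

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/sigstore/rekor/pkg/generated/models"
)

func main() {
	meta := &models.TUFV001SchemaMetadata{}
	fmt.Println(meta.Validate(strfmt.Default)) // metadata.content in body is required

	// Any non-nil JSON-shaped value satisfies the new required check.
	meta.Content = map[string]interface{}{"signed": map[string]interface{}{"_type": "root"}}
	fmt.Println(meta.Validate(strfmt.Default)) // <nil>
}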
diff --git a/vendor/github.com/sigstore/rekor/pkg/log/log.go b/vendor/github.com/sigstore/rekor/pkg/log/log.go
deleted file mode 100644
index 413b604f78..0000000000
--- a/vendor/github.com/sigstore/rekor/pkg/log/log.go
+++ /dev/null
@@ -1,115 +0,0 @@
-//
-// Copyright 2021 The Sigstore Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package log
-
-import (
- "context"
- "log"
-
- "github.com/go-chi/chi/middleware"
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
-)
-
-// Logger set the default logger to development mode
-var Logger *zap.SugaredLogger
-
-func init() {
- ConfigureLogger("dev")
-}
-
-func ConfigureLogger(logType string) {
- var cfg zap.Config
- if logType == "prod" {
- cfg = zap.NewProductionConfig()
- cfg.EncoderConfig.LevelKey = "severity"
- cfg.EncoderConfig.MessageKey = "message"
- cfg.EncoderConfig.TimeKey = "time"
- cfg.EncoderConfig.EncodeLevel = encodeLevel()
- cfg.EncoderConfig.EncodeTime = zapcore.RFC3339TimeEncoder
- cfg.EncoderConfig.EncodeDuration = zapcore.SecondsDurationEncoder
- cfg.EncoderConfig.EncodeCaller = zapcore.FullCallerEncoder
- } else {
- cfg = zap.NewDevelopmentConfig()
- cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
- }
- logger, err := cfg.Build()
- if err != nil {
- log.Fatalln("createLogger", err)
- }
- Logger = logger.Sugar()
-}
-
-func encodeLevel() zapcore.LevelEncoder {
- return func(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
- switch l {
- case zapcore.DebugLevel:
- enc.AppendString("DEBUG")
- case zapcore.InfoLevel:
- enc.AppendString("INFO")
- case zapcore.WarnLevel:
- enc.AppendString("WARNING")
- case zapcore.ErrorLevel:
- enc.AppendString("ERROR")
- case zapcore.DPanicLevel:
- enc.AppendString("CRITICAL")
- case zapcore.PanicLevel:
- enc.AppendString("ALERT")
- case zapcore.FatalLevel:
- enc.AppendString("EMERGENCY")
- }
- }
-}
-
-var CliLogger = createCliLogger()
-
-func createCliLogger() *zap.SugaredLogger {
- cfg := zap.NewDevelopmentConfig()
- cfg.EncoderConfig.TimeKey = ""
- cfg.EncoderConfig.LevelKey = ""
- cfg.DisableCaller = true
- cfg.DisableStacktrace = true
- logger, err := cfg.Build()
- if err != nil {
- log.Fatalln("createLogger", err)
- }
-
- return logger.Sugar()
-}
-
-func WithRequestID(ctx context.Context, id string) context.Context {
- return context.WithValue(ctx, middleware.RequestIDKey, id)
-}
-
-type operation struct {
- id string
-}
-
-func (o operation) MarshalLogObject(enc zapcore.ObjectEncoder) error {
- enc.AddString("id", o.id)
- return nil
-}
-
-func ContextLogger(ctx context.Context) *zap.SugaredLogger {
- proposedLogger := Logger
- if ctx != nil {
- if ctxRequestID, ok := ctx.Value(middleware.RequestIDKey).(string); ok {
- requestID := operation{ctxRequestID}
- proposedLogger = proposedLogger.With(zap.Object("operation", requestID))
- }
- }
- return proposedLogger
-}
diff --git a/vendor/github.com/sigstore/rekor/pkg/util/pubkey.go b/vendor/github.com/sigstore/rekor/pkg/util/pubkey.go
deleted file mode 100644
index 06c49ca23a..0000000000
--- a/vendor/github.com/sigstore/rekor/pkg/util/pubkey.go
+++ /dev/null
@@ -1,44 +0,0 @@
-//
-// Copyright 2021 The Sigstore Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "context"
- "crypto/ecdsa"
- "errors"
-
- "github.com/sigstore/rekor/pkg/generated/client"
- "github.com/sigstore/rekor/pkg/generated/client/pubkey"
- "github.com/sigstore/sigstore/pkg/cryptoutils"
-)
-
-func PublicKey(ctx context.Context, c *client.Rekor) (*ecdsa.PublicKey, error) {
- resp, err := c.Pubkey.GetPublicKey(&pubkey.GetPublicKeyParams{Context: ctx})
- if err != nil {
- return nil, err
- }
-
- // marshal the pubkey
- pubKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(resp.GetPayload()))
- if err != nil {
- return nil, err
- }
- ed, ok := pubKey.(*ecdsa.PublicKey)
- if !ok {
- return nil, errors.New("public key retrieved from Rekor is not an ECDSA key")
- }
- return ed, nil
-}
diff --git a/vendor/github.com/sigstore/rekor/pkg/util/timestamp_note.go b/vendor/github.com/sigstore/rekor/pkg/util/timestamp_note.go
deleted file mode 100644
index d2f44c2887..0000000000
--- a/vendor/github.com/sigstore/rekor/pkg/util/timestamp_note.go
+++ /dev/null
@@ -1,171 +0,0 @@
-//
-// Copyright 2021 The Sigstore Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "bytes"
- "encoding/base64"
- "errors"
- "fmt"
- "net/url"
- "strconv"
- "strings"
- "time"
-)
-
-// Signed note based timestamp responses
-
-type TimestampNote struct {
- // Origin is the unique identifier/version string
- Origin string
-	// MessageImprint is the hash of the message to timestamp, of the form sha256:<hash value>
- MessageImprint string
- // Nonce is a short random bytes to prove response freshness
- Nonce []byte
- // Time is the timestamp to imprint on the message
- Time time.Time
- // Radius is the time in microseconds used to indicate certainty
- Radius int64
- // CertChainRef is a reference URL to the valid timestamping cert chain used to sign the response
- CertChainRef *url.URL
- // OtherContent is any additional data to be included in the signed payload; each element is assumed to be one line
- OtherContent []string
-}
-
-// String returns the String representation of the TimestampNote
-func (t TimestampNote) String() string {
- var b strings.Builder
- time, _ := t.Time.MarshalText()
- fmt.Fprintf(&b, "%s\n%s\n%s\n%s\n%d\n%s\n", t.Origin, t.MessageImprint, base64.StdEncoding.EncodeToString(t.Nonce),
- time, t.Radius, t.CertChainRef)
- for _, line := range t.OtherContent {
- fmt.Fprintf(&b, "%s\n", line)
- }
- return b.String()
-}
-
-// MarshalText returns the common format representation of this TimestampNote.
-func (t TimestampNote) MarshalText() ([]byte, error) {
- return []byte(t.String()), nil
-}
-
-// UnmarshalText parses the common formatted timestamp note data and stores the result
-// in the TimestampNote.
-//
-// The supplied data is expected to begin with the following 6 lines of text,
-// each followed by a newline:
-//
-// <ecosystem/version string>
-// <message hash>
-// <nonce>
-// <timestamp>
-// <radius>
-// <cert chain URI>
-// <other content>...
-// <other content>...
-//
-// This will discard any content found after the checkpoint (including signatures)
-func (t *TimestampNote) UnmarshalText(data []byte) error {
- l := bytes.Split(data, []byte("\n"))
- if len(l) < 7 {
- return errors.New("invalid timestamp note - too few newlines")
- }
- origin := string(l[0])
- if len(origin) == 0 {
- return errors.New("invalid timestamp note - empty ecosystem")
- }
- h := string(l[1])
- if err := ValidateSHA256Value(h); err != nil {
- return fmt.Errorf("invalid timestamp note - invalid message hash: %w", err)
- }
-
- nonce, err := base64.StdEncoding.DecodeString(string(l[2]))
- if err != nil {
- return fmt.Errorf("invalid timestamp note - invalid nonce: %w", err)
- }
- var timestamp time.Time
- if err := timestamp.UnmarshalText(l[3]); err != nil {
- return fmt.Errorf("invalid timestamp note - invalid time: %w", err)
- }
- r, err := strconv.ParseInt(string(l[4]), 10, 64)
- if err != nil {
- return fmt.Errorf("invalid timestamp note - invalid radius: %w", err)
- }
- u, err := url.Parse(string(l[5]))
- if err != nil {
- return fmt.Errorf("invalid timestamp note - invalid URI: %w", err)
-
- }
- *t = TimestampNote{
- Origin: origin,
- MessageImprint: h,
- Nonce: nonce,
- Time: timestamp,
- Radius: r,
- CertChainRef: u,
- }
- if len(l) >= 8 {
- for _, line := range l[6:] {
- if len(line) == 0 {
- break
- }
- t.OtherContent = append(t.OtherContent, string(line))
- }
- }
- return nil
-}
-
-type SignedTimestampNote struct {
- TimestampNote
- SignedNote
-}
-
-func CreateSignedTimestampNote(t TimestampNote) (*SignedTimestampNote, error) {
- text, err := t.MarshalText()
- if err != nil {
- return nil, err
- }
- return &SignedTimestampNote{
- TimestampNote: t,
- SignedNote: SignedNote{Note: string(text)},
- }, nil
-}
-
-func SignedTimestampNoteValidator(strToValidate string) bool {
- s := SignedNote{}
- if err := s.UnmarshalText([]byte(strToValidate)); err != nil {
- return false
- }
- c := &TimestampNote{}
- return c.UnmarshalText([]byte(s.Note)) == nil
-}
-
-func TimestampNoteValidator(strToValidate string) bool {
- c := &TimestampNote{}
- return c.UnmarshalText([]byte(strToValidate)) == nil
-}
-
-func (r *SignedTimestampNote) UnmarshalText(data []byte) error {
- s := SignedNote{}
- if err := s.UnmarshalText([]byte(data)); err != nil {
- return fmt.Errorf("unmarshalling signed note: %w", err)
- }
- t := TimestampNote{}
- if err := t.UnmarshalText([]byte(s.Note)); err != nil {
- return fmt.Errorf("unmarshalling timestamp note: %w", err)
- }
- *r = SignedTimestampNote{TimestampNote: t, SignedNote: s}
- return nil
-}
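Since the whole signed-timestamp-note helper goes away, here is, for reference only, the line-oriented body its String/UnmarshalText pair produced and consumed: origin, message imprint, base64 nonce, RFC 3339 time, radius and cert chain URL, one per line, optionally followed by extra content lines. All values below are made up.

package main

import "fmt"

func main() {
	// Made-up values laid out in the six-line format documented above.
	note := "Rekor Timestamp Authority\n" + // origin / version string
		"sha256:9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n" + // message imprint
		"5xaCzY0=\n" + // base64-encoded nonce
		"2021-04-08T17:20:00Z\n" + // timestamp
		"1000000\n" + // radius in microseconds
		"https://example.com/timestamp-certchain.pem\n" // cert chain reference
	fmt.Print(note)
}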
diff --git a/vendor/github.com/sigstore/rekor/pkg/util/trillian_client.go b/vendor/github.com/sigstore/rekor/pkg/util/trillian_client.go
deleted file mode 100644
index e1d0d5d6da..0000000000
--- a/vendor/github.com/sigstore/rekor/pkg/util/trillian_client.go
+++ /dev/null
@@ -1,380 +0,0 @@
-//
-// Copyright 2021 The Sigstore Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "context"
- "encoding/hex"
- "fmt"
- "time"
-
- "github.com/sigstore/rekor/pkg/log"
- "github.com/transparency-dev/merkle/proof"
- "github.com/transparency-dev/merkle/rfc6962"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
- "google.golang.org/protobuf/types/known/durationpb"
-
- "github.com/google/trillian"
- "github.com/google/trillian/client"
- "github.com/google/trillian/types"
-)
-
-// TrillianClient provides a wrapper around the Trillian client
-type TrillianClient struct {
- client trillian.TrillianLogClient
- logID int64
- context context.Context
-}
-
-// NewTrillianClient creates a TrillianClient with the given Trillian client and log/tree ID.
-func NewTrillianClient(ctx context.Context, logClient trillian.TrillianLogClient, logID int64) TrillianClient {
- return TrillianClient{
- client: logClient,
- logID: logID,
- context: ctx,
- }
-}
-
-// Response includes a status code, an optional error message, and one of the results based on the API call
-type Response struct {
- // Status is the status code of the response
- Status codes.Code
- // Error contains an error on request or client failure
- Err error
- // GetAddResult contains the response from queueing a leaf in Trillian
- GetAddResult *trillian.QueueLeafResponse
- // GetLeafAndProofResult contains the response for fetching an inclusion proof and leaf
- GetLeafAndProofResult *trillian.GetEntryAndProofResponse
- // GetLatestResult contains the response for the latest checkpoint
- GetLatestResult *trillian.GetLatestSignedLogRootResponse
- // GetConsistencyProofResult contains the response for a consistency proof between two log sizes
- GetConsistencyProofResult *trillian.GetConsistencyProofResponse
- // getProofResult contains the response for an inclusion proof fetched by leaf hash
- getProofResult *trillian.GetInclusionProofByHashResponse
-}
-
-func unmarshalLogRoot(logRoot []byte) (types.LogRootV1, error) {
- var root types.LogRootV1
- if err := root.UnmarshalBinary(logRoot); err != nil {
- return types.LogRootV1{}, err
- }
- return root, nil
-}
-
-func (t *TrillianClient) root() (types.LogRootV1, error) {
- rqst := &trillian.GetLatestSignedLogRootRequest{
- LogId: t.logID,
- }
- resp, err := t.client.GetLatestSignedLogRoot(t.context, rqst)
- if err != nil {
- return types.LogRootV1{}, err
- }
- return unmarshalLogRoot(resp.SignedLogRoot.LogRoot)
-}
-
-func (t *TrillianClient) AddLeaf(byteValue []byte) *Response {
- leaf := &trillian.LogLeaf{
- LeafValue: byteValue,
- }
- rqst := &trillian.QueueLeafRequest{
- LogId: t.logID,
- Leaf: leaf,
- }
- resp, err := t.client.QueueLeaf(t.context, rqst)
-
- // check for error
- if err != nil || (resp.QueuedLeaf.Status != nil && resp.QueuedLeaf.Status.Code != int32(codes.OK)) {
- return &Response{
- Status: status.Code(err),
- Err: err,
- GetAddResult: resp,
- }
- }
-
- root, err := t.root()
- if err != nil {
- return &Response{
- Status: status.Code(err),
- Err: err,
- GetAddResult: resp,
- }
- }
- v := client.NewLogVerifier(rfc6962.DefaultHasher)
- logClient := client.New(t.logID, t.client, v, root)
-
- waitForInclusion := func(ctx context.Context, leafHash []byte) *Response {
- if logClient.MinMergeDelay > 0 {
- select {
- case <-ctx.Done():
- return &Response{
- Status: codes.DeadlineExceeded,
- Err: ctx.Err(),
- }
- case <-time.After(logClient.MinMergeDelay):
- }
- }
- for {
- root = *logClient.GetRoot()
- if root.TreeSize >= 1 {
- proofResp := t.getProofByHash(resp.QueuedLeaf.Leaf.MerkleLeafHash)
- // if this call succeeds or returns an error other than "not found", return
- if proofResp.Err == nil || (proofResp.Err != nil && status.Code(proofResp.Err) != codes.NotFound) {
- return proofResp
- }
- // otherwise wait for a root update before trying again
- }
-
- if _, err := logClient.WaitForRootUpdate(ctx); err != nil {
- return &Response{
- Status: codes.Unknown,
- Err: err,
- }
- }
- }
- }
-
- proofResp := waitForInclusion(t.context, resp.QueuedLeaf.Leaf.MerkleLeafHash)
- if proofResp.Err != nil {
- return &Response{
- Status: status.Code(proofResp.Err),
- Err: proofResp.Err,
- GetAddResult: resp,
- }
- }
-
- proofs := proofResp.getProofResult.Proof
- if len(proofs) != 1 {
- err := fmt.Errorf("expected 1 proof from getProofByHash for %v, found %v", hex.EncodeToString(resp.QueuedLeaf.Leaf.MerkleLeafHash), len(proofs))
- return &Response{
- Status: status.Code(err),
- Err: err,
- GetAddResult: resp,
- }
- }
-
- leafIndex := proofs[0].LeafIndex
- leafResp := t.GetLeafAndProofByIndex(leafIndex)
- if leafResp.Err != nil {
- return &Response{
- Status: status.Code(leafResp.Err),
- Err: leafResp.Err,
- GetAddResult: resp,
- }
- }
-
- // overwrite queued leaf that doesn't have index set
- resp.QueuedLeaf.Leaf = leafResp.GetLeafAndProofResult.Leaf
-
- return &Response{
- Status: status.Code(err),
- Err: err,
- GetAddResult: resp,
- // include getLeafAndProofResult for inclusion proof
- GetLeafAndProofResult: leafResp.GetLeafAndProofResult,
- }
-}
-
-func (t *TrillianClient) GetLeafAndProofByHash(hash []byte) *Response {
- // get inclusion proof for hash, extract index, then fetch leaf using index
- proofResp := t.getProofByHash(hash)
- if proofResp.Err != nil {
- return &Response{
- Status: status.Code(proofResp.Err),
- Err: proofResp.Err,
- }
- }
-
- proofs := proofResp.getProofResult.Proof
- if len(proofs) != 1 {
- err := fmt.Errorf("expected 1 proof from getProofByHash for %v, found %v", hex.EncodeToString(hash), len(proofs))
- return &Response{
- Status: status.Code(err),
- Err: err,
- }
- }
-
- return t.GetLeafAndProofByIndex(proofs[0].LeafIndex)
-}
-
-func (t *TrillianClient) GetLeafAndProofByIndex(index int64) *Response {
- ctx, cancel := context.WithTimeout(t.context, 20*time.Second)
- defer cancel()
-
- rootResp := t.GetLatest(0)
- if rootResp.Err != nil {
- return &Response{
- Status: status.Code(rootResp.Err),
- Err: rootResp.Err,
- }
- }
-
- root, err := unmarshalLogRoot(rootResp.GetLatestResult.SignedLogRoot.LogRoot)
- if err != nil {
- return &Response{
- Status: status.Code(rootResp.Err),
- Err: rootResp.Err,
- }
- }
-
- resp, err := t.client.GetEntryAndProof(ctx,
- &trillian.GetEntryAndProofRequest{
- LogId: t.logID,
- LeafIndex: index,
- TreeSize: int64(root.TreeSize),
- })
-
- if resp != nil && resp.Proof != nil {
- if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(index), root.TreeSize, resp.GetLeaf().MerkleLeafHash, resp.Proof.Hashes, root.RootHash); err != nil {
- return &Response{
- Status: status.Code(err),
- Err: err,
- }
- }
- return &Response{
- Status: status.Code(err),
- Err: err,
- GetLeafAndProofResult: &trillian.GetEntryAndProofResponse{
- Proof: resp.Proof,
- Leaf: resp.Leaf,
- SignedLogRoot: rootResp.GetLatestResult.SignedLogRoot,
- },
- }
- }
-
- return &Response{
- Status: status.Code(err),
- Err: err,
- }
-}
-
-func (t *TrillianClient) GetLatest(leafSizeInt int64) *Response {
-
- ctx, cancel := context.WithTimeout(t.context, 20*time.Second)
- defer cancel()
-
- resp, err := t.client.GetLatestSignedLogRoot(ctx,
- &trillian.GetLatestSignedLogRootRequest{
- LogId: t.logID,
- FirstTreeSize: leafSizeInt,
- })
-
- return &Response{
- Status: status.Code(err),
- Err: err,
- GetLatestResult: resp,
- }
-}
-
-func (t *TrillianClient) GetConsistencyProof(firstSize, lastSize int64) *Response {
-
- ctx, cancel := context.WithTimeout(t.context, 20*time.Second)
- defer cancel()
-
- resp, err := t.client.GetConsistencyProof(ctx,
- &trillian.GetConsistencyProofRequest{
- LogId: t.logID,
- FirstTreeSize: firstSize,
- SecondTreeSize: lastSize,
- })
-
- return &Response{
- Status: status.Code(err),
- Err: err,
- GetConsistencyProofResult: resp,
- }
-}
-
-func (t *TrillianClient) getProofByHash(hashValue []byte) *Response {
- ctx, cancel := context.WithTimeout(t.context, 20*time.Second)
- defer cancel()
-
- rootResp := t.GetLatest(0)
- if rootResp.Err != nil {
- return &Response{
- Status: status.Code(rootResp.Err),
- Err: rootResp.Err,
- }
- }
- root, err := unmarshalLogRoot(rootResp.GetLatestResult.SignedLogRoot.LogRoot)
- if err != nil {
- return &Response{
- Status: status.Code(rootResp.Err),
- Err: rootResp.Err,
- }
- }
-
- // issue 1308: if the tree is empty, there's no way we can return a proof
- if root.TreeSize == 0 {
- return &Response{
- Status: codes.NotFound,
- Err: status.Error(codes.NotFound, "tree is empty"),
- }
- }
-
- resp, err := t.client.GetInclusionProofByHash(ctx,
- &trillian.GetInclusionProofByHashRequest{
- LogId: t.logID,
- LeafHash: hashValue,
- TreeSize: int64(root.TreeSize),
- })
-
- if resp != nil {
- v := client.NewLogVerifier(rfc6962.DefaultHasher)
- for _, proof := range resp.Proof {
- if err := v.VerifyInclusionByHash(&root, hashValue, proof); err != nil {
- return &Response{
- Status: status.Code(err),
- Err: err,
- }
- }
- }
- // Return an inclusion proof response with the requested
- return &Response{
- Status: status.Code(err),
- Err: err,
- getProofResult: &trillian.GetInclusionProofByHashResponse{
- Proof: resp.Proof,
- SignedLogRoot: rootResp.GetLatestResult.SignedLogRoot,
- },
- }
- }
-
- return &Response{
- Status: status.Code(err),
- Err: err,
- }
-}
-
-func CreateAndInitTree(ctx context.Context, adminClient trillian.TrillianAdminClient, logClient trillian.TrillianLogClient) (*trillian.Tree, error) {
- t, err := adminClient.CreateTree(ctx, &trillian.CreateTreeRequest{
- Tree: &trillian.Tree{
- TreeType: trillian.TreeType_LOG,
- TreeState: trillian.TreeState_ACTIVE,
- MaxRootDuration: durationpb.New(time.Hour),
- },
- })
- if err != nil {
- return nil, fmt.Errorf("create tree: %w", err)
- }
-
- if err := client.InitLog(ctx, t, logClient); err != nil {
- return nil, fmt.Errorf("init log: %w", err)
- }
- log.Logger.Infof("Created new tree with ID: %v", t.TreeId)
- return t, nil
-}
diff --git a/vendor/github.com/sigstore/rekor/pkg/util/util.go b/vendor/github.com/sigstore/rekor/pkg/util/util.go
index fa0e82336c..78c1a0f513 100644
--- a/vendor/github.com/sigstore/rekor/pkg/util/util.go
+++ b/vendor/github.com/sigstore/rekor/pkg/util/util.go
@@ -32,8 +32,6 @@ import (
"time"
"golang.org/x/crypto/openpgp"
-
- "github.com/sigstore/rekor/pkg/generated/models"
)
var (
@@ -377,19 +375,6 @@ func CreateArtifact(t *testing.T, artifactPath string) string {
return artifact
}
-func extractLogEntry(t *testing.T, le models.LogEntry) models.LogEntryAnon {
- t.Helper()
-
- if len(le) != 1 {
- t.Fatal("expected length to be 1, is actually", len(le))
- }
- for _, v := range le {
- return v
- }
- // this should never happen
- return models.LogEntryAnon{}
-}
-
func write(t *testing.T, data string, path string) {
t.Helper()
if err := ioutil.WriteFile(path, []byte(data), 0644); err != nil {
diff --git a/vendor/github.com/sigstore/rekor/pkg/util/validate.go b/vendor/github.com/sigstore/rekor/pkg/util/validate.go
deleted file mode 100644
index f8f015fbd1..0000000000
--- a/vendor/github.com/sigstore/rekor/pkg/util/validate.go
+++ /dev/null
@@ -1,92 +0,0 @@
-//
-// Copyright 2021 The Sigstore Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package util
-
-import (
- "strings"
-
- validator "github.com/go-playground/validator/v10"
-)
-
-// validateSHA512Value ensures that the supplied string matches the
-// following format: [sha512:]<128 hexadecimal characters>
-// where [sha512:] is optional
-func ValidateSHA512Value(v string) error {
- var prefix, hash string
-
- split := strings.SplitN(v, ":", 2)
- switch len(split) {
- case 1:
- hash = split[0]
- case 2:
- prefix = split[0]
- hash = split[1]
- }
-
- s := struct {
- Prefix string `validate:"omitempty,oneof=sha512"`
- Hash string `validate:"required,len=128,hexadecimal"`
- }{prefix, hash}
-
- validate := validator.New()
- return validate.Struct(s)
-}
-
-// validateSHA256Value ensures that the supplied string matches the following format:
-// [sha256:]<64 hexadecimal characters>
-// where [sha256:] is optional
-func ValidateSHA256Value(v string) error {
- var prefix, hash string
-
- split := strings.SplitN(v, ":", 2)
- switch len(split) {
- case 1:
- hash = split[0]
- case 2:
- prefix = split[0]
- hash = split[1]
- }
-
- s := struct {
- Prefix string `validate:"omitempty,oneof=sha256"`
- Hash string `validate:"required,len=64,hexadecimal"`
- }{prefix, hash}
-
- validate := validator.New()
- return validate.Struct(s)
-}
-
-func ValidateSHA1Value(v string) error {
- var prefix, hash string
-
- split := strings.SplitN(v, ":", 2)
- switch len(split) {
- case 1:
- hash = split[0]
- case 2:
- prefix = split[0]
- hash = split[1]
- }
-
- s := struct {
- Prefix string `validate:"omitempty,oneof=sha1"`
- Hash string `validate:"required,len=40,hexadecimal"`
- }{prefix, hash}
-
- validate := validator.New()
- return validate.Struct(s)
-
-}
diff --git a/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md b/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md
deleted file mode 100644
index 43de4c9d47..0000000000
--- a/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# How to contribute #
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-
-## Contributor License Agreement ##
-
-Contributions to any Google project must be accompanied by a Contributor
-License Agreement. This is not a copyright **assignment**, it simply gives
-Google permission to use and redistribute your contributions as part of the
-project.
-
- * If you are an individual writing original source code and you're sure you
- own the intellectual property, then you'll need to sign an [individual
- CLA][].
-
- * If you work for a company that wants to allow you to contribute your work,
- then you'll need to sign a [corporate CLA][].
-
-You generally only need to submit a CLA once, so if you've already submitted
-one (even if it was for a different project), you probably don't need to do it
-again.
-
-[individual CLA]: https://developers.google.com/open-source/cla/individual
-[corporate CLA]: https://developers.google.com/open-source/cla/corporate
-
-Once your CLA is submitted (or if you already submitted one for
-another Google project), make a commit adding yourself to the
-[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
-of your first [pull request][].
-
-[AUTHORS]: AUTHORS
-[CONTRIBUTORS]: CONTRIBUTORS
-
-
-## Submitting a patch ##
-
- 1. It's generally best to start by opening a new issue describing the bug or
- feature you're intending to fix. Even if you think it's relatively minor,
- it's helpful to know what people are working on. Mention in the initial
- issue that you are planning to work on that bug or feature so that it can
- be assigned to you.
-
- 1. Follow the normal process of [forking][] the project, and set up a new
- branch to work in. It's important that each group of changes be done in
- separate branches in order to ensure that a pull request only includes the
- commits related to that bug or feature.
-
- 1. Do your best to have [well-formed commit messages][] for each change.
- This provides consistency throughout the project, and ensures that commit
- messages are able to be formatted properly by various git tools.
-
- 1. Finally, push the commits to your fork and submit a [pull request][].
-
-[forking]: https://help.github.com/articles/fork-a-repo
-[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
-[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/vendor/github.com/transparency-dev/merkle/LICENSE b/vendor/github.com/transparency-dev/merkle/LICENSE
deleted file mode 100644
index d645695673..0000000000
--- a/vendor/github.com/transparency-dev/merkle/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/transparency-dev/merkle/README.md b/vendor/github.com/transparency-dev/merkle/README.md
deleted file mode 100644
index 3c8d212711..0000000000
--- a/vendor/github.com/transparency-dev/merkle/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Merkle
-
-[![Go Reference](https://pkg.go.dev/badge/github.com/transparency-dev/merkle.svg)](https://pkg.go.dev/github.com/transparency-dev/merkle)
-[![Go Report
-Card](https://goreportcard.com/badge/github.com/transparency-dev/merkle)](https://goreportcard.com/report/github.com/transparency-dev/merkle)
-[![codecov](https://codecov.io/gh/transparency-dev/merkle/branch/main/graph/badge.svg?token=BBCRAMOBY2)](https://codecov.io/gh/transparency-dev/merkle)
-[![Slack
-Status](https://img.shields.io/badge/Slack-Chat-blue.svg)](https://gtrillian.slack.com/)
-
-## Overview
-
-This repository contains Go code to help create and manipulate Merkle trees, as
-well as construct and verify various types of proof.
-
-This is the data structure which is used by projects such as
-[Trillian](https://github.com/google/trillian) to provide
-[verifiable logs](https://transparency.dev/verifiable-data-structures/#verifiable-log).
-
-
-## Support
-* Mailing list: https://groups.google.com/forum/#!forum/trillian-transparency
-* Slack: https://gtrillian.slack.com/ (invitation)
-
-
-
diff --git a/vendor/github.com/transparency-dev/merkle/compact/nodes.go b/vendor/github.com/transparency-dev/merkle/compact/nodes.go
deleted file mode 100644
index c53a96a4c3..0000000000
--- a/vendor/github.com/transparency-dev/merkle/compact/nodes.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2019 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package compact
-
-import "math/bits"
-
-// NodeID identifies a node of a Merkle tree.
-//
-// The ID consists of a level and index within this level. Levels are numbered
-// from 0, which corresponds to the tree leaves. Within each level, nodes are
-// numbered with consecutive indices starting from 0.
-//
-// L4: ┌───────0───────┐ ...
-// L3: ┌───0───┐ ┌───1───┐ ┌─── ...
-// L2: ┌─0─┐ ┌─1─┐ ┌─2─┐ ┌─3─┐ ┌─4─┐ ...
-// L1: ┌0┐ ┌1┐ ┌2┐ ┌3┐ ┌4┐ ┌5┐ ┌6┐ ┌7┐ ┌8┐ ┌9┐ ...
-// L0: 0 1 2 3 4 5 6 7 8 9 ... ... ... ... ... ...
-//
-// When the tree is not perfect, the nodes that would complement it to perfect
-// are called ephemeral. Algorithms that operate with ephemeral nodes still map
-// them to the same address space.
-type NodeID struct {
- Level uint
- Index uint64
-}
-
-// NewNodeID returns a NodeID with the passed in node coordinates.
-func NewNodeID(level uint, index uint64) NodeID {
- return NodeID{Level: level, Index: index}
-}
-
-// Parent returns the ID of the parent node.
-func (id NodeID) Parent() NodeID {
- return NewNodeID(id.Level+1, id.Index>>1)
-}
-
-// Sibling returns the ID of the sibling node.
-func (id NodeID) Sibling() NodeID {
- return NewNodeID(id.Level, id.Index^1)
-}
-
-// Coverage returns the [begin, end) range of leaves covered by the node.
-func (id NodeID) Coverage() (uint64, uint64) {
- return id.Index << id.Level, (id.Index + 1) << id.Level
-}
-
-// RangeNodes appends the IDs of the nodes that comprise the [begin, end)
-// compact range to the given slice, and returns the new slice. The caller may
-// pre-allocate space with the help of the RangeSize function.
-func RangeNodes(begin, end uint64, ids []NodeID) []NodeID {
- left, right := Decompose(begin, end)
-
- pos := begin
- // Iterate over perfect subtrees along the left border of the range, ordered
- // from lower to upper levels.
- for bit := uint64(0); left != 0; pos, left = pos+bit, left^bit {
- level := uint(bits.TrailingZeros64(left))
- bit = uint64(1) << level
- ids = append(ids, NewNodeID(level, pos>>level))
- }
-
- // Iterate over perfect subtrees along the right border of the range, ordered
- // from upper to lower levels.
- for bit := uint64(0); right != 0; pos, right = pos+bit, right^bit {
- level := uint(bits.Len64(right)) - 1
- bit = uint64(1) << level
- ids = append(ids, NewNodeID(level, pos>>level))
- }
-
- return ids
-}
-
-// RangeSize returns the number of nodes in the [begin, end) compact range.
-func RangeSize(begin, end uint64) int {
- left, right := Decompose(begin, end)
- return bits.OnesCount64(left) + bits.OnesCount64(right)
-}
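The NodeID arithmetic above is plain bit manipulation: the parent halves the index, the sibling flips the lowest bit, and a node at (level, index) covers the leaves [index<<level, (index+1)<<level). A standalone sketch of the same arithmetic, reimplemented here rather than imported since the vendored copy is being removed:

```go
package main

import "fmt"

// nodeID mirrors the addressing used by compact.NodeID: level 0 is the leaf
// layer, and Index counts nodes left to right within a level.
type nodeID struct {
	Level uint
	Index uint64
}

func (id nodeID) parent() nodeID  { return nodeID{id.Level + 1, id.Index >> 1} }
func (id nodeID) sibling() nodeID { return nodeID{id.Level, id.Index ^ 1} }

// coverage returns the [begin, end) range of leaves under the node.
func (id nodeID) coverage() (uint64, uint64) {
	return id.Index << id.Level, (id.Index + 1) << id.Level
}

func main() {
	n := nodeID{Level: 2, Index: 3} // the node labelled 3 on level L2 in the diagram above
	fmt.Println(n.parent())         // {3 1}
	fmt.Println(n.sibling())        // {2 2}
	fmt.Println(n.coverage())       // 12 16: it covers leaves 12..15
}
```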
diff --git a/vendor/github.com/transparency-dev/merkle/compact/range.go b/vendor/github.com/transparency-dev/merkle/compact/range.go
deleted file mode 100644
index a34c0be973..0000000000
--- a/vendor/github.com/transparency-dev/merkle/compact/range.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright 2019 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package compact provides compact Merkle tree data structures.
-package compact
-
-import (
- "bytes"
- "errors"
- "fmt"
- "math/bits"
-)
-
-// HashFn computes an internal node's hash using the hashes of its child nodes.
-type HashFn func(left, right []byte) []byte
-
-// VisitFn visits the node with the specified ID and hash.
-type VisitFn func(id NodeID, hash []byte)
-
-// RangeFactory allows creating compact ranges with the specified hash
-// function, which must not be nil, and must not be changed.
-type RangeFactory struct {
- Hash HashFn
-}
-
-// NewRange creates a Range for [begin, end) with the given set of hashes. The
-// hashes correspond to the roots of the minimal set of perfect sub-trees
-// covering the [begin, end) leaves range, ordered left to right.
-func (f *RangeFactory) NewRange(begin, end uint64, hashes [][]byte) (*Range, error) {
- if end < begin {
- return nil, fmt.Errorf("invalid range: end=%d, want >= %d", end, begin)
- }
- if got, want := len(hashes), RangeSize(begin, end); got != want {
- return nil, fmt.Errorf("invalid hashes: got %d values, want %d", got, want)
- }
- return &Range{f: f, begin: begin, end: end, hashes: hashes}, nil
-}
-
-// NewEmptyRange returns a new Range for an empty [begin, begin) range. The
-// value of begin defines where the range will start growing from when entries
-// are appended to it.
-func (f *RangeFactory) NewEmptyRange(begin uint64) *Range {
- return &Range{f: f, begin: begin, end: begin}
-}
-
-// Range represents a compact Merkle tree range for leaf indices [begin, end).
-//
-// It contains the minimal set of perfect subtrees whose leaves comprise this
-// range. The structure is efficiently mergeable with other compact ranges that
-// share one of the endpoints with it.
-//
-// For more details, see
-// https://github.com/transparency-dev/merkle/blob/main/docs/compact_ranges.md.
-type Range struct {
- f *RangeFactory
- begin uint64
- end uint64
- hashes [][]byte
-}
-
-// Begin returns the first index covered by the range (inclusive).
-func (r *Range) Begin() uint64 {
- return r.begin
-}
-
-// End returns the last index covered by the range (exclusive).
-func (r *Range) End() uint64 {
- return r.end
-}
-
-// Hashes returns sub-tree hashes corresponding to the minimal set of perfect
-// sub-trees covering the [begin, end) range, ordered left to right.
-func (r *Range) Hashes() [][]byte {
- return r.hashes
-}
-
-// Append extends the compact range by appending the passed in hash to it. It
-// reports all the added nodes through the visitor function (if non-nil).
-func (r *Range) Append(hash []byte, visitor VisitFn) error {
- if visitor != nil {
- visitor(NewNodeID(0, r.end), hash)
- }
- return r.appendImpl(r.end+1, hash, nil, visitor)
-}
-
-// AppendRange extends the compact range by merging in the other compact range
-// from the right. It uses the tree hasher to calculate hashes of newly created
-// nodes, and reports them through the visitor function (if non-nil).
-func (r *Range) AppendRange(other *Range, visitor VisitFn) error {
- if other.f != r.f {
- return errors.New("incompatible ranges")
- }
- if got, want := other.begin, r.end; got != want {
- return fmt.Errorf("ranges are disjoint: other.begin=%d, want %d", got, want)
- }
- if len(other.hashes) == 0 { // The other range is empty, merging is trivial.
- return nil
- }
- return r.appendImpl(other.end, other.hashes[0], other.hashes[1:], visitor)
-}
-
-// GetRootHash returns the root hash of the Merkle tree represented by this
-// compact range. Requires the range to start at index 0. If the range is
-// empty, returns nil.
-//
-// If visitor is not nil, it is called with all "ephemeral" nodes (i.e. the
-// ones rooting imperfect subtrees) along the right border of the tree.
-func (r *Range) GetRootHash(visitor VisitFn) ([]byte, error) {
- if r.begin != 0 {
- return nil, fmt.Errorf("begin=%d, want 0", r.begin)
- }
- ln := len(r.hashes)
- if ln == 0 {
- return nil, nil
- }
- hash := r.hashes[ln-1]
- // All non-perfect subtree hashes along the right border of the tree
- // correspond to the parents of all perfect subtree nodes except the lowest
- // one (therefore the loop skips it).
- for i, size := ln-2, r.end; i >= 0; i-- {
- hash = r.f.Hash(r.hashes[i], hash)
- if visitor != nil {
- size &= size - 1 // Delete the previous node.
- level := uint(bits.TrailingZeros64(size)) + 1 // Compute the parent level.
- index := size >> level // And its horizontal index.
- visitor(NewNodeID(level, index), hash)
- }
- }
- return hash, nil
-}
-
-// Equal compares two Ranges for equality.
-func (r *Range) Equal(other *Range) bool {
- if r.f != other.f || r.begin != other.begin || r.end != other.end {
- return false
- }
- if len(r.hashes) != len(other.hashes) {
- return false
- }
- for i := range r.hashes {
- if !bytes.Equal(r.hashes[i], other.hashes[i]) {
- return false
- }
- }
- return true
-}
-
-// appendImpl extends the compact range by merging the [r.end, end) compact
-// range into it. The other compact range is decomposed into a seed hash and
-// all the other hashes (possibly none). The method uses the tree hasher to
-// calculate hashes of newly created nodes, and reports them through the
-// visitor function (if non-nil).
-func (r *Range) appendImpl(end uint64, seed []byte, hashes [][]byte, visitor VisitFn) error {
- // Bits [low, high) of r.end encode the merge path, i.e. the sequence of node
- // merges that transforms the two compact ranges into one.
- low, high := getMergePath(r.begin, r.end, end)
- if high < low {
- high = low
- }
- index := r.end >> low
- // Now bits [0, high-low) of index encode the merge path.
-
- // The number of one bits in index is the number of nodes from the left range
- // that will be merged, and zero bits correspond to the nodes in the right
- // range. Below we make sure that both ranges have enough hashes, which can
- // be false only in case the data is corrupted in some way.
- ones := bits.OnesCount64(index & (1<<(high-low) - 1))
- if ln := len(r.hashes); ln < ones {
- return fmt.Errorf("corrupted lhs range: got %d hashes, want >= %d", ln, ones)
- }
- if ln, zeros := len(hashes), int(high-low)-ones; ln < zeros {
- return fmt.Errorf("corrupted rhs range: got %d hashes, want >= %d", ln+1, zeros+1)
- }
-
- // Some of the trailing nodes of the left compact range, and some of the
- // leading nodes of the right range, are sequentially merged with the seed,
- // according to the mask. All new nodes are reported through the visitor.
- idx1, idx2 := len(r.hashes), 0
- for h := low; h < high; h++ {
- if index&1 == 0 {
- seed = r.f.Hash(seed, hashes[idx2])
- idx2++
- } else {
- idx1--
- seed = r.f.Hash(r.hashes[idx1], seed)
- }
- index >>= 1
- if visitor != nil {
- visitor(NewNodeID(h+1, index), seed)
- }
- }
-
- // All nodes from both ranges that have not been merged are bundled together
- // with the "merged" seed node.
- r.hashes = append(append(r.hashes[:idx1], seed), hashes[idx2:]...)
- r.end = end
- return nil
-}
-
-// getMergePath returns the merging path between the compact range [begin, mid)
-// and [mid, end). The path is represented as a range of bits within mid, with
-// bit indices [low, high). A bit value of 1 on level i of mid means that the
-// node on this level merges with the corresponding node in the left compact
-// range, whereas 0 represents merging with the right compact range. If the
-// path is empty then high <= low.
-//
-// The output is not specified if begin <= mid <= end doesn't hold, but the
-// function never panics.
-func getMergePath(begin, mid, end uint64) (uint, uint) {
- low := bits.TrailingZeros64(mid)
- high := 64
- if begin != 0 {
- high = bits.Len64(mid ^ (begin - 1))
- }
- if high2 := bits.Len64((mid - 1) ^ end); high2 < high {
- high = high2
- }
- return uint(low), uint(high - 1)
-}
-
-// Decompose splits the [begin, end) range into a minimal number of sub-ranges,
-// each of which is of the form [m * 2^k, (m+1) * 2^k), i.e. of length 2^k, for
-// some integers m, k >= 0.
-//
-// The sequence of sizes is returned encoded as bitmasks left and right, where:
-// - a 1 bit in a bitmask denotes a sub-range of the corresponding size 2^k
-// - left mask bits in LSB-to-MSB order encode the left part of the sequence
-// - right mask bits in MSB-to-LSB order encode the right part
-//
-// The corresponding values of m are not returned (they can be calculated from
-// begin and the sub-range sizes).
-//
-// For example, (begin, end) values of (0b110, 0b11101) would indicate a
-// sequence of tree sizes: 2,8; 8,4,1.
-//
-// The output is not specified if begin > end, but the function never panics.
-func Decompose(begin, end uint64) (uint64, uint64) {
- // Special case, as the code below works only if begin != 0, or end < 2^63.
- if begin == 0 {
- return 0, end
- }
- xbegin := begin - 1
- // Find where paths to leaves #begin-1 and #end diverge, and mask the upper
- // bits away, as only the nodes strictly below this point are in the range.
- d := bits.Len64(xbegin^end) - 1
-	mask := uint64(1)<<uint(d) - 1
-	return ^xbegin & mask, end & mask
-}
diff --git a/vendor/github.com/transparency-dev/merkle/proof/proof.go b/vendor/github.com/transparency-dev/merkle/proof/proof.go
deleted file mode 100644
--- a/vendor/github.com/transparency-dev/merkle/proof/proof.go
+++ /dev/null
-package proof
-
-import (
-	"fmt"
-	"math/bits"
-
-	"github.com/transparency-dev/merkle/compact"
-)
-
-// Nodes contains information on how to construct a log Merkle tree proof.
-type Nodes struct {
-	// IDs contains the IDs of the non-ephemeral nodes sufficient to build
-	// the proof.
-	IDs []compact.NodeID
-	// begin and end delimit the IDs[begin:end] subslice whose nodes, hashed
-	// together, recreate the single ephemeral node of the proof (if any).
-	begin int
-	end   int
-	// ephem is the ID of the ephemeral node in the proof.
-	ephem compact.NodeID
-}
-
-// Inclusion returns the information on how to fetch and construct an
-// inclusion proof for the given leaf index in a log Merkle tree of the given
-// size. It requires 0 <= index < size.
-func Inclusion(index, size uint64) (Nodes, error) {
-	if index >= size {
- return Nodes{}, fmt.Errorf("index %d out of bounds for tree size %d", index, size)
- }
- return nodes(index, 0, size).skipFirst(), nil
-}
-
-// Consistency returns the information on how to fetch and construct a
-// consistency proof between the two given tree sizes of a log Merkle tree. It
-// requires 0 <= size1 <= size2.
-func Consistency(size1, size2 uint64) (Nodes, error) {
- if size1 > size2 {
- return Nodes{}, fmt.Errorf("tree size %d > %d", size1, size2)
- }
- if size1 == size2 || size1 == 0 {
- return Nodes{IDs: []compact.NodeID{}}, nil
- }
-
- // Find the root of the biggest perfect subtree that ends at size1.
- level := uint(bits.TrailingZeros64(size1))
- index := (size1 - 1) >> level
- // The consistency proof consists of this node (except if size1 is a power of
- // two, in which case adding this node would be redundant because the client
- // is assumed to know it from a checkpoint), and nodes of the inclusion proof
- // into this node in the tree of size2.
- p := nodes(index, level, size2)
-
- // Handle the case when size1 is a power of 2.
- if index == 0 {
- return p.skipFirst(), nil
- }
- return p, nil
-}
-
-// nodes returns the node IDs necessary to prove that the (level, index) node
-// is included in the Merkle tree of the given size.
-func nodes(index uint64, level uint, size uint64) Nodes {
- // Compute the `fork` node, where the path from root to (level, index) node
- // diverges from the path to (0, size).
- //
- // The sibling of this node is the ephemeral node which represents a subtree
- // that is not complete in the tree of the given size. To compute the hash
- // of the ephemeral node, we need all the non-ephemeral nodes that cover the
- // same range of leaves.
- //
- // The `inner` variable is how many layers up from (level, index) the `fork`
- // and the ephemeral nodes are.
- inner := bits.Len64(index^(size>>level)) - 1
- fork := compact.NewNodeID(level+uint(inner), index>>inner)
-
- begin, end := fork.Coverage()
- left := compact.RangeSize(0, begin)
- right := compact.RangeSize(end, size)
-
- node := compact.NewNodeID(level, index)
- // Pre-allocate the exact number of nodes for the proof, in order:
- // - The seed node for which we are building the proof.
- // - The `inner` nodes at each level up to the fork node.
- // - The `right` nodes, comprising the ephemeral node.
- // - The `left` nodes, completing the coverage of the whole [0, size) range.
- nodes := append(make([]compact.NodeID, 0, 1+inner+right+left), node)
-
- // The first portion of the proof consists of the siblings for nodes of the
- // path going up to the level at which the ephemeral node appears.
- for ; node.Level < fork.Level; node = node.Parent() {
- nodes = append(nodes, node.Sibling())
- }
- // This portion of the proof covers the range [begin, end) under it. The
- // ranges to the left and to the right from it remain to be covered.
-
- // Add all the nodes (potentially none) that cover the right range, and
- // represent the ephemeral node. Reverse them so that the Rehash method can
- // process hashes in the convenient order, from lower to upper levels.
- len1 := len(nodes)
- nodes = compact.RangeNodes(end, size, nodes)
- reverse(nodes[len(nodes)-right:])
- len2 := len(nodes)
- // Add the nodes that cover the left range, ordered increasingly by level.
- nodes = compact.RangeNodes(0, begin, nodes)
- reverse(nodes[len(nodes)-left:])
-
- // nodes[len1:len2] contains the nodes representing the ephemeral node. If
- // it's empty, make it zero. Note that it can also contain a single node.
- // Depending on the preference of the layer above, it may or may not be
- // considered ephemeral.
- if len1 >= len2 {
- len1, len2 = 0, 0
- }
-
- return Nodes{IDs: nodes, begin: len1, end: len2, ephem: fork.Sibling()}
-}
-
-// Ephem returns the ephemeral node, and indices begin and end, such that
-// IDs[begin:end] slice contains the child nodes of the ephemeral node.
-//
-// The list is empty iff there are no ephemeral nodes in the proof. Some
-// examples of when this can happen: a proof in a perfect tree; an inclusion
-// proof for a leaf in a perfect subtree at the right edge of the tree.
-func (n Nodes) Ephem() (compact.NodeID, int, int) {
- return n.ephem, n.begin, n.end
-}
-
-// Rehash computes the proof based on the slice of node hashes corresponding to
-// their IDs in the n.IDs field. The slices must be of the same length. The hc
-// parameter computes a node's hash based on hashes of its children.
-//
-// Warning: The passed-in slice of hashes can be modified in-place.
-func (n Nodes) Rehash(h [][]byte, hc func(left, right []byte) []byte) ([][]byte, error) {
- if got, want := len(h), len(n.IDs); got != want {
- return nil, fmt.Errorf("got %d hashes but expected %d", got, want)
- }
- cursor := 0
- // Scan the list of node hashes, and store the rehashed list in-place.
- // Invariant: cursor <= i, and h[:cursor] contains all the hashes of the
- // rehashed list after scanning h up to index i-1.
- for i, ln := 0, len(h); i < ln; i, cursor = i+1, cursor+1 {
- hash := h[i]
- if i >= n.begin && i < n.end {
- // Scan the block of node hashes that need rehashing.
- for i++; i < n.end; i++ {
- hash = hc(h[i], hash)
- }
- i--
- }
- h[cursor] = hash
- }
- return h[:cursor], nil
-}
-
-func (n Nodes) skipFirst() Nodes {
- n.IDs = n.IDs[1:]
- // Fixup the indices into the IDs slice.
- if n.begin < n.end {
- n.begin--
- n.end--
- }
- return n
-}
-
-func reverse(ids []compact.NodeID) {
- for i, j := 0, len(ids)-1; i < j; i, j = i+1, j-1 {
- ids[i], ids[j] = ids[j], ids[i]
- }
-}
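The compact.Range API deleted earlier in this patch (RangeFactory, NewEmptyRange, Append, GetRootHash) is typically used to fold a stream of leaf hashes into a root hash. A minimal usage sketch, assuming the upstream modules github.com/transparency-dev/merkle/compact and .../rfc6962 are fetched directly rather than from vendor/, and using made-up leaf data:

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/transparency-dev/merkle/compact"
	"github.com/transparency-dev/merkle/rfc6962"
)

func main() {
	// Build a compact range for leaves [0, 5) and fold it into a root hash,
	// using the RangeFactory/Range API that this change removes from vendor/.
	hasher := rfc6962.DefaultHasher
	rf := compact.RangeFactory{Hash: hasher.HashChildren}

	cr := rf.NewEmptyRange(0)
	for i := 0; i < 5; i++ {
		leaf := hasher.HashLeaf([]byte(fmt.Sprintf("leaf-%d", i)))
		if err := cr.Append(leaf, nil); err != nil {
			panic(err)
		}
	}

	root, err := cr.GetRootHash(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(root))
}
```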
diff --git a/vendor/github.com/transparency-dev/merkle/proof/verify.go b/vendor/github.com/transparency-dev/merkle/proof/verify.go
deleted file mode 100644
index d42e1afe36..0000000000
--- a/vendor/github.com/transparency-dev/merkle/proof/verify.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2017 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package proof
-
-import (
- "bytes"
- "errors"
- "fmt"
- "math/bits"
-
- "github.com/transparency-dev/merkle"
-)
-
-// RootMismatchError occurs when an inclusion proof fails.
-type RootMismatchError struct {
- ExpectedRoot []byte
- CalculatedRoot []byte
-}
-
-func (e RootMismatchError) Error() string {
- return fmt.Sprintf("calculated root:\n%v\n does not match expected root:\n%v", e.CalculatedRoot, e.ExpectedRoot)
-}
-
-func verifyMatch(calculated, expected []byte) error {
- if !bytes.Equal(calculated, expected) {
- return RootMismatchError{ExpectedRoot: expected, CalculatedRoot: calculated}
- }
- return nil
-}
-
-// VerifyInclusion verifies the correctness of the inclusion proof for the leaf
-// with the specified hash and index, relatively to the tree of the given size
-// and root hash. Requires 0 <= index < size.
-func VerifyInclusion(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte, root []byte) error {
- calcRoot, err := RootFromInclusionProof(hasher, index, size, leafHash, proof)
- if err != nil {
- return err
- }
- return verifyMatch(calcRoot, root)
-}
-
-// RootFromInclusionProof calculates the expected root hash for a tree of the
-// given size, provided a leaf index and hash with the corresponding inclusion
-// proof. Requires 0 <= index < size.
-func RootFromInclusionProof(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte) ([]byte, error) {
- if index >= size {
- return nil, fmt.Errorf("index is beyond size: %d >= %d", index, size)
- }
- if got, want := len(leafHash), hasher.Size(); got != want {
- return nil, fmt.Errorf("leafHash has unexpected size %d, want %d", got, want)
- }
-
- inner, border := decompInclProof(index, size)
- if got, want := len(proof), inner+border; got != want {
- return nil, fmt.Errorf("wrong proof size %d, want %d", got, want)
- }
-
- res := chainInner(hasher, leafHash, proof[:inner], index)
- res = chainBorderRight(hasher, res, proof[inner:])
- return res, nil
-}
-
-// VerifyConsistency checks that the passed-in consistency proof is valid
-// between the passed in tree sizes, with respect to the corresponding root
-// hashes. Requires 0 <= size1 <= size2.
-func VerifyConsistency(hasher merkle.LogHasher, size1, size2 uint64, proof [][]byte, root1, root2 []byte) error {
- switch {
- case size2 < size1:
- return fmt.Errorf("size2 (%d) < size1 (%d)", size1, size2)
- case size1 == size2:
- if len(proof) > 0 {
- return errors.New("size1=size2, but proof is not empty")
- }
- return verifyMatch(root1, root2)
- case size1 == 0:
- // Any size greater than 0 is consistent with size 0.
- if len(proof) > 0 {
- return fmt.Errorf("expected empty proof, but got %d components", len(proof))
- }
- return nil // Proof OK.
- case len(proof) == 0:
- return errors.New("empty proof")
- }
-
- inner, border := decompInclProof(size1-1, size2)
- shift := bits.TrailingZeros64(size1)
- inner -= shift // Note: shift < inner if size1 < size2.
-
- // The proof includes the root hash for the sub-tree of size 2^shift.
- seed, start := proof[0], 1
-	if size1 == 1<<uint(shift) {
-		seed, start = root1, 0
-	}
-	if got, want := len(proof), start+inner+border; got != want {
-		return fmt.Errorf("wrong proof size %d, want %d", got, want)
-	}
-	proof = proof[start:]
-
-	mask := (size1 - 1) >> uint(shift) // Start chaining from level |shift|.
-
-	// Verify the first root.
- hash1 := chainInnerRight(hasher, seed, proof[:inner], mask)
- hash1 = chainBorderRight(hasher, hash1, proof[inner:])
- if err := verifyMatch(hash1, root1); err != nil {
- return err
- }
-
- // Verify the second root.
- hash2 := chainInner(hasher, seed, proof[:inner], mask)
- hash2 = chainBorderRight(hasher, hash2, proof[inner:])
- return verifyMatch(hash2, root2)
-}
-
-// decompInclProof breaks down inclusion proof for a leaf at the specified
-// |index| in a tree of the specified |size| into 2 components. The splitting
-// point between them is where paths to leaves |index| and |size-1| diverge.
-// Returns lengths of the bottom and upper proof parts correspondingly. The sum
-// of the two determines the correct length of the inclusion proof.
-func decompInclProof(index, size uint64) (int, int) {
- inner := innerProofSize(index, size)
- border := bits.OnesCount64(index >> uint(inner))
- return inner, border
-}
-
-func innerProofSize(index, size uint64) int {
- return bits.Len64(index ^ (size - 1))
-}
-
-// chainInner computes a subtree hash for a node on or below the tree's right
-// border. Assumes |proof| hashes are ordered from lower levels to upper, and
-// |seed| is the initial subtree/leaf hash on the path located at the specified
-// |index| on its level.
-func chainInner(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte {
- for i, h := range proof {
- if (index>>uint(i))&1 == 0 {
- seed = hasher.HashChildren(seed, h)
- } else {
- seed = hasher.HashChildren(h, seed)
- }
- }
- return seed
-}
-
-// chainInnerRight computes a subtree hash like chainInner, but only takes
-// hashes to the left from the path into consideration, which effectively means
-// the result is a hash of the corresponding earlier version of this subtree.
-func chainInnerRight(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte {
- for i, h := range proof {
- if (index>>uint(i))&1 == 1 {
- seed = hasher.HashChildren(h, seed)
- }
- }
- return seed
-}
-
-// chainBorderRight chains proof hashes along tree borders. This differs from
-// inner chaining because |proof| contains only left-side subtree hashes.
-func chainBorderRight(hasher merkle.LogHasher, seed []byte, proof [][]byte) []byte {
- for _, h := range proof {
- seed = hasher.HashChildren(h, seed)
- }
- return seed
-}
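VerifyInclusion above recomputes a root from a leaf hash plus its proof and compares it with the expected root. A small sketch for the smallest non-trivial case, a two-leaf tree, again assuming the upstream module paths are available outside vendor/ and using toy data:

```go
package main

import (
	"fmt"

	"github.com/transparency-dev/merkle/proof"
	"github.com/transparency-dev/merkle/rfc6962"
)

func main() {
	h := rfc6962.DefaultHasher

	// A two-leaf tree: root = HashChildren(HashLeaf(a), HashLeaf(b)).
	leafA := h.HashLeaf([]byte("a"))
	leafB := h.HashLeaf([]byte("b"))
	root := h.HashChildren(leafA, leafB)

	// The inclusion proof for leaf 0 in a tree of size 2 is just the sibling
	// leaf hash; VerifyInclusion recomputes the root and compares it.
	err := proof.VerifyInclusion(h, 0, 2, leafA, [][]byte{leafB}, root)
	fmt.Println("inclusion verified:", err == nil)
}
```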
diff --git a/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go b/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go
deleted file mode 100644
index b04f952ef8..0000000000
--- a/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2016 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package rfc6962 provides hashing functionality according to RFC6962.
-package rfc6962
-
-import (
- "crypto"
- _ "crypto/sha256" // SHA256 is the default algorithm.
-)
-
-// Domain separation prefixes
-const (
- RFC6962LeafHashPrefix = 0
- RFC6962NodeHashPrefix = 1
-)
-
-// DefaultHasher is a SHA256 based LogHasher.
-var DefaultHasher = New(crypto.SHA256)
-
-// Hasher implements the RFC6962 tree hashing algorithm.
-type Hasher struct {
- crypto.Hash
-}
-
-// New creates a new Hasher that uses the passed-in hash function.
-func New(h crypto.Hash) *Hasher {
- return &Hasher{Hash: h}
-}
-
-// EmptyRoot returns a special case for an empty tree.
-func (t *Hasher) EmptyRoot() []byte {
- return t.New().Sum(nil)
-}
-
-// HashLeaf returns the Merkle tree leaf hash of the data passed in through leaf.
-// The data in leaf is prefixed by RFC6962LeafHashPrefix.
-func (t *Hasher) HashLeaf(leaf []byte) []byte {
- h := t.New()
- h.Write([]byte{RFC6962LeafHashPrefix})
- h.Write(leaf)
- return h.Sum(nil)
-}
-
-// HashChildren returns the inner Merkle tree node hash of the two child nodes l and r.
-// The hashed structure is RFC6962NodeHashPrefix||l||r.
-func (t *Hasher) HashChildren(l, r []byte) []byte {
- h := t.New()
- b := append(append(append(
- make([]byte, 0, 1+len(l)+len(r)),
- RFC6962NodeHashPrefix),
- l...),
- r...)
-
- h.Write(b)
- return h.Sum(nil)
-}
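The deleted Hasher boils down to two SHA-256 computations with one-byte domain-separation prefixes: 0x00 for leaves and 0x01 for internal nodes. A dependency-free sketch of the same hashing, using only crypto/sha256:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// RFC 6962 domain separation: leaf hashes are SHA-256(0x00 || data) and
// internal node hashes are SHA-256(0x01 || left || right).
func hashLeaf(data []byte) []byte {
	h := sha256.New()
	h.Write([]byte{0x00})
	h.Write(data)
	return h.Sum(nil)
}

func hashChildren(left, right []byte) []byte {
	h := sha256.New()
	h.Write([]byte{0x01})
	h.Write(left)
	h.Write(right)
	return h.Sum(nil)
}

func main() {
	a, b := hashLeaf([]byte("a")), hashLeaf([]byte("b"))
	fmt.Println("leaf(a): ", hex.EncodeToString(a))
	fmt.Println("root(ab):", hex.EncodeToString(hashChildren(a, b)))
}
```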
diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml
deleted file mode 100644
index 571116cc39..0000000000
--- a/vendor/go.uber.org/atomic/.codecov.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-coverage:
- range: 80..100
- round: down
- precision: 2
-
- status:
- project: # measuring the overall project coverage
- default: # context, you can create multiple ones with custom titles
- enabled: yes # must be yes|true to enable this status
- target: 100 # specify the target coverage for each commit status
- # option: "auto" (must increase from parent commit or pull request base)
- # option: "X%" a static target percentage to hit
- if_not_found: success # if parent is not found report status as success, error, or failure
- if_ci_failed: error # if ci fails report status as success, error, or failure
-
-# Also update COVER_IGNORE_PKGS in the Makefile.
-ignore:
- - /internal/gen-atomicint/
- - /internal/gen-valuewrapper/
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
deleted file mode 100644
index 2e337a0ed5..0000000000
--- a/vendor/go.uber.org/atomic/.gitignore
+++ /dev/null
@@ -1,15 +0,0 @@
-/bin
-.DS_Store
-/vendor
-cover.html
-cover.out
-lint.log
-
-# Binaries
-*.test
-
-# Profiling output
-*.prof
-
-# Output of fossa analyzer
-/fossa
diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md
deleted file mode 100644
index 5fe03f21bd..0000000000
--- a/vendor/go.uber.org/atomic/CHANGELOG.md
+++ /dev/null
@@ -1,117 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-## [1.10.0] - 2022-08-11
-### Added
-- Add `atomic.Float32` type for atomic operations on `float32`.
-- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`,
- and `atomic.Value`.
-- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any
-  type. This is present only for Go 1.18 or higher, and is a drop-in
- replacement for the standard library's `sync/atomic.Pointer` type.
-
-### Changed
-- Deprecate `CAS` methods on all types in favor of corresponding
- `CompareAndSwap` methods.
-
-Thanks to @eNV25 and @icpd for their contributions to this release.
-
-[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0
-
-## [1.9.0] - 2021-07-15
-### Added
-- Add `Float64.Swap` to match int atomic operations.
-- Add `atomic.Time` type for atomic operations on `time.Time` values.
-
-[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0
-
-## [1.8.0] - 2021-06-09
-### Added
-- Add `atomic.Uintptr` type for atomic operations on `uintptr` values.
-- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values.
-
-[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0
-
-## [1.7.0] - 2020-09-14
-### Added
-- Support JSON serialization and deserialization of primitive atomic types.
-- Support Text marshalling and unmarshalling for string atomics.
-
-### Changed
-- Disallow incorrect comparison of atomic values in a non-atomic way.
-
-### Removed
-- Remove dependency on `golang.org/x/{lint, tools}`.
-
-[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
-
-## [1.6.0] - 2020-02-24
-### Changed
-- Drop library dependency on `golang.org/x/{lint, tools}`.
-
-[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
-
-## [1.5.1] - 2019-11-19
-- Fix bug where `Bool.CAS` and `Bool.Toggle` do not work correctly together,
- causing `CAS` to fail even though the old value matches.
-
-[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
-
-## [1.5.0] - 2019-10-29
-### Changed
-- With Go modules, only the `go.uber.org/atomic` import path is supported now.
- If you need to use the old import path, please add a `replace` directive to
- your `go.mod`.
-
-[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
-
-## [1.4.0] - 2019-05-01
-### Added
- - Add `atomic.Error` type for atomic operations on `error` values.
-
-[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
-
-## [1.3.2] - 2018-05-02
-### Added
-- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
-
-[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
-
-## [1.3.1] - 2017-11-14
-### Fixed
-- Revert optimization for `atomic.String.Store("")` which caused data races.
-
-[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
-
-## [1.3.0] - 2017-11-13
-### Added
-- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
-
-### Changed
-- Optimize `atomic.String.Store("")` by avoiding an allocation.
-
-[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
-
-## [1.2.0] - 2017-04-12
-### Added
-- Shadow `atomic.Value` from `sync/atomic`.
-
-[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
-
-## [1.1.0] - 2017-03-10
-### Added
-- Add atomic `Float64` type.
-
-### Changed
-- Support new `go.uber.org/atomic` import path.
-
-[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
-
-## [1.0.0] - 2016-07-18
-
-- Initial release.
-
-[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt
deleted file mode 100644
index 8765c9fbc6..0000000000
--- a/vendor/go.uber.org/atomic/LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2016 Uber Technologies, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
deleted file mode 100644
index 46c945b32b..0000000000
--- a/vendor/go.uber.org/atomic/Makefile
+++ /dev/null
@@ -1,79 +0,0 @@
-# Directory to place `go install`ed binaries into.
-export GOBIN ?= $(shell pwd)/bin
-
-GOLINT = $(GOBIN)/golint
-GEN_ATOMICINT = $(GOBIN)/gen-atomicint
-GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper
-STATICCHECK = $(GOBIN)/staticcheck
-
-GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print)
-
-# Also update ignore section in .codecov.yml.
-COVER_IGNORE_PKGS = \
- go.uber.org/atomic/internal/gen-atomicint \
- go.uber.org/atomic/internal/gen-atomicwrapper
-
-.PHONY: build
-build:
- go build ./...
-
-.PHONY: test
-test:
- go test -race ./...
-
-.PHONY: gofmt
-gofmt:
- $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
- gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
- @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false)
-
-$(GOLINT):
- cd tools && go install golang.org/x/lint/golint
-
-$(STATICCHECK):
- cd tools && go install honnef.co/go/tools/cmd/staticcheck
-
-$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*)
- go build -o $@ ./internal/gen-atomicwrapper
-
-$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*)
- go build -o $@ ./internal/gen-atomicint
-
-.PHONY: golint
-golint: $(GOLINT)
- $(GOLINT) ./...
-
-.PHONY: staticcheck
-staticcheck: $(STATICCHECK)
- $(STATICCHECK) ./...
-
-.PHONY: lint
-lint: gofmt golint staticcheck generatenodirty
-
-# comma separated list of packages to consider for code coverage.
-COVER_PKG = $(shell \
- go list -find ./... | \
- grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \
- paste -sd, -)
-
-.PHONY: cover
-cover:
- go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./...
- go tool cover -html=cover.out -o cover.html
-
-.PHONY: generate
-generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
- go generate ./...
-
-.PHONY: generatenodirty
-generatenodirty:
- @[ -z "$$(git status --porcelain)" ] || ( \
- echo "Working tree is dirty. Commit your changes first."; \
- git status; \
- exit 1 )
- @make generate
- @status=$$(git status --porcelain); \
- [ -z "$$status" ] || ( \
- echo "Working tree is dirty after `make generate`:"; \
- echo "$$status"; \
- echo "Please ensure that the generated code is up-to-date." )
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
deleted file mode 100644
index 96b47a1f12..0000000000
--- a/vendor/go.uber.org/atomic/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
-
-Simple wrappers for primitive types to enforce atomic access.
-
-## Installation
-
-```shell
-$ go get -u go.uber.org/atomic@v1
-```
-
-### Legacy Import Path
-
-As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way
-of using this package. If you are using Go modules, this package will fail to
-compile with the legacy import path `github.com/uber-go/atomic`.
-
-We recommend migrating your code to the new import path but if you're unable
-to do so, or if your dependencies are still using the old import path, you
-will have to add a `replace` directive to your `go.mod` file downgrading the
-legacy import path to an older version.
-
-```
-replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0
-```
-
-You can do so automatically by running the following command.
-
-```shell
-$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0
-```
-
-## Usage
-
-The standard library's `sync/atomic` is powerful, but it's easy to forget which
-variables must be accessed atomically. `go.uber.org/atomic` preserves all the
-functionality of the standard library, but wraps the primitive types to
-provide a safer, more convenient API.
-
-```go
-var atom atomic.Uint32
-atom.Store(42)
-atom.Sub(2)
-atom.CAS(40, 11)
-```
-
-See the [documentation][doc] for a complete API specification.
-
-## Development Status
-
-Stable.
-
----
-
-Released under the [MIT License](LICENSE.txt).
-
-[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
-[doc]: https://godoc.org/go.uber.org/atomic
-[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg
-[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml
-[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
-[cov]: https://codecov.io/gh/uber-go/atomic
-[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
-[reportcard]: https://goreportcard.com/report/go.uber.org/atomic
diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go
deleted file mode 100644
index dfa2085f49..0000000000
--- a/vendor/go.uber.org/atomic/bool.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
-)
-
-// Bool is an atomic type-safe wrapper for bool values.
-type Bool struct {
- _ nocmp // disallow non-atomic comparison
-
- v Uint32
-}
-
-var _zeroBool bool
-
-// NewBool creates a new Bool.
-func NewBool(val bool) *Bool {
- x := &Bool{}
- if val != _zeroBool {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped bool.
-func (x *Bool) Load() bool {
- return truthy(x.v.Load())
-}
-
-// Store atomically stores the passed bool.
-func (x *Bool) Store(val bool) {
- x.v.Store(boolToInt(val))
-}
-
-// CAS is an atomic compare-and-swap for bool values.
-//
-// Deprecated: Use CompareAndSwap.
-func (x *Bool) CAS(old, new bool) (swapped bool) {
- return x.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap for bool values.
-func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) {
- return x.v.CompareAndSwap(boolToInt(old), boolToInt(new))
-}
-
-// Swap atomically stores the given bool and returns the old
-// value.
-func (x *Bool) Swap(val bool) (old bool) {
- return truthy(x.v.Swap(boolToInt(val)))
-}
-
-// MarshalJSON encodes the wrapped bool into JSON.
-func (x *Bool) MarshalJSON() ([]byte, error) {
- return json.Marshal(x.Load())
-}
-
-// UnmarshalJSON decodes a bool from JSON.
-func (x *Bool) UnmarshalJSON(b []byte) error {
- var v bool
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- x.Store(v)
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go
deleted file mode 100644
index a2e60e9873..0000000000
--- a/vendor/go.uber.org/atomic/bool_ext.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "strconv"
-)
-
-//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go
-
-func truthy(n uint32) bool {
- return n == 1
-}
-
-func boolToInt(b bool) uint32 {
- if b {
- return 1
- }
- return 0
-}
-
-// Toggle atomically negates the Boolean and returns the previous value.
-func (b *Bool) Toggle() (old bool) {
- for {
- old := b.Load()
- if b.CAS(old, !old) {
- return old
- }
- }
-}
-
-// String encodes the wrapped value as a string.
-func (b *Bool) String() string {
- return strconv.FormatBool(b.Load())
-}
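
The Toggle method above is a textbook compare-and-swap retry loop: load the current value, compute the flipped value, and retry until no concurrent writer has raced in between. A minimal sketch of the same pattern against the Go 1.19+ standard library (the toggle helper below is illustrative, not part of this repository):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// toggle atomically flips b and returns the previous value using the same
	// compare-and-swap retry loop as the Bool.Toggle shown above (a sketch,
	// not the vendored API; requires Go 1.19+ for atomic.Bool).
	func toggle(b *atomic.Bool) bool {
		for {
			old := b.Load()
			if b.CompareAndSwap(old, !old) {
				return old
			}
		}
	}

	func main() {
		var b atomic.Bool
		fmt.Println(toggle(&b)) // false; b now holds true
		fmt.Println(toggle(&b)) // true; b now holds false
	}
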
diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go
deleted file mode 100644
index ae7390ee68..0000000000
--- a/vendor/go.uber.org/atomic/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package atomic provides simple wrappers around numerics to enforce atomic
-// access.
-package atomic
diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go
deleted file mode 100644
index 6f4157445c..0000000000
--- a/vendor/go.uber.org/atomic/duration.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "time"
-)
-
-// Duration is an atomic type-safe wrapper for time.Duration values.
-type Duration struct {
- _ nocmp // disallow non-atomic comparison
-
- v Int64
-}
-
-var _zeroDuration time.Duration
-
-// NewDuration creates a new Duration.
-func NewDuration(val time.Duration) *Duration {
- x := &Duration{}
- if val != _zeroDuration {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped time.Duration.
-func (x *Duration) Load() time.Duration {
- return time.Duration(x.v.Load())
-}
-
-// Store atomically stores the passed time.Duration.
-func (x *Duration) Store(val time.Duration) {
- x.v.Store(int64(val))
-}
-
-// CAS is an atomic compare-and-swap for time.Duration values.
-//
-// Deprecated: Use CompareAndSwap.
-func (x *Duration) CAS(old, new time.Duration) (swapped bool) {
- return x.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap for time.Duration values.
-func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) {
- return x.v.CompareAndSwap(int64(old), int64(new))
-}
-
-// Swap atomically stores the given time.Duration and returns the old
-// value.
-func (x *Duration) Swap(val time.Duration) (old time.Duration) {
- return time.Duration(x.v.Swap(int64(val)))
-}
-
-// MarshalJSON encodes the wrapped time.Duration into JSON.
-func (x *Duration) MarshalJSON() ([]byte, error) {
- return json.Marshal(x.Load())
-}
-
-// UnmarshalJSON decodes a time.Duration from JSON.
-func (x *Duration) UnmarshalJSON(b []byte) error {
- var v time.Duration
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- x.Store(v)
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go
deleted file mode 100644
index 4c18b0a9ed..0000000000
--- a/vendor/go.uber.org/atomic/duration_ext.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import "time"
-
-//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go
-
-// Add atomically adds to the wrapped time.Duration and returns the new value.
-func (d *Duration) Add(delta time.Duration) time.Duration {
- return time.Duration(d.v.Add(int64(delta)))
-}
-
-// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
-func (d *Duration) Sub(delta time.Duration) time.Duration {
- return time.Duration(d.v.Sub(int64(delta)))
-}
-
-// String encodes the wrapped value as a string.
-func (d *Duration) String() string {
- return d.Load().String()
-}
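
Duration gets its atomicity by storing the time.Duration as its underlying int64 tick count and reusing the integer wrapper for Add, Sub, and CAS. A small sketch of the same idea using only the Go 1.19+ standard library (variable names are illustrative):

	package main

	import (
		"fmt"
		"sync/atomic"
		"time"
	)

	func main() {
		// Store a time.Duration by its int64 tick count; all arithmetic then
		// happens on the integer atomics, mirroring the Duration type above.
		var d atomic.Int64
		d.Store(int64(2 * time.Second))
		d.Add(int64(500 * time.Millisecond))
		fmt.Println(time.Duration(d.Load())) // 2.5s
	}
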
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
deleted file mode 100644
index 27b23ea162..0000000000
--- a/vendor/go.uber.org/atomic/error.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// Error is an atomic type-safe wrapper for error values.
-type Error struct {
- _ nocmp // disallow non-atomic comparison
-
- v Value
-}
-
-var _zeroError error
-
-// NewError creates a new Error.
-func NewError(val error) *Error {
- x := &Error{}
- if val != _zeroError {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped error.
-func (x *Error) Load() error {
- return unpackError(x.v.Load())
-}
-
-// Store atomically stores the passed error.
-func (x *Error) Store(val error) {
- x.v.Store(packError(val))
-}
-
-// CompareAndSwap is an atomic compare-and-swap for error values.
-func (x *Error) CompareAndSwap(old, new error) (swapped bool) {
- return x.v.CompareAndSwap(packError(old), packError(new))
-}
-
-// Swap atomically stores the given error and returns the old
-// value.
-func (x *Error) Swap(val error) (old error) {
- return unpackError(x.v.Swap(packError(val)))
-}
diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go
deleted file mode 100644
index d31fb633bb..0000000000
--- a/vendor/go.uber.org/atomic/error_ext.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// atomic.Value panics on nil inputs, or if the underlying type changes.
-// Stabilize by always storing a custom struct that we control.
-
-//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go
-
-type packedError struct{ Value error }
-
-func packError(v error) interface{} {
- return packedError{v}
-}
-
-func unpackError(v interface{}) error {
- if err, ok := v.(packedError); ok {
- return err.Value
- }
- return nil
-}
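
The pack/unpack pair above exists because sync/atomic.Value panics when given a nil interface value or when the stored dynamic type changes between calls; wrapping every error in one concrete struct keeps the dynamic type constant and makes nil storable. A self-contained sketch of that technique (the packedErr name is illustrative):

	package main

	import (
		"errors"
		"fmt"
		"sync/atomic"
	)

	// packedErr is a concrete wrapper: atomic.Value always sees the same
	// dynamic type, so nil errors and errors of differing concrete types can
	// all be stored without panicking.
	type packedErr struct{ err error }

	func main() {
		var v atomic.Value
		v.Store(packedErr{nil})                // storing a nil error via the wrapper is fine
		v.Store(packedErr{errors.New("boom")}) // same dynamic type, so no panic
		fmt.Println(v.Load().(packedErr).err)  // boom
	}
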
diff --git a/vendor/go.uber.org/atomic/float32.go b/vendor/go.uber.org/atomic/float32.go
deleted file mode 100644
index 5d535a6d2a..0000000000
--- a/vendor/go.uber.org/atomic/float32.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "math"
-)
-
-// Float32 is an atomic type-safe wrapper for float32 values.
-type Float32 struct {
- _ nocmp // disallow non-atomic comparison
-
- v Uint32
-}
-
-var _zeroFloat32 float32
-
-// NewFloat32 creates a new Float32.
-func NewFloat32(val float32) *Float32 {
- x := &Float32{}
- if val != _zeroFloat32 {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped float32.
-func (x *Float32) Load() float32 {
- return math.Float32frombits(x.v.Load())
-}
-
-// Store atomically stores the passed float32.
-func (x *Float32) Store(val float32) {
- x.v.Store(math.Float32bits(val))
-}
-
-// Swap atomically stores the given float32 and returns the old
-// value.
-func (x *Float32) Swap(val float32) (old float32) {
- return math.Float32frombits(x.v.Swap(math.Float32bits(val)))
-}
-
-// MarshalJSON encodes the wrapped float32 into JSON.
-func (x *Float32) MarshalJSON() ([]byte, error) {
- return json.Marshal(x.Load())
-}
-
-// UnmarshalJSON decodes a float32 from JSON.
-func (x *Float32) UnmarshalJSON(b []byte) error {
- var v float32
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- x.Store(v)
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/float32_ext.go b/vendor/go.uber.org/atomic/float32_ext.go
deleted file mode 100644
index b0cd8d9c82..0000000000
--- a/vendor/go.uber.org/atomic/float32_ext.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "math"
- "strconv"
-)
-
-//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go
-
-// Add atomically adds to the wrapped float32 and returns the new value.
-func (f *Float32) Add(delta float32) float32 {
- for {
- old := f.Load()
- new := old + delta
- if f.CAS(old, new) {
- return new
- }
- }
-}
-
-// Sub atomically subtracts from the wrapped float32 and returns the new value.
-func (f *Float32) Sub(delta float32) float32 {
- return f.Add(-delta)
-}
-
-// CAS is an atomic compare-and-swap for float32 values.
-//
-// Deprecated: Use CompareAndSwap
-func (f *Float32) CAS(old, new float32) (swapped bool) {
- return f.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap for float32 values.
-//
-// Note: CompareAndSwap deliberately departs from IEEE-754 semantics. NaN != NaN with
-// Go's built-in operators, but CompareAndSwap treats a stored NaN as equal to a
-// passed-in NaN. This keeps typical CompareAndSwap loops from spinning forever, e.g.,
-//
-// for {
-// old := atom.Load()
-// new = f(old)
-// if atom.CompareAndSwap(old, new) {
-// break
-// }
-// }
-//
-// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN, the above would loop forever.
-func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) {
- return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new))
-}
-
-// String encodes the wrapped value as a string.
-func (f *Float32) String() string {
- // 'g' is the behavior for floats with %v.
- return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32)
-}
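
As the comment above explains, the float wrappers compare IEEE-754 bit patterns instead of using ==, so a stored NaN matches a passed-in NaN with the same bit pattern and CAS loops keep making progress. A minimal sketch of that behaviour with the standard library (the casFloat32 helper is illustrative):

	package main

	import (
		"fmt"
		"math"
		"sync/atomic"
	)

	// casFloat32 compares bit patterns, so a NaN stored earlier matches a NaN
	// with the same bit pattern passed in later, even though nan == nan is false.
	func casFloat32(bits *uint32, old, new float32) bool {
		return atomic.CompareAndSwapUint32(bits, math.Float32bits(old), math.Float32bits(new))
	}

	func main() {
		nan := float32(math.NaN())
		bits := math.Float32bits(nan) // "store" the NaN by its bit pattern
		fmt.Println(nan == nan)                  // false: IEEE-754 comparison
		fmt.Println(casFloat32(&bits, nan, 1.5)) // true: bit patterns match, CAS succeeds
		fmt.Println(math.Float32frombits(bits))  // 1.5
	}
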
diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go
deleted file mode 100644
index 11d5189a5f..0000000000
--- a/vendor/go.uber.org/atomic/float64.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "math"
-)
-
-// Float64 is an atomic type-safe wrapper for float64 values.
-type Float64 struct {
- _ nocmp // disallow non-atomic comparison
-
- v Uint64
-}
-
-var _zeroFloat64 float64
-
-// NewFloat64 creates a new Float64.
-func NewFloat64(val float64) *Float64 {
- x := &Float64{}
- if val != _zeroFloat64 {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped float64.
-func (x *Float64) Load() float64 {
- return math.Float64frombits(x.v.Load())
-}
-
-// Store atomically stores the passed float64.
-func (x *Float64) Store(val float64) {
- x.v.Store(math.Float64bits(val))
-}
-
-// Swap atomically stores the given float64 and returns the old
-// value.
-func (x *Float64) Swap(val float64) (old float64) {
- return math.Float64frombits(x.v.Swap(math.Float64bits(val)))
-}
-
-// MarshalJSON encodes the wrapped float64 into JSON.
-func (x *Float64) MarshalJSON() ([]byte, error) {
- return json.Marshal(x.Load())
-}
-
-// UnmarshalJSON decodes a float64 from JSON.
-func (x *Float64) UnmarshalJSON(b []byte) error {
- var v float64
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- x.Store(v)
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go
deleted file mode 100644
index 48c52b0abf..0000000000
--- a/vendor/go.uber.org/atomic/float64_ext.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "math"
- "strconv"
-)
-
-//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go
-
-// Add atomically adds to the wrapped float64 and returns the new value.
-func (f *Float64) Add(delta float64) float64 {
- for {
- old := f.Load()
- new := old + delta
- if f.CAS(old, new) {
- return new
- }
- }
-}
-
-// Sub atomically subtracts from the wrapped float64 and returns the new value.
-func (f *Float64) Sub(delta float64) float64 {
- return f.Add(-delta)
-}
-
-// CAS is an atomic compare-and-swap for float64 values.
-//
-// Deprecated: Use CompareAndSwap
-func (f *Float64) CAS(old, new float64) (swapped bool) {
- return f.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap for float64 values.
-//
-// Note: CompareAndSwap deliberately departs from IEEE-754 semantics. NaN != NaN with
-// Go's built-in operators, but CompareAndSwap treats a stored NaN as equal to a
-// passed-in NaN. This keeps typical CompareAndSwap loops from spinning forever, e.g.,
-//
-// for {
-// old := atom.Load()
-// new = f(old)
-// if atom.CompareAndSwap(old, new) {
-// break
-// }
-// }
-//
-// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN, the above would loop forever.
-func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) {
- return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new))
-}
-
-// String encodes the wrapped value as a string.
-func (f *Float64) String() string {
- // 'g' is the behavior for floats with %v.
- return strconv.FormatFloat(f.Load(), 'g', -1, 64)
-}
diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go
deleted file mode 100644
index 1e9ef4f879..0000000000
--- a/vendor/go.uber.org/atomic/gen.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go
-//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go
-//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go
-//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go
-//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go
diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go
deleted file mode 100644
index b9a68f42ca..0000000000
--- a/vendor/go.uber.org/atomic/int32.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Int32 is an atomic wrapper around int32.
-type Int32 struct {
- _ nocmp // disallow non-atomic comparison
-
- v int32
-}
-
-// NewInt32 creates a new Int32.
-func NewInt32(val int32) *Int32 {
- return &Int32{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Int32) Load() int32 {
- return atomic.LoadInt32(&i.v)
-}
-
-// Add atomically adds to the wrapped int32 and returns the new value.
-func (i *Int32) Add(delta int32) int32 {
- return atomic.AddInt32(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped int32 and returns the new value.
-func (i *Int32) Sub(delta int32) int32 {
- return atomic.AddInt32(&i.v, -delta)
-}
-
-// Inc atomically increments the wrapped int32 and returns the new value.
-func (i *Int32) Inc() int32 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped int32 and returns the new value.
-func (i *Int32) Dec() int32 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-//
-// Deprecated: Use CompareAndSwap.
-func (i *Int32) CAS(old, new int32) (swapped bool) {
- return i.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) {
- return atomic.CompareAndSwapInt32(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Int32) Store(val int32) {
- atomic.StoreInt32(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped int32 and returns the old value.
-func (i *Int32) Swap(val int32) (old int32) {
- return atomic.SwapInt32(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped int32 into JSON.
-func (i *Int32) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped int32.
-func (i *Int32) UnmarshalJSON(b []byte) error {
- var v int32
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Int32) String() string {
- v := i.Load()
- return strconv.FormatInt(int64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go
deleted file mode 100644
index 78d260976f..0000000000
--- a/vendor/go.uber.org/atomic/int64.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Int64 is an atomic wrapper around int64.
-type Int64 struct {
- _ nocmp // disallow non-atomic comparison
-
- v int64
-}
-
-// NewInt64 creates a new Int64.
-func NewInt64(val int64) *Int64 {
- return &Int64{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Int64) Load() int64 {
- return atomic.LoadInt64(&i.v)
-}
-
-// Add atomically adds to the wrapped int64 and returns the new value.
-func (i *Int64) Add(delta int64) int64 {
- return atomic.AddInt64(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped int64 and returns the new value.
-func (i *Int64) Sub(delta int64) int64 {
- return atomic.AddInt64(&i.v, -delta)
-}
-
-// Inc atomically increments the wrapped int64 and returns the new value.
-func (i *Int64) Inc() int64 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped int64 and returns the new value.
-func (i *Int64) Dec() int64 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-//
-// Deprecated: Use CompareAndSwap.
-func (i *Int64) CAS(old, new int64) (swapped bool) {
- return i.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) {
- return atomic.CompareAndSwapInt64(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Int64) Store(val int64) {
- atomic.StoreInt64(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped int64 and returns the old value.
-func (i *Int64) Swap(val int64) (old int64) {
- return atomic.SwapInt64(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped int64 into JSON.
-func (i *Int64) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped int64.
-func (i *Int64) UnmarshalJSON(b []byte) error {
- var v int64
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Int64) String() string {
- v := i.Load()
- return strconv.FormatInt(int64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go
deleted file mode 100644
index 54b74174ab..0000000000
--- a/vendor/go.uber.org/atomic/nocmp.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// nocmp is an uncomparable struct. Embed this inside another struct to make
-// it uncomparable.
-//
-// type Foo struct {
-// nocmp
-// // ...
-// }
-//
-// This DOES NOT:
-//
-// - Disallow shallow copies of structs
-// - Disallow comparison of pointers to uncomparable structs
-type nocmp [0]func()
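
The [0]func() trick works because function values are not comparable in Go, and an array type is only comparable if its element type is; embedding such a zero-sized field therefore turns == on the enclosing struct into a compile-time error without adding any storage. A tiny sketch (the type names are illustrative):

	package main

	import "fmt"

	// uncomparable is a zero-sized field of an incomparable type (funcs cannot
	// be compared), so any struct containing it cannot be compared with ==.
	type uncomparable [0]func()

	type counter struct {
		_ uncomparable
		n int64
	}

	func main() {
		var a, b counter
		a.n, b.n = 1, 1
		fmt.Println(a.n == b.n) // comparing individual fields still works
		// fmt.Println(a == b)  // compile error: counter cannot be compared
	}
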
diff --git a/vendor/go.uber.org/atomic/pointer_go118.go b/vendor/go.uber.org/atomic/pointer_go118.go
deleted file mode 100644
index e0f47dba46..0000000000
--- a/vendor/go.uber.org/atomic/pointer_go118.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-//go:build go1.18 && !go1.19
-// +build go1.18,!go1.19
-
-package atomic
-
-import "unsafe"
-
-type Pointer[T any] struct {
- _ nocmp // disallow non-atomic comparison
- p UnsafePointer
-}
-
-// NewPointer creates a new Pointer.
-func NewPointer[T any](v *T) *Pointer[T] {
- var p Pointer[T]
- if v != nil {
- p.p.Store(unsafe.Pointer(v))
- }
- return &p
-}
-
-// Load atomically loads the wrapped value.
-func (p *Pointer[T]) Load() *T {
- return (*T)(p.p.Load())
-}
-
-// Store atomically stores the passed value.
-func (p *Pointer[T]) Store(val *T) {
- p.p.Store(unsafe.Pointer(val))
-}
-
-// Swap atomically swaps the wrapped pointer and returns the old value.
-func (p *Pointer[T]) Swap(val *T) (old *T) {
- return (*T)(p.p.Swap(unsafe.Pointer(val)))
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
- return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new))
-}
diff --git a/vendor/go.uber.org/atomic/pointer_go119.go b/vendor/go.uber.org/atomic/pointer_go119.go
deleted file mode 100644
index 6726f17ad6..0000000000
--- a/vendor/go.uber.org/atomic/pointer_go119.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-//go:build go1.19
-// +build go1.19
-
-package atomic
-
-import "sync/atomic"
-
-// Pointer is an atomic pointer of type *T.
-type Pointer[T any] struct {
- _ nocmp // disallow non-atomic comparison
- p atomic.Pointer[T]
-}
-
-// NewPointer creates a new Pointer.
-func NewPointer[T any](v *T) *Pointer[T] {
- var p Pointer[T]
- if v != nil {
- p.p.Store(v)
- }
- return &p
-}
-
-// Load atomically loads the wrapped value.
-func (p *Pointer[T]) Load() *T {
- return p.p.Load()
-}
-
-// Store atomically stores the passed value.
-func (p *Pointer[T]) Store(val *T) {
- p.p.Store(val)
-}
-
-// Swap atomically swaps the wrapped pointer and returns the old value.
-func (p *Pointer[T]) Swap(val *T) (old *T) {
- return p.p.Swap(val)
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
- return p.p.CompareAndSwap(old, new)
-}
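
On Go 1.19 and newer this Pointer[T] is a thin shim over the standard library's sync/atomic.Pointer[T], whereas the go1.18 build in the previous file has to round-trip through unsafe.Pointer. Using the standard type directly looks like this (the config type is illustrative):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// config is an illustrative payload type, not part of this repository.
	type config struct{ addr string }

	func main() {
		var p atomic.Pointer[config]
		p.Store(&config{addr: "localhost:8080"})
		old := p.Swap(&config{addr: "localhost:9090"})
		fmt.Println(old.addr, p.Load().addr) // localhost:8080 localhost:9090
	}
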
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
deleted file mode 100644
index c4bea70f4d..0000000000
--- a/vendor/go.uber.org/atomic/string.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// String is an atomic type-safe wrapper for string values.
-type String struct {
- _ nocmp // disallow non-atomic comparison
-
- v Value
-}
-
-var _zeroString string
-
-// NewString creates a new String.
-func NewString(val string) *String {
- x := &String{}
- if val != _zeroString {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped string.
-func (x *String) Load() string {
- if v := x.v.Load(); v != nil {
- return v.(string)
- }
- return _zeroString
-}
-
-// Store atomically stores the passed string.
-func (x *String) Store(val string) {
- x.v.Store(val)
-}
-
-// CompareAndSwap is an atomic compare-and-swap for string values.
-func (x *String) CompareAndSwap(old, new string) (swapped bool) {
- return x.v.CompareAndSwap(old, new)
-}
-
-// Swap atomically stores the given string and returns the old
-// value.
-func (x *String) Swap(val string) (old string) {
- return x.v.Swap(val).(string)
-}
diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go
deleted file mode 100644
index 1f63dfd5b9..0000000000
--- a/vendor/go.uber.org/atomic/string_ext.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -compareandswap -swap -file=string.go
-
-// String returns the wrapped value.
-func (s *String) String() string {
- return s.Load()
-}
-
-// MarshalText encodes the wrapped string into a textual form.
-//
-// This makes it encodable as JSON, YAML, XML, and more.
-func (s *String) MarshalText() ([]byte, error) {
- return []byte(s.Load()), nil
-}
-
-// UnmarshalText decodes text and replaces the wrapped string with it.
-//
-// This makes it decodable from JSON, YAML, XML, and more.
-func (s *String) UnmarshalText(b []byte) error {
- s.Store(string(b))
- return nil
-}
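
MarshalText and UnmarshalText are all encoding/json (and most YAML and XML encoders) need to round-trip a value as a plain string, which is why the String wrapper ships no dedicated JSON methods. A self-contained sketch of that mechanism, with an ordinary struct standing in for the atomic wrapper (type and field names are illustrative):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// atomicString is a plain stand-in for the wrapper above: implementing
	// encoding.TextMarshaler/TextUnmarshaler is enough for encoding/json to
	// encode and decode the value as a JSON string.
	type atomicString struct{ s string }

	func (a *atomicString) MarshalText() ([]byte, error) { return []byte(a.s), nil }
	func (a *atomicString) UnmarshalText(b []byte) error { a.s = string(b); return nil }

	func main() {
		out, _ := json.Marshal(&atomicString{s: "hello"})
		fmt.Println(string(out)) // "hello"

		var in atomicString
		_ = json.Unmarshal([]byte(`"world"`), &in)
		fmt.Println(in.s) // world
	}
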
diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go
deleted file mode 100644
index 1660feb142..0000000000
--- a/vendor/go.uber.org/atomic/time.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "time"
-)
-
-// Time is an atomic type-safe wrapper for time.Time values.
-type Time struct {
- _ nocmp // disallow non-atomic comparison
-
- v Value
-}
-
-var _zeroTime time.Time
-
-// NewTime creates a new Time.
-func NewTime(val time.Time) *Time {
- x := &Time{}
- if val != _zeroTime {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped time.Time.
-func (x *Time) Load() time.Time {
- return unpackTime(x.v.Load())
-}
-
-// Store atomically stores the passed time.Time.
-func (x *Time) Store(val time.Time) {
- x.v.Store(packTime(val))
-}
diff --git a/vendor/go.uber.org/atomic/time_ext.go b/vendor/go.uber.org/atomic/time_ext.go
deleted file mode 100644
index 1e3dc978aa..0000000000
--- a/vendor/go.uber.org/atomic/time_ext.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (c) 2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import "time"
-
-//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go
-
-func packTime(t time.Time) interface{} {
- return t
-}
-
-func unpackTime(v interface{}) time.Time {
- if t, ok := v.(time.Time); ok {
- return t
- }
- return time.Time{}
-}
diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go
deleted file mode 100644
index d6f04a96dc..0000000000
--- a/vendor/go.uber.org/atomic/uint32.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Uint32 is an atomic wrapper around uint32.
-type Uint32 struct {
- _ nocmp // disallow non-atomic comparison
-
- v uint32
-}
-
-// NewUint32 creates a new Uint32.
-func NewUint32(val uint32) *Uint32 {
- return &Uint32{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Uint32) Load() uint32 {
- return atomic.LoadUint32(&i.v)
-}
-
-// Add atomically adds to the wrapped uint32 and returns the new value.
-func (i *Uint32) Add(delta uint32) uint32 {
- return atomic.AddUint32(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped uint32 and returns the new value.
-func (i *Uint32) Sub(delta uint32) uint32 {
- return atomic.AddUint32(&i.v, ^(delta - 1))
-}
-
-// Inc atomically increments the wrapped uint32 and returns the new value.
-func (i *Uint32) Inc() uint32 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped uint32 and returns the new value.
-func (i *Uint32) Dec() uint32 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-//
-// Deprecated: Use CompareAndSwap.
-func (i *Uint32) CAS(old, new uint32) (swapped bool) {
- return i.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) {
- return atomic.CompareAndSwapUint32(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Uint32) Store(val uint32) {
- atomic.StoreUint32(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped uint32 and returns the old value.
-func (i *Uint32) Swap(val uint32) (old uint32) {
- return atomic.SwapUint32(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped uint32 into JSON.
-func (i *Uint32) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped uint32.
-func (i *Uint32) UnmarshalJSON(b []byte) error {
- var v uint32
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Uint32) String() string {
- v := i.Load()
- return strconv.FormatUint(uint64(v), 10)
-}
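
Sub on the unsigned wrappers adds ^(delta - 1), the two's complement of delta, because sync/atomic offers no subtraction for unsigned integers; the sync/atomic documentation recommends the same idiom. A minimal sketch (the sub32 helper is illustrative):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// sub32 subtracts delta from an unsigned counter by adding its two's
	// complement, ^(delta - 1), the same idiom Uint32.Sub above uses.
	func sub32(addr *uint32, delta uint32) uint32 {
		return atomic.AddUint32(addr, ^(delta - 1))
	}

	func main() {
		var n uint32 = 10
		fmt.Println(sub32(&n, 3)) // 7
		fmt.Println(sub32(&n, 7)) // 0
	}
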
diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go
deleted file mode 100644
index 2574bdd5ec..0000000000
--- a/vendor/go.uber.org/atomic/uint64.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Uint64 is an atomic wrapper around uint64.
-type Uint64 struct {
- _ nocmp // disallow non-atomic comparison
-
- v uint64
-}
-
-// NewUint64 creates a new Uint64.
-func NewUint64(val uint64) *Uint64 {
- return &Uint64{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Uint64) Load() uint64 {
- return atomic.LoadUint64(&i.v)
-}
-
-// Add atomically adds to the wrapped uint64 and returns the new value.
-func (i *Uint64) Add(delta uint64) uint64 {
- return atomic.AddUint64(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped uint64 and returns the new value.
-func (i *Uint64) Sub(delta uint64) uint64 {
- return atomic.AddUint64(&i.v, ^(delta - 1))
-}
-
-// Inc atomically increments the wrapped uint64 and returns the new value.
-func (i *Uint64) Inc() uint64 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped uint64 and returns the new value.
-func (i *Uint64) Dec() uint64 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-//
-// Deprecated: Use CompareAndSwap.
-func (i *Uint64) CAS(old, new uint64) (swapped bool) {
- return i.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) {
- return atomic.CompareAndSwapUint64(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Uint64) Store(val uint64) {
- atomic.StoreUint64(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped uint64 and returns the old value.
-func (i *Uint64) Swap(val uint64) (old uint64) {
- return atomic.SwapUint64(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped uint64 into JSON.
-func (i *Uint64) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped uint64.
-func (i *Uint64) UnmarshalJSON(b []byte) error {
- var v uint64
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Uint64) String() string {
- v := i.Load()
- return strconv.FormatUint(uint64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go
deleted file mode 100644
index 81b275a7ad..0000000000
--- a/vendor/go.uber.org/atomic/uintptr.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Uintptr is an atomic wrapper around uintptr.
-type Uintptr struct {
- _ nocmp // disallow non-atomic comparison
-
- v uintptr
-}
-
-// NewUintptr creates a new Uintptr.
-func NewUintptr(val uintptr) *Uintptr {
- return &Uintptr{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Uintptr) Load() uintptr {
- return atomic.LoadUintptr(&i.v)
-}
-
-// Add atomically adds to the wrapped uintptr and returns the new value.
-func (i *Uintptr) Add(delta uintptr) uintptr {
- return atomic.AddUintptr(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped uintptr and returns the new value.
-func (i *Uintptr) Sub(delta uintptr) uintptr {
- return atomic.AddUintptr(&i.v, ^(delta - 1))
-}
-
-// Inc atomically increments the wrapped uintptr and returns the new value.
-func (i *Uintptr) Inc() uintptr {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped uintptr and returns the new value.
-func (i *Uintptr) Dec() uintptr {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-//
-// Deprecated: Use CompareAndSwap.
-func (i *Uintptr) CAS(old, new uintptr) (swapped bool) {
- return i.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) {
- return atomic.CompareAndSwapUintptr(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Uintptr) Store(val uintptr) {
- atomic.StoreUintptr(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped uintptr and returns the old value.
-func (i *Uintptr) Swap(val uintptr) (old uintptr) {
- return atomic.SwapUintptr(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped uintptr into JSON.
-func (i *Uintptr) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped uintptr.
-func (i *Uintptr) UnmarshalJSON(b []byte) error {
- var v uintptr
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Uintptr) String() string {
- v := i.Load()
- return strconv.FormatUint(uint64(v), 10)
-}
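
The wrapper deleted above is a thin, allocation-free layer over `sync/atomic`. As a minimal usage sketch (illustrative only, not part of the vendored sources; on Go 1.19+ the standard library's own `atomic.Uintptr` type provides equivalent methods):

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	// NewUintptr, Add, Load, and CompareAndSwap are the methods defined
	// in the deleted uintptr.go above.
	p := atomic.NewUintptr(0x1000)
	p.Add(16)
	fmt.Println(p.Load()) // 4112

	// CompareAndSwap succeeds only when the current value equals old.
	fmt.Println(p.CompareAndSwap(4112, 0)) // true
	fmt.Println(p.Load())                  // 0
}
```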
diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go
deleted file mode 100644
index 34868baf6a..0000000000
--- a/vendor/go.uber.org/atomic/unsafe_pointer.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2021-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "sync/atomic"
- "unsafe"
-)
-
-// UnsafePointer is an atomic wrapper around unsafe.Pointer.
-type UnsafePointer struct {
- _ nocmp // disallow non-atomic comparison
-
- v unsafe.Pointer
-}
-
-// NewUnsafePointer creates a new UnsafePointer.
-func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer {
- return &UnsafePointer{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (p *UnsafePointer) Load() unsafe.Pointer {
- return atomic.LoadPointer(&p.v)
-}
-
-// Store atomically stores the passed value.
-func (p *UnsafePointer) Store(val unsafe.Pointer) {
- atomic.StorePointer(&p.v, val)
-}
-
-// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value.
-func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) {
- return atomic.SwapPointer(&p.v, val)
-}
-
-// CAS is an atomic compare-and-swap.
-//
-// Deprecated: Use CompareAndSwap
-func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) {
- return p.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) {
- return atomic.CompareAndSwapPointer(&p.v, old, new)
-}
diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go
deleted file mode 100644
index 52caedb9a5..0000000000
--- a/vendor/go.uber.org/atomic/value.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import "sync/atomic"
-
-// Value shadows the type of the same name from sync/atomic
-// https://godoc.org/sync/atomic#Value
-type Value struct {
- _ nocmp // disallow non-atomic comparison
-
- atomic.Value
-}
diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml
deleted file mode 100644
index 6d4d1be7b5..0000000000
--- a/vendor/go.uber.org/multierr/.codecov.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-coverage:
- range: 80..100
- round: down
- precision: 2
-
- status:
- project: # measuring the overall project coverage
- default: # context, you can create multiple ones with custom titles
- enabled: yes # must be yes|true to enable this status
- target: 100 # specify the target coverage for each commit status
- # option: "auto" (must increase from parent commit or pull request base)
- # option: "X%" a static target percentage to hit
- if_not_found: success # if parent is not found report status as success, error, or failure
- if_ci_failed: error # if ci fails report status as success, error, or failure
-
diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore
deleted file mode 100644
index b9a05e3da0..0000000000
--- a/vendor/go.uber.org/multierr/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-/vendor
-cover.html
-cover.out
-/bin
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
deleted file mode 100644
index f8177b978c..0000000000
--- a/vendor/go.uber.org/multierr/CHANGELOG.md
+++ /dev/null
@@ -1,95 +0,0 @@
-Releases
-========
-
-v1.11.0 (2023-03-28)
-====================
-- `Errors` now supports any error that implements the multiple-error
-  interface.
-- Add `Every` function to allow checking if all errors in the chain
-  satisfy `errors.Is` against the target error.
-
-v1.10.0 (2023-03-08)
-====================
-
-- Comply with Go 1.20's multiple-error interface.
-- Drop Go 1.18 support.
- Per the support policy, only Go 1.19 and 1.20 are supported now.
-- Drop all non-test external dependencies.
-
-v1.9.0 (2022-12-12)
-===================
-
-- Add `AppendFunc` that allows passing functions, similar to
-  `AppendInvoke`.
-
-- Bump up yaml.v3 dependency to 3.0.1.
-
-v1.8.0 (2022-02-28)
-===================
-
-- `Combine`: perform zero allocations when there are no errors.
-
-
-v1.7.0 (2021-05-06)
-===================
-
-- Add `AppendInvoke` to append into errors from `defer` blocks.
-
-
-v1.6.0 (2020-09-14)
-===================
-
-- Actually drop library dependency on development-time tooling.
-
-
-v1.5.0 (2020-02-24)
-===================
-
-- Drop library dependency on development-time tooling.
-
-
-v1.4.0 (2019-11-04)
-===================
-
-- Add `AppendInto` function to more ergonomically build errors inside a
- loop.
-
-
-v1.3.0 (2019-10-29)
-===================
-
-- Switch to Go modules.
-
-
-v1.2.0 (2019-09-26)
-===================
-
-- Support extracting and matching against wrapped errors with `errors.As`
- and `errors.Is`.
-
-
-v1.1.0 (2017-06-30)
-===================
-
-- Added an `Errors(error) []error` function to extract the underlying list of
- errors for a multierr error.
-
-
-v1.0.0 (2017-05-31)
-===================
-
-No changes since v0.2.0. This release is committing to making no breaking
-changes to the current API in the 1.X series.
-
-
-v0.2.0 (2017-04-11)
-===================
-
-- Repeatedly appending to the same error is now faster due to fewer
- allocations.
-
-
-v0.1.0 (2017-03-31)
-===================
-
-- Initial release
diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt
deleted file mode 100644
index 413e30f7ce..0000000000
--- a/vendor/go.uber.org/multierr/LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2017-2021 Uber Technologies, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile
deleted file mode 100644
index dcb6fe723c..0000000000
--- a/vendor/go.uber.org/multierr/Makefile
+++ /dev/null
@@ -1,38 +0,0 @@
-# Directory to put `go install`ed binaries in.
-export GOBIN ?= $(shell pwd)/bin
-
-GO_FILES := $(shell \
- find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
- -o -name '*.go' -print | cut -b3-)
-
-.PHONY: build
-build:
- go build ./...
-
-.PHONY: test
-test:
- go test -race ./...
-
-.PHONY: gofmt
-gofmt:
- $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
- @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
- @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false)
-
-.PHONY: golint
-golint:
- @cd tools && go install golang.org/x/lint/golint
- @$(GOBIN)/golint ./...
-
-.PHONY: staticcheck
-staticcheck:
- @cd tools && go install honnef.co/go/tools/cmd/staticcheck
- @$(GOBIN)/staticcheck ./...
-
-.PHONY: lint
-lint: gofmt golint staticcheck
-
-.PHONY: cover
-cover:
- go test -race -coverprofile=cover.out -coverpkg=./... -v ./...
- go tool cover -html=cover.out -o cover.html
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
deleted file mode 100644
index 5ab6ac40f4..0000000000
--- a/vendor/go.uber.org/multierr/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
-
-`multierr` allows combining one or more Go `error`s together.
-
-## Features
-
-- **Idiomatic**:
- multierr follows best practices in Go, and keeps your code idiomatic.
- - It keeps the underlying error type hidden,
- allowing you to deal in `error` values exclusively.
- - It provides APIs to safely append into an error from a `defer` statement.
-- **Performant**:
- multierr is optimized for performance:
- - It avoids allocations where possible.
- - It utilizes slice resizing semantics to optimize common cases
- like appending into the same error object from a loop.
-- **Interoperable**:
- multierr interoperates with the Go standard library's error APIs seamlessly:
- - The `errors.Is` and `errors.As` functions *just work*.
-- **Lightweight**:
- multierr comes with virtually no dependencies.
-
-## Installation
-
-```bash
-go get -u go.uber.org/multierr@latest
-```
-
-## Status
-
-Stable: No breaking changes will be made before 2.0.
-
--------------------------------------------------------------------------------
-
-Released under the [MIT License].
-
-[MIT License]: LICENSE.txt
-[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr
-[doc]: https://pkg.go.dev/go.uber.org/multierr
-[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg
-[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg
-[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml
-[cov]: https://codecov.io/gh/uber-go/multierr
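
As a rough sketch of the combining behaviour the README above describes (the sentinel errors here are invented for illustration):

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

func main() {
	errReader := errors.New("close reader: connection reset")
	errWriter := errors.New("close writer: broken pipe")

	// Combine skips nil values and flattens nested multierr errors.
	err := multierr.Combine(errReader, nil, errWriter)
	fmt.Println(err) // close reader: connection reset; close writer: broken pipe

	// Errors recovers the underlying list for individual inspection.
	for _, e := range multierr.Errors(err) {
		fmt.Println(" -", e)
	}

	// errors.Is sees through the combined error.
	fmt.Println(errors.Is(err, errWriter)) // true
}
```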
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
deleted file mode 100644
index 3a828b2dff..0000000000
--- a/vendor/go.uber.org/multierr/error.go
+++ /dev/null
@@ -1,646 +0,0 @@
-// Copyright (c) 2017-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package multierr allows combining one or more errors together.
-//
-// # Overview
-//
-// Errors can be combined with the use of the Combine function.
-//
-// multierr.Combine(
-// reader.Close(),
-// writer.Close(),
-// conn.Close(),
-// )
-//
-// If only two errors are being combined, the Append function may be used
-// instead.
-//
-// err = multierr.Append(reader.Close(), writer.Close())
-//
-// The underlying list of errors for a returned error object may be retrieved
-// with the Errors function.
-//
-// errors := multierr.Errors(err)
-// if len(errors) > 0 {
-// fmt.Println("The following errors occurred:", errors)
-// }
-//
-// # Appending from a loop
-//
-// You sometimes need to append into an error from a loop.
-//
-// var err error
-// for _, item := range items {
-// err = multierr.Append(err, process(item))
-// }
-//
-// Cases like this may require knowledge of whether an individual instance
-// failed. This usually requires introduction of a new variable.
-//
-// var err error
-// for _, item := range items {
-// if perr := process(item); perr != nil {
-// log.Warn("skipping item", item)
-// err = multierr.Append(err, perr)
-// }
-// }
-//
-// multierr includes AppendInto to simplify cases like this.
-//
-// var err error
-// for _, item := range items {
-// if multierr.AppendInto(&err, process(item)) {
-// log.Warn("skipping item", item)
-// }
-// }
-//
-// This will append the error into the err variable, and return true if that
-// individual error was non-nil.
-//
-// See [AppendInto] for more information.
-//
-// # Deferred Functions
-//
-// Go makes it possible to modify the return value of a function in a defer
-// block if the function was using named returns. This makes it possible to
-// record resource cleanup failures from deferred blocks.
-//
-// func sendRequest(req Request) (err error) {
-// conn, err := openConnection()
-// if err != nil {
-// return err
-// }
-// defer func() {
-// err = multierr.Append(err, conn.Close())
-// }()
-// // ...
-// }
-//
-// multierr provides the Invoker type and AppendInvoke function to make cases
-// like the above simpler and obviate the need for a closure. The following is
-// roughly equivalent to the example above.
-//
-// func sendRequest(req Request) (err error) {
-// conn, err := openConnection()
-// if err != nil {
-// return err
-// }
-// defer multierr.AppendInvoke(&err, multierr.Close(conn))
-// // ...
-// }
-//
-// See [AppendInvoke] and [Invoker] for more information.
-//
-// NOTE: If you're modifying an error from inside a defer, you MUST use a named
-// return value for that function.
-//
-// # Advanced Usage
-//
-// Errors returned by Combine and Append MAY implement the following
-// interface.
-//
-// type errorGroup interface {
-// // Returns a slice containing the underlying list of errors.
-// //
-// // This slice MUST NOT be modified by the caller.
-// Errors() []error
-// }
-//
-// Note that if you need access to list of errors behind a multierr error, you
-// should prefer using the Errors function. That said, if you need cheap
-// read-only access to the underlying errors slice, you can attempt to cast
-// the error to this interface. You MUST handle the failure case gracefully
-// because errors returned by Combine and Append are not guaranteed to
-// implement this interface.
-//
-// var errors []error
-// group, ok := err.(errorGroup)
-// if ok {
-// errors = group.Errors()
-// } else {
-// errors = []error{err}
-// }
-package multierr // import "go.uber.org/multierr"
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "strings"
- "sync"
- "sync/atomic"
-)
-
-var (
- // Separator for single-line error messages.
- _singlelineSeparator = []byte("; ")
-
- // Prefix for multi-line messages
- _multilinePrefix = []byte("the following errors occurred:")
-
- // Prefix for the first and following lines of an item in a list of
- // multi-line error messages.
- //
- // For example, if a single item is:
- //
- // foo
- // bar
- //
- // It will become,
- //
- // - foo
- // bar
- _multilineSeparator = []byte("\n - ")
- _multilineIndent = []byte(" ")
-)
-
-// _bufferPool is a pool of bytes.Buffers.
-var _bufferPool = sync.Pool{
- New: func() interface{} {
- return &bytes.Buffer{}
- },
-}
-
-type errorGroup interface {
- Errors() []error
-}
-
-// Errors returns a slice containing zero or more errors that the supplied
-// error is composed of. If the error is nil, a nil slice is returned.
-//
-// err := multierr.Append(r.Close(), w.Close())
-// errors := multierr.Errors(err)
-//
-// If the error is not composed of other errors, the returned slice contains
-// just the error that was passed in.
-//
-// Callers of this function are free to modify the returned slice.
-func Errors(err error) []error {
- return extractErrors(err)
-}
-
-// multiError is an error that holds one or more errors.
-//
-// An instance of this is guaranteed to be non-empty and flattened. That is,
-// none of the errors inside multiError are other multiErrors.
-//
-// multiError formats to a semi-colon delimited list of error messages with
-// %v and with a more readable multi-line format with %+v.
-type multiError struct {
- copyNeeded atomic.Bool
- errors []error
-}
-
-// Errors returns the list of underlying errors.
-//
-// This slice MUST NOT be modified.
-func (merr *multiError) Errors() []error {
- if merr == nil {
- return nil
- }
- return merr.errors
-}
-
-func (merr *multiError) Error() string {
- if merr == nil {
- return ""
- }
-
- buff := _bufferPool.Get().(*bytes.Buffer)
- buff.Reset()
-
- merr.writeSingleline(buff)
-
- result := buff.String()
- _bufferPool.Put(buff)
- return result
-}
-
-// Every compares every error in the given err against the given target error
-// using [errors.Is], and returns true only if every comparison returned true.
-func Every(err error, target error) bool {
- for _, e := range extractErrors(err) {
- if !errors.Is(e, target) {
- return false
- }
- }
- return true
-}
-
-func (merr *multiError) Format(f fmt.State, c rune) {
- if c == 'v' && f.Flag('+') {
- merr.writeMultiline(f)
- } else {
- merr.writeSingleline(f)
- }
-}
-
-func (merr *multiError) writeSingleline(w io.Writer) {
- first := true
- for _, item := range merr.errors {
- if first {
- first = false
- } else {
- w.Write(_singlelineSeparator)
- }
- io.WriteString(w, item.Error())
- }
-}
-
-func (merr *multiError) writeMultiline(w io.Writer) {
- w.Write(_multilinePrefix)
- for _, item := range merr.errors {
- w.Write(_multilineSeparator)
- writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item))
- }
-}
-
-// Writes s to the writer with the given prefix added before each line after
-// the first.
-func writePrefixLine(w io.Writer, prefix []byte, s string) {
- first := true
- for len(s) > 0 {
- if first {
- first = false
- } else {
- w.Write(prefix)
- }
-
- idx := strings.IndexByte(s, '\n')
- if idx < 0 {
- idx = len(s) - 1
- }
-
- io.WriteString(w, s[:idx+1])
- s = s[idx+1:]
- }
-}
-
-type inspectResult struct {
- // Number of top-level non-nil errors
- Count int
-
- // Total number of errors including multiErrors
- Capacity int
-
- // Index of the first non-nil error in the list. Value is meaningless if
- // Count is zero.
- FirstErrorIdx int
-
- // Whether the list contains at least one multiError
- ContainsMultiError bool
-}
-
-// Inspects the given slice of errors so that we can efficiently allocate
-// space for it.
-func inspect(errors []error) (res inspectResult) {
- first := true
- for i, err := range errors {
- if err == nil {
- continue
- }
-
- res.Count++
- if first {
- first = false
- res.FirstErrorIdx = i
- }
-
- if merr, ok := err.(*multiError); ok {
- res.Capacity += len(merr.errors)
- res.ContainsMultiError = true
- } else {
- res.Capacity++
- }
- }
- return
-}
-
-// fromSlice converts the given list of errors into a single error.
-func fromSlice(errors []error) error {
- // Don't pay to inspect small slices.
- switch len(errors) {
- case 0:
- return nil
- case 1:
- return errors[0]
- }
-
- res := inspect(errors)
- switch res.Count {
- case 0:
- return nil
- case 1:
- // only one non-nil entry
- return errors[res.FirstErrorIdx]
- case len(errors):
- if !res.ContainsMultiError {
- // Error list is flat. Make a copy of it
- // Otherwise "errors" escapes to the heap
- // unconditionally for all other cases.
- // This lets us optimize for the "no errors" case.
- out := append(([]error)(nil), errors...)
- return &multiError{errors: out}
- }
- }
-
- nonNilErrs := make([]error, 0, res.Capacity)
- for _, err := range errors[res.FirstErrorIdx:] {
- if err == nil {
- continue
- }
-
- if nested, ok := err.(*multiError); ok {
- nonNilErrs = append(nonNilErrs, nested.errors...)
- } else {
- nonNilErrs = append(nonNilErrs, err)
- }
- }
-
- return &multiError{errors: nonNilErrs}
-}
-
-// Combine combines the passed errors into a single error.
-//
-// If zero arguments were passed or if all items are nil, a nil error is
-// returned.
-//
-// Combine(nil, nil) // == nil
-//
-// If only a single error was passed, it is returned as-is.
-//
-// Combine(err) // == err
-//
-// Combine skips over nil arguments so this function may be used to combine
-// together errors from operations that fail independently of each other.
-//
-// multierr.Combine(
-// reader.Close(),
-// writer.Close(),
-// pipe.Close(),
-// )
-//
-// If any of the passed errors is a multierr error, it will be flattened along
-// with the other errors.
-//
-// multierr.Combine(multierr.Combine(err1, err2), err3)
-// // is the same as
-// multierr.Combine(err1, err2, err3)
-//
-// The returned error formats into a readable multi-line error message if
-// formatted with %+v.
-//
-// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
-func Combine(errors ...error) error {
- return fromSlice(errors)
-}
-
-// Append appends the given errors together. Either value may be nil.
-//
-// This function is a specialization of Combine for the common case where
-// there are only two errors.
-//
-// err = multierr.Append(reader.Close(), writer.Close())
-//
-// The following pattern may also be used to record failure of deferred
-// operations without losing information about the original error.
-//
-// func doSomething(..) (err error) {
-// f := acquireResource()
-// defer func() {
-// err = multierr.Append(err, f.Close())
-// }()
-//
-// Note that the variable MUST be a named return to append an error to it from
-// the defer statement. See also [AppendInvoke].
-func Append(left error, right error) error {
- switch {
- case left == nil:
- return right
- case right == nil:
- return left
- }
-
- if _, ok := right.(*multiError); !ok {
- if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) {
- // Common case where the error on the left is constantly being
- // appended to.
- errs := append(l.errors, right)
- return &multiError{errors: errs}
- } else if !ok {
- // Both errors are single errors.
- return &multiError{errors: []error{left, right}}
- }
- }
-
- // Either right or both, left and right, are multiErrors. Rely on usual
- // expensive logic.
- errors := [2]error{left, right}
- return fromSlice(errors[0:])
-}
-
-// AppendInto appends an error into the destination of an error pointer and
-// returns whether the error being appended was non-nil.
-//
-// var err error
-// multierr.AppendInto(&err, r.Close())
-// multierr.AppendInto(&err, w.Close())
-//
-// The above is equivalent to,
-//
-// err := multierr.Append(r.Close(), w.Close())
-//
-// As AppendInto reports whether the provided error was non-nil, it may be
-// used to build a multierr error in a loop more ergonomically. For example:
-//
-// var err error
-// for line := range lines {
-// var item Item
-// if multierr.AppendInto(&err, parse(line, &item)) {
-// continue
-// }
-// items = append(items, item)
-// }
-//
-// Compare this with a version that relies solely on Append:
-//
-// var err error
-// for line := range lines {
-// var item Item
-// if parseErr := parse(line, &item); parseErr != nil {
-// err = multierr.Append(err, parseErr)
-// continue
-// }
-// items = append(items, item)
-// }
-func AppendInto(into *error, err error) (errored bool) {
- if into == nil {
- // We panic if 'into' is nil. This is not documented above
- // because suggesting that the pointer must be non-nil may
- // confuse users into thinking that the error that it points
- // to must be non-nil.
- panic("misuse of multierr.AppendInto: into pointer must not be nil")
- }
-
- if err == nil {
- return false
- }
- *into = Append(*into, err)
- return true
-}
-
-// Invoker is an operation that may fail with an error. Use it with
-// AppendInvoke to append the result of calling the function into an error.
-// This allows you to conveniently defer capture of failing operations.
-//
-// See also, [Close] and [Invoke].
-type Invoker interface {
- Invoke() error
-}
-
-// Invoke wraps a function which may fail with an error to match the Invoker
-// interface. Use it to supply functions matching this signature to
-// AppendInvoke.
-//
-// For example,
-//
-// func processReader(r io.Reader) (err error) {
-// scanner := bufio.NewScanner(r)
-// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
-// for scanner.Scan() {
-// // ...
-// }
-// // ...
-// }
-//
-// In this example, the following line will construct the Invoker right away,
-// but defer the invocation of scanner.Err() until the function returns.
-//
-// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
-//
-// Note that the error you're appending to from the defer statement MUST be a
-// named return.
-type Invoke func() error
-
-// Invoke calls the supplied function and returns its result.
-func (i Invoke) Invoke() error { return i() }
-
-// Close builds an Invoker that closes the provided io.Closer. Use it with
-// AppendInvoke to close io.Closers and append their results into an error.
-//
-// For example,
-//
-// func processFile(path string) (err error) {
-// f, err := os.Open(path)
-// if err != nil {
-// return err
-// }
-// defer multierr.AppendInvoke(&err, multierr.Close(f))
-// return processReader(f)
-// }
-//
-// In this example, multierr.Close will construct the Invoker right away, but
-// defer the invocation of f.Close until the function returns.
-//
-// defer multierr.AppendInvoke(&err, multierr.Close(f))
-//
-// Note that the error you're appending to from the defer statement MUST be a
-// named return.
-func Close(closer io.Closer) Invoker {
- return Invoke(closer.Close)
-}
-
-// AppendInvoke appends the result of calling the given Invoker into the
-// provided error pointer. Use it with named returns to safely defer
-// invocation of fallible operations until a function returns, and capture the
-// resulting errors.
-//
-// func doSomething(...) (err error) {
-// // ...
-// f, err := openFile(..)
-// if err != nil {
-// return err
-// }
-//
-// // multierr will call f.Close() when this function returns and
-//	// if the operation fails, it appends its error into the
-// // returned error.
-// defer multierr.AppendInvoke(&err, multierr.Close(f))
-//
-// scanner := bufio.NewScanner(f)
-//	// Similarly, this schedules scanner.Err to be called and
-//	// inspected when the function returns and appends its error
-// // into the returned error.
-// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
-//
-// // ...
-// }
-//
-// NOTE: If used with a defer, the error variable MUST be a named return.
-//
-// Without defer, AppendInvoke behaves exactly like AppendInto.
-//
-// err := // ...
-//	multierr.AppendInvoke(&err, multierr.Invoke(foo))
-//
-// // ...is roughly equivalent to...
-//
-// err := // ...
-// multierr.AppendInto(&err, foo())
-//
-// The advantage of the indirection introduced by Invoker is to make it easy
-// to defer the invocation of a function. Without this indirection, the
-// invoked function will be evaluated at the time of the defer block rather
-// than when the function returns.
-//
-// // BAD: This is likely not what the caller intended. This will evaluate
-// // foo() right away and append its result into the error when the
-// // function returns.
-// defer multierr.AppendInto(&err, foo())
-//
-//	// GOOD: This will defer invocation of foo until the function returns.
-// defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
-//
-// multierr provides a few Invoker implementations out of the box for
-// convenience. See [Invoker] for more information.
-func AppendInvoke(into *error, invoker Invoker) {
- AppendInto(into, invoker.Invoke())
-}
-
-// AppendFunc is a shorthand for [AppendInvoke].
-// It allows using function or method value directly
-// without having to wrap it into an [Invoker] interface.
-//
-// func doSomething(...) (err error) {
-// w, err := startWorker(...)
-// if err != nil {
-// return err
-// }
-//
-// // multierr will call w.Stop() when this function returns and
-// // if the operation fails, it appends its error into the
-// // returned error.
-// defer multierr.AppendFunc(&err, w.Stop)
-// }
-func AppendFunc(into *error, fn func() error) {
- AppendInvoke(into, Invoke(fn))
-}
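
The package documentation removed above centres on two patterns: `AppendInto` for accumulating errors in a loop and `AppendInvoke`/`Close` for capturing cleanup failures from a `defer`. A small sketch combining both (the `process` helper is hypothetical, and the named return `err` is required for the deferred append to take effect):

```go
package example

import (
	"os"

	"go.uber.org/multierr"
)

// processAll opens the named file and processes every input, collecting all
// failures instead of stopping at the first one.
func processAll(path string, inputs []string) (err error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	// f.Close runs when processAll returns; if it fails, the error is
	// appended into the named return value err.
	defer multierr.AppendInvoke(&err, multierr.Close(f))

	for _, in := range inputs {
		// AppendInto reports whether this iteration produced an error;
		// on failure we skip any follow-up work for the item.
		if multierr.AppendInto(&err, process(f, in)) {
			continue
		}
		// ... per-item follow-up work would go here ...
	}
	return err
}

// process is a hypothetical stand-in for per-item work.
func process(f *os.File, input string) error { return nil }
```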
diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go
deleted file mode 100644
index a173f9c251..0000000000
--- a/vendor/go.uber.org/multierr/error_post_go120.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2017-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-//go:build go1.20
-// +build go1.20
-
-package multierr
-
-// Unwrap returns a list of errors wrapped by this multierr.
-func (merr *multiError) Unwrap() []error {
- return merr.Errors()
-}
-
-type multipleErrors interface {
- Unwrap() []error
-}
-
-func extractErrors(err error) []error {
- if err == nil {
- return nil
- }
-
-	// Check if the given err is an unwrappable error that
-	// implements the multipleErrors interface.
- eg, ok := err.(multipleErrors)
- if !ok {
- return []error{err}
- }
-
- return append(([]error)(nil), eg.Unwrap()...)
-}
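
On Go 1.20 and newer this `Unwrap() []error` method is what lets the standard `errors` package traverse a multierr error directly; a brief illustrative check (the sentinel error is made up):

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

var errRetryable = errors.New("retryable")

func main() {
	err := multierr.Combine(
		fmt.Errorf("shard 1: %w", errRetryable),
		errors.New("shard 2: permanent failure"),
	)

	// errors.Is walks the list exposed via Unwrap() []error, so it finds
	// the sentinel inside the combined error.
	fmt.Println(errors.Is(err, errRetryable)) // true

	// multierr.Every asks the stricter question: do all entries match?
	fmt.Println(multierr.Every(err, errRetryable)) // false
}
```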
diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go
deleted file mode 100644
index 93872a3fcd..0000000000
--- a/vendor/go.uber.org/multierr/error_pre_go120.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright (c) 2017-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-//go:build !go1.20
-// +build !go1.20
-
-package multierr
-
-import "errors"
-
-// Versions of Go before 1.20 did not support the Unwrap() []error method.
-// This provides a similar behavior by implementing the Is(..) and As(..)
-// methods.
-// See the errors.Join proposal for details:
-// https://github.com/golang/go/issues/53435
-
-// As attempts to find the first error in the error list that matches the type
-// of the value that target points to.
-//
-// This function allows errors.As to traverse the values stored on the
-// multierr error.
-func (merr *multiError) As(target interface{}) bool {
- for _, err := range merr.Errors() {
- if errors.As(err, target) {
- return true
- }
- }
- return false
-}
-
-// Is attempts to match the provided error against errors in the error list.
-//
-// This function allows errors.Is to traverse the values stored on the
-// multierr error.
-func (merr *multiError) Is(target error) bool {
- for _, err := range merr.Errors() {
- if errors.Is(err, target) {
- return true
- }
- }
- return false
-}
-
-func extractErrors(err error) []error {
- if err == nil {
- return nil
- }
-
- // Note that we're casting to multiError, not errorGroup. Our contract is
- // that returned errors MAY implement errorGroup. Errors, however, only
- // has special behavior for multierr-specific error objects.
- //
- // This behavior can be expanded in the future but I think it's prudent to
- // start with as little as possible in terms of contract and possibility
- // of misuse.
- eg, ok := err.(*multiError)
- if !ok {
- return []error{err}
- }
-
- return append(([]error)(nil), eg.Errors()...)
-}
diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml
deleted file mode 100644
index 8e5ca7d3e2..0000000000
--- a/vendor/go.uber.org/zap/.codecov.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-coverage:
- range: 80..100
- round: down
- precision: 2
-
- status:
- project: # measuring the overall project coverage
- default: # context, you can create multiple ones with custom titles
- enabled: yes # must be yes|true to enable this status
- target: 95% # specify the target coverage for each commit status
- # option: "auto" (must increase from parent commit or pull request base)
- # option: "X%" a static target percentage to hit
- if_not_found: success # if parent is not found report status as success, error, or failure
- if_ci_failed: error # if ci fails report status as success, error, or failure
-ignore:
- - internal/readme/readme.go
-
diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore
deleted file mode 100644
index da9d9d00b4..0000000000
--- a/vendor/go.uber.org/zap/.gitignore
+++ /dev/null
@@ -1,32 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-vendor
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-*.pprof
-*.out
-*.log
-
-/bin
-cover.out
-cover.html
diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl
deleted file mode 100644
index 92aa65d660..0000000000
--- a/vendor/go.uber.org/zap/.readme.tmpl
+++ /dev/null
@@ -1,109 +0,0 @@
-# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
-
-Blazing fast, structured, leveled logging in Go.
-
-## Installation
-
-`go get -u go.uber.org/zap`
-
-Note that zap only supports the two most recent minor versions of Go.
-
-## Quick Start
-
-In contexts where performance is nice, but not critical, use the
-`SugaredLogger`. It's 4-10x faster than other structured logging
-packages and includes both structured and `printf`-style APIs.
-
-```go
-logger, _ := zap.NewProduction()
-defer logger.Sync() // flushes buffer, if any
-sugar := logger.Sugar()
-sugar.Infow("failed to fetch URL",
- // Structured context as loosely typed key-value pairs.
- "url", url,
- "attempt", 3,
- "backoff", time.Second,
-)
-sugar.Infof("Failed to fetch URL: %s", url)
-```
-
-When performance and type safety are critical, use the `Logger`. It's even
-faster than the `SugaredLogger` and allocates far less, but it only supports
-structured logging.
-
-```go
-logger, _ := zap.NewProduction()
-defer logger.Sync()
-logger.Info("failed to fetch URL",
- // Structured context as strongly typed Field values.
- zap.String("url", url),
- zap.Int("attempt", 3),
- zap.Duration("backoff", time.Second),
-)
-```
-
-See the [documentation][doc] and [FAQ](FAQ.md) for more details.
-
-## Performance
-
-For applications that log in the hot path, reflection-based serialization and
-string formatting are prohibitively expensive — they're CPU-intensive
-and make many small allocations. Put differently, using `encoding/json` and
-`fmt.Fprintf` to log tons of `interface{}`s makes your application slow.
-
-Zap takes a different approach. It includes a reflection-free, zero-allocation
-JSON encoder, and the base `Logger` strives to avoid serialization overhead
-and allocations wherever possible. By building the high-level `SugaredLogger`
-on that foundation, zap lets users *choose* when they need to count every
-allocation and when they'd prefer a more familiar, loosely typed API.
-
-As measured by its own [benchmarking suite][], not only is zap more performant
-than comparable structured logging packages — it's also faster than the
-standard library. Like all benchmarks, take these with a grain of salt.<sup id="anchor-versions">[1](#footnote-versions)</sup>
-
-Log a message and 10 fields:
-
-{{.BenchmarkAddingFields}}
-
-Log a message with a logger that already has 10 fields of context:
-
-{{.BenchmarkAccumulatedContext}}
-
-Log a static string, without any context or `printf`-style templating:
-
-{{.BenchmarkWithoutFields}}
-
-## Development Status: Stable
-
-All APIs are finalized, and no breaking changes will be made in the 1.x series
-of releases. Users of semver-aware dependency management systems should pin
-zap to `^1`.
-
-## Contributing
-
-We encourage and support an active, healthy community of contributors —
-including you! Details are in the [contribution guide](CONTRIBUTING.md) and
-the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
-issues and pull requests, but you can also report any negative conduct to
-oss-conduct@uber.com. That email list is a private, safe space; even the zap
-maintainers don't have access, so don't hesitate to hold us to a high
-standard.
-
-
-
-Released under the [MIT License](LICENSE.txt).
-
-<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
-benchmarking against slightly older versions of other packages. Versions are
-pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
-
-[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
-[doc]: https://pkg.go.dev/go.uber.org/zap
-[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
-[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
-[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
-[cov]: https://codecov.io/gh/uber-go/zap
-[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
-[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
-
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
deleted file mode 100644
index 0db1f9f15f..0000000000
--- a/vendor/go.uber.org/zap/CHANGELOG.md
+++ /dev/null
@@ -1,617 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
-
-## 1.24.0 (30 Nov 2022)
-
-Enhancements:
-* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
- current minimum enabled log level.
-* [#1185][]: `SugaredLogger` now turns errors into `zap.Error` fields automatically.
-
-Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
-contributions to this release.
-
-[#1148]: https://github.com/uber-go/zap/pull/1148
-[#1185]: https://github.com/uber-go/zap/pull/1185
-
-## 1.23.0 (24 Aug 2022)
-
-Enhancements:
-* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
- `LevelEnabler` or `Core`.
-* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
- that implement `String() string`.
-
-[#1147]: https://github.com/uber-go/zap/pull/1147
-[#1155]: https://github.com/uber-go/zap/pull/1155
-
-
-## 1.22.0 (8 Aug 2022)
-
-Enhancements:
-* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log
- arrays of objects. With these two constructors, you don't need to implement
- `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement
- `zapcore.ObjectMarshaler`.
-* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing
- `SugaredLogger` with the provided options applied.
-* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level.
- These functions provide a string joining behavior similar to `fmt.Println`.
-* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the
- logger for `Fatal`-level log entries. This defaults to exiting the program.
-* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or
- `NewDevelopment` to panic if the system was unable to build the logger.
-* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for
- a statement dynamically.
-
-Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun
-for their contributions to this release.
-
-[#1071]: https://github.com/uber-go/zap/pull/1071
-[#1079]: https://github.com/uber-go/zap/pull/1079
-[#1080]: https://github.com/uber-go/zap/pull/1080
-[#1088]: https://github.com/uber-go/zap/pull/1088
-[#1108]: https://github.com/uber-go/zap/pull/1108
-[#1118]: https://github.com/uber-go/zap/pull/1118
-
-## 1.21.0 (7 Feb 2022)
-
-Enhancements:
-* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
-* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a
- string.
-
-Bugfixes:
-* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset.
-
-Other changes:
-* [#1052][]: Improve encoding performance when the `AddCaller` and
- `AddStacktrace` options are used together.
-
-[#1047]: https://github.com/uber-go/zap/pull/1047
-[#1048]: https://github.com/uber-go/zap/pull/1048
-[#1052]: https://github.com/uber-go/zap/pull/1052
-[#1058]: https://github.com/uber-go/zap/pull/1058
-
-Thanks to @aerosol and @Techassi for their contributions to this release.
-
-## 1.20.0 (4 Jan 2022)
-
-Enhancements:
-* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline
- characters between log statements.
-* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON
- encoding of reflected log fields.
-
-Bugfixes:
-* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON.
-* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject`
- methods when the methods return.
-* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero.
-
-Other changes:
-* [#1028][]: Drop support for Go < 1.15.
-
-[#554]: https://github.com/uber-go/zap/pull/554
-[#989]: https://github.com/uber-go/zap/pull/989
-[#1011]: https://github.com/uber-go/zap/pull/1011
-[#1017]: https://github.com/uber-go/zap/pull/1017
-[#1028]: https://github.com/uber-go/zap/pull/1028
-[#1033]: https://github.com/uber-go/zap/pull/1033
-[#1039]: https://github.com/uber-go/zap/pull/1039
-
-Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release.
-
-## 1.19.1 (8 Sep 2021)
-
-Bugfixes:
-* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon.
-* [#1003][]: JSON: Fix inaccurate precision when encoding float32.
-
-[#1001]: https://github.com/uber-go/zap/pull/1001
-[#1003]: https://github.com/uber-go/zap/pull/1003
-
-## 1.19.0 (9 Aug 2021)
-
-Enhancements:
-* [#975][]: Avoid panicking in Sampler core if the level is out of bounds.
-* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields
- better.
-
-[#975]: https://github.com/uber-go/zap/pull/975
-[#984]: https://github.com/uber-go/zap/pull/984
-
-Thanks to @lancoLiu and @thockin for their contributions to this release.
-
-## 1.18.1 (28 Jun 2021)
-
-Bugfixes:
-* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`.
-
-[#974]: https://github.com/uber-go/zap/pull/974
-
-## 1.18.0 (28 Jun 2021)
-
-Enhancements:
-* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers
- messages in-memory and flushes them periodically.
-* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`.
-* [#897][]: Add `zap.WithClock` option to control the source of time via the
- new `zapcore.Clock` interface.
-* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w`
- methods don't match expectations.
-* [#943][]: Add support for filtering by level or arbitrary matcher function to
- `zaptest/observer`.
-* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's
- `buffer.Buffer`.
-
-Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee
-for their contributions to this release.
-
-[#691]: https://github.com/uber-go/zap/pull/691
-[#897]: https://github.com/uber-go/zap/pull/897
-[#943]: https://github.com/uber-go/zap/pull/943
-[#949]: https://github.com/uber-go/zap/pull/949
-[#961]: https://github.com/uber-go/zap/pull/961
-[#971]: https://github.com/uber-go/zap/pull/971
-
-## 1.17.0 (25 May 2021)
-
-Bugfixes:
-* [#867][]: Encode `<nil>` for nil `error` instead of a panic.
-* [#931][], [#936][]: Update minimum version constraints to address
- vulnerabilities in dependencies.
-
-Enhancements:
-* [#865][]: Improve alignment of fields of the Logger struct, reducing its
- size from 96 to 80 bytes.
-* [#881][]: Support `grpclog.LoggerV2` in zapgrpc.
-* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler
- with the `application/x-www-form-urlencoded` content type.
-* [#912][]: Support multi-field encoding with `zap.Inline`.
-* [#913][]: Speed up SugaredLogger for calls with a single string.
-* [#928][]: Add support for filtering by field name to `zaptest/observer`.
-
-Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
-
-## 1.16.0 (1 Sep 2020)
-
-Bugfixes:
-* [#828][]: Fix missing newline in IncreaseLevel error messages.
-* [#835][]: Fix panic in JSON encoder when encoding times or durations
- without specifying a time or duration encoder.
-* [#843][]: Honor CallerSkip when taking stack traces.
-* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead.
-* [#854][]: Encode `<nil>` for nil `Stringer` instead of a panic error log.
-
-Enhancements:
-* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders
- for custom layouts.
-* [#697][]: Added support for a configurable delimiter in the console encoder.
-* [#852][]: Optimize console encoder by pooling the underlying JSON encoder.
-* [#844][]: Add ability to include the calling function as part of logs.
-* [#843][]: Add `StackSkip` for including truncated stacks as a field.
-* [#861][]: Add options to customize Fatal behaviour for better testability.
-
-Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
-
-## 1.15.0 (23 Apr 2020)
-
-Bugfixes:
-* [#804][]: Fix handling of `Time` values out of `UnixNano` range.
-* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`.
-
-Enhancements:
-* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This
- allows disabling annotation of log entries with caller information if
- previously enabled with `AddCaller`.
-* [#813][]: Deprecate `NewSampler` constructor in favor of
- `NewSamplerWithOptions` which supports a `SamplerHook` option. This option
- adds support for monitoring sampling decisions through a hook.
-
-Thanks to @danielbprice for their contributions to this release.
-
-## 1.14.1 (14 Mar 2020)
-
-Bugfixes:
-* [#791][]: Fix panic on attempting to build a logger with an invalid Config.
-* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's
- development-time dependencies.
-* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to
- be generated for arrays of `time.Time` objects when using string-based time
- formats.
-
-Thanks to @YashishDua for their contributions to this release.
-
-## 1.14.0 (20 Feb 2020)
-
-Enhancements:
-* [#771][]: Optimize calls for disabled log levels.
-* [#773][]: Add millisecond duration encoder.
-* [#775][]: Add option to increase the level of a logger.
-* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible.
-
-Thanks to @caibirdme for their contributions to this release.
-
-## 1.13.0 (13 Nov 2019)
-
-Enhancements:
-* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors
- to log pointers to primitives with support for `nil` values.
-
-Thanks to @jbizzle for their contributions to this release.
-
-## 1.12.0 (29 Oct 2019)
-
-Enhancements:
-* [#751][]: Migrate to Go modules.
-
-## 1.11.0 (21 Oct 2019)
-
-Enhancements:
-* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`.
-* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders.
-
-Thanks to @juicemia, @uhthomas for their contributions to this release.
-
-## 1.10.0 (29 Apr 2019)
-
-Bugfixes:
-* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a
- string.
-* [#706][]: Fix incorrect call depth to determine caller in Go 1.12.
-
-Enhancements:
-* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test
- loggers.
-* [#675][]: Don't panic when encoding a String field.
-* [#704][]: Disable HTML escaping for JSON objects encoded using the
- reflect-based encoder.
-
-Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions
-to this release.
-
-## v1.9.1 (06 Aug 2018)
-
-Bugfixes:
-
-* [#614][]: MapObjectEncoder should not ignore empty slices.
-
-## v1.9.0 (19 Jul 2018)
-
-Enhancements:
-* [#602][]: Reduce number of allocations when logging with reflection.
-* [#572][], [#606][]: Expose a registry for third-party logging sinks.
-
-Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
-@dimroc for their contributions to this release.
-
-## v1.8.0 (13 Apr 2018)
-
-Enhancements:
-* [#508][]: Make log level configurable when redirecting the standard
- library's logger.
-* [#518][]: Add a logger that writes to a `*testing.TB`.
-* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc.
-
-Bugfixes:
-* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`.
-
-Thanks to @DiSiqueira and @djui for their contributions to this release.
-
-## v1.7.1 (25 Sep 2017)
-
-Bugfixes:
-* [#504][]: Store strings when using AddByteString with the map encoder.
-
-## v1.7.0 (21 Sep 2017)
-
-Enhancements:
-
-* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user
- to specify the level of the logged messages.
-
-## v1.6.0 (30 Aug 2017)
-
-Enhancements:
-
-* [#491][]: Omit zap stack frames from stacktraces.
-* [#490][]: Add a `ContextMap` method to observer logs for simpler
- field validation in tests.
-
-## v1.5.0 (22 Jul 2017)
-
-Enhancements:
-
-* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`.
-* [#465][]: Support user-supplied encoders for logger names.
-
-Bugfixes:
-
-* [#477][]: Fix a bug that incorrectly truncated deep stacktraces.
-
-Thanks to @richard-tunein and @pavius for their contributions to this release.
-
-## v1.4.1 (08 Jun 2017)
-
-This release fixes two bugs.
-
-Bugfixes:
-
-* [#435][]: Support a variety of case conventions when unmarshaling levels.
-* [#444][]: Fix a panic in the observer.
-
-## v1.4.0 (12 May 2017)
-
-This release adds a few small features and is fully backward-compatible.
-
-Enhancements:
-
-* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to
- override the Unix-style default.
-* [#425][]: Preserve time zones when logging times.
-* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a
- variety of operations a bit simpler.
-
-## v1.3.0 (25 Apr 2017)
-
-This release adds an enhancement to zap's testing helpers as well as the
-ability to marshal an AtomicLevel. It is fully backward-compatible.
-
-Enhancements:
-
-* [#415][]: Add a substring-filtering helper to zap's observer. This is
- particularly useful when testing the `SugaredLogger`.
-* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`.
-
-## v1.2.0 (13 Apr 2017)
-
-This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
-
-Enhancements:
-
-* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements
- `grpclog.Logger`.
-
-## v1.1.0 (31 Mar 2017)
-
-This release fixes two bugs and adds some enhancements to zap's testing helpers.
-It is fully backward-compatible.
-
-Bugfixes:
-
-* [#385][]: Fix caller path trimming on Windows.
-* [#396][]: Fix a panic when attempting to use non-existent directories with
- zap's configuration struct.
-
-Enhancements:
-
-* [#386][]: Add filtering helpers to zaptest's observing logger.
-
-Thanks to @moitias for contributing to this release.
-
-## v1.0.0 (14 Mar 2017)
-
-This is zap's first stable release. All exported APIs are now final, and no
-further breaking changes will be made in the 1.x release series. Anyone using a
-semver-aware dependency manager should now pin to `^1`.
-
-Breaking changes:
-
-* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without
- casting from `[]byte` to `string`.
-* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`,
- `zap.Logger`, and `zap.SugaredLogger`.
-* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to
- clash with other testing helpers.
-
-Bugfixes:
-
-* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier
- for tab-separated console output.
-* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to
- work with concurrency-safe `WriteSyncer` implementations.
-* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux
- systems.
-* [#373][]: Report the correct caller from zap's standard library
- interoperability wrappers.
-
-Enhancements:
-
-* [#348][]: Add a registry allowing third-party encodings to work with zap's
- built-in `Config`.
-* [#327][]: Make the representation of logger callers configurable (like times,
- levels, and durations).
-* [#376][]: Allow third-party encoders to use their own buffer pools, which
- removes the last performance advantage that zap's encoders have over plugins.
-* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple
- `WriteSyncer`s and lock the result.
-* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in
- Go 1.9).
-* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it
- easier for particularly punctilious users to unit test their application's
- logging.
-
-Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their
-contributions to this release.
-
-## v1.0.0-rc.3 (7 Mar 2017)
-
-This is the third release candidate for zap's stable release. There are no
-breaking changes.
-
-Bugfixes:
-
-* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs
- rather than `[]uint8`.
-
-Enhancements:
-
-* [#307][]: Users can opt into colored output for log levels.
-* [#353][]: In addition to hijacking the output of the standard library's
- package-global logging functions, users can now construct a zap-backed
- `log.Logger` instance.
-* [#311][]: Frames from common runtime functions and some of zap's internal
- machinery are now omitted from stacktraces.
-
-Thanks to @ansel1 and @suyash for their contributions to this release.
-
-## v1.0.0-rc.2 (21 Feb 2017)
-
-This is the second release candidate for zap's stable release. It includes two
-breaking changes.
-
-Breaking changes:
-
-* [#316][]: Zap's global loggers are now fully concurrency-safe
- (previously, users had to ensure that `ReplaceGlobals` was called before the
- loggers were in use). However, they must now be accessed via the `L()` and
- `S()` functions. Users can update their projects with
-
- ```
- gofmt -r "zap.L -> zap.L()" -w .
- gofmt -r "zap.S -> zap.S()" -w .
- ```
-* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid
- JSON and YAML struct tags on all config structs. This release fixes the tags
- and adds static analysis to prevent similar bugs in the future.
-
-Bugfixes:
-
-* [#321][]: Redirecting the standard library's `log` output now
- correctly reports the logger's caller.
-
-Enhancements:
-
-* [#325][] and [#333][]: Zap now transparently supports non-standard, rich
- errors like those produced by `github.com/pkg/errors`.
-* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is
- now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) ->
- zap.NewNop()' -w .`.
-* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a
- more informative error.
-
-Thanks to @skipor and @chapsuk for their contributions to this release.
-
-## v1.0.0-rc.1 (14 Feb 2017)
-
-This is the first release candidate for zap's stable release. There are multiple
-breaking changes and improvements from the pre-release version. Most notably:
-
-* **Zap's import path is now "go.uber.org/zap"** — all users will
- need to update their code.
-* User-facing types and functions remain in the `zap` package. Code relevant
- largely to extension authors is now in the `zapcore` package.
-* The `zapcore.Core` type makes it easy for third-party packages to use zap's
- internals but provide a different user-facing API.
-* `Logger` is now a concrete type instead of an interface.
-* A less verbose (though slower) logging API is included by default.
-* Package-global loggers `L` and `S` are included.
-* A human-friendly console encoder is included.
-* A declarative config struct allows common logger configurations to be managed
- as configuration instead of code.
-* Sampling is more accurate, and doesn't depend on the standard library's shared
- timer heap.
-
-## v0.1.0-beta.1 (6 Feb 2017)
-
-This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and
-upgrade at their leisure. Since this is the first tagged release, there are no
-backward compatibility concerns and all functionality is new.
-
-Early zap adopters should pin to the 0.1.x minor version until they're ready to
-upgrade to the upcoming stable release.
-
-[#316]: https://github.com/uber-go/zap/pull/316
-[#309]: https://github.com/uber-go/zap/pull/309
-[#317]: https://github.com/uber-go/zap/pull/317
-[#321]: https://github.com/uber-go/zap/pull/321
-[#325]: https://github.com/uber-go/zap/pull/325
-[#333]: https://github.com/uber-go/zap/pull/333
-[#326]: https://github.com/uber-go/zap/pull/326
-[#300]: https://github.com/uber-go/zap/pull/300
-[#339]: https://github.com/uber-go/zap/pull/339
-[#307]: https://github.com/uber-go/zap/pull/307
-[#353]: https://github.com/uber-go/zap/pull/353
-[#311]: https://github.com/uber-go/zap/pull/311
-[#366]: https://github.com/uber-go/zap/pull/366
-[#364]: https://github.com/uber-go/zap/pull/364
-[#371]: https://github.com/uber-go/zap/pull/371
-[#362]: https://github.com/uber-go/zap/pull/362
-[#369]: https://github.com/uber-go/zap/pull/369
-[#347]: https://github.com/uber-go/zap/pull/347
-[#373]: https://github.com/uber-go/zap/pull/373
-[#348]: https://github.com/uber-go/zap/pull/348
-[#327]: https://github.com/uber-go/zap/pull/327
-[#376]: https://github.com/uber-go/zap/pull/376
-[#346]: https://github.com/uber-go/zap/pull/346
-[#365]: https://github.com/uber-go/zap/pull/365
-[#372]: https://github.com/uber-go/zap/pull/372
-[#385]: https://github.com/uber-go/zap/pull/385
-[#396]: https://github.com/uber-go/zap/pull/396
-[#386]: https://github.com/uber-go/zap/pull/386
-[#402]: https://github.com/uber-go/zap/pull/402
-[#415]: https://github.com/uber-go/zap/pull/415
-[#416]: https://github.com/uber-go/zap/pull/416
-[#424]: https://github.com/uber-go/zap/pull/424
-[#425]: https://github.com/uber-go/zap/pull/425
-[#431]: https://github.com/uber-go/zap/pull/431
-[#435]: https://github.com/uber-go/zap/pull/435
-[#444]: https://github.com/uber-go/zap/pull/444
-[#477]: https://github.com/uber-go/zap/pull/477
-[#465]: https://github.com/uber-go/zap/pull/465
-[#460]: https://github.com/uber-go/zap/pull/460
-[#470]: https://github.com/uber-go/zap/pull/470
-[#487]: https://github.com/uber-go/zap/pull/487
-[#490]: https://github.com/uber-go/zap/pull/490
-[#491]: https://github.com/uber-go/zap/pull/491
-[#504]: https://github.com/uber-go/zap/pull/504
-[#508]: https://github.com/uber-go/zap/pull/508
-[#518]: https://github.com/uber-go/zap/pull/518
-[#577]: https://github.com/uber-go/zap/pull/577
-[#574]: https://github.com/uber-go/zap/pull/574
-[#602]: https://github.com/uber-go/zap/pull/602
-[#572]: https://github.com/uber-go/zap/pull/572
-[#606]: https://github.com/uber-go/zap/pull/606
-[#614]: https://github.com/uber-go/zap/pull/614
-[#657]: https://github.com/uber-go/zap/pull/657
-[#706]: https://github.com/uber-go/zap/pull/706
-[#610]: https://github.com/uber-go/zap/pull/610
-[#675]: https://github.com/uber-go/zap/pull/675
-[#704]: https://github.com/uber-go/zap/pull/704
-[#725]: https://github.com/uber-go/zap/pull/725
-[#736]: https://github.com/uber-go/zap/pull/736
-[#751]: https://github.com/uber-go/zap/pull/751
-[#758]: https://github.com/uber-go/zap/pull/758
-[#771]: https://github.com/uber-go/zap/pull/771
-[#773]: https://github.com/uber-go/zap/pull/773
-[#775]: https://github.com/uber-go/zap/pull/775
-[#786]: https://github.com/uber-go/zap/pull/786
-[#791]: https://github.com/uber-go/zap/pull/791
-[#795]: https://github.com/uber-go/zap/pull/795
-[#799]: https://github.com/uber-go/zap/pull/799
-[#804]: https://github.com/uber-go/zap/pull/804
-[#812]: https://github.com/uber-go/zap/pull/812
-[#806]: https://github.com/uber-go/zap/pull/806
-[#813]: https://github.com/uber-go/zap/pull/813
-[#629]: https://github.com/uber-go/zap/pull/629
-[#697]: https://github.com/uber-go/zap/pull/697
-[#828]: https://github.com/uber-go/zap/pull/828
-[#835]: https://github.com/uber-go/zap/pull/835
-[#843]: https://github.com/uber-go/zap/pull/843
-[#844]: https://github.com/uber-go/zap/pull/844
-[#852]: https://github.com/uber-go/zap/pull/852
-[#854]: https://github.com/uber-go/zap/pull/854
-[#861]: https://github.com/uber-go/zap/pull/861
-[#862]: https://github.com/uber-go/zap/pull/862
-[#865]: https://github.com/uber-go/zap/pull/865
-[#867]: https://github.com/uber-go/zap/pull/867
-[#881]: https://github.com/uber-go/zap/pull/881
-[#903]: https://github.com/uber-go/zap/pull/903
-[#912]: https://github.com/uber-go/zap/pull/912
-[#913]: https://github.com/uber-go/zap/pull/913
-[#928]: https://github.com/uber-go/zap/pull/928
-[#931]: https://github.com/uber-go/zap/pull/931
-[#936]: https://github.com/uber-go/zap/pull/936
diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md
deleted file mode 100644
index e327d9aa5c..0000000000
--- a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,75 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as
-contributors and maintainers pledge to making participation in our project and
-our community a harassment-free experience for everyone, regardless of age,
-body size, disability, ethnicity, gender identity and expression, level of
-experience, nationality, personal appearance, race, religion, or sexual
-identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment
-include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or
- advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic
- address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
- professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable
-behavior and are expected to take appropriate and fair corrective action in
-response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or
-reject comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct, or to ban temporarily or
-permanently any contributor for other behaviors that they deem inappropriate,
-threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community. Examples of
-representing a project or community include using an official project e-mail
-address, posting via an official social media account, or acting as an
-appointed representative at an online or offline event. Representation of a
-project may be further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting the project team at oss-conduct@uber.com. The project
-team will review and investigate all complaints, and will respond in a way
-that it deems appropriate to the circumstances. The project team is obligated
-to maintain confidentiality with regard to the reporter of an incident.
-Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good
-faith may face temporary or permanent repercussions as determined by other
-members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage],
-version 1.4, available at
-[http://contributor-covenant.org/version/1/4][version].
-
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md
deleted file mode 100644
index ea02f3cae2..0000000000
--- a/vendor/go.uber.org/zap/CONTRIBUTING.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Contributing
-
-We'd love your help making zap the very best structured logging library in Go!
-
-If you'd like to add new exported APIs, please [open an issue][open-issue]
-describing your proposal — discussing API changes ahead of time makes
-pull request review much smoother. In your issue, pull request, and any other
-communications, please remember to treat your fellow contributors with
-respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously.
-
-Note that you'll need to sign [Uber's Contributor License Agreement][cla]
-before we can accept any of your contributions. If necessary, a bot will remind
-you to accept the CLA when you open your pull request.
-
-## Setup
-
-[Fork][fork], then clone the repository:
-
-```bash
-mkdir -p $GOPATH/src/go.uber.org
-cd $GOPATH/src/go.uber.org
-git clone git@github.com:your_github_username/zap.git
-cd zap
-git remote add upstream https://github.com/uber-go/zap.git
-git fetch upstream
-```
-
-Make sure that the tests and the linters pass:
-
-```bash
-make test
-make lint
-```
-
-## Making Changes
-
-Start by creating a new branch for your changes:
-
-```bash
-cd $GOPATH/src/go.uber.org/zap
-git checkout master
-git fetch upstream
-git rebase upstream/master
-git checkout -b cool_new_feature
-```
-
-Make your changes, then ensure that `make lint` and `make test` still pass. If
-you're satisfied with your changes, push them to your fork.
-
-```bash
-git push origin cool_new_feature
-```
-
-Then use the GitHub UI to open a pull request.
-
-At this point, you're waiting on us to review your changes. We _try_ to respond
-to issues and pull requests within a few business days, and we may suggest some
-improvements or alternatives. Once your changes are approved, one of the
-project maintainers will merge them.
-
-We're much more likely to approve your changes if you:
-
-- Add tests for new functionality.
-- Write a [good commit message][commit-message].
-- Maintain backward compatibility.
-
-[fork]: https://github.com/uber-go/zap/fork
-[open-issue]: https://github.com/uber-go/zap/issues/new
-[cla]: https://cla-assistant.io/uber-go/zap
-[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md
deleted file mode 100644
index b183b20bc1..0000000000
--- a/vendor/go.uber.org/zap/FAQ.md
+++ /dev/null
@@ -1,164 +0,0 @@
-# Frequently Asked Questions
-
-## Design
-
-### Why spend so much effort on logger performance?
-
-Of course, most applications won't notice the impact of a slow logger: they
-already take tens or hundreds of milliseconds for each operation, so an extra
-millisecond doesn't matter.
-
-On the other hand, why *not* make structured logging fast? The `SugaredLogger`
-isn't any harder to use than other logging packages, and the `Logger` makes
-structured logging possible in performance-sensitive contexts. Across a fleet
-of Go microservices, making each application even slightly more efficient adds
-up quickly.
-
-### Why aren't `Logger` and `SugaredLogger` interfaces?
-
-Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and
-`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points
-out][go-proverbs], "The bigger the interface, the weaker the abstraction."
-Interfaces are also rigid — *any* change requires releasing a new major
-version, since it breaks all third-party implementations.
-
-Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much
-abstraction, and it lets us add methods without introducing breaking changes.
-Your applications should define and depend upon an interface that includes
-just the methods you use.
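-
-For example, a minimal sketch of such a caller-defined interface (the names
-here are illustrative, not part of zap's API):
-
-```go
-// Depend on a small, locally defined interface instead of *zap.Logger.
-type warnLogger interface {
-	Warn(msg string, fields ...zap.Field)
-}
-
-func retry(log warnLogger, attempt int) {
-	log.Warn("retrying", zap.Int("attempt", attempt))
-}
-```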
-
-### Why are some of my logs missing?
-
-Logs are dropped intentionally by zap when sampling is enabled. The production
-configuration (as returned by `NewProductionConfig()`) enables sampling, which
-causes repeated logs within a second to be sampled. For more details on why
-sampling is enabled, see [Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs).
-
-### Why sample application logs?
-
-Applications often experience runs of errors, either because of a bug or
-because of a misbehaving user. Logging errors is usually a good idea, but it
-can easily make this bad situation worse: not only is your application coping
-with a flood of errors, it's also spending extra CPU cycles and I/O logging
-those errors. Since writes are typically serialized, logging limits throughput
-when you need it most.
-
-Sampling fixes this problem by dropping repetitive log entries. Under normal
-conditions, your application writes out every entry. When similar entries are
-logged hundreds or thousands of times each second, though, zap begins dropping
-duplicates to preserve throughput.
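-
-As a minimal sketch (the numbers mirror zap's production defaults; tune them
-for your own traffic), sampling is configured through the `Sampling` field of
-`zap.Config`:
-
-```go
-cfg := zap.NewProductionConfig()
-// Keep the first 100 entries with a given level and message each second,
-// then every 100th entry after that. Setting Sampling to nil disables
-// sampling entirely.
-cfg.Sampling = &zap.SamplingConfig{Initial: 100, Thereafter: 100}
-
-logger, err := cfg.Build()
-if err != nil {
-	panic(err)
-}
-defer logger.Sync()
-```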
-
-### Why do the structured logging APIs take a message in addition to fields?
-
-Subjectively, we find it helpful to accompany structured context with a brief
-description. This isn't critical during development, but it makes debugging
-and operating unfamiliar systems much easier.
-
-More concretely, zap's sampling algorithm uses the message to identify
-duplicate entries. In our experience, this is a practical middle ground
-between random sampling (which often drops the exact entry that you need while
-debugging) and hashing the complete entry (which is prohibitively expensive).
-
-### Why include package-global loggers?
-
-Since so many other logging packages include a global logger, many
-applications aren't designed to accept loggers as explicit parameters.
-Changing function signatures is often a breaking change, so zap includes
-global loggers to simplify migration.
-
-Avoid them where possible.
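-
-If you do rely on the globals while migrating, a minimal sketch looks like
-this:
-
-```go
-logger, _ := zap.NewProduction()
-undo := zap.ReplaceGlobals(logger)
-defer undo()
-
-zap.L().Info("using the global Logger")
-zap.S().Infow("using the global SugaredLogger", "key", "value")
-```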
-
-### Why include dedicated Panic and Fatal log levels?
-
-In general, application code should handle errors gracefully instead of using
-`panic` or `os.Exit`. However, every rule has exceptions, and it's common to
-crash when an error is truly unrecoverable. To avoid losing any information
-— especially the reason for the crash — the logger must flush any
-buffered entries before the process exits.
-
-Zap makes this easy by offering `Panic` and `Fatal` logging methods that
-automatically flush before exiting. Of course, this doesn't guarantee that
-logs will never be lost, but it eliminates a common error.
-
-See the discussion in uber-go/zap#207 for more details.
-
-### What's `DPanic`?
-
-`DPanic` stands for "panic in development." In development, it logs at
-`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to
-catch errors that are theoretically possible, but shouldn't actually happen,
-*without* crashing in production.
-
-If you've ever written code like this, you need `DPanic`:
-
-```go
-if err != nil {
- panic(fmt.Sprintf("shouldn't ever get here: %v", err))
-}
-```
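-
-With zap, a rough equivalent (assuming a `*zap.Logger` named `logger`) panics
-in development mode and logs at `ErrorLevel` in production:
-
-```go
-if err != nil {
-	logger.DPanic("shouldn't ever get here", zap.Error(err))
-}
-```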
-
-## Installation
-
-### What does the error `expects import "go.uber.org/zap"` mean?
-
-Either zap was installed incorrectly or you're referencing the wrong package
-name in your code.
-
-Zap's source code happens to be hosted on GitHub, but the [import
-path][import-path] is `go.uber.org/zap`. This gives us, the project
-maintainers, the freedom to move the source code if necessary. However, it
-means that you need to take a little care when installing and using the
-package.
-
-If you follow two simple rules, everything should work: install zap with `go
-get -u go.uber.org/zap`, and always import it in your code with `import
-"go.uber.org/zap"`. Your code shouldn't contain *any* references to
-`github.com/uber-go/zap`.
-
-## Usage
-
-### Does zap support log rotation?
-
-Zap doesn't natively support rotating log files, since we prefer to leave this
-to an external program like `logrotate`.
-
-However, it's easy to integrate a log rotation package like
-[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`.
-
-```go
-// lumberjack.Logger is already safe for concurrent use, so we don't need to
-// lock it.
-w := zapcore.AddSync(&lumberjack.Logger{
- Filename: "/var/log/myapp/foo.log",
- MaxSize: 500, // megabytes
- MaxBackups: 3,
- MaxAge: 28, // days
-})
-core := zapcore.NewCore(
- zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
- w,
- zap.InfoLevel,
-)
-logger := zap.New(core)
-```
-
-## Extensions
-
-We'd love to support every logging need within zap itself, but we're only
-familiar with a handful of log ingestion systems, flag-parsing packages, and
-the like. Rather than merging code that we can't effectively debug and
-support, we'd rather grow an ecosystem of zap extensions.
-
-We're aware of the following extensions, but haven't used them ourselves:
-
-| Package | Integration |
-| --- | --- |
-| `github.com/tchap/zapext` | Sentry, syslog |
-| `github.com/fgrosse/zaptest` | Ginkgo |
-| `github.com/blendle/zapdriver` | Stackdriver |
-| `github.com/moul/zapgorm` | Gorm |
-| `github.com/moul/zapfilter` | Advanced filtering rules |
-
-[go-proverbs]: https://go-proverbs.github.io/
-[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths
-[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2
diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt
deleted file mode 100644
index 6652bed45f..0000000000
--- a/vendor/go.uber.org/zap/LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2016-2017 Uber Technologies, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile
deleted file mode 100644
index 9b1bc3b0e1..0000000000
--- a/vendor/go.uber.org/zap/Makefile
+++ /dev/null
@@ -1,73 +0,0 @@
-export GOBIN ?= $(shell pwd)/bin
-
-GOLINT = $(GOBIN)/golint
-STATICCHECK = $(GOBIN)/staticcheck
-BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
-
-# Directories containing independent Go modules.
-#
-# We track coverage only for the main module.
-MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test
-
-# Many Go tools take file globs or directories as arguments instead of packages.
-GO_FILES := $(shell \
- find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
- -o -name '*.go' -print | cut -b3-)
-
-.PHONY: all
-all: lint test
-
-.PHONY: lint
-lint: $(GOLINT) $(STATICCHECK)
- @rm -rf lint.log
- @echo "Checking formatting..."
- @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
- @echo "Checking vet..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking lint..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking staticcheck..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking for unresolved FIXMEs..."
- @git grep -i fixme | grep -v -e Makefile | tee -a lint.log
- @echo "Checking for license headers..."
- @./checklicense.sh | tee -a lint.log
- @[ ! -s lint.log ]
- @echo "Checking 'go mod tidy'..."
- @make tidy
- @if ! git diff --quiet; then \
- echo "'go mod tidy' resulted in changes or working tree is dirty:"; \
- git --no-pager diff; \
- fi
-
-$(GOLINT):
- cd tools && go install golang.org/x/lint/golint
-
-$(STATICCHECK):
- cd tools && go install honnef.co/go/tools/cmd/staticcheck
-
-.PHONY: test
-test:
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true
-
-.PHONY: cover
-cover:
- go test -race -coverprofile=cover.out -coverpkg=./... ./...
- go tool cover -html=cover.out -o cover.html
-
-.PHONY: bench
-BENCH ?= .
-bench:
- @$(foreach dir,$(MODULE_DIRS), ( \
- cd $(dir) && \
- go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \
- ) &&) true
-
-.PHONY: updatereadme
-updatereadme:
- rm -f README.md
- cat .readme.tmpl | go run internal/readme/readme.go > README.md
-
-.PHONY: tidy
-tidy:
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true
diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md
deleted file mode 100644
index a553a428c8..0000000000
--- a/vendor/go.uber.org/zap/README.md
+++ /dev/null
@@ -1,133 +0,0 @@
-# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
-
-Blazing fast, structured, leveled logging in Go.
-
-## Installation
-
-`go get -u go.uber.org/zap`
-
-Note that zap only supports the two most recent minor versions of Go.
-
-## Quick Start
-
-In contexts where performance is nice, but not critical, use the
-`SugaredLogger`. It's 4-10x faster than other structured logging
-packages and includes both structured and `printf`-style APIs.
-
-```go
-logger, _ := zap.NewProduction()
-defer logger.Sync() // flushes buffer, if any
-sugar := logger.Sugar()
-sugar.Infow("failed to fetch URL",
- // Structured context as loosely typed key-value pairs.
- "url", url,
- "attempt", 3,
- "backoff", time.Second,
-)
-sugar.Infof("Failed to fetch URL: %s", url)
-```
-
-When performance and type safety are critical, use the `Logger`. It's even
-faster than the `SugaredLogger` and allocates far less, but it only supports
-structured logging.
-
-```go
-logger, _ := zap.NewProduction()
-defer logger.Sync()
-logger.Info("failed to fetch URL",
- // Structured context as strongly typed Field values.
- zap.String("url", url),
- zap.Int("attempt", 3),
- zap.Duration("backoff", time.Second),
-)
-```
-
-See the [documentation][doc] and [FAQ](FAQ.md) for more details.
-
-## Performance
-
-For applications that log in the hot path, reflection-based serialization and
-string formatting are prohibitively expensive — they're CPU-intensive
-and make many small allocations. Put differently, using `encoding/json` and
-`fmt.Fprintf` to log tons of `interface{}`s makes your application slow.
-
-Zap takes a different approach. It includes a reflection-free, zero-allocation
-JSON encoder, and the base `Logger` strives to avoid serialization overhead
-and allocations wherever possible. By building the high-level `SugaredLogger`
-on that foundation, zap lets users _choose_ when they need to count every
-allocation and when they'd prefer a more familiar, loosely typed API.
-
-As measured by its own [benchmarking suite][], not only is zap more performant
-than comparable structured logging packages — it's also faster than the
-standard library. Like all benchmarks, take these with a grain of salt.<sup id="anchor-versions">[1](#footnote-versions)</sup>
-
-Log a message and 10 fields:
-
-| Package | Time | Time % to zap | Objects Allocated |
-| :------------------ | :---------: | :-----------: | :---------------: |
-| :zap: zap | 2900 ns/op | +0% | 5 allocs/op |
-| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op |
-| zerolog | 10639 ns/op | +267% | 32 allocs/op |
-| go-kit | 14434 ns/op | +398% | 59 allocs/op |
-| logrus | 17104 ns/op | +490% | 81 allocs/op |
-| apex/log | 32424 ns/op | +1018% | 66 allocs/op |
-| log15 | 33579 ns/op | +1058% | 76 allocs/op |
-
-Log a message with a logger that already has 10 fields of context:
-
-| Package | Time | Time % to zap | Objects Allocated |
-| :------------------ | :---------: | :-----------: | :---------------: |
-| :zap: zap | 373 ns/op | +0% | 0 allocs/op |
-| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op |
-| zerolog | 288 ns/op | -23% | 0 allocs/op |
-| go-kit | 11785 ns/op | +3060% | 58 allocs/op |
-| logrus | 19629 ns/op | +5162% | 70 allocs/op |
-| log15 | 21866 ns/op | +5762% | 72 allocs/op |
-| apex/log | 30890 ns/op | +8182% | 55 allocs/op |
-
-Log a static string, without any context or `printf`-style templating:
-
-| Package | Time | Time % to zap | Objects Allocated |
-| :------------------ | :--------: | :-----------: | :---------------: |
-| :zap: zap | 381 ns/op | +0% | 0 allocs/op |
-| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op |
-| zerolog | 369 ns/op | -3% | 0 allocs/op |
-| standard library | 385 ns/op | +1% | 2 allocs/op |
-| go-kit | 606 ns/op | +59% | 11 allocs/op |
-| logrus | 1730 ns/op | +354% | 25 allocs/op |
-| apex/log | 1998 ns/op | +424% | 7 allocs/op |
-| log15 | 4546 ns/op | +1093% | 22 allocs/op |
-
-## Development Status: Stable
-
-All APIs are finalized, and no breaking changes will be made in the 1.x series
-of releases. Users of semver-aware dependency management systems should pin
-zap to `^1`.
-
-## Contributing
-
-We encourage and support an active, healthy community of contributors —
-including you! Details are in the [contribution guide](CONTRIBUTING.md) and
-the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
-issues and pull requests, but you can also report any negative conduct to
-oss-conduct@uber.com. That email list is a private, safe space; even the zap
-maintainers don't have access, so don't hesitate to hold us to a high
-standard.
-
-
-
-Released under the [MIT License](LICENSE.txt).
-
-<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
-benchmarking against slightly older versions of other packages. Versions are
-pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
-
-[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
-[doc]: https://pkg.go.dev/go.uber.org/zap
-[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
-[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
-[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
-[cov]: https://codecov.io/gh/uber-go/zap
-[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
-[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go
deleted file mode 100644
index 5be3704a3e..0000000000
--- a/vendor/go.uber.org/zap/array.go
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "time"
-
- "go.uber.org/zap/zapcore"
-)
-
-// Array constructs a field with the given key and ArrayMarshaler. It provides
-// a flexible, but still type-safe and efficient, way to add array-like types
-// to the logging context. The struct's MarshalLogArray method is called lazily.
-func Array(key string, val zapcore.ArrayMarshaler) Field {
- return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val}
-}
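-
-// As an illustrative sketch (the names below are hypothetical, not part of
-// this package), a named slice type can implement zapcore.ArrayMarshaler and
-// then be logged lazily with Array:
-//
-//	type userIDs []int64
-//
-//	func (ids userIDs) MarshalLogArray(arr zapcore.ArrayEncoder) error {
-//		for i := range ids {
-//			arr.AppendInt64(ids[i])
-//		}
-//		return nil
-//	}
-//
-//	logger.Info("notified users", zap.Array("ids", userIDs{1, 2, 3}))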
-
-// Bools constructs a field that carries a slice of bools.
-func Bools(key string, bs []bool) Field {
- return Array(key, bools(bs))
-}
-
-// ByteStrings constructs a field that carries a slice of []byte, each of which
-// must be UTF-8 encoded text.
-func ByteStrings(key string, bss [][]byte) Field {
- return Array(key, byteStringsArray(bss))
-}
-
-// Complex128s constructs a field that carries a slice of complex numbers.
-func Complex128s(key string, nums []complex128) Field {
- return Array(key, complex128s(nums))
-}
-
-// Complex64s constructs a field that carries a slice of complex numbers.
-func Complex64s(key string, nums []complex64) Field {
- return Array(key, complex64s(nums))
-}
-
-// Durations constructs a field that carries a slice of time.Durations.
-func Durations(key string, ds []time.Duration) Field {
- return Array(key, durations(ds))
-}
-
-// Float64s constructs a field that carries a slice of floats.
-func Float64s(key string, nums []float64) Field {
- return Array(key, float64s(nums))
-}
-
-// Float32s constructs a field that carries a slice of floats.
-func Float32s(key string, nums []float32) Field {
- return Array(key, float32s(nums))
-}
-
-// Ints constructs a field that carries a slice of integers.
-func Ints(key string, nums []int) Field {
- return Array(key, ints(nums))
-}
-
-// Int64s constructs a field that carries a slice of integers.
-func Int64s(key string, nums []int64) Field {
- return Array(key, int64s(nums))
-}
-
-// Int32s constructs a field that carries a slice of integers.
-func Int32s(key string, nums []int32) Field {
- return Array(key, int32s(nums))
-}
-
-// Int16s constructs a field that carries a slice of integers.
-func Int16s(key string, nums []int16) Field {
- return Array(key, int16s(nums))
-}
-
-// Int8s constructs a field that carries a slice of integers.
-func Int8s(key string, nums []int8) Field {
- return Array(key, int8s(nums))
-}
-
-// Strings constructs a field that carries a slice of strings.
-func Strings(key string, ss []string) Field {
- return Array(key, stringArray(ss))
-}
-
-// Times constructs a field that carries a slice of time.Times.
-func Times(key string, ts []time.Time) Field {
- return Array(key, times(ts))
-}
-
-// Uints constructs a field that carries a slice of unsigned integers.
-func Uints(key string, nums []uint) Field {
- return Array(key, uints(nums))
-}
-
-// Uint64s constructs a field that carries a slice of unsigned integers.
-func Uint64s(key string, nums []uint64) Field {
- return Array(key, uint64s(nums))
-}
-
-// Uint32s constructs a field that carries a slice of unsigned integers.
-func Uint32s(key string, nums []uint32) Field {
- return Array(key, uint32s(nums))
-}
-
-// Uint16s constructs a field that carries a slice of unsigned integers.
-func Uint16s(key string, nums []uint16) Field {
- return Array(key, uint16s(nums))
-}
-
-// Uint8s constructs a field that carries a slice of unsigned integers.
-func Uint8s(key string, nums []uint8) Field {
- return Array(key, uint8s(nums))
-}
-
-// Uintptrs constructs a field that carries a slice of pointer addresses.
-func Uintptrs(key string, us []uintptr) Field {
- return Array(key, uintptrs(us))
-}
-
-// Errors constructs a field that carries a slice of errors.
-func Errors(key string, errs []error) Field {
- return Array(key, errArray(errs))
-}
-
-type bools []bool
-
-func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range bs {
- arr.AppendBool(bs[i])
- }
- return nil
-}
-
-type byteStringsArray [][]byte
-
-func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range bss {
- arr.AppendByteString(bss[i])
- }
- return nil
-}
-
-type complex128s []complex128
-
-func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendComplex128(nums[i])
- }
- return nil
-}
-
-type complex64s []complex64
-
-func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendComplex64(nums[i])
- }
- return nil
-}
-
-type durations []time.Duration
-
-func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range ds {
- arr.AppendDuration(ds[i])
- }
- return nil
-}
-
-type float64s []float64
-
-func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendFloat64(nums[i])
- }
- return nil
-}
-
-type float32s []float32
-
-func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendFloat32(nums[i])
- }
- return nil
-}
-
-type ints []int
-
-func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendInt(nums[i])
- }
- return nil
-}
-
-type int64s []int64
-
-func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendInt64(nums[i])
- }
- return nil
-}
-
-type int32s []int32
-
-func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendInt32(nums[i])
- }
- return nil
-}
-
-type int16s []int16
-
-func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendInt16(nums[i])
- }
- return nil
-}
-
-type int8s []int8
-
-func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendInt8(nums[i])
- }
- return nil
-}
-
-type stringArray []string
-
-func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range ss {
- arr.AppendString(ss[i])
- }
- return nil
-}
-
-type times []time.Time
-
-func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range ts {
- arr.AppendTime(ts[i])
- }
- return nil
-}
-
-type uints []uint
-
-func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendUint(nums[i])
- }
- return nil
-}
-
-type uint64s []uint64
-
-func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendUint64(nums[i])
- }
- return nil
-}
-
-type uint32s []uint32
-
-func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendUint32(nums[i])
- }
- return nil
-}
-
-type uint16s []uint16
-
-func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendUint16(nums[i])
- }
- return nil
-}
-
-type uint8s []uint8
-
-func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendUint8(nums[i])
- }
- return nil
-}
-
-type uintptrs []uintptr
-
-func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range nums {
- arr.AppendUintptr(nums[i])
- }
- return nil
-}
diff --git a/vendor/go.uber.org/zap/array_go118.go b/vendor/go.uber.org/zap/array_go118.go
deleted file mode 100644
index d0d2c49d69..0000000000
--- a/vendor/go.uber.org/zap/array_go118.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright (c) 2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-//go:build go1.18
-// +build go1.18
-
-package zap
-
-import (
- "fmt"
-
- "go.uber.org/zap/zapcore"
-)
-
-// Objects constructs a field with the given key, holding a list of the
-// provided objects that can be marshaled by Zap.
-//
-// Note that these objects must implement zapcore.ObjectMarshaler directly.
-// That is, if you're trying to marshal a []Request, the MarshalLogObject
-// method must be declared on the Request type, not its pointer (*Request).
-// If it's on the pointer, use ObjectValues.
-//
-// Given an object that implements MarshalLogObject on the value receiver, you
-// can log a slice of those objects with Objects like so:
-//
-// type Author struct{ ... }
-// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
-//
-// var authors []Author = ...
-// logger.Info("loading article", zap.Objects("authors", authors))
-//
-// Similarly, given a type that implements MarshalLogObject on its pointer
-// receiver, you can log a slice of pointers to that object with Objects like
-// so:
-//
-// type Request struct{ ... }
-// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
-//
-// var requests []*Request = ...
-// logger.Info("sending requests", zap.Objects("requests", requests))
-//
-// If instead, you have a slice of values of such an object, use the
-// ObjectValues constructor.
-//
-// var requests []Request = ...
-// logger.Info("sending requests", zap.ObjectValues("requests", requests))
-func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
- return Array(key, objects[T](values))
-}
-
-type objects[T zapcore.ObjectMarshaler] []T
-
-func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for _, o := range os {
- if err := arr.AppendObject(o); err != nil {
- return err
- }
- }
- return nil
-}
-
-// ObjectMarshalerPtr is a constraint that specifies that the given type
-// implements zapcore.ObjectMarshaler on a pointer receiver.
-type ObjectMarshalerPtr[T any] interface {
- *T
- zapcore.ObjectMarshaler
-}
-
-// ObjectValues constructs a field with the given key, holding a list of the
-// provided objects, where pointers to these objects can be marshaled by Zap.
-//
-// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
-// That is, if you're trying to marshal a []Request, the MarshalLogObject
-// method must be declared on the *Request type, not the value (Request).
-// If it's on the value, use Objects.
-//
-// Given an object that implements MarshalLogObject on the pointer receiver,
-// you can log a slice of those objects with ObjectValues like so:
-//
-// type Request struct{ ... }
-// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
-//
-// var requests []Request = ...
-// logger.Info("sending requests", zap.ObjectValues("requests", requests))
-//
-// If instead, you have a slice of pointers of such an object, use the Objects
-// field constructor.
-//
-// var requests []*Request = ...
-// logger.Info("sending requests", zap.Objects("requests", requests))
-func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
- return Array(key, objectValues[T, P](values))
-}
-
-type objectValues[T any, P ObjectMarshalerPtr[T]] []T
-
-func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range os {
- // It is necessary for us to explicitly reference the "P" type.
- // We cannot simply pass "&os[i]" to AppendObject because its type
- // is "*T", which the type system does not consider as
- // implementing ObjectMarshaler.
-		// Only the type "P" satisfies ObjectMarshaler, so we have
-		// to convert "*T" to "P" explicitly.
- var p P = &os[i]
- if err := arr.AppendObject(p); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Stringers constructs a field with the given key, holding a list of the
-// output provided by the value's String method.
-//
-// Given an object that implements String on the value receiver, you
-// can log a slice of those objects with Stringers like so:
-//
-// type Request struct{ ... }
-// func (a Request) String() string
-//
-// var requests []Request = ...
-// logger.Info("sending requests", zap.Stringers("requests", requests))
-//
-// Note that these objects must implement fmt.Stringer directly.
-// That is, if you're trying to marshal a []Request, the String method
-// must be declared on the Request type, not its pointer (*Request).
-func Stringers[T fmt.Stringer](key string, values []T) Field {
- return Array(key, stringers[T](values))
-}
-
-type stringers[T fmt.Stringer] []T
-
-func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for _, o := range os {
- arr.AppendString(o.String())
- }
- return nil
-}
diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go
deleted file mode 100644
index 9e929cd98e..0000000000
--- a/vendor/go.uber.org/zap/buffer/buffer.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package buffer provides a thin wrapper around a byte slice. Unlike the
-// standard library's bytes.Buffer, it supports a portion of the strconv
-// package's zero-allocation formatters.
-package buffer // import "go.uber.org/zap/buffer"
-
-import (
- "strconv"
- "time"
-)
-
-const _size = 1024 // by default, create 1 KiB buffers
-
-// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so
-// the only way to construct one is via a Pool.
-type Buffer struct {
- bs []byte
- pool Pool
-}
-
-// AppendByte writes a single byte to the Buffer.
-func (b *Buffer) AppendByte(v byte) {
- b.bs = append(b.bs, v)
-}
-
-// AppendString writes a string to the Buffer.
-func (b *Buffer) AppendString(s string) {
- b.bs = append(b.bs, s...)
-}
-
-// AppendInt appends an integer to the underlying buffer (assuming base 10).
-func (b *Buffer) AppendInt(i int64) {
- b.bs = strconv.AppendInt(b.bs, i, 10)
-}
-
-// AppendTime appends the time formatted using the specified layout.
-func (b *Buffer) AppendTime(t time.Time, layout string) {
- b.bs = t.AppendFormat(b.bs, layout)
-}
-
-// AppendUint appends an unsigned integer to the underlying buffer (assuming
-// base 10).
-func (b *Buffer) AppendUint(i uint64) {
- b.bs = strconv.AppendUint(b.bs, i, 10)
-}
-
-// AppendBool appends a bool to the underlying buffer.
-func (b *Buffer) AppendBool(v bool) {
- b.bs = strconv.AppendBool(b.bs, v)
-}
-
-// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN
-// or +/- Inf.
-func (b *Buffer) AppendFloat(f float64, bitSize int) {
- b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize)
-}
-
-// Len returns the length of the underlying byte slice.
-func (b *Buffer) Len() int {
- return len(b.bs)
-}
-
-// Cap returns the capacity of the underlying byte slice.
-func (b *Buffer) Cap() int {
- return cap(b.bs)
-}
-
-// Bytes returns a mutable reference to the underlying byte slice.
-func (b *Buffer) Bytes() []byte {
- return b.bs
-}
-
-// String returns a string copy of the underlying byte slice.
-func (b *Buffer) String() string {
- return string(b.bs)
-}
-
-// Reset resets the underlying byte slice. Subsequent writes re-use the slice's
-// backing array.
-func (b *Buffer) Reset() {
- b.bs = b.bs[:0]
-}
-
-// Write implements io.Writer.
-func (b *Buffer) Write(bs []byte) (int, error) {
- b.bs = append(b.bs, bs...)
- return len(bs), nil
-}
-
-// WriteByte writes a single byte to the Buffer.
-//
-// The returned error is always nil; the function signature is compatible
-// with bytes.Buffer and bufio.Writer.
-func (b *Buffer) WriteByte(v byte) error {
- b.AppendByte(v)
- return nil
-}
-
-// WriteString writes a string to the Buffer.
-//
-// The returned error is always nil; the function signature is compatible
-// with bytes.Buffer and bufio.Writer.
-func (b *Buffer) WriteString(s string) (int, error) {
- b.AppendString(s)
- return len(s), nil
-}
-
-// TrimNewline trims any final "\n" byte from the end of the buffer.
-func (b *Buffer) TrimNewline() {
- if i := len(b.bs) - 1; i >= 0 {
- if b.bs[i] == '\n' {
- b.bs = b.bs[:i]
- }
- }
-}
-
-// Free returns the Buffer to its Pool.
-//
-// Callers must not retain references to the Buffer after calling Free.
-func (b *Buffer) Free() {
- b.pool.put(b)
-}
diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go
deleted file mode 100644
index 8fb3e202cf..0000000000
--- a/vendor/go.uber.org/zap/buffer/pool.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package buffer
-
-import "sync"
-
-// A Pool is a type-safe wrapper around a sync.Pool.
-type Pool struct {
- p *sync.Pool
-}
-
-// NewPool constructs a new Pool.
-func NewPool() Pool {
- return Pool{p: &sync.Pool{
- New: func() interface{} {
- return &Buffer{bs: make([]byte, 0, _size)}
- },
- }}
-}
-
-// Get retrieves a Buffer from the pool, creating one if necessary.
-func (p Pool) Get() *Buffer {
- buf := p.p.Get().(*Buffer)
- buf.Reset()
- buf.pool = p
- return buf
-}
-
-func (p Pool) put(buf *Buffer) {
- p.p.Put(buf)
-}
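-
-// Illustrative usage sketch (the names here are hypothetical): callers take a
-// Buffer from a Pool and return it with Free once they're done.
-//
-//	var bufPool = NewPool()
-//
-//	func render(msg string) string {
-//		buf := bufPool.Get()
-//		defer buf.Free()
-//		buf.AppendString(msg)
-//		return buf.String()
-//	}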
diff --git a/vendor/go.uber.org/zap/checklicense.sh b/vendor/go.uber.org/zap/checklicense.sh
deleted file mode 100644
index 345ac8b89a..0000000000
--- a/vendor/go.uber.org/zap/checklicense.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash -e
-
-ERROR_COUNT=0
-while read -r file
-do
- case "$(head -1 "${file}")" in
- *"Copyright (c) "*" Uber Technologies, Inc.")
- # everything's cool
- ;;
- *)
- echo "$file is missing license header."
- (( ERROR_COUNT++ ))
- ;;
- esac
-done < <(git ls-files "*\.go")
-
-exit $ERROR_COUNT
diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go
deleted file mode 100644
index ee6096766a..0000000000
--- a/vendor/go.uber.org/zap/config.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "errors"
- "sort"
- "time"
-
- "go.uber.org/zap/zapcore"
-)
-
-// SamplingConfig sets a sampling strategy for the logger. Sampling caps the
-// global CPU and I/O load that logging puts on your process while attempting
-// to preserve a representative subset of your logs.
-//
-// If specified, the Sampler will invoke the Hook after each decision.
-//
-// Values configured here are per-second. See zapcore.NewSamplerWithOptions for
-// details.
-type SamplingConfig struct {
- Initial int `json:"initial" yaml:"initial"`
- Thereafter int `json:"thereafter" yaml:"thereafter"`
- Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"`
-}
-
-// Config offers a declarative way to construct a logger. It doesn't do
-// anything that can't be done with New, Options, and the various
-// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to
-// toggle common options.
-//
-// Note that Config intentionally supports only the most common options. More
-// unusual logging setups (logging to network connections or message queues,
-// splitting output between multiple files, etc.) are possible, but require
-// direct use of the zapcore package. For sample code, see the package-level
-// BasicConfiguration and AdvancedConfiguration examples.
-//
-// For an example showing runtime log level changes, see the documentation for
-// AtomicLevel.
-type Config struct {
- // Level is the minimum enabled logging level. Note that this is a dynamic
- // level, so calling Config.Level.SetLevel will atomically change the log
- // level of all loggers descended from this config.
- Level AtomicLevel `json:"level" yaml:"level"`
- // Development puts the logger in development mode, which changes the
- // behavior of DPanicLevel and takes stacktraces more liberally.
- Development bool `json:"development" yaml:"development"`
- // DisableCaller stops annotating logs with the calling function's file
- // name and line number. By default, all logs are annotated.
- DisableCaller bool `json:"disableCaller" yaml:"disableCaller"`
- // DisableStacktrace completely disables automatic stacktrace capturing. By
- // default, stacktraces are captured for WarnLevel and above logs in
- // development and ErrorLevel and above in production.
- DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"`
- // Sampling sets a sampling policy. A nil SamplingConfig disables sampling.
- Sampling *SamplingConfig `json:"sampling" yaml:"sampling"`
- // Encoding sets the logger's encoding. Valid values are "json" and
- // "console", as well as any third-party encodings registered via
- // RegisterEncoder.
- Encoding string `json:"encoding" yaml:"encoding"`
- // EncoderConfig sets options for the chosen encoder. See
- // zapcore.EncoderConfig for details.
- EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"`
- // OutputPaths is a list of URLs or file paths to write logging output to.
- // See Open for details.
- OutputPaths []string `json:"outputPaths" yaml:"outputPaths"`
- // ErrorOutputPaths is a list of URLs to write internal logger errors to.
- // The default is standard error.
- //
- // Note that this setting only affects internal errors; for sample code that
- // sends error-level logs to a different location from info- and debug-level
- // logs, see the package-level AdvancedConfiguration example.
- ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"`
- // InitialFields is a collection of fields to add to the root logger.
- InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"`
-}
-
-// NewProductionEncoderConfig returns an opinionated EncoderConfig for
-// production environments.
-func NewProductionEncoderConfig() zapcore.EncoderConfig {
- return zapcore.EncoderConfig{
- TimeKey: "ts",
- LevelKey: "level",
- NameKey: "logger",
- CallerKey: "caller",
- FunctionKey: zapcore.OmitKey,
- MessageKey: "msg",
- StacktraceKey: "stacktrace",
- LineEnding: zapcore.DefaultLineEnding,
- EncodeLevel: zapcore.LowercaseLevelEncoder,
- EncodeTime: zapcore.EpochTimeEncoder,
- EncodeDuration: zapcore.SecondsDurationEncoder,
- EncodeCaller: zapcore.ShortCallerEncoder,
- }
-}
-
-// NewProductionConfig is a reasonable production logging configuration.
-// Logging is enabled at InfoLevel and above.
-//
-// It uses a JSON encoder, writes to standard error, and enables sampling.
-// Stacktraces are automatically included on logs of ErrorLevel and above.
-func NewProductionConfig() Config {
- return Config{
- Level: NewAtomicLevelAt(InfoLevel),
- Development: false,
- Sampling: &SamplingConfig{
- Initial: 100,
- Thereafter: 100,
- },
- Encoding: "json",
- EncoderConfig: NewProductionEncoderConfig(),
- OutputPaths: []string{"stderr"},
- ErrorOutputPaths: []string{"stderr"},
- }
-}
-
-// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for
-// development environments.
-func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
- return zapcore.EncoderConfig{
- // Keys can be anything except the empty string.
- TimeKey: "T",
- LevelKey: "L",
- NameKey: "N",
- CallerKey: "C",
- FunctionKey: zapcore.OmitKey,
- MessageKey: "M",
- StacktraceKey: "S",
- LineEnding: zapcore.DefaultLineEnding,
- EncodeLevel: zapcore.CapitalLevelEncoder,
- EncodeTime: zapcore.ISO8601TimeEncoder,
- EncodeDuration: zapcore.StringDurationEncoder,
- EncodeCaller: zapcore.ShortCallerEncoder,
- }
-}
-
-// NewDevelopmentConfig is a reasonable development logging configuration.
-// Logging is enabled at DebugLevel and above.
-//
-// It enables development mode (which makes DPanicLevel logs panic), uses a
-// console encoder, writes to standard error, and disables sampling.
-// Stacktraces are automatically included on logs of WarnLevel and above.
-func NewDevelopmentConfig() Config {
- return Config{
- Level: NewAtomicLevelAt(DebugLevel),
- Development: true,
- Encoding: "console",
- EncoderConfig: NewDevelopmentEncoderConfig(),
- OutputPaths: []string{"stderr"},
- ErrorOutputPaths: []string{"stderr"},
- }
-}
-
-// Build constructs a logger from the Config and Options.
-func (cfg Config) Build(opts ...Option) (*Logger, error) {
- enc, err := cfg.buildEncoder()
- if err != nil {
- return nil, err
- }
-
- sink, errSink, err := cfg.openSinks()
- if err != nil {
- return nil, err
- }
-
- if cfg.Level == (AtomicLevel{}) {
- return nil, errors.New("missing Level")
- }
-
- log := New(
- zapcore.NewCore(enc, sink, cfg.Level),
- cfg.buildOptions(errSink)...,
- )
- if len(opts) > 0 {
- log = log.WithOptions(opts...)
- }
- return log, nil
-}
-
-func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option {
- opts := []Option{ErrorOutput(errSink)}
-
- if cfg.Development {
- opts = append(opts, Development())
- }
-
- if !cfg.DisableCaller {
- opts = append(opts, AddCaller())
- }
-
- stackLevel := ErrorLevel
- if cfg.Development {
- stackLevel = WarnLevel
- }
- if !cfg.DisableStacktrace {
- opts = append(opts, AddStacktrace(stackLevel))
- }
-
- if scfg := cfg.Sampling; scfg != nil {
- opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core {
- var samplerOpts []zapcore.SamplerOption
- if scfg.Hook != nil {
- samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook))
- }
- return zapcore.NewSamplerWithOptions(
- core,
- time.Second,
- cfg.Sampling.Initial,
- cfg.Sampling.Thereafter,
- samplerOpts...,
- )
- }))
- }
-
- if len(cfg.InitialFields) > 0 {
- fs := make([]Field, 0, len(cfg.InitialFields))
- keys := make([]string, 0, len(cfg.InitialFields))
- for k := range cfg.InitialFields {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- fs = append(fs, Any(k, cfg.InitialFields[k]))
- }
- opts = append(opts, Fields(fs...))
- }
-
- return opts
-}
-
-func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) {
- sink, closeOut, err := Open(cfg.OutputPaths...)
- if err != nil {
- return nil, nil, err
- }
- errSink, _, err := Open(cfg.ErrorOutputPaths...)
- if err != nil {
- closeOut()
- return nil, nil, err
- }
- return sink, errSink, nil
-}
-
-func (cfg Config) buildEncoder() (zapcore.Encoder, error) {
- return newEncoder(cfg.Encoding, cfg.EncoderConfig)
-}
diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go
deleted file mode 100644
index 3c50d7b4d3..0000000000
--- a/vendor/go.uber.org/zap/doc.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package zap provides fast, structured, leveled logging.
-//
-// For applications that log in the hot path, reflection-based serialization
-// and string formatting are prohibitively expensive - they're CPU-intensive
-// and make many small allocations. Put differently, using json.Marshal and
-// fmt.Fprintf to log tons of interface{} makes your application slow.
-//
-// Zap takes a different approach. It includes a reflection-free,
-// zero-allocation JSON encoder, and the base Logger strives to avoid
-// serialization overhead and allocations wherever possible. By building the
-// high-level SugaredLogger on that foundation, zap lets users choose when
-// they need to count every allocation and when they'd prefer a more familiar,
-// loosely typed API.
-//
-// # Choosing a Logger
-//
-// In contexts where performance is nice, but not critical, use the
-// SugaredLogger. It's 4-10x faster than other structured logging packages and
-// supports both structured and printf-style logging. Like log15 and go-kit,
-// the SugaredLogger's structured logging APIs are loosely typed and accept a
-// variadic number of key-value pairs. (For more advanced use cases, they also
-// accept strongly typed fields - see the SugaredLogger.With documentation for
-// details.)
-//
-// sugar := zap.NewExample().Sugar()
-// defer sugar.Sync()
-// sugar.Infow("failed to fetch URL",
-// "url", "http://example.com",
-// "attempt", 3,
-// "backoff", time.Second,
-// )
-// sugar.Infof("failed to fetch URL: %s", "http://example.com")
-//
-// By default, loggers are unbuffered. However, since zap's low-level APIs
-// allow buffering, calling Sync before letting your process exit is a good
-// habit.
-//
-// In the rare contexts where every microsecond and every allocation matter,
-// use the Logger. It's even faster than the SugaredLogger and allocates far
-// less, but it only supports strongly-typed, structured logging.
-//
-// logger := zap.NewExample()
-// defer logger.Sync()
-// logger.Info("failed to fetch URL",
-// zap.String("url", "http://example.com"),
-// zap.Int("attempt", 3),
-// zap.Duration("backoff", time.Second),
-// )
-//
-// Choosing between the Logger and SugaredLogger doesn't need to be an
-// application-wide decision: converting between the two is simple and
-// inexpensive.
-//
-// logger := zap.NewExample()
-// defer logger.Sync()
-// sugar := logger.Sugar()
-// plain := sugar.Desugar()
-//
-// # Configuring Zap
-//
-// The simplest way to build a Logger is to use zap's opinionated presets:
-// NewExample, NewProduction, and NewDevelopment. These presets build a logger
-// with a single function call:
-//
-// logger, err := zap.NewProduction()
-// if err != nil {
-// log.Fatalf("can't initialize zap logger: %v", err)
-// }
-// defer logger.Sync()
-//
-// Presets are fine for small projects, but larger projects and organizations
-// naturally require a bit more customization. For most users, zap's Config
-// struct strikes the right balance between flexibility and convenience. See
-// the package-level BasicConfiguration example for sample code.
-//
-// More unusual configurations (splitting output between files, sending logs
-// to a message queue, etc.) are possible, but require direct use of
-// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
-// example for sample code.
-//
-// # Extending Zap
-//
-// The zap package itself is a relatively thin wrapper around the interfaces
-// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
-// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an
-// exception aggregation service, like Sentry or Rollbar) typically requires
-// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core
-// interfaces. See the zapcore documentation for details.
-//
-// Similarly, package authors can use the high-performance Encoder and Core
-// implementations in the zapcore package to build their own loggers.
-//
-// # Frequently Asked Questions
-//
-// An FAQ covering everything from installation errors to design decisions is
-// available at https://github.com/uber-go/zap/blob/master/FAQ.md.
-package zap // import "go.uber.org/zap"
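The doc comment above points readers at the package-level BasicConfiguration example for Config-driven setup. As a reference while that vendored file is removed, here is a sketch of that pattern: unmarshal JSON into zap.Config and call Build. The JSON literal is illustrative; the keys mirror Config's json struct tags.

```go
package main

import (
	"encoding/json"

	"go.uber.org/zap"
)

func main() {
	// Illustrative configuration; keys follow Config's json tags.
	rawJSON := []byte(`{
	  "level": "debug",
	  "encoding": "json",
	  "outputPaths": ["stdout"],
	  "errorOutputPaths": ["stderr"],
	  "encoderConfig": {
	    "messageKey": "message",
	    "levelKey": "level",
	    "levelEncoder": "lowercase"
	  }
	}`)

	var cfg zap.Config
	if err := json.Unmarshal(rawJSON, &cfg); err != nil {
		panic(err)
	}
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Info("constructed a logger from JSON configuration")
}
```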
diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go
deleted file mode 100644
index caa04ceefd..0000000000
--- a/vendor/go.uber.org/zap/encoder.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "errors"
- "fmt"
- "sync"
-
- "go.uber.org/zap/zapcore"
-)
-
-var (
- errNoEncoderNameSpecified = errors.New("no encoder name specified")
-
- _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){
- "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
- return zapcore.NewConsoleEncoder(encoderConfig), nil
- },
- "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
- return zapcore.NewJSONEncoder(encoderConfig), nil
- },
- }
- _encoderMutex sync.RWMutex
-)
-
-// RegisterEncoder registers an encoder constructor, which the Config struct
-// can then reference. By default, the "json" and "console" encoders are
-// registered.
-//
-// Attempting to register an encoder whose name is already taken returns an
-// error.
-func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error {
- _encoderMutex.Lock()
- defer _encoderMutex.Unlock()
- if name == "" {
- return errNoEncoderNameSpecified
- }
- if _, ok := _encoderNameToConstructor[name]; ok {
- return fmt.Errorf("encoder already registered for name %q", name)
- }
- _encoderNameToConstructor[name] = constructor
- return nil
-}
-
-func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
- if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil {
- return nil, errors.New("missing EncodeTime in EncoderConfig")
- }
-
- _encoderMutex.RLock()
- defer _encoderMutex.RUnlock()
- if name == "" {
- return nil, errNoEncoderNameSpecified
- }
- constructor, ok := _encoderNameToConstructor[name]
- if !ok {
- return nil, fmt.Errorf("no encoder registered for name %q", name)
- }
- return constructor(encoderConfig)
-}
diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go
deleted file mode 100644
index 65982a51e5..0000000000
--- a/vendor/go.uber.org/zap/error.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "sync"
-
- "go.uber.org/zap/zapcore"
-)
-
-var _errArrayElemPool = sync.Pool{New: func() interface{} {
- return &errArrayElem{}
-}}
-
-// Error is shorthand for the common idiom NamedError("error", err).
-func Error(err error) Field {
- return NamedError("error", err)
-}
-
-// NamedError constructs a field that lazily stores err.Error() under the
-// provided key. Errors which also implement fmt.Formatter (like those produced
-// by github.com/pkg/errors) will also have their verbose representation stored
-// under key+"Verbose". If passed a nil error, the field is a no-op.
-//
-// For the common case in which the key is simply "error", the Error function
-// is shorter and less repetitive.
-func NamedError(key string, err error) Field {
- if err == nil {
- return Skip()
- }
- return Field{Key: key, Type: zapcore.ErrorType, Interface: err}
-}
-
-type errArray []error
-
-func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
- for i := range errs {
- if errs[i] == nil {
- continue
- }
- // To represent each error as an object with an "error" attribute and
- // potentially an "errorVerbose" attribute, we need to wrap it in a
- // type that implements LogObjectMarshaler. To prevent this from
- // allocating, pool the wrapper type.
- elem := _errArrayElemPool.Get().(*errArrayElem)
- elem.error = errs[i]
- arr.AppendObject(elem)
- elem.error = nil
- _errArrayElemPool.Put(elem)
- }
- return nil
-}
-
-type errArrayElem struct {
- error
-}
-
-func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error {
- // Re-use the error field's logic, which supports non-standard error types.
- Error(e.error).AddTo(enc)
- return nil
-}
diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go
deleted file mode 100644
index bbb745db5b..0000000000
--- a/vendor/go.uber.org/zap/field.go
+++ /dev/null
@@ -1,549 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "fmt"
- "math"
- "time"
-
- "go.uber.org/zap/zapcore"
-)
-
-// Field is an alias for zapcore.Field. Aliasing this type dramatically
-// improves the navigability of this package's API documentation.
-type Field = zapcore.Field
-
-var (
- _minTimeInt64 = time.Unix(0, math.MinInt64)
- _maxTimeInt64 = time.Unix(0, math.MaxInt64)
-)
-
-// Skip constructs a no-op field, which is often useful when handling invalid
-// inputs in other Field constructors.
-func Skip() Field {
- return Field{Type: zapcore.SkipType}
-}
-
-// nilField returns a field which will marshal explicitly as nil. See motivation
-// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking
-// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the
-// implementation here should be changed to reflect that.
-func nilField(key string) Field { return Reflect(key, nil) }
-
-// Binary constructs a field that carries an opaque binary blob.
-//
-// Binary data is serialized in an encoding-appropriate format. For example,
-// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text,
-// use ByteString.
-func Binary(key string, val []byte) Field {
- return Field{Key: key, Type: zapcore.BinaryType, Interface: val}
-}
-
-// Bool constructs a field that carries a bool.
-func Bool(key string, val bool) Field {
- var ival int64
- if val {
- ival = 1
- }
- return Field{Key: key, Type: zapcore.BoolType, Integer: ival}
-}
-
-// Boolp constructs a field that carries a *bool. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Boolp(key string, val *bool) Field {
- if val == nil {
- return nilField(key)
- }
- return Bool(key, *val)
-}
-
-// ByteString constructs a field that carries UTF-8 encoded text as a []byte.
-// To log opaque binary blobs (which aren't necessarily valid UTF-8), use
-// Binary.
-func ByteString(key string, val []byte) Field {
- return Field{Key: key, Type: zapcore.ByteStringType, Interface: val}
-}
-
-// Complex128 constructs a field that carries a complex number. Unlike most
-// numeric fields, this costs an allocation (to convert the complex128 to
-// interface{}).
-func Complex128(key string, val complex128) Field {
- return Field{Key: key, Type: zapcore.Complex128Type, Interface: val}
-}
-
-// Complex128p constructs a field that carries a *complex128. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Complex128p(key string, val *complex128) Field {
- if val == nil {
- return nilField(key)
- }
- return Complex128(key, *val)
-}
-
-// Complex64 constructs a field that carries a complex number. Unlike most
-// numeric fields, this costs an allocation (to convert the complex64 to
-// interface{}).
-func Complex64(key string, val complex64) Field {
- return Field{Key: key, Type: zapcore.Complex64Type, Interface: val}
-}
-
-// Complex64p constructs a field that carries a *complex64. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Complex64p(key string, val *complex64) Field {
- if val == nil {
- return nilField(key)
- }
- return Complex64(key, *val)
-}
-
-// Float64 constructs a field that carries a float64. The way the
-// floating-point value is represented is encoder-dependent, so marshaling is
-// necessarily lazy.
-func Float64(key string, val float64) Field {
- return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))}
-}
-
-// Float64p constructs a field that carries a *float64. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Float64p(key string, val *float64) Field {
- if val == nil {
- return nilField(key)
- }
- return Float64(key, *val)
-}
-
-// Float32 constructs a field that carries a float32. The way the
-// floating-point value is represented is encoder-dependent, so marshaling is
-// necessarily lazy.
-func Float32(key string, val float32) Field {
- return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))}
-}
-
-// Float32p constructs a field that carries a *float32. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Float32p(key string, val *float32) Field {
- if val == nil {
- return nilField(key)
- }
- return Float32(key, *val)
-}
-
-// Int constructs a field with the given key and value.
-func Int(key string, val int) Field {
- return Int64(key, int64(val))
-}
-
-// Intp constructs a field that carries a *int. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Intp(key string, val *int) Field {
- if val == nil {
- return nilField(key)
- }
- return Int(key, *val)
-}
-
-// Int64 constructs a field with the given key and value.
-func Int64(key string, val int64) Field {
- return Field{Key: key, Type: zapcore.Int64Type, Integer: val}
-}
-
-// Int64p constructs a field that carries a *int64. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Int64p(key string, val *int64) Field {
- if val == nil {
- return nilField(key)
- }
- return Int64(key, *val)
-}
-
-// Int32 constructs a field with the given key and value.
-func Int32(key string, val int32) Field {
- return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)}
-}
-
-// Int32p constructs a field that carries a *int32. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Int32p(key string, val *int32) Field {
- if val == nil {
- return nilField(key)
- }
- return Int32(key, *val)
-}
-
-// Int16 constructs a field with the given key and value.
-func Int16(key string, val int16) Field {
- return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)}
-}
-
-// Int16p constructs a field that carries a *int16. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Int16p(key string, val *int16) Field {
- if val == nil {
- return nilField(key)
- }
- return Int16(key, *val)
-}
-
-// Int8 constructs a field with the given key and value.
-func Int8(key string, val int8) Field {
- return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)}
-}
-
-// Int8p constructs a field that carries a *int8. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Int8p(key string, val *int8) Field {
- if val == nil {
- return nilField(key)
- }
- return Int8(key, *val)
-}
-
-// String constructs a field with the given key and value.
-func String(key string, val string) Field {
- return Field{Key: key, Type: zapcore.StringType, String: val}
-}
-
-// Stringp constructs a field that carries a *string. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Stringp(key string, val *string) Field {
- if val == nil {
- return nilField(key)
- }
- return String(key, *val)
-}
-
-// Uint constructs a field with the given key and value.
-func Uint(key string, val uint) Field {
- return Uint64(key, uint64(val))
-}
-
-// Uintp constructs a field that carries a *uint. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Uintp(key string, val *uint) Field {
- if val == nil {
- return nilField(key)
- }
- return Uint(key, *val)
-}
-
-// Uint64 constructs a field with the given key and value.
-func Uint64(key string, val uint64) Field {
- return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)}
-}
-
-// Uint64p constructs a field that carries a *uint64. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Uint64p(key string, val *uint64) Field {
- if val == nil {
- return nilField(key)
- }
- return Uint64(key, *val)
-}
-
-// Uint32 constructs a field with the given key and value.
-func Uint32(key string, val uint32) Field {
- return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)}
-}
-
-// Uint32p constructs a field that carries a *uint32. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Uint32p(key string, val *uint32) Field {
- if val == nil {
- return nilField(key)
- }
- return Uint32(key, *val)
-}
-
-// Uint16 constructs a field with the given key and value.
-func Uint16(key string, val uint16) Field {
- return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)}
-}
-
-// Uint16p constructs a field that carries a *uint16. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Uint16p(key string, val *uint16) Field {
- if val == nil {
- return nilField(key)
- }
- return Uint16(key, *val)
-}
-
-// Uint8 constructs a field with the given key and value.
-func Uint8(key string, val uint8) Field {
- return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)}
-}
-
-// Uint8p constructs a field that carries a *uint8. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Uint8p(key string, val *uint8) Field {
- if val == nil {
- return nilField(key)
- }
- return Uint8(key, *val)
-}
-
-// Uintptr constructs a field with the given key and value.
-func Uintptr(key string, val uintptr) Field {
- return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)}
-}
-
-// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Uintptrp(key string, val *uintptr) Field {
- if val == nil {
- return nilField(key)
- }
- return Uintptr(key, *val)
-}
-
-// Reflect constructs a field with the given key and an arbitrary object. It uses
-// an encoding-appropriate, reflection-based function to lazily serialize nearly
-// any object into the logging context, but it's relatively slow and
-// allocation-heavy. Outside tests, Any is always a better choice.
-//
-// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect
-// includes the error message in the final log output.
-func Reflect(key string, val interface{}) Field {
- return Field{Key: key, Type: zapcore.ReflectType, Interface: val}
-}
-
-// Namespace creates a named, isolated scope within the logger's context. All
-// subsequent fields will be added to the new namespace.
-//
-// This helps prevent key collisions when injecting loggers into sub-components
-// or third-party libraries.
-func Namespace(key string) Field {
- return Field{Key: key, Type: zapcore.NamespaceType}
-}
-
-// Stringer constructs a field with the given key and the output of the value's
-// String method. The Stringer's String method is called lazily.
-func Stringer(key string, val fmt.Stringer) Field {
- return Field{Key: key, Type: zapcore.StringerType, Interface: val}
-}
-
-// Time constructs a Field with the given key and value. The encoder
-// controls how the time is serialized.
-func Time(key string, val time.Time) Field {
- if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) {
- return Field{Key: key, Type: zapcore.TimeFullType, Interface: val}
- }
- return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()}
-}
-
-// Timep constructs a field that carries a *time.Time. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Timep(key string, val *time.Time) Field {
- if val == nil {
- return nilField(key)
- }
- return Time(key, *val)
-}
-
-// Stack constructs a field that stores a stacktrace of the current goroutine
-// under provided key. Keep in mind that taking a stacktrace is eager and
-// expensive (relatively speaking); this function both makes an allocation and
-// takes about two microseconds.
-func Stack(key string) Field {
- return StackSkip(key, 1) // skip Stack
-}
-
-// StackSkip constructs a field similarly to Stack, but also skips the given
-// number of frames from the top of the stacktrace.
-func StackSkip(key string, skip int) Field {
- // Returning the stacktrace as a string costs an allocation, but saves us
- // from expanding the zapcore.Field union struct to include a byte slice. Since
- // taking a stacktrace is already so expensive (~10us), the extra allocation
- // is okay.
- return String(key, takeStacktrace(skip+1)) // skip StackSkip
-}
-
-// Duration constructs a field with the given key and value. The encoder
-// controls how the duration is serialized.
-func Duration(key string, val time.Duration) Field {
- return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)}
-}
-
-// Durationp constructs a field that carries a *time.Duration. The returned Field will safely
-// and explicitly represent `nil` when appropriate.
-func Durationp(key string, val *time.Duration) Field {
- if val == nil {
- return nilField(key)
- }
- return Duration(key, *val)
-}
-
-// Object constructs a field with the given key and ObjectMarshaler. It
-// provides a flexible, but still type-safe and efficient, way to add map- or
-// struct-like user-defined types to the logging context. The struct's
-// MarshalLogObject method is called lazily.
-func Object(key string, val zapcore.ObjectMarshaler) Field {
- return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val}
-}
-
-// Inline constructs a Field that is similar to Object, but it
-// will add the elements of the provided ObjectMarshaler to the
-// current namespace.
-func Inline(val zapcore.ObjectMarshaler) Field {
- return zapcore.Field{
- Type: zapcore.InlineMarshalerType,
- Interface: val,
- }
-}
-
-// Any takes a key and an arbitrary value and chooses the best way to represent
-// them as a field, falling back to a reflection-based approach only if
-// necessary.
-//
-// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between
-// them. To minimize surprises, []byte values are treated as binary blobs, byte
-// values are treated as uint8, and runes are always treated as integers.
-func Any(key string, value interface{}) Field {
- switch val := value.(type) {
- case zapcore.ObjectMarshaler:
- return Object(key, val)
- case zapcore.ArrayMarshaler:
- return Array(key, val)
- case bool:
- return Bool(key, val)
- case *bool:
- return Boolp(key, val)
- case []bool:
- return Bools(key, val)
- case complex128:
- return Complex128(key, val)
- case *complex128:
- return Complex128p(key, val)
- case []complex128:
- return Complex128s(key, val)
- case complex64:
- return Complex64(key, val)
- case *complex64:
- return Complex64p(key, val)
- case []complex64:
- return Complex64s(key, val)
- case float64:
- return Float64(key, val)
- case *float64:
- return Float64p(key, val)
- case []float64:
- return Float64s(key, val)
- case float32:
- return Float32(key, val)
- case *float32:
- return Float32p(key, val)
- case []float32:
- return Float32s(key, val)
- case int:
- return Int(key, val)
- case *int:
- return Intp(key, val)
- case []int:
- return Ints(key, val)
- case int64:
- return Int64(key, val)
- case *int64:
- return Int64p(key, val)
- case []int64:
- return Int64s(key, val)
- case int32:
- return Int32(key, val)
- case *int32:
- return Int32p(key, val)
- case []int32:
- return Int32s(key, val)
- case int16:
- return Int16(key, val)
- case *int16:
- return Int16p(key, val)
- case []int16:
- return Int16s(key, val)
- case int8:
- return Int8(key, val)
- case *int8:
- return Int8p(key, val)
- case []int8:
- return Int8s(key, val)
- case string:
- return String(key, val)
- case *string:
- return Stringp(key, val)
- case []string:
- return Strings(key, val)
- case uint:
- return Uint(key, val)
- case *uint:
- return Uintp(key, val)
- case []uint:
- return Uints(key, val)
- case uint64:
- return Uint64(key, val)
- case *uint64:
- return Uint64p(key, val)
- case []uint64:
- return Uint64s(key, val)
- case uint32:
- return Uint32(key, val)
- case *uint32:
- return Uint32p(key, val)
- case []uint32:
- return Uint32s(key, val)
- case uint16:
- return Uint16(key, val)
- case *uint16:
- return Uint16p(key, val)
- case []uint16:
- return Uint16s(key, val)
- case uint8:
- return Uint8(key, val)
- case *uint8:
- return Uint8p(key, val)
- case []byte:
- return Binary(key, val)
- case uintptr:
- return Uintptr(key, val)
- case *uintptr:
- return Uintptrp(key, val)
- case []uintptr:
- return Uintptrs(key, val)
- case time.Time:
- return Time(key, val)
- case *time.Time:
- return Timep(key, val)
- case []time.Time:
- return Times(key, val)
- case time.Duration:
- return Duration(key, val)
- case *time.Duration:
- return Durationp(key, val)
- case []time.Duration:
- return Durations(key, val)
- case error:
- return NamedError(key, val)
- case []error:
- return Errors(key, val)
- case fmt.Stringer:
- return Stringer(key, val)
- default:
- return Reflect(key, val)
- }
-}
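A brief sketch (not part of the diff) contrasting the strongly typed field constructors above with Any, which picks the best representation and only falls back to the reflection-based Reflect field when nothing else matches.

```go
package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// Strongly typed constructors avoid reflection entirely.
	logger.Info("fetch finished",
		zap.String("url", "http://example.com"),
		zap.Int("attempt", 3),
		zap.Duration("backoff", time.Second),
		zap.Bool("cached", false),
	)

	// Any chooses the matching constructor for each value.
	logger.Info("fetch finished",
		zap.Any("attempt", 3),                           // -> Int
		zap.Any("backoff", time.Second),                 // -> Duration
		zap.Any("headers", map[string]string{"a": "b"}), // -> Reflect fallback
	)
}
```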
diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go
deleted file mode 100644
index 1312875072..0000000000
--- a/vendor/go.uber.org/zap/flag.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "flag"
-
- "go.uber.org/zap/zapcore"
-)
-
-// LevelFlag uses the standard library's flag.Var to declare a global flag
-// with the specified name, default, and usage guidance. The returned value is
-// a pointer to the value of the flag.
-//
-// If you don't want to use the flag package's global state, you can use any
-// non-nil *Level as a flag.Value with your own *flag.FlagSet.
-func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level {
- lvl := defaultLevel
- flag.Var(&lvl, name, usage)
- return &lvl
-}
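For reference, a sketch (not part of the diff) of wiring LevelFlag into a program; the flag name "log-level" and its usage text are illustrative.

```go
package main

import (
	"flag"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Declares a global -log-level flag; name and usage are illustrative.
	level := zap.LevelFlag("log-level", zapcore.InfoLevel, "minimum enabled logging level")
	flag.Parse()

	cfg := zap.NewProductionConfig()
	cfg.Level = zap.NewAtomicLevelAt(*level)
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()
	logger.Info("flag-configured logger ready", zap.Stringer("level", level))
}
```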
diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml
deleted file mode 100644
index 8e1d05e9ab..0000000000
--- a/vendor/go.uber.org/zap/glide.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-package: go.uber.org/zap
-license: MIT
-import:
-- package: go.uber.org/atomic
- version: ^1
-- package: go.uber.org/multierr
- version: ^1
-testImport:
-- package: github.com/satori/go.uuid
-- package: github.com/sirupsen/logrus
-- package: github.com/apex/log
- subpackages:
- - handlers/json
-- package: github.com/go-kit/kit
- subpackages:
- - log
-- package: github.com/stretchr/testify
- subpackages:
- - assert
- - require
-- package: gopkg.in/inconshreveable/log15.v2
-- package: github.com/mattn/goveralls
-- package: github.com/pborman/uuid
-- package: github.com/pkg/errors
-- package: github.com/rs/zerolog
-- package: golang.org/x/tools
- subpackages:
- - cover
-- package: golang.org/x/lint
- subpackages:
- - golint
-- package: github.com/axw/gocov
- subpackages:
- - gocov
diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go
deleted file mode 100644
index 3cb46c9e0a..0000000000
--- a/vendor/go.uber.org/zap/global.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "bytes"
- "fmt"
- "log"
- "os"
- "sync"
-
- "go.uber.org/zap/zapcore"
-)
-
-const (
- _stdLogDefaultDepth = 1
- _loggerWriterDepth = 2
- _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
- "https://github.com/uber-go/zap/issues/new and reference this error: %v"
-)
-
-var (
- _globalMu sync.RWMutex
- _globalL = NewNop()
- _globalS = _globalL.Sugar()
-)
-
-// L returns the global Logger, which can be reconfigured with ReplaceGlobals.
-// It's safe for concurrent use.
-func L() *Logger {
- _globalMu.RLock()
- l := _globalL
- _globalMu.RUnlock()
- return l
-}
-
-// S returns the global SugaredLogger, which can be reconfigured with
-// ReplaceGlobals. It's safe for concurrent use.
-func S() *SugaredLogger {
- _globalMu.RLock()
- s := _globalS
- _globalMu.RUnlock()
- return s
-}
-
-// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a
-// function to restore the original values. It's safe for concurrent use.
-func ReplaceGlobals(logger *Logger) func() {
- _globalMu.Lock()
- prev := _globalL
- _globalL = logger
- _globalS = logger.Sugar()
- _globalMu.Unlock()
- return func() { ReplaceGlobals(prev) }
-}
-
-// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at
-// InfoLevel. To redirect the standard library's package-global logging
-// functions, use RedirectStdLog instead.
-func NewStdLog(l *Logger) *log.Logger {
- logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
- f := logger.Info
- return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */)
-}
-
-// NewStdLogAt returns a *log.Logger which writes to the supplied zap logger
-// at the required level.
-func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) {
- logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
- logFunc, err := levelToFunc(logger, level)
- if err != nil {
- return nil, err
- }
- return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil
-}
-
-// RedirectStdLog redirects output from the standard library's package-global
-// logger to the supplied logger at InfoLevel. Since zap already handles caller
-// annotations, timestamps, etc., it automatically disables the standard
-// library's annotations and prefixing.
-//
-// It returns a function to restore the original prefix and flags and reset the
-// standard library's output to os.Stderr.
-func RedirectStdLog(l *Logger) func() {
- f, err := redirectStdLogAt(l, InfoLevel)
- if err != nil {
- // Can't get here, since passing InfoLevel to redirectStdLogAt always
- // works.
- panic(fmt.Sprintf(_programmerErrorTemplate, err))
- }
- return f
-}
-
-// RedirectStdLogAt redirects output from the standard library's package-global
-// logger to the supplied logger at the specified level. Since zap already
-// handles caller annotations, timestamps, etc., it automatically disables the
-// standard library's annotations and prefixing.
-//
-// It returns a function to restore the original prefix and flags and reset the
-// standard library's output to os.Stderr.
-func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) {
- return redirectStdLogAt(l, level)
-}
-
-func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) {
- flags := log.Flags()
- prefix := log.Prefix()
- log.SetFlags(0)
- log.SetPrefix("")
- logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
- logFunc, err := levelToFunc(logger, level)
- if err != nil {
- return nil, err
- }
- log.SetOutput(&loggerWriter{logFunc})
- return func() {
- log.SetFlags(flags)
- log.SetPrefix(prefix)
- log.SetOutput(os.Stderr)
- }, nil
-}
-
-func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) {
- switch lvl {
- case DebugLevel:
- return logger.Debug, nil
- case InfoLevel:
- return logger.Info, nil
- case WarnLevel:
- return logger.Warn, nil
- case ErrorLevel:
- return logger.Error, nil
- case DPanicLevel:
- return logger.DPanic, nil
- case PanicLevel:
- return logger.Panic, nil
- case FatalLevel:
- return logger.Fatal, nil
- }
- return nil, fmt.Errorf("unrecognized level: %q", lvl)
-}
-
-type loggerWriter struct {
- logFunc func(msg string, fields ...Field)
-}
-
-func (l *loggerWriter) Write(p []byte) (int, error) {
- p = bytes.TrimSpace(p)
- l.logFunc(string(p))
- return len(p), nil
-}
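A minimal sketch (not part of the diff) showing how the global-logger helpers above fit together: ReplaceGlobals installs a process-wide logger, and RedirectStdLog routes the standard library's logger through it.

```go
package main

import (
	"log"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// Install logger as the process-wide default and keep the restore func.
	undoGlobals := zap.ReplaceGlobals(logger)
	defer undoGlobals()

	// Route the standard library's package-global logger through zap too.
	undoRedirect := zap.RedirectStdLog(logger)
	defer undoRedirect()

	zap.L().Info("via the global zap logger")
	zap.S().Infow("via the global sugared logger", "key", "value")
	log.Print("via the redirected standard library logger") // emitted at InfoLevel
}
```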
diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go
deleted file mode 100644
index 632b6831a8..0000000000
--- a/vendor/go.uber.org/zap/http_handler.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "net/http"
-
- "go.uber.org/zap/zapcore"
-)
-
-// ServeHTTP serves a simple JSON endpoint that can report on or change the
-// current logging level.
-//
-// # GET
-//
-// The GET request returns a JSON description of the current logging level like:
-//
-// {"level":"info"}
-//
-// # PUT
-//
-// The PUT request changes the logging level. It is perfectly safe to change the
-// logging level while a program is running. Two content types are supported:
-//
-// Content-Type: application/x-www-form-urlencoded
-//
-// With this content type, the level can be provided through the request body or
-// a query parameter. The log level is URL encoded like:
-//
-// level=debug
-//
-// The request body takes precedence over the query parameter, if both are
-// specified.
-//
-// This content type is the default for a curl PUT request. Following are two
-// example curl requests that both set the logging level to debug.
-//
-// curl -X PUT localhost:8080/log/level?level=debug
-// curl -X PUT localhost:8080/log/level -d level=debug
-//
-// For any other content type, the payload is expected to be JSON encoded and
-// look like:
-//
-// {"level":"info"}
-//
-// An example curl request could look like this:
-//
-// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
-func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- type errorResponse struct {
- Error string `json:"error"`
- }
- type payload struct {
- Level zapcore.Level `json:"level"`
- }
-
- enc := json.NewEncoder(w)
-
- switch r.Method {
- case http.MethodGet:
- enc.Encode(payload{Level: lvl.Level()})
- case http.MethodPut:
- requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r)
- if err != nil {
- w.WriteHeader(http.StatusBadRequest)
- enc.Encode(errorResponse{Error: err.Error()})
- return
- }
- lvl.SetLevel(requestedLvl)
- enc.Encode(payload{Level: lvl.Level()})
- default:
- w.WriteHeader(http.StatusMethodNotAllowed)
- enc.Encode(errorResponse{
- Error: "Only GET and PUT are supported.",
- })
- }
-}
-
-// Decodes incoming PUT requests and returns the requested logging level.
-func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) {
- if contentType == "application/x-www-form-urlencoded" {
- return decodePutURL(r)
- }
- return decodePutJSON(r.Body)
-}
-
-func decodePutURL(r *http.Request) (zapcore.Level, error) {
- lvl := r.FormValue("level")
- if lvl == "" {
- return 0, errors.New("must specify logging level")
- }
- var l zapcore.Level
- if err := l.UnmarshalText([]byte(lvl)); err != nil {
- return 0, err
- }
- return l, nil
-}
-
-func decodePutJSON(body io.Reader) (zapcore.Level, error) {
- var pld struct {
- Level *zapcore.Level `json:"level"`
- }
- if err := json.NewDecoder(body).Decode(&pld); err != nil {
- return 0, fmt.Errorf("malformed request body: %v", err)
- }
- if pld.Level == nil {
- return 0, errors.New("must specify logging level")
- }
- return *pld.Level, nil
-
-
-}
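To show how the HTTP handler above is typically wired up, a sketch (not part of the diff); the /log/level path and :8080 address mirror the curl examples in the method's comment and are otherwise illustrative.

```go
package main

import (
	"net/http"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	atom := zap.NewAtomicLevelAt(zapcore.InfoLevel)

	cfg := zap.NewProductionConfig()
	cfg.Level = atom
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	// AtomicLevel is itself an http.Handler, so it can be mounted directly.
	mux := http.NewServeMux()
	mux.Handle("/log/level", atom)

	logger.Info("serving log-level endpoint", zap.String("addr", ":8080"))
	if err := http.ListenAndServe(":8080", mux); err != nil {
		logger.Fatal("http server failed", zap.Error(err))
	}
}
```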
diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
deleted file mode 100644
index dad583aaa5..0000000000
--- a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package bufferpool houses zap's shared internal buffer pool. Third-party
-// packages can recreate the same functionality with buffers.NewPool.
-package bufferpool
-
-import "go.uber.org/zap/buffer"
-
-var (
- _pool = buffer.NewPool()
- // Get retrieves a buffer from the pool, creating one if necessary.
- Get = _pool.Get
-)
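The comment above notes that third parties can recreate this pool with the public buffer package. A small sketch of that, assuming go.uber.org/zap/buffer (not shown in this diff):

```go
package main

import (
	"fmt"

	"go.uber.org/zap/buffer"
)

func main() {
	// Third-party code recreates the internal pool with buffer.NewPool.
	pool := buffer.NewPool()

	buf := pool.Get() // fetches a pooled *buffer.Buffer, allocating if needed
	defer buf.Free()  // returns it to the pool when done

	buf.AppendString("status=")
	buf.AppendInt(200)
	fmt.Println(buf.String())
}
```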
diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go
deleted file mode 100644
index c4d5d02abc..0000000000
--- a/vendor/go.uber.org/zap/internal/color/color.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package color adds coloring functionality for TTY output.
-package color
-
-import "fmt"
-
-// Foreground colors.
-const (
- Black Color = iota + 30
- Red
- Green
- Yellow
- Blue
- Magenta
- Cyan
- White
-)
-
-// Color represents a text color.
-type Color uint8
-
-// Add adds the coloring to the given string.
-func (c Color) Add(s string) string {
- return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s)
-}
diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go
deleted file mode 100644
index f673f9947b..0000000000
--- a/vendor/go.uber.org/zap/internal/exit/exit.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package exit provides stubs so that unit tests can exercise code that calls
-// os.Exit(1).
-package exit
-
-import "os"
-
-var _exit = os.Exit
-
-// With terminates the process by calling os.Exit(code). If the package is
-// stubbed, it instead records a call in the testing spy.
-func With(code int) {
- _exit(code)
-}
-
-// A StubbedExit is a testing fake for os.Exit.
-type StubbedExit struct {
- Exited bool
- Code int
- prev func(code int)
-}
-
-// Stub substitutes a fake for the call to os.Exit(1).
-func Stub() *StubbedExit {
- s := &StubbedExit{prev: _exit}
- _exit = s.exit
- return s
-}
-
-// WithStub runs the supplied function with Exit stubbed. It returns the stub
-// used, so that users can test whether the process would have crashed.
-func WithStub(f func()) *StubbedExit {
- s := Stub()
- defer s.Unstub()
- f()
- return s
-}
-
-// Unstub restores the previous exit function.
-func (se *StubbedExit) Unstub() {
- _exit = se.prev
-}
-
-func (se *StubbedExit) exit(code int) {
- se.Exited = true
- se.Code = code
-}
diff --git a/vendor/go.uber.org/zap/internal/level_enabler.go b/vendor/go.uber.org/zap/internal/level_enabler.go
deleted file mode 100644
index 5f3e3f1b92..0000000000
--- a/vendor/go.uber.org/zap/internal/level_enabler.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package internal
-
-import "go.uber.org/zap/zapcore"
-
-// LeveledEnabler is an interface satisfied by LevelEnablers that are able to
-// report their own level.
-//
-// This interface is defined here so that it can be used conveniently in tests
-// and in non-zapcore packages; it cannot be imported from zapcore because that
-// would create a cyclic dependency.
-type LeveledEnabler interface {
- zapcore.LevelEnabler
-
- Level() zapcore.Level
-}
diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go
deleted file mode 100644
index db951e19a5..0000000000
--- a/vendor/go.uber.org/zap/level.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "go.uber.org/atomic"
- "go.uber.org/zap/internal"
- "go.uber.org/zap/zapcore"
-)
-
-const (
- // DebugLevel logs are typically voluminous, and are usually disabled in
- // production.
- DebugLevel = zapcore.DebugLevel
- // InfoLevel is the default logging priority.
- InfoLevel = zapcore.InfoLevel
- // WarnLevel logs are more important than Info, but don't need individual
- // human review.
- WarnLevel = zapcore.WarnLevel
- // ErrorLevel logs are high-priority. If an application is running smoothly,
- // it shouldn't generate any error-level logs.
- ErrorLevel = zapcore.ErrorLevel
- // DPanicLevel logs are particularly important errors. In development the
- // logger panics after writing the message.
- DPanicLevel = zapcore.DPanicLevel
- // PanicLevel logs a message, then panics.
- PanicLevel = zapcore.PanicLevel
- // FatalLevel logs a message, then calls os.Exit(1).
- FatalLevel = zapcore.FatalLevel
-)
-
-// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with
-// an anonymous function.
-//
-// It's particularly useful when splitting log output between different
-// outputs (e.g., standard error and standard out). For sample code, see the
-// package-level AdvancedConfiguration example.
-type LevelEnablerFunc func(zapcore.Level) bool
-
-// Enabled calls the wrapped function.
-func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) }
-
-// An AtomicLevel is an atomically changeable, dynamic logging level. It lets
-// you safely change the log level of a tree of loggers (the root logger and
-// any children created by adding context) at runtime.
-//
-// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to
-// alter its level.
-//
-// AtomicLevels must be created with the NewAtomicLevel constructor to allocate
-// their internal atomic pointer.
-type AtomicLevel struct {
- l *atomic.Int32
-}
-
-var _ internal.LeveledEnabler = AtomicLevel{}
-
-// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
-// enabled.
-func NewAtomicLevel() AtomicLevel {
- return AtomicLevel{
- l: atomic.NewInt32(int32(InfoLevel)),
- }
-}
-
-// NewAtomicLevelAt is a convenience function that creates an AtomicLevel
-// and then calls SetLevel with the given level.
-func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
- a := NewAtomicLevel()
- a.SetLevel(l)
- return a
-}
-
-// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII
-// representation of the log level. If the provided ASCII representation is
-// invalid an error is returned.
-//
-// This is particularly useful when dealing with text input to configure log
-// levels.
-func ParseAtomicLevel(text string) (AtomicLevel, error) {
- a := NewAtomicLevel()
- l, err := zapcore.ParseLevel(text)
- if err != nil {
- return a, err
- }
-
- a.SetLevel(l)
- return a, nil
-}
-
-// Enabled implements the zapcore.LevelEnabler interface, which allows the
-// AtomicLevel to be used in place of traditional static levels.
-func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
- return lvl.Level().Enabled(l)
-}
-
-// Level returns the minimum enabled log level.
-func (lvl AtomicLevel) Level() zapcore.Level {
- return zapcore.Level(int8(lvl.l.Load()))
-}
-
-// SetLevel alters the logging level.
-func (lvl AtomicLevel) SetLevel(l zapcore.Level) {
- lvl.l.Store(int32(l))
-}
-
-// String returns the string representation of the underlying Level.
-func (lvl AtomicLevel) String() string {
- return lvl.Level().String()
-}
-
-// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text
-// representations as the static zapcore.Levels ("debug", "info", "warn",
-// "error", "dpanic", "panic", and "fatal").
-func (lvl *AtomicLevel) UnmarshalText(text []byte) error {
- if lvl.l == nil {
- lvl.l = &atomic.Int32{}
- }
-
- var l zapcore.Level
- if err := l.UnmarshalText(text); err != nil {
- return err
- }
-
- lvl.SetLevel(l)
- return nil
-}
-
-// MarshalText marshals the AtomicLevel to a byte slice. It uses the same
-// text representation as the static zapcore.Levels ("debug", "info", "warn",
-// "error", "dpanic", "panic", and "fatal").
-func (lvl AtomicLevel) MarshalText() (text []byte, err error) {
- return lvl.Level().MarshalText()
-}
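
The AtomicLevel removed above is zap's hook for changing verbosity at runtime. A minimal sketch of how it is wired into a core, assuming nothing beyond the APIs shown in the deleted file (the encoder and output choices are illustrative, not taken from this repository):

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Start with Info enabled; the same AtomicLevel can later be lowered to
	// Debug (or raised) without rebuilding the logger.
	atom := zap.NewAtomicLevelAt(zap.InfoLevel)

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stderr),
		atom,
	)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Debug("dropped: Debug is below the current level")

	atom.SetLevel(zap.DebugLevel) // takes effect immediately for this logger and its children
	logger.Debug("now visible")
}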
diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go
deleted file mode 100644
index cd44030d13..0000000000
--- a/vendor/go.uber.org/zap/logger.go
+++ /dev/null
@@ -1,400 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
-
- "go.uber.org/zap/internal/bufferpool"
- "go.uber.org/zap/zapcore"
-)
-
-// A Logger provides fast, leveled, structured logging. All methods are safe
-// for concurrent use.
-//
-// The Logger is designed for contexts in which every microsecond and every
-// allocation matters, so its API intentionally favors performance and type
-// safety over brevity. For most applications, the SugaredLogger strikes a
-// better balance between performance and ergonomics.
-type Logger struct {
- core zapcore.Core
-
- development bool
- addCaller bool
- onFatal zapcore.CheckWriteHook // default is WriteThenFatal
-
- name string
- errorOutput zapcore.WriteSyncer
-
- addStack zapcore.LevelEnabler
-
- callerSkip int
-
- clock zapcore.Clock
-}
-
-// New constructs a new Logger from the provided zapcore.Core and Options. If
-// the passed zapcore.Core is nil, it falls back to using a no-op
-// implementation.
-//
-// This is the most flexible way to construct a Logger, but also the most
-// verbose. For typical use cases, the highly-opinionated presets
-// (NewProduction, NewDevelopment, and NewExample) or the Config struct are
-// more convenient.
-//
-// For sample code, see the package-level AdvancedConfiguration example.
-func New(core zapcore.Core, options ...Option) *Logger {
- if core == nil {
- return NewNop()
- }
- log := &Logger{
- core: core,
- errorOutput: zapcore.Lock(os.Stderr),
- addStack: zapcore.FatalLevel + 1,
- clock: zapcore.DefaultClock,
- }
- return log.WithOptions(options...)
-}
-
-// NewNop returns a no-op Logger. It never writes out logs or internal errors,
-// and it never runs user-defined hooks.
-//
-// Using WithOptions to replace the Core or error output of a no-op Logger can
-// re-enable logging.
-func NewNop() *Logger {
- return &Logger{
- core: zapcore.NewNopCore(),
- errorOutput: zapcore.AddSync(io.Discard),
- addStack: zapcore.FatalLevel + 1,
- clock: zapcore.DefaultClock,
- }
-}
-
-// NewProduction builds a sensible production Logger that writes InfoLevel and
-// above logs to standard error as JSON.
-//
-// It's a shortcut for NewProductionConfig().Build(...Option).
-func NewProduction(options ...Option) (*Logger, error) {
- return NewProductionConfig().Build(options...)
-}
-
-// NewDevelopment builds a development Logger that writes DebugLevel and above
-// logs to standard error in a human-friendly format.
-//
-// It's a shortcut for NewDevelopmentConfig().Build(...Option).
-func NewDevelopment(options ...Option) (*Logger, error) {
- return NewDevelopmentConfig().Build(options...)
-}
-
-// Must is a helper that wraps a call to a function returning (*Logger, error)
-// and panics if the error is non-nil. It is intended for use in variable
-// initialization such as:
-//
-// var logger = zap.Must(zap.NewProduction())
-func Must(logger *Logger, err error) *Logger {
- if err != nil {
- panic(err)
- }
-
- return logger
-}
-
-// NewExample builds a Logger that's designed for use in zap's testable
-// examples. It writes DebugLevel and above logs to standard out as JSON, but
-// omits the timestamp and calling function to keep example output
-// short and deterministic.
-func NewExample(options ...Option) *Logger {
- encoderCfg := zapcore.EncoderConfig{
- MessageKey: "msg",
- LevelKey: "level",
- NameKey: "logger",
- EncodeLevel: zapcore.LowercaseLevelEncoder,
- EncodeTime: zapcore.ISO8601TimeEncoder,
- EncodeDuration: zapcore.StringDurationEncoder,
- }
- core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel)
- return New(core).WithOptions(options...)
-}
-
-// Sugar wraps the Logger to provide a more ergonomic, but slightly slower,
-// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a
-// single application to use both Loggers and SugaredLoggers, converting
-// between them on the boundaries of performance-sensitive code.
-func (log *Logger) Sugar() *SugaredLogger {
- core := log.clone()
- core.callerSkip += 2
- return &SugaredLogger{core}
-}
-
-// Named adds a new path segment to the logger's name. Segments are joined by
-// periods. By default, Loggers are unnamed.
-func (log *Logger) Named(s string) *Logger {
- if s == "" {
- return log
- }
- l := log.clone()
- if log.name == "" {
- l.name = s
- } else {
- l.name = strings.Join([]string{l.name, s}, ".")
- }
- return l
-}
-
-// WithOptions clones the current Logger, applies the supplied Options, and
-// returns the resulting Logger. It's safe to use concurrently.
-func (log *Logger) WithOptions(opts ...Option) *Logger {
- c := log.clone()
- for _, opt := range opts {
- opt.apply(c)
- }
- return c
-}
-
-// With creates a child logger and adds structured context to it. Fields added
-// to the child don't affect the parent, and vice versa.
-func (log *Logger) With(fields ...Field) *Logger {
- if len(fields) == 0 {
- return log
- }
- l := log.clone()
- l.core = l.core.With(fields)
- return l
-}
-
-// Level reports the minimum enabled level for this logger.
-//
-// For NopLoggers, this is [zapcore.InvalidLevel].
-func (log *Logger) Level() zapcore.Level {
- return zapcore.LevelOf(log.core)
-}
-
-// Check returns a CheckedEntry if logging a message at the specified level
-// is enabled. It's a completely optional optimization; in high-performance
-// applications, Check can help avoid allocating a slice to hold fields.
-func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
- return log.check(lvl, msg)
-}
-
-// Log logs a message at the specified level. The message includes any fields
-// passed at the log site, as well as any fields accumulated on the logger.
-func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) {
- if ce := log.check(lvl, msg); ce != nil {
- ce.Write(fields...)
- }
-}
-
-// Debug logs a message at DebugLevel. The message includes any fields passed
-// at the log site, as well as any fields accumulated on the logger.
-func (log *Logger) Debug(msg string, fields ...Field) {
- if ce := log.check(DebugLevel, msg); ce != nil {
- ce.Write(fields...)
- }
-}
-
-// Info logs a message at InfoLevel. The message includes any fields passed
-// at the log site, as well as any fields accumulated on the logger.
-func (log *Logger) Info(msg string, fields ...Field) {
- if ce := log.check(InfoLevel, msg); ce != nil {
- ce.Write(fields...)
- }
-}
-
-// Warn logs a message at WarnLevel. The message includes any fields passed
-// at the log site, as well as any fields accumulated on the logger.
-func (log *Logger) Warn(msg string, fields ...Field) {
- if ce := log.check(WarnLevel, msg); ce != nil {
- ce.Write(fields...)
- }
-}
-
-// Error logs a message at ErrorLevel. The message includes any fields passed
-// at the log site, as well as any fields accumulated on the logger.
-func (log *Logger) Error(msg string, fields ...Field) {
- if ce := log.check(ErrorLevel, msg); ce != nil {
- ce.Write(fields...)
- }
-}
-
-// DPanic logs a message at DPanicLevel. The message includes any fields
-// passed at the log site, as well as any fields accumulated on the logger.
-//
-// If the logger is in development mode, it then panics (DPanic means
-// "development panic"). This is useful for catching errors that are
-// recoverable, but shouldn't ever happen.
-func (log *Logger) DPanic(msg string, fields ...Field) {
- if ce := log.check(DPanicLevel, msg); ce != nil {
- ce.Write(fields...)
- }
-}
-
-// Panic logs a message at PanicLevel. The message includes any fields passed
-// at the log site, as well as any fields accumulated on the logger.
-//
-// The logger then panics, even if logging at PanicLevel is disabled.
-func (log *Logger) Panic(msg string, fields ...Field) {
- if ce := log.check(PanicLevel, msg); ce != nil {
- ce.Write(fields...)
- }
-}
-
-// Fatal logs a message at FatalLevel. The message includes any fields passed
-// at the log site, as well as any fields accumulated on the logger.
-//
-// The logger then calls os.Exit(1), even if logging at FatalLevel is
-// disabled.
-func (log *Logger) Fatal(msg string, fields ...Field) {
- if ce := log.check(FatalLevel, msg); ce != nil {
- ce.Write(fields...)
- }
-}
-
-// Sync calls the underlying Core's Sync method, flushing any buffered log
-// entries. Applications should take care to call Sync before exiting.
-func (log *Logger) Sync() error {
- return log.core.Sync()
-}
-
-// Core returns the Logger's underlying zapcore.Core.
-func (log *Logger) Core() zapcore.Core {
- return log.core
-}
-
-func (log *Logger) clone() *Logger {
- copy := *log
-	return &copy
-}
-
-func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
- // Logger.check must always be called directly by a method in the
- // Logger interface (e.g., Check, Info, Fatal).
- // This skips Logger.check and the Info/Fatal/Check/etc. method that
- // called it.
- const callerSkipOffset = 2
-
- // Check the level first to reduce the cost of disabled log calls.
- // Since Panic and higher may exit, we skip the optimization for those levels.
- if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) {
- return nil
- }
-
- // Create basic checked entry thru the core; this will be non-nil if the
- // log message will actually be written somewhere.
- ent := zapcore.Entry{
- LoggerName: log.name,
- Time: log.clock.Now(),
- Level: lvl,
- Message: msg,
- }
- ce := log.core.Check(ent, nil)
- willWrite := ce != nil
-
- // Set up any required terminal behavior.
- switch ent.Level {
- case zapcore.PanicLevel:
- ce = ce.After(ent, zapcore.WriteThenPanic)
- case zapcore.FatalLevel:
- onFatal := log.onFatal
- // nil or WriteThenNoop will lead to continued execution after
- // a Fatal log entry, which is unexpected. For example,
- //
- // f, err := os.Open(..)
- // if err != nil {
- // log.Fatal("cannot open", zap.Error(err))
- // }
- // fmt.Println(f.Name())
- //
- // The f.Name() will panic if we continue execution after the
- // log.Fatal.
- if onFatal == nil || onFatal == zapcore.WriteThenNoop {
- onFatal = zapcore.WriteThenFatal
- }
- ce = ce.After(ent, onFatal)
- case zapcore.DPanicLevel:
- if log.development {
- ce = ce.After(ent, zapcore.WriteThenPanic)
- }
- }
-
- // Only do further annotation if we're going to write this message; checked
- // entries that exist only for terminal behavior don't benefit from
- // annotation.
- if !willWrite {
- return ce
- }
-
- // Thread the error output through to the CheckedEntry.
- ce.ErrorOutput = log.errorOutput
-
- addStack := log.addStack.Enabled(ce.Level)
- if !log.addCaller && !addStack {
- return ce
- }
-
- // Adding the caller or stack trace requires capturing the callers of
- // this function. We'll share information between these two.
- stackDepth := stacktraceFirst
- if addStack {
- stackDepth = stacktraceFull
- }
- stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth)
- defer stack.Free()
-
- if stack.Count() == 0 {
- if log.addCaller {
- fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
- log.errorOutput.Sync()
- }
- return ce
- }
-
- frame, more := stack.Next()
-
- if log.addCaller {
- ce.Caller = zapcore.EntryCaller{
- Defined: frame.PC != 0,
- PC: frame.PC,
- File: frame.File,
- Line: frame.Line,
- Function: frame.Function,
- }
- }
-
- if addStack {
- buffer := bufferpool.Get()
- defer buffer.Free()
-
- stackfmt := newStackFormatter(buffer)
-
- // We've already extracted the first frame, so format that
- // separately and defer to stackfmt for the rest.
- stackfmt.FormatFrame(frame)
- if more {
- stackfmt.FormatStack(stack)
- }
- ce.Stack = buffer.String()
- }
-
- return ce
-}
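
The Check method documented above exists so callers can avoid paying for disabled log sites. A small sketch of the intended pattern; expensiveDump is a hypothetical stand-in for costly field construction, not part of zap:

package main

import "go.uber.org/zap"

// expensiveDump stands in for any costly serialization you only want to run
// when the entry will actually be written (hypothetical helper).
func expensiveDump() string { return "..." }

func main() {
	logger := zap.Must(zap.NewProduction())
	defer logger.Sync()

	// Check returns a non-nil CheckedEntry only if DebugLevel is enabled, so
	// with the production preset the expensive work below is skipped entirely.
	if ce := logger.Check(zap.DebugLevel, "request dump"); ce != nil {
		ce.Write(zap.String("body", expensiveDump()))
	}
}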
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
deleted file mode 100644
index c4f3bca3d2..0000000000
--- a/vendor/go.uber.org/zap/options.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "fmt"
-
- "go.uber.org/zap/zapcore"
-)
-
-// An Option configures a Logger.
-type Option interface {
- apply(*Logger)
-}
-
-// optionFunc wraps a func so it satisfies the Option interface.
-type optionFunc func(*Logger)
-
-func (f optionFunc) apply(log *Logger) {
- f(log)
-}
-
-// WrapCore wraps or replaces the Logger's underlying zapcore.Core.
-func WrapCore(f func(zapcore.Core) zapcore.Core) Option {
- return optionFunc(func(log *Logger) {
- log.core = f(log.core)
- })
-}
-
-// Hooks registers functions which will be called each time the Logger writes
-// out an Entry. Repeated use of Hooks is additive.
-//
-// Hooks are useful for simple side effects, like capturing metrics for the
-// number of emitted logs. More complex side effects, including anything that
-// requires access to the Entry's structured fields, should be implemented as
-// a zapcore.Core instead. See zapcore.RegisterHooks for details.
-func Hooks(hooks ...func(zapcore.Entry) error) Option {
- return optionFunc(func(log *Logger) {
- log.core = zapcore.RegisterHooks(log.core, hooks...)
- })
-}
-
-// Fields adds fields to the Logger.
-func Fields(fs ...Field) Option {
- return optionFunc(func(log *Logger) {
- log.core = log.core.With(fs)
- })
-}
-
-// ErrorOutput sets the destination for errors generated by the Logger. Note
-// that this option only affects internal errors; for sample code that sends
-// error-level logs to a different location from info- and debug-level logs,
-// see the package-level AdvancedConfiguration example.
-//
-// The supplied WriteSyncer must be safe for concurrent use. The Open and
-// zapcore.Lock functions are the simplest ways to protect files with a mutex.
-func ErrorOutput(w zapcore.WriteSyncer) Option {
- return optionFunc(func(log *Logger) {
- log.errorOutput = w
- })
-}
-
-// Development puts the logger in development mode, which makes DPanic-level
-// logs panic instead of simply logging an error.
-func Development() Option {
- return optionFunc(func(log *Logger) {
- log.development = true
- })
-}
-
-// AddCaller configures the Logger to annotate each message with the filename,
-// line number, and function name of zap's caller. See also WithCaller.
-func AddCaller() Option {
- return WithCaller(true)
-}
-
-// WithCaller configures the Logger to annotate each message with the filename,
-// line number, and function name of zap's caller, or not, depending on the
-// value of enabled. This is a generalized form of AddCaller.
-func WithCaller(enabled bool) Option {
- return optionFunc(func(log *Logger) {
- log.addCaller = enabled
- })
-}
-
-// AddCallerSkip increases the number of callers skipped by caller annotation
-// (as enabled by the AddCaller option). When building wrappers around the
-// Logger and SugaredLogger, supplying this Option prevents zap from always
-// reporting the wrapper code as the caller.
-func AddCallerSkip(skip int) Option {
- return optionFunc(func(log *Logger) {
- log.callerSkip += skip
- })
-}
-
-// AddStacktrace configures the Logger to record a stack trace for all messages at
-// or above a given level.
-func AddStacktrace(lvl zapcore.LevelEnabler) Option {
- return optionFunc(func(log *Logger) {
- log.addStack = lvl
- })
-}
-
-// IncreaseLevel increases the level of the logger. It has no effect if
-// the passed-in level tries to decrease the level of the logger.
-func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
- return optionFunc(func(log *Logger) {
- core, err := zapcore.NewIncreaseLevelCore(log.core, lvl)
- if err != nil {
- fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err)
- } else {
- log.core = core
- }
- })
-}
-
-// OnFatal sets the action to take on fatal logs.
-//
-// Deprecated: Use [WithFatalHook] instead.
-func OnFatal(action zapcore.CheckWriteAction) Option {
- return WithFatalHook(action)
-}
-
-// WithFatalHook sets a CheckWriteHook to run on fatal logs.
-// Zap will call this hook after writing a log statement with a Fatal level.
-//
-// For example, the following builds a logger that will exit the current
-// goroutine after writing a fatal log message, but it will not exit the
-// program.
-//
-// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
-//
-// It is important that the provided CheckWriteHook stops the control flow at
-// the current statement to meet expectations of callers of the logger.
-// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
-// minimum.
-func WithFatalHook(hook zapcore.CheckWriteHook) Option {
- return optionFunc(func(log *Logger) {
- log.onFatal = hook
- })
-}
-
-// WithClock specifies the clock used by the logger to determine the current
-// time for logged entries. Defaults to the system clock with time.Now.
-func WithClock(clock zapcore.Clock) Option {
- return optionFunc(func(log *Logger) {
- log.clock = clock
- })
-}
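
The options deleted above compose at construction time. A hedged sketch combining a few of them, following the WithFatalHook example in the doc comment; the encoder, service name, and level are illustrative:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	core := zapcore.NewCore(
		zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
		zapcore.Lock(os.Stderr),
		zap.DebugLevel,
	)

	logger := zap.New(core,
		zap.AddCaller(),                            // annotate entries with file:line
		zap.AddCallerSkip(1),                       // skip one wrapper frame
		zap.Fields(zap.String("service", "api")),   // constant field (name/value illustrative)
		zap.WithFatalHook(zapcore.WriteThenGoexit), // exit the goroutine, not the process
	)
	defer logger.Sync()

	logger.Info("configured with options")
}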
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
deleted file mode 100644
index 478c9a10ff..0000000000
--- a/vendor/go.uber.org/zap/sink.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright (c) 2016-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "errors"
- "fmt"
- "io"
- "net/url"
- "os"
- "path/filepath"
- "strings"
- "sync"
-
- "go.uber.org/zap/zapcore"
-)
-
-const schemeFile = "file"
-
-var _sinkRegistry = newSinkRegistry()
-
-// Sink defines the interface to write to and close logger destinations.
-type Sink interface {
- zapcore.WriteSyncer
- io.Closer
-}
-
-type errSinkNotFound struct {
- scheme string
-}
-
-func (e *errSinkNotFound) Error() string {
- return fmt.Sprintf("no sink found for scheme %q", e.scheme)
-}
-
-type nopCloserSink struct{ zapcore.WriteSyncer }
-
-func (nopCloserSink) Close() error { return nil }
-
-type sinkRegistry struct {
- mu sync.Mutex
- factories map[string]func(*url.URL) (Sink, error) // keyed by scheme
- openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
-}
-
-func newSinkRegistry() *sinkRegistry {
- sr := &sinkRegistry{
- factories: make(map[string]func(*url.URL) (Sink, error)),
- openFile: os.OpenFile,
- }
- sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
- return sr
-}
-
-// RegisterSink registers the given factory for the specific scheme.
-func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
- sr.mu.Lock()
- defer sr.mu.Unlock()
-
- if scheme == "" {
- return errors.New("can't register a sink factory for empty string")
- }
- normalized, err := normalizeScheme(scheme)
- if err != nil {
- return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
- }
- if _, ok := sr.factories[normalized]; ok {
- return fmt.Errorf("sink factory already registered for scheme %q", normalized)
- }
- sr.factories[normalized] = factory
- return nil
-}
-
-func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
- // URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
- // the drive, and path is unset unless `c:/log.txt` is used.
- // To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
- // filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
- if filepath.IsAbs(rawURL) {
- return sr.newFileSinkFromPath(rawURL)
- }
-
- u, err := url.Parse(rawURL)
- if err != nil {
- return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
- }
- if u.Scheme == "" {
- u.Scheme = schemeFile
- }
-
- sr.mu.Lock()
- factory, ok := sr.factories[u.Scheme]
- sr.mu.Unlock()
- if !ok {
- return nil, &errSinkNotFound{u.Scheme}
- }
- return factory(u)
-}
-
-// RegisterSink registers a user-supplied factory for all sinks with a
-// particular scheme.
-//
-// All schemes must be ASCII, valid under section 3.1 of RFC 3986
-// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
-// have a factory registered. Zap automatically registers a factory for the
-// "file" scheme.
-func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
- return _sinkRegistry.RegisterSink(scheme, factory)
-}
-
-func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
- if u.User != nil {
- return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
- }
- if u.Fragment != "" {
- return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u)
- }
- if u.RawQuery != "" {
- return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u)
- }
- // Error messages are better if we check hostname and port separately.
- if u.Port() != "" {
- return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u)
- }
- if hn := u.Hostname(); hn != "" && hn != "localhost" {
- return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
- }
-
- return sr.newFileSinkFromPath(u.Path)
-}
-
-func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
- switch path {
- case "stdout":
- return nopCloserSink{os.Stdout}, nil
- case "stderr":
- return nopCloserSink{os.Stderr}, nil
- }
- return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
-}
-
-func normalizeScheme(s string) (string, error) {
- // https://tools.ietf.org/html/rfc3986#section-3.1
- s = strings.ToLower(s)
- if first := s[0]; 'a' > first || 'z' < first {
- return "", errors.New("must start with a letter")
- }
- for i := 1; i < len(s); i++ { // iterate over bytes, not runes
- c := s[i]
- switch {
- case 'a' <= c && c <= 'z':
- continue
- case '0' <= c && c <= '9':
- continue
- case c == '.' || c == '+' || c == '-':
- continue
- }
- return "", fmt.Errorf("may not contain %q", c)
- }
- return s, nil
-}
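
RegisterSink, removed above, lets callers route output paths with custom URL schemes through their own Sink. A rough sketch registering an in-memory sink under a made-up "memory" scheme; the memorySink type and the scheme name are hypothetical, written only for illustration:

package main

import (
	"bytes"
	"fmt"
	"net/url"

	"go.uber.org/zap"
)

// memorySink collects log output in memory (illustrative type, e.g. for tests).
type memorySink struct{ bytes.Buffer }

func (*memorySink) Sync() error  { return nil }
func (*memorySink) Close() error { return nil }

func main() {
	sink := &memorySink{}

	// Register a factory for the custom "memory" scheme; the factory receives
	// the parsed URL, which this sketch ignores.
	if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
		return sink, nil
	}); err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.OutputPaths = []string{"memory://"}
	logger := zap.Must(cfg.Build())
	logger.Info("captured in memory")
	_ = logger.Sync()

	fmt.Println(sink.String())
}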
diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go
deleted file mode 100644
index 817a3bde8b..0000000000
--- a/vendor/go.uber.org/zap/stacktrace.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "runtime"
- "sync"
-
- "go.uber.org/zap/buffer"
- "go.uber.org/zap/internal/bufferpool"
-)
-
-var _stacktracePool = sync.Pool{
- New: func() interface{} {
- return &stacktrace{
- storage: make([]uintptr, 64),
- }
- },
-}
-
-type stacktrace struct {
- pcs []uintptr // program counters; always a subslice of storage
- frames *runtime.Frames
-
- // The size of pcs varies depending on requirements:
-	// it will be one if only the first frame was requested,
- // and otherwise it will reflect the depth of the call stack.
- //
- // storage decouples the slice we need (pcs) from the slice we pool.
- // We will always allocate a reasonably large storage, but we'll use
- // only as much of it as we need.
- storage []uintptr
-}
-
-// stacktraceDepth specifies how deep of a stack trace should be captured.
-type stacktraceDepth int
-
-const (
- // stacktraceFirst captures only the first frame.
- stacktraceFirst stacktraceDepth = iota
-
- // stacktraceFull captures the entire call stack, allocating more
- // storage for it if needed.
- stacktraceFull
-)
-
-// captureStacktrace captures a stack trace of the specified depth, skipping
-// the provided number of frames. skip=0 identifies the caller of
-// captureStacktrace.
-//
-// The caller must call Free on the returned stacktrace after using it.
-func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
- stack := _stacktracePool.Get().(*stacktrace)
-
- switch depth {
- case stacktraceFirst:
- stack.pcs = stack.storage[:1]
- case stacktraceFull:
- stack.pcs = stack.storage
- }
-
- // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
- // itself. +2 to skip captureStacktrace and runtime.Callers.
- numFrames := runtime.Callers(
- skip+2,
- stack.pcs,
- )
-
- // runtime.Callers truncates the recorded stacktrace if there is no
- // room in the provided slice. For the full stack trace, keep expanding
- // storage until there are fewer frames than there is room.
- if depth == stacktraceFull {
- pcs := stack.pcs
- for numFrames == len(pcs) {
- pcs = make([]uintptr, len(pcs)*2)
- numFrames = runtime.Callers(skip+2, pcs)
- }
-
- // Discard old storage instead of returning it to the pool.
- // This will adjust the pool size over time if stack traces are
- // consistently very deep.
- stack.storage = pcs
- stack.pcs = pcs[:numFrames]
- } else {
- stack.pcs = stack.pcs[:numFrames]
- }
-
- stack.frames = runtime.CallersFrames(stack.pcs)
- return stack
-}
-
-// Free releases resources associated with this stacktrace
-// and returns it back to the pool.
-func (st *stacktrace) Free() {
- st.frames = nil
- st.pcs = nil
- _stacktracePool.Put(st)
-}
-
-// Count reports the total number of frames in this stacktrace.
-// Count DOES NOT change as Next is called.
-func (st *stacktrace) Count() int {
- return len(st.pcs)
-}
-
-// Next returns the next frame in the stack trace,
-// and a boolean indicating whether there are more after it.
-func (st *stacktrace) Next() (_ runtime.Frame, more bool) {
- return st.frames.Next()
-}
-
-func takeStacktrace(skip int) string {
- stack := captureStacktrace(skip+1, stacktraceFull)
- defer stack.Free()
-
- buffer := bufferpool.Get()
- defer buffer.Free()
-
- stackfmt := newStackFormatter(buffer)
- stackfmt.FormatStack(stack)
- return buffer.String()
-}
-
-// stackFormatter formats a stack trace into a readable string representation.
-type stackFormatter struct {
- b *buffer.Buffer
-	nonEmpty bool // whether we've written at least one frame already
-}
-
-// newStackFormatter builds a new stackFormatter.
-func newStackFormatter(b *buffer.Buffer) stackFormatter {
- return stackFormatter{b: b}
-}
-
-// FormatStack formats all remaining frames in the provided stacktrace -- minus
-// the final runtime.main/runtime.goexit frame.
-func (sf *stackFormatter) FormatStack(stack *stacktrace) {
- // Note: On the last iteration, frames.Next() returns false, with a valid
- // frame, but we ignore this frame. The last frame is a runtime frame which
- // adds noise, since it's only either runtime.main or runtime.goexit.
- for frame, more := stack.Next(); more; frame, more = stack.Next() {
- sf.FormatFrame(frame)
- }
-}
-
-// FormatFrame formats the given frame.
-func (sf *stackFormatter) FormatFrame(frame runtime.Frame) {
- if sf.nonEmpty {
- sf.b.AppendByte('\n')
- }
- sf.nonEmpty = true
- sf.b.AppendString(frame.Function)
- sf.b.AppendByte('\n')
- sf.b.AppendByte('\t')
- sf.b.AppendString(frame.File)
- sf.b.AppendByte(':')
- sf.b.AppendInt(int64(frame.Line))
-}
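
The pooled capture logic above ultimately rests on runtime.Callers and runtime.CallersFrames. For orientation only, a stand-alone sketch of the same capture-and-format pattern without the pooling; this is plain runtime usage, not zap API:

package main

import (
	"fmt"
	"runtime"
)

// callers mirrors the capture step above: skip runtime.Callers itself and this
// helper, then walk the frames, dropping the final runtime.main/goexit frame
// just as the formatter above does.
func callers(skip int) []string {
	pcs := make([]uintptr, 64)
	n := runtime.Callers(skip+2, pcs)
	frames := runtime.CallersFrames(pcs[:n])

	var out []string
	for f, more := frames.Next(); more; f, more = frames.Next() {
		out = append(out, fmt.Sprintf("%s\n\t%s:%d", f.Function, f.File, f.Line))
	}
	return out
}

func main() {
	for _, line := range callers(0) {
		fmt.Println(line)
	}
}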
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
deleted file mode 100644
index ac387b3e47..0000000000
--- a/vendor/go.uber.org/zap/sugar.go
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "fmt"
-
- "go.uber.org/zap/zapcore"
-
- "go.uber.org/multierr"
-)
-
-const (
- _oddNumberErrMsg = "Ignored key without a value."
- _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
- _multipleErrMsg = "Multiple errors without a key."
-)
-
-// A SugaredLogger wraps the base Logger functionality in a slower, but less
-// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar
-// method.
-//
-// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
-// For each log level, it exposes four methods:
-//
-// - methods named after the log level for log.Print-style logging
-// - methods ending in "w" for loosely-typed structured logging
-// - methods ending in "f" for log.Printf-style logging
-// - methods ending in "ln" for log.Println-style logging
-//
-// For example, the methods for InfoLevel are:
-//
-// Info(...any) Print-style logging
-// Infow(...any) Structured logging (read as "info with")
-// Infof(string, ...any) Printf-style logging
-// Infoln(...any) Println-style logging
-type SugaredLogger struct {
- base *Logger
-}
-
-// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring
-// is quite inexpensive, so it's reasonable for a single application to use
-// both Loggers and SugaredLoggers, converting between them on the boundaries
-// of performance-sensitive code.
-func (s *SugaredLogger) Desugar() *Logger {
- base := s.base.clone()
- base.callerSkip -= 2
- return base
-}
-
-// Named adds a sub-scope to the logger's name. See Logger.Named for details.
-func (s *SugaredLogger) Named(name string) *SugaredLogger {
- return &SugaredLogger{base: s.base.Named(name)}
-}
-
-// WithOptions clones the current SugaredLogger, applies the supplied Options,
-// and returns the result. It's safe to use concurrently.
-func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger {
- base := s.base.clone()
- for _, opt := range opts {
- opt.apply(base)
- }
- return &SugaredLogger{base: base}
-}
-
-// With adds a variadic number of fields to the logging context. It accepts a
-// mix of strongly-typed Field objects and loosely-typed key-value pairs. When
-// processing pairs, the first element of the pair is used as the field key
-// and the second as the field value.
-//
-// For example,
-//
-// sugaredLogger.With(
-// "hello", "world",
-// "failure", errors.New("oh no"),
-// Stack(),
-// "count", 42,
-// "user", User{Name: "alice"},
-// )
-//
-// is the equivalent of
-//
-// unsugared.With(
-// String("hello", "world"),
-// String("failure", "oh no"),
-// Stack(),
-// Int("count", 42),
-// Object("user", User{Name: "alice"}),
-// )
-//
-// Note that the keys in key-value pairs should be strings. In development,
-// passing a non-string key panics. In production, the logger is more
-// forgiving: a separate error is logged, but the key-value pair is skipped
-// and execution continues. Passing an orphaned key triggers similar behavior:
-// panics in development and errors in production.
-func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
- return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
-}
-
-// Level reports the minimum enabled level for this logger.
-//
-// For NopLoggers, this is [zapcore.InvalidLevel].
-func (s *SugaredLogger) Level() zapcore.Level {
- return zapcore.LevelOf(s.base.core)
-}
-
-// Debug uses fmt.Sprint to construct and log a message.
-func (s *SugaredLogger) Debug(args ...interface{}) {
- s.log(DebugLevel, "", args, nil)
-}
-
-// Info uses fmt.Sprint to construct and log a message.
-func (s *SugaredLogger) Info(args ...interface{}) {
- s.log(InfoLevel, "", args, nil)
-}
-
-// Warn uses fmt.Sprint to construct and log a message.
-func (s *SugaredLogger) Warn(args ...interface{}) {
- s.log(WarnLevel, "", args, nil)
-}
-
-// Error uses fmt.Sprint to construct and log a message.
-func (s *SugaredLogger) Error(args ...interface{}) {
- s.log(ErrorLevel, "", args, nil)
-}
-
-// DPanic uses fmt.Sprint to construct and log a message. In development, the
-// logger then panics. (See DPanicLevel for details.)
-func (s *SugaredLogger) DPanic(args ...interface{}) {
- s.log(DPanicLevel, "", args, nil)
-}
-
-// Panic uses fmt.Sprint to construct and log a message, then panics.
-func (s *SugaredLogger) Panic(args ...interface{}) {
- s.log(PanicLevel, "", args, nil)
-}
-
-// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit.
-func (s *SugaredLogger) Fatal(args ...interface{}) {
- s.log(FatalLevel, "", args, nil)
-}
-
-// Debugf uses fmt.Sprintf to log a templated message.
-func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
- s.log(DebugLevel, template, args, nil)
-}
-
-// Infof uses fmt.Sprintf to log a templated message.
-func (s *SugaredLogger) Infof(template string, args ...interface{}) {
- s.log(InfoLevel, template, args, nil)
-}
-
-// Warnf uses fmt.Sprintf to log a templated message.
-func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
- s.log(WarnLevel, template, args, nil)
-}
-
-// Errorf uses fmt.Sprintf to log a templated message.
-func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
- s.log(ErrorLevel, template, args, nil)
-}
-
-// DPanicf uses fmt.Sprintf to log a templated message. In development, the
-// logger then panics. (See DPanicLevel for details.)
-func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
- s.log(DPanicLevel, template, args, nil)
-}
-
-// Panicf uses fmt.Sprintf to log a templated message, then panics.
-func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
- s.log(PanicLevel, template, args, nil)
-}
-
-// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit.
-func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
- s.log(FatalLevel, template, args, nil)
-}
-
-// Debugw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-//
-// When debug-level logging is disabled, this is much faster than
-//
-// s.With(keysAndValues).Debug(msg)
-func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
- s.log(DebugLevel, msg, nil, keysAndValues)
-}
-
-// Infow logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) {
- s.log(InfoLevel, msg, nil, keysAndValues)
-}
-
-// Warnw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) {
- s.log(WarnLevel, msg, nil, keysAndValues)
-}
-
-// Errorw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) {
- s.log(ErrorLevel, msg, nil, keysAndValues)
-}
-
-// DPanicw logs a message with some additional context. In development, the
-// logger then panics. (See DPanicLevel for details.) The variadic key-value
-// pairs are treated as they are in With.
-func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) {
- s.log(DPanicLevel, msg, nil, keysAndValues)
-}
-
-// Panicw logs a message with some additional context, then panics. The
-// variadic key-value pairs are treated as they are in With.
-func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) {
- s.log(PanicLevel, msg, nil, keysAndValues)
-}
-
-// Fatalw logs a message with some additional context, then calls os.Exit. The
-// variadic key-value pairs are treated as they are in With.
-func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
- s.log(FatalLevel, msg, nil, keysAndValues)
-}
-
-// Debugln uses fmt.Sprintln to construct and log a message.
-func (s *SugaredLogger) Debugln(args ...interface{}) {
- s.logln(DebugLevel, args, nil)
-}
-
-// Infoln uses fmt.Sprintln to construct and log a message.
-func (s *SugaredLogger) Infoln(args ...interface{}) {
- s.logln(InfoLevel, args, nil)
-}
-
-// Warnln uses fmt.Sprintln to construct and log a message.
-func (s *SugaredLogger) Warnln(args ...interface{}) {
- s.logln(WarnLevel, args, nil)
-}
-
-// Errorln uses fmt.Sprintln to construct and log a message.
-func (s *SugaredLogger) Errorln(args ...interface{}) {
- s.logln(ErrorLevel, args, nil)
-}
-
-// DPanicln uses fmt.Sprintln to construct and log a message. In development, the
-// logger then panics. (See DPanicLevel for details.)
-func (s *SugaredLogger) DPanicln(args ...interface{}) {
- s.logln(DPanicLevel, args, nil)
-}
-
-// Panicln uses fmt.Sprintln to construct and log a message, then panics.
-func (s *SugaredLogger) Panicln(args ...interface{}) {
- s.logln(PanicLevel, args, nil)
-}
-
-// Fatalln uses fmt.Sprintln to construct and log a message, then calls os.Exit.
-func (s *SugaredLogger) Fatalln(args ...interface{}) {
- s.logln(FatalLevel, args, nil)
-}
-
-// Sync flushes any buffered log entries.
-func (s *SugaredLogger) Sync() error {
- return s.base.Sync()
-}
-
-// log message with Sprint, Sprintf, or neither.
-func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
- // If logging at this level is completely disabled, skip the overhead of
- // string formatting.
- if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
- return
- }
-
- msg := getMessage(template, fmtArgs)
- if ce := s.base.Check(lvl, msg); ce != nil {
- ce.Write(s.sweetenFields(context)...)
- }
-}
-
-// logln message with Sprintln
-func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) {
- if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
- return
- }
-
- msg := getMessageln(fmtArgs)
- if ce := s.base.Check(lvl, msg); ce != nil {
- ce.Write(s.sweetenFields(context)...)
- }
-}
-
-// getMessage format with Sprint, Sprintf, or neither.
-func getMessage(template string, fmtArgs []interface{}) string {
- if len(fmtArgs) == 0 {
- return template
- }
-
- if template != "" {
- return fmt.Sprintf(template, fmtArgs...)
- }
-
- if len(fmtArgs) == 1 {
- if str, ok := fmtArgs[0].(string); ok {
- return str
- }
- }
- return fmt.Sprint(fmtArgs...)
-}
-
-// getMessageln format with Sprintln.
-func getMessageln(fmtArgs []interface{}) string {
- msg := fmt.Sprintln(fmtArgs...)
- return msg[:len(msg)-1]
-}
-
-func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
- if len(args) == 0 {
- return nil
- }
-
- var (
- // Allocate enough space for the worst case; if users pass only structured
- // fields, we shouldn't penalize them with extra allocations.
- fields = make([]Field, 0, len(args))
- invalid invalidPairs
- seenError bool
- )
-
- for i := 0; i < len(args); {
- // This is a strongly-typed field. Consume it and move on.
- if f, ok := args[i].(Field); ok {
- fields = append(fields, f)
- i++
- continue
- }
-
- // If it is an error, consume it and move on.
- if err, ok := args[i].(error); ok {
- if !seenError {
- seenError = true
- fields = append(fields, Error(err))
- } else {
- s.base.Error(_multipleErrMsg, Error(err))
- }
- i++
- continue
- }
-
- // Make sure this element isn't a dangling key.
- if i == len(args)-1 {
- s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
- break
- }
-
- // Consume this value and the next, treating them as a key-value pair. If the
- // key isn't a string, add this pair to the slice of invalid pairs.
- key, val := args[i], args[i+1]
- if keyStr, ok := key.(string); !ok {
- // Subsequent errors are likely, so allocate once up front.
- if cap(invalid) == 0 {
- invalid = make(invalidPairs, 0, len(args)/2)
- }
- invalid = append(invalid, invalidPair{i, key, val})
- } else {
- fields = append(fields, Any(keyStr, val))
- }
- i += 2
- }
-
- // If we encountered any invalid key-value pairs, log an error.
- if len(invalid) > 0 {
- s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid))
- }
- return fields
-}
-
-type invalidPair struct {
- position int
- key, value interface{}
-}
-
-func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error {
- enc.AddInt64("position", int64(p.position))
- Any("key", p.key).AddTo(enc)
- Any("value", p.value).AddTo(enc)
- return nil
-}
-
-type invalidPairs []invalidPair
-
-func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error {
- var err error
- for i := range ps {
- err = multierr.Append(err, enc.AppendObject(ps[i]))
- }
- return err
-}
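
The SugaredLogger removed above trades a little speed for a looser API. A brief sketch of the printf-style, keyed, and desugared forms side by side; the messages and fields are illustrative:

package main

import "go.uber.org/zap"

func main() {
	sugar := zap.Must(zap.NewDevelopment()).Sugar()
	defer sugar.Sync()

	// Printf-style and loosely typed key/value logging from the same logger.
	sugar.Infof("listening on %s", ":8080")
	sugar.Infow("request handled",
		"method", "GET",
		"status", 200,
	)

	// Drop back to the strongly typed Logger on hot paths.
	sugar.Desugar().Info("fast path", zap.Int("status", 200))
}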
diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go
deleted file mode 100644
index c5a1f16225..0000000000
--- a/vendor/go.uber.org/zap/time.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import "time"
-
-func timeToMillis(t time.Time) int64 {
- return t.UnixNano() / int64(time.Millisecond)
-}
diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go
deleted file mode 100644
index f08728e1ec..0000000000
--- a/vendor/go.uber.org/zap/writer.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright (c) 2016-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "fmt"
- "io"
-
- "go.uber.org/zap/zapcore"
-
- "go.uber.org/multierr"
-)
-
-// Open is a high-level wrapper that takes a variadic number of URLs, opens or
-// creates each of the specified resources, and combines them into a locked
-// WriteSyncer. It also returns any error encountered and a function to close
-// any opened files.
-//
-// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a
-// scheme and URLs with the "file" scheme. Third-party code may register
-// factories for other schemes using RegisterSink.
-//
-// URLs with the "file" scheme must use absolute paths on the local
-// filesystem. No user, password, port, fragments, or query parameters are
-// allowed, and the hostname must be empty or "localhost".
-//
-// Since it's common to write logs to the local filesystem, URLs without a
-// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without
-// a scheme, the special paths "stdout" and "stderr" are interpreted as
-// os.Stdout and os.Stderr. When specified without a scheme, relative file
-// paths also work.
-func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
- writers, close, err := open(paths)
- if err != nil {
- return nil, nil, err
- }
-
- writer := CombineWriteSyncers(writers...)
- return writer, close, nil
-}
-
-func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
- writers := make([]zapcore.WriteSyncer, 0, len(paths))
- closers := make([]io.Closer, 0, len(paths))
- close := func() {
- for _, c := range closers {
- c.Close()
- }
- }
-
- var openErr error
- for _, path := range paths {
- sink, err := _sinkRegistry.newSink(path)
- if err != nil {
- openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err))
- continue
- }
- writers = append(writers, sink)
- closers = append(closers, sink)
- }
- if openErr != nil {
- close()
- return nil, nil, openErr
- }
-
- return writers, close, nil
-}
-
-// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
-// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op
-// WriteSyncer.
-//
-// It's provided purely as a convenience; the result is no different from
-// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
-func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
- if len(writers) == 0 {
- return zapcore.AddSync(io.Discard)
- }
- return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
-}
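
Open, removed above, is the usual way to fan log output across several destinations. A minimal sketch combining stderr with a file; the file path and encoder choice are illustrative:

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Open combines the destinations into one locked WriteSyncer and returns a
	// cleanup func that closes whatever files it opened.
	ws, cleanup, err := zap.Open("stderr", "/tmp/service.log") // path is illustrative
	if err != nil {
		panic(err)
	}
	defer cleanup()

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws,
		zap.InfoLevel,
	)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("written to both stderr and the file")
}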
diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
deleted file mode 100644
index a40e93b3ec..0000000000
--- a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright (c) 2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import (
- "bufio"
- "sync"
- "time"
-
- "go.uber.org/multierr"
-)
-
-const (
- // _defaultBufferSize specifies the default size used by Buffer.
- _defaultBufferSize = 256 * 1024 // 256 kB
-
- // _defaultFlushInterval specifies the default flush interval for
- // Buffer.
- _defaultFlushInterval = 30 * time.Second
-)
-
-// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before
-// flushing them to a wrapped WriteSyncer after reaching some limit, or at some
-// fixed interval--whichever comes first.
-//
-// BufferedWriteSyncer is safe for concurrent use. You don't need to use
-// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
-//
-// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log
-// destination (*os.File is a valid WriteSyncer), wrap it with
-// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the
-// object.
-//
-// func main() {
-// ws := ... // your log destination
-// bws := &zapcore.BufferedWriteSyncer{WS: ws}
-// defer bws.Stop()
-//
-// // ...
-// core := zapcore.NewCore(enc, bws, lvl)
-// logger := zap.New(core)
-//
-// // ...
-// }
-//
-// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
-// waiting at most 30 seconds between flushes.
-// You can customize these parameters by setting the Size or FlushInterval
-// fields.
-// For example, the following buffers up to 512 kB of logs before flushing them
-// to Stderr, with a maximum of one minute between each flush.
-//
-// ws := &BufferedWriteSyncer{
-// WS: os.Stderr,
-// Size: 512 * 1024, // 512 kB
-// FlushInterval: time.Minute,
-// }
-// defer ws.Stop()
-type BufferedWriteSyncer struct {
- // WS is the WriteSyncer around which BufferedWriteSyncer will buffer
- // writes.
- //
- // This field is required.
- WS WriteSyncer
-
- // Size specifies the maximum amount of data the writer will buffered
- // before flushing.
- //
- // Defaults to 256 kB if unspecified.
- Size int
-
- // FlushInterval specifies how often the writer should flush data if
- // there have been no writes.
- //
- // Defaults to 30 seconds if unspecified.
- FlushInterval time.Duration
-
- // Clock, if specified, provides control of the source of time for the
- // writer.
- //
- // Defaults to the system clock.
- Clock Clock
-
- // unexported fields for state
- mu sync.Mutex
- initialized bool // whether initialize() has run
- stopped bool // whether Stop() has run
- writer *bufio.Writer
- ticker *time.Ticker
- stop chan struct{} // closed when flushLoop should stop
- done chan struct{} // closed when flushLoop has stopped
-}
-
-func (s *BufferedWriteSyncer) initialize() {
- size := s.Size
- if size == 0 {
- size = _defaultBufferSize
- }
-
- flushInterval := s.FlushInterval
- if flushInterval == 0 {
- flushInterval = _defaultFlushInterval
- }
-
- if s.Clock == nil {
- s.Clock = DefaultClock
- }
-
- s.ticker = s.Clock.NewTicker(flushInterval)
- s.writer = bufio.NewWriterSize(s.WS, size)
- s.stop = make(chan struct{})
- s.done = make(chan struct{})
- s.initialized = true
- go s.flushLoop()
-}
-
-// Write writes log data into buffer syncer directly, multiple Write calls will be batched,
-// and log data will be flushed to disk when the buffer is full or periodically.
-func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if !s.initialized {
- s.initialize()
- }
-
- // To avoid partial writes from being flushed, we manually flush the existing buffer if:
- // * The current write doesn't fit into the buffer fully, and
- // * The buffer is not empty (since bufio will not split large writes when the buffer is empty)
- if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 {
- if err := s.writer.Flush(); err != nil {
- return 0, err
- }
- }
-
- return s.writer.Write(bs)
-}
-
-// Sync flushes buffered log data into disk directly.
-func (s *BufferedWriteSyncer) Sync() error {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- var err error
- if s.initialized {
- err = s.writer.Flush()
- }
-
- return multierr.Append(err, s.WS.Sync())
-}
-
-// flushLoop flushes the buffer at the configured interval until Stop is
-// called.
-func (s *BufferedWriteSyncer) flushLoop() {
- defer close(s.done)
-
- for {
- select {
- case <-s.ticker.C:
- // we just simply ignore error here
- // because the underlying bufio writer stores any errors
- // and we return any error from Sync() as part of the close
- _ = s.Sync()
- case <-s.stop:
- return
- }
- }
-}
-
-// Stop closes the buffer, cleans up background goroutines, and flushes
-// remaining unwritten data.
-func (s *BufferedWriteSyncer) Stop() (err error) {
- var stopped bool
-
- // Critical section.
- func() {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if !s.initialized {
- return
- }
-
- stopped = s.stopped
- if stopped {
- return
- }
- s.stopped = true
-
- s.ticker.Stop()
- close(s.stop) // tell flushLoop to stop
- <-s.done // and wait until it has
- }()
-
- // Don't call Sync on consecutive Stops.
- if !stopped {
- err = s.Sync()
- }
-
- return err
-}
diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go
deleted file mode 100644
index 422fd82a6b..0000000000
--- a/vendor/go.uber.org/zap/zapcore/clock.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import "time"
-
-// DefaultClock is the default clock used by Zap in operations that require
-// time. This clock uses the system clock for all operations.
-var DefaultClock = systemClock{}
-
-// Clock is a source of time for logged entries.
-type Clock interface {
- // Now returns the current local time.
- Now() time.Time
-
- // NewTicker returns *time.Ticker that holds a channel
- // that delivers "ticks" of a clock.
- NewTicker(time.Duration) *time.Ticker
-}
-
-// systemClock implements default Clock that uses system time.
-type systemClock struct{}
-
-func (systemClock) Now() time.Time {
- return time.Now()
-}
-
-func (systemClock) NewTicker(duration time.Duration) *time.Ticker {
- return time.NewTicker(duration)
-}
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go
deleted file mode 100644
index 1aa5dc3646..0000000000
--- a/vendor/go.uber.org/zap/zapcore/console_encoder.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import (
- "fmt"
- "sync"
-
- "go.uber.org/zap/buffer"
- "go.uber.org/zap/internal/bufferpool"
-)
-
-var _sliceEncoderPool = sync.Pool{
- New: func() interface{} {
- return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)}
- },
-}
-
-func getSliceEncoder() *sliceArrayEncoder {
- return _sliceEncoderPool.Get().(*sliceArrayEncoder)
-}
-
-func putSliceEncoder(e *sliceArrayEncoder) {
- e.elems = e.elems[:0]
- _sliceEncoderPool.Put(e)
-}
-
-type consoleEncoder struct {
- *jsonEncoder
-}
-
-// NewConsoleEncoder creates an encoder whose output is designed for human -
-// rather than machine - consumption. It serializes the core log entry data
-// (message, level, timestamp, etc.) in a plain-text format and leaves the
-// structured context as JSON.
-//
-// Note that although the console encoder doesn't use the keys specified in the
-// encoder configuration, it will omit any element whose key is set to the empty
-// string.
-func NewConsoleEncoder(cfg EncoderConfig) Encoder {
- if cfg.ConsoleSeparator == "" {
- // Use a default delimiter of '\t' for backwards compatibility
- cfg.ConsoleSeparator = "\t"
- }
- return consoleEncoder{newJSONEncoder(cfg, true)}
-}
-
-func (c consoleEncoder) Clone() Encoder {
- return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)}
-}
-
-func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
- line := bufferpool.Get()
-
- // We don't want the entry's metadata to be quoted and escaped (if it's
- // encoded as strings), which means that we can't use the JSON encoder. The
- // simplest option is to use the memory encoder and fmt.Fprint.
- //
- // If this ever becomes a performance bottleneck, we can implement
- // ArrayEncoder for our plain-text format.
- arr := getSliceEncoder()
- if c.TimeKey != "" && c.EncodeTime != nil {
- c.EncodeTime(ent.Time, arr)
- }
- if c.LevelKey != "" && c.EncodeLevel != nil {
- c.EncodeLevel(ent.Level, arr)
- }
- if ent.LoggerName != "" && c.NameKey != "" {
- nameEncoder := c.EncodeName
-
- if nameEncoder == nil {
- // Fall back to FullNameEncoder for backward compatibility.
- nameEncoder = FullNameEncoder
- }
-
- nameEncoder(ent.LoggerName, arr)
- }
- if ent.Caller.Defined {
- if c.CallerKey != "" && c.EncodeCaller != nil {
- c.EncodeCaller(ent.Caller, arr)
- }
- if c.FunctionKey != "" {
- arr.AppendString(ent.Caller.Function)
- }
- }
- for i := range arr.elems {
- if i > 0 {
- line.AppendString(c.ConsoleSeparator)
- }
- fmt.Fprint(line, arr.elems[i])
- }
- putSliceEncoder(arr)
-
- // Add the message itself.
- if c.MessageKey != "" {
- c.addSeparatorIfNecessary(line)
- line.AppendString(ent.Message)
- }
-
- // Add any structured context.
- c.writeContext(line, fields)
-
- // If there's no stacktrace key, honor that; this allows users to force
- // single-line output.
- if ent.Stack != "" && c.StacktraceKey != "" {
- line.AppendByte('\n')
- line.AppendString(ent.Stack)
- }
-
- line.AppendString(c.LineEnding)
- return line, nil
-}
-
-func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) {
- context := c.jsonEncoder.Clone().(*jsonEncoder)
- defer func() {
- // putJSONEncoder assumes the buffer is still used, but we write out the buffer so
- // we can free it.
- context.buf.Free()
- putJSONEncoder(context)
- }()
-
- addFields(context, extra)
- context.closeOpenNamespaces()
- if context.buf.Len() == 0 {
- return
- }
-
- c.addSeparatorIfNecessary(line)
- line.AppendByte('{')
- line.Write(context.buf.Bytes())
- line.AppendByte('}')
-}
-
-func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) {
- if line.Len() > 0 {
- line.AppendString(c.ConsoleSeparator)
- }
-}
diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go
deleted file mode 100644
index 9dfd64051f..0000000000
--- a/vendor/go.uber.org/zap/zapcore/core.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-// Core is a minimal, fast logger interface. It's designed for library authors
-// to wrap in a more user-friendly API.
-type Core interface {
- LevelEnabler
-
- // With adds structured context to the Core.
- With([]Field) Core
- // Check determines whether the supplied Entry should be logged (using the
- // embedded LevelEnabler and possibly some extra logic). If the entry
- // should be logged, the Core adds itself to the CheckedEntry and returns
- // the result.
- //
- // Callers must use Check before calling Write.
- Check(Entry, *CheckedEntry) *CheckedEntry
- // Write serializes the Entry and any Fields supplied at the log site and
- // writes them to their destination.
- //
- // If called, Write should always log the Entry and Fields; it should not
- // replicate the logic of Check.
- Write(Entry, []Field) error
- // Sync flushes buffered logs (if any).
- Sync() error
-}
-
-type nopCore struct{}
-
-// NewNopCore returns a no-op Core.
-func NewNopCore() Core { return nopCore{} }
-func (nopCore) Enabled(Level) bool { return false }
-func (n nopCore) With([]Field) Core { return n }
-func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce }
-func (nopCore) Write(Entry, []Field) error { return nil }
-func (nopCore) Sync() error { return nil }
-
-// NewCore creates a Core that writes logs to a WriteSyncer.
-func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core {
- return &ioCore{
- LevelEnabler: enab,
- enc: enc,
- out: ws,
- }
-}
-
-type ioCore struct {
- LevelEnabler
- enc Encoder
- out WriteSyncer
-}
-
-var (
- _ Core = (*ioCore)(nil)
- _ leveledEnabler = (*ioCore)(nil)
-)
-
-func (c *ioCore) Level() Level {
- return LevelOf(c.LevelEnabler)
-}
-
-func (c *ioCore) With(fields []Field) Core {
- clone := c.clone()
- addFields(clone.enc, fields)
- return clone
-}
-
-func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
- if c.Enabled(ent.Level) {
- return ce.AddCore(ent, c)
- }
- return ce
-}
-
-func (c *ioCore) Write(ent Entry, fields []Field) error {
- buf, err := c.enc.EncodeEntry(ent, fields)
- if err != nil {
- return err
- }
- _, err = c.out.Write(buf.Bytes())
- buf.Free()
- if err != nil {
- return err
- }
- if ent.Level > ErrorLevel {
- // Since we may be crashing the program, sync the output. Ignore Sync
- // errors, pending a clean solution to issue #370.
- c.Sync()
- }
- return nil
-}
-
-func (c *ioCore) Sync() error {
- return c.out.Sync()
-}
-
-func (c *ioCore) clone() *ioCore {
- return &ioCore{
- LevelEnabler: c.LevelEnabler,
- enc: c.enc.Clone(),
- out: c.out,
- }
-}
diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go
deleted file mode 100644
index 31000e91f7..0000000000
--- a/vendor/go.uber.org/zap/zapcore/doc.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package zapcore defines and implements the low-level interfaces upon which
-// zap is built. By providing alternate implementations of these interfaces,
-// external packages can extend zap's capabilities.
-package zapcore // import "go.uber.org/zap/zapcore"
diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go
deleted file mode 100644
index 5769ff3e4e..0000000000
--- a/vendor/go.uber.org/zap/zapcore/encoder.go
+++ /dev/null
@@ -1,451 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import (
- "encoding/json"
- "io"
- "time"
-
- "go.uber.org/zap/buffer"
-)
-
-// DefaultLineEnding defines the default line ending when writing logs.
-// Alternate line endings specified in EncoderConfig can override this
-// behavior.
-const DefaultLineEnding = "\n"
-
-// OmitKey defines the key to use when callers want to remove a key from log output.
-const OmitKey = ""
-
-// A LevelEncoder serializes a Level to a primitive type.
-type LevelEncoder func(Level, PrimitiveArrayEncoder)
-
-// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
-// InfoLevel is serialized to "info".
-func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
- enc.AppendString(l.String())
-}
-
-// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring.
-// For example, InfoLevel is serialized to "info" and colored blue.
-func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
- s, ok := _levelToLowercaseColorString[l]
- if !ok {
- s = _unknownLevelColor.Add(l.String())
- }
- enc.AppendString(s)
-}
-
-// CapitalLevelEncoder serializes a Level to an all-caps string. For example,
-// InfoLevel is serialized to "INFO".
-func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
- enc.AppendString(l.CapitalString())
-}
-
-// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color.
-// For example, InfoLevel is serialized to "INFO" and colored blue.
-func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
- s, ok := _levelToCapitalColorString[l]
- if !ok {
- s = _unknownLevelColor.Add(l.CapitalString())
- }
- enc.AppendString(s)
-}
-
-// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to
-// CapitalLevelEncoder, "coloredCapital" is unmarshaled to CapitalColorLevelEncoder,
-// "colored" is unmarshaled to LowercaseColorLevelEncoder, and anything else
-// is unmarshaled to LowercaseLevelEncoder.
-func (e *LevelEncoder) UnmarshalText(text []byte) error {
- switch string(text) {
- case "capital":
- *e = CapitalLevelEncoder
- case "capitalColor":
- *e = CapitalColorLevelEncoder
- case "color":
- *e = LowercaseColorLevelEncoder
- default:
- *e = LowercaseLevelEncoder
- }
- return nil
-}
-
-// A TimeEncoder serializes a time.Time to a primitive type.
-type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
-
-// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
-// since the Unix epoch.
-func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
- nanos := t.UnixNano()
- sec := float64(nanos) / float64(time.Second)
- enc.AppendFloat64(sec)
-}
-
-// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of
-// milliseconds since the Unix epoch.
-func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
- nanos := t.UnixNano()
- millis := float64(nanos) / float64(time.Millisecond)
- enc.AppendFloat64(millis)
-}
-
-// EpochNanosTimeEncoder serializes a time.Time to an integer number of
-// nanoseconds since the Unix epoch.
-func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
- enc.AppendInt64(t.UnixNano())
-}
-
-func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) {
- type appendTimeEncoder interface {
- AppendTimeLayout(time.Time, string)
- }
-
- if enc, ok := enc.(appendTimeEncoder); ok {
- enc.AppendTimeLayout(t, layout)
- return
- }
-
- enc.AppendString(t.Format(layout))
-}
-
-// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string
-// with millisecond precision.
-//
-// If enc supports AppendTimeLayout(t time.Time,layout string), it's used
-// instead of appending a pre-formatted string value.
-func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
- encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc)
-}
-
-// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string.
-//
-// If enc supports AppendTimeLayout(t time.Time,layout string), it's used
-// instead of appending a pre-formatted string value.
-func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
- encodeTimeLayout(t, time.RFC3339, enc)
-}
-
-// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string
-// with nanosecond precision.
-//
-// If enc supports AppendTimeLayout(t time.Time,layout string), it's used
-// instead of appending a pre-formatted string value.
-func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
- encodeTimeLayout(t, time.RFC3339Nano, enc)
-}
-
-// TimeEncoderOfLayout returns TimeEncoder which serializes a time.Time using
-// given layout.
-func TimeEncoderOfLayout(layout string) TimeEncoder {
- return func(t time.Time, enc PrimitiveArrayEncoder) {
- encodeTimeLayout(t, layout, enc)
- }
-}
-
-// UnmarshalText unmarshals text to a TimeEncoder.
-// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder.
-// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder.
-// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder.
-// "millis" is unmarshaled to EpochMillisTimeEncoder.
-// "nanos" is unmarshaled to EpochNanosEncoder.
-// Anything else is unmarshaled to EpochTimeEncoder.
-func (e *TimeEncoder) UnmarshalText(text []byte) error {
- switch string(text) {
- case "rfc3339nano", "RFC3339Nano":
- *e = RFC3339NanoTimeEncoder
- case "rfc3339", "RFC3339":
- *e = RFC3339TimeEncoder
- case "iso8601", "ISO8601":
- *e = ISO8601TimeEncoder
- case "millis":
- *e = EpochMillisTimeEncoder
- case "nanos":
- *e = EpochNanosTimeEncoder
- default:
- *e = EpochTimeEncoder
- }
- return nil
-}
-
-// UnmarshalYAML unmarshals YAML to a TimeEncoder.
-// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout.
-//
-// timeEncoder:
-// layout: 06/01/02 03:04pm
-//
-// If value is string, it uses UnmarshalText.
-//
-// timeEncoder: iso8601
-func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var o struct {
- Layout string `json:"layout" yaml:"layout"`
- }
- if err := unmarshal(&o); err == nil {
- *e = TimeEncoderOfLayout(o.Layout)
- return nil
- }
-
- var s string
- if err := unmarshal(&s); err != nil {
- return err
- }
- return e.UnmarshalText([]byte(s))
-}
-
-// UnmarshalJSON unmarshals JSON to a TimeEncoder as same way UnmarshalYAML does.
-func (e *TimeEncoder) UnmarshalJSON(data []byte) error {
- return e.UnmarshalYAML(func(v interface{}) error {
- return json.Unmarshal(data, v)
- })
-}
-
-// A DurationEncoder serializes a time.Duration to a primitive type.
-type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
-
-// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
-func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
- enc.AppendFloat64(float64(d) / float64(time.Second))
-}
-
-// NanosDurationEncoder serializes a time.Duration to an integer number of
-// nanoseconds elapsed.
-func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
- enc.AppendInt64(int64(d))
-}
-
-// MillisDurationEncoder serializes a time.Duration to an integer number of
-// milliseconds elapsed.
-func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
- enc.AppendInt64(d.Nanoseconds() / 1e6)
-}
-
-// StringDurationEncoder serializes a time.Duration using its built-in String
-// method.
-func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
- enc.AppendString(d.String())
-}
-
-// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled
-// to StringDurationEncoder, and anything else is unmarshaled to
-// NanosDurationEncoder.
-func (e *DurationEncoder) UnmarshalText(text []byte) error {
- switch string(text) {
- case "string":
- *e = StringDurationEncoder
- case "nanos":
- *e = NanosDurationEncoder
- case "ms":
- *e = MillisDurationEncoder
- default:
- *e = SecondsDurationEncoder
- }
- return nil
-}
-
-// A CallerEncoder serializes an EntryCaller to a primitive type.
-type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
-
-// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
-// format.
-func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
- // TODO: consider using a byte-oriented API to save an allocation.
- enc.AppendString(caller.String())
-}
-
-// ShortCallerEncoder serializes a caller in package/file:line format, trimming
-// all but the final directory from the full path.
-func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
- // TODO: consider using a byte-oriented API to save an allocation.
- enc.AppendString(caller.TrimmedPath())
-}
-
-// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to
-// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder.
-func (e *CallerEncoder) UnmarshalText(text []byte) error {
- switch string(text) {
- case "full":
- *e = FullCallerEncoder
- default:
- *e = ShortCallerEncoder
- }
- return nil
-}
-
-// A NameEncoder serializes a period-separated logger name to a primitive
-// type.
-type NameEncoder func(string, PrimitiveArrayEncoder)
-
-// FullNameEncoder serializes the logger name as-is.
-func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) {
- enc.AppendString(loggerName)
-}
-
-// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is
-// unmarshaled to FullNameEncoder.
-func (e *NameEncoder) UnmarshalText(text []byte) error {
- switch string(text) {
- case "full":
- *e = FullNameEncoder
- default:
- *e = FullNameEncoder
- }
- return nil
-}
-
-// An EncoderConfig allows users to configure the concrete encoders supplied by
-// zapcore.
-type EncoderConfig struct {
- // Set the keys used for each log entry. If any key is empty, that portion
- // of the entry is omitted.
- MessageKey string `json:"messageKey" yaml:"messageKey"`
- LevelKey string `json:"levelKey" yaml:"levelKey"`
- TimeKey string `json:"timeKey" yaml:"timeKey"`
- NameKey string `json:"nameKey" yaml:"nameKey"`
- CallerKey string `json:"callerKey" yaml:"callerKey"`
- FunctionKey string `json:"functionKey" yaml:"functionKey"`
- StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
- SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"`
- LineEnding string `json:"lineEnding" yaml:"lineEnding"`
- // Configure the primitive representations of common complex types. For
- // example, some users may want all time.Times serialized as floating-point
- // seconds since epoch, while others may prefer ISO8601 strings.
- EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"`
- EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"`
- EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"`
- EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"`
- // Unlike the other primitive type encoders, EncodeName is optional. The
- // zero value falls back to FullNameEncoder.
- EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
- // Configure the encoder for interface{} type objects.
- // If not provided, objects are encoded using json.Encoder
- NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"`
- // Configures the field separator used by the console encoder. Defaults
- // to tab.
- ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
-}
-
-// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a
-// map- or struct-like object to the logging context. Like maps, ObjectEncoders
-// aren't safe for concurrent use (though typical use shouldn't require locks).
-type ObjectEncoder interface {
- // Logging-specific marshalers.
- AddArray(key string, marshaler ArrayMarshaler) error
- AddObject(key string, marshaler ObjectMarshaler) error
-
- // Built-in types.
- AddBinary(key string, value []byte) // for arbitrary bytes
- AddByteString(key string, value []byte) // for UTF-8 encoded bytes
- AddBool(key string, value bool)
- AddComplex128(key string, value complex128)
- AddComplex64(key string, value complex64)
- AddDuration(key string, value time.Duration)
- AddFloat64(key string, value float64)
- AddFloat32(key string, value float32)
- AddInt(key string, value int)
- AddInt64(key string, value int64)
- AddInt32(key string, value int32)
- AddInt16(key string, value int16)
- AddInt8(key string, value int8)
- AddString(key, value string)
- AddTime(key string, value time.Time)
- AddUint(key string, value uint)
- AddUint64(key string, value uint64)
- AddUint32(key string, value uint32)
- AddUint16(key string, value uint16)
- AddUint8(key string, value uint8)
- AddUintptr(key string, value uintptr)
-
- // AddReflected uses reflection to serialize arbitrary objects, so it can be
- // slow and allocation-heavy.
- AddReflected(key string, value interface{}) error
- // OpenNamespace opens an isolated namespace where all subsequent fields will
- // be added. Applications can use namespaces to prevent key collisions when
- // injecting loggers into sub-components or third-party libraries.
- OpenNamespace(key string)
-}
-
-// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding
-// array-like objects to the logging context. Of note, it supports mixed-type
-// arrays even though they aren't typical in Go. Like slices, ArrayEncoders
-// aren't safe for concurrent use (though typical use shouldn't require locks).
-type ArrayEncoder interface {
- // Built-in types.
- PrimitiveArrayEncoder
-
- // Time-related types.
- AppendDuration(time.Duration)
- AppendTime(time.Time)
-
- // Logging-specific marshalers.
- AppendArray(ArrayMarshaler) error
- AppendObject(ObjectMarshaler) error
-
- // AppendReflected uses reflection to serialize arbitrary objects, so it's
- // slow and allocation-heavy.
- AppendReflected(value interface{}) error
-}
-
-// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals
-// only in Go's built-in types. It's included only so that Duration- and
-// TimeEncoders cannot trigger infinite recursion.
-type PrimitiveArrayEncoder interface {
- // Built-in types.
- AppendBool(bool)
- AppendByteString([]byte) // for UTF-8 encoded bytes
- AppendComplex128(complex128)
- AppendComplex64(complex64)
- AppendFloat64(float64)
- AppendFloat32(float32)
- AppendInt(int)
- AppendInt64(int64)
- AppendInt32(int32)
- AppendInt16(int16)
- AppendInt8(int8)
- AppendString(string)
- AppendUint(uint)
- AppendUint64(uint64)
- AppendUint32(uint32)
- AppendUint16(uint16)
- AppendUint8(uint8)
- AppendUintptr(uintptr)
-}
-
-// Encoder is a format-agnostic interface for all log entry marshalers. Since
-// log encoders don't need to support the same wide range of use cases as
-// general-purpose marshalers, it's possible to make them faster and
-// lower-allocation.
-//
-// Implementations of the ObjectEncoder interface's methods can, of course,
-// freely modify the receiver. However, the Clone and EncodeEntry methods will
-// be called concurrently and shouldn't modify the receiver.
-type Encoder interface {
- ObjectEncoder
-
- // Clone copies the encoder, ensuring that adding fields to the copy doesn't
- // affect the original.
- Clone() Encoder
-
- // EncodeEntry encodes an entry and fields, along with any accumulated
- // context, into a byte buffer and returns it. Any fields that are empty,
- // including fields on the `Entry` type, should be omitted.
- EncodeEntry(Entry, []Field) (*buffer.Buffer, error)
-}
diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go
deleted file mode 100644
index 9d326e95ea..0000000000
--- a/vendor/go.uber.org/zap/zapcore/entry.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import (
- "fmt"
- "runtime"
- "strings"
- "sync"
- "time"
-
- "go.uber.org/multierr"
- "go.uber.org/zap/internal/bufferpool"
- "go.uber.org/zap/internal/exit"
-)
-
-var (
- _cePool = sync.Pool{New: func() interface{} {
- // Pre-allocate some space for cores.
- return &CheckedEntry{
- cores: make([]Core, 4),
- }
- }}
-)
-
-func getCheckedEntry() *CheckedEntry {
- ce := _cePool.Get().(*CheckedEntry)
- ce.reset()
- return ce
-}
-
-func putCheckedEntry(ce *CheckedEntry) {
- if ce == nil {
- return
- }
- _cePool.Put(ce)
-}
-
-// NewEntryCaller makes an EntryCaller from the return signature of
-// runtime.Caller.
-func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller {
- if !ok {
- return EntryCaller{}
- }
- return EntryCaller{
- PC: pc,
- File: file,
- Line: line,
- Defined: true,
- }
-}
-
-// EntryCaller represents the caller of a logging function.
-type EntryCaller struct {
- Defined bool
- PC uintptr
- File string
- Line int
- Function string
-}
-
-// String returns the full path and line number of the caller.
-func (ec EntryCaller) String() string {
- return ec.FullPath()
-}
-
-// FullPath returns a /full/path/to/package/file:line description of the
-// caller.
-func (ec EntryCaller) FullPath() string {
- if !ec.Defined {
- return "undefined"
- }
- buf := bufferpool.Get()
- buf.AppendString(ec.File)
- buf.AppendByte(':')
- buf.AppendInt(int64(ec.Line))
- caller := buf.String()
- buf.Free()
- return caller
-}
-
-// TrimmedPath returns a package/file:line description of the caller,
-// preserving only the leaf directory name and file name.
-func (ec EntryCaller) TrimmedPath() string {
- if !ec.Defined {
- return "undefined"
- }
- // nb. To make sure we trim the path correctly on Windows too, we
- // counter-intuitively need to use '/' and *not* os.PathSeparator here,
- // because the path given originates from Go stdlib, specifically
- // runtime.Caller() which (as of Mar/17) returns forward slashes even on
- // Windows.
- //
- // See https://github.com/golang/go/issues/3335
- // and https://github.com/golang/go/issues/18151
- //
- // for discussion on the issue on Go side.
- //
- // Find the last separator.
- //
- idx := strings.LastIndexByte(ec.File, '/')
- if idx == -1 {
- return ec.FullPath()
- }
- // Find the penultimate separator.
- idx = strings.LastIndexByte(ec.File[:idx], '/')
- if idx == -1 {
- return ec.FullPath()
- }
- buf := bufferpool.Get()
- // Keep everything after the penultimate separator.
- buf.AppendString(ec.File[idx+1:])
- buf.AppendByte(':')
- buf.AppendInt(int64(ec.Line))
- caller := buf.String()
- buf.Free()
- return caller
-}
-
-// An Entry represents a complete log message. The entry's structured context
-// is already serialized, but the log level, time, message, and call site
-// information are available for inspection and modification. Any fields left
-// empty will be omitted when encoding.
-//
-// Entries are pooled, so any functions that accept them MUST be careful not to
-// retain references to them.
-type Entry struct {
- Level Level
- Time time.Time
- LoggerName string
- Message string
- Caller EntryCaller
- Stack string
-}
-
-// CheckWriteHook is a custom action that may be executed after an entry is
-// written.
-//
-// Register one on a CheckedEntry with the After method.
-//
-// if ce := logger.Check(...); ce != nil {
-// ce = ce.After(hook)
-// ce.Write(...)
-// }
-//
-// You can configure the hook for Fatal log statements at the logger level with
-// the zap.WithFatalHook option.
-type CheckWriteHook interface {
- // OnWrite is invoked with the CheckedEntry that was written and a list
- // of fields added with that entry.
- //
- // The list of fields DOES NOT include fields that were already added
- // to the logger with the With method.
- OnWrite(*CheckedEntry, []Field)
-}
-
-// CheckWriteAction indicates what action to take after a log entry is
-// processed. Actions are ordered in increasing severity.
-type CheckWriteAction uint8
-
-const (
- // WriteThenNoop indicates that nothing special needs to be done. It's the
- // default behavior.
- WriteThenNoop CheckWriteAction = iota
- // WriteThenGoexit runs runtime.Goexit after Write.
- WriteThenGoexit
- // WriteThenPanic causes a panic after Write.
- WriteThenPanic
- // WriteThenFatal causes an os.Exit(1) after Write.
- WriteThenFatal
-)
-
-// OnWrite implements the OnWrite method to keep CheckWriteAction compatible
-// with the new CheckWriteHook interface which deprecates CheckWriteAction.
-func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) {
- switch a {
- case WriteThenGoexit:
- runtime.Goexit()
- case WriteThenPanic:
- panic(ce.Message)
- case WriteThenFatal:
- exit.With(1)
- }
-}
-
-var _ CheckWriteHook = CheckWriteAction(0)
-
-// CheckedEntry is an Entry together with a collection of Cores that have
-// already agreed to log it.
-//
-// CheckedEntry references should be created by calling AddCore or After on a
-// nil *CheckedEntry. References are returned to a pool after Write, and MUST
-// NOT be retained after calling their Write method.
-type CheckedEntry struct {
- Entry
- ErrorOutput WriteSyncer
- dirty bool // best-effort detection of pool misuse
- after CheckWriteHook
- cores []Core
-}
-
-func (ce *CheckedEntry) reset() {
- ce.Entry = Entry{}
- ce.ErrorOutput = nil
- ce.dirty = false
- ce.after = nil
- for i := range ce.cores {
- // don't keep references to cores
- ce.cores[i] = nil
- }
- ce.cores = ce.cores[:0]
-}
-
-// Write writes the entry to the stored Cores, returns any errors, and returns
-// the CheckedEntry reference to a pool for immediate re-use. Finally, it
-// executes any required CheckWriteAction.
-func (ce *CheckedEntry) Write(fields ...Field) {
- if ce == nil {
- return
- }
-
- if ce.dirty {
- if ce.ErrorOutput != nil {
- // Make a best effort to detect unsafe re-use of this CheckedEntry.
- // If the entry is dirty, log an internal error; because the
- // CheckedEntry is being used after it was returned to the pool,
- // the message may be an amalgamation from multiple call sites.
- fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
- ce.ErrorOutput.Sync()
- }
- return
- }
- ce.dirty = true
-
- var err error
- for i := range ce.cores {
- err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
- }
- if err != nil && ce.ErrorOutput != nil {
- fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
- ce.ErrorOutput.Sync()
- }
-
- hook := ce.after
- if hook != nil {
- hook.OnWrite(ce, fields)
- }
- putCheckedEntry(ce)
-}
-
-// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
-// used by Core.Check implementations, and is safe to call on nil CheckedEntry
-// references.
-func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
- if ce == nil {
- ce = getCheckedEntry()
- ce.Entry = ent
- }
- ce.cores = append(ce.cores, core)
- return ce
-}
-
-// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
-// Core will panic or fatal after writing this log entry. Like AddCore, it's
-// safe to call on nil CheckedEntry references.
-//
-// Deprecated: Use [CheckedEntry.After] instead.
-func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
- return ce.After(ent, should)
-}
-
-// After sets this CheckEntry's CheckWriteHook, which will be called after this
-// log entry has been written. It's safe to call this on nil CheckedEntry
-// references.
-func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry {
- if ce == nil {
- ce = getCheckedEntry()
- ce.Entry = ent
- }
- ce.after = hook
- return ce
-}
diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go
deleted file mode 100644
index 06359907af..0000000000
--- a/vendor/go.uber.org/zap/zapcore/error.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import (
- "fmt"
- "reflect"
- "sync"
-)
-
-// Encodes the given error into fields of an object. A field with the given
-// name is added for the error message.
-//
-// If the error implements fmt.Formatter, a field with the name ${key}Verbose
-// is also added with the full verbose error message.
-//
-// Finally, if the error implements errorGroup (from go.uber.org/multierr) or
-// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
-// array of objects containing the errors this error was comprised of.
-//
-// {
-// "error": err.Error(),
-// "errorVerbose": fmt.Sprintf("%+v", err),
-// "errorCauses": [
-// ...
-// ],
-// }
-func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
- // Try to capture panics (from nil references or otherwise) when calling
- // the Error() method
- defer func() {
- if rerr := recover(); rerr != nil {
- // If it's a nil pointer, just say "". The likeliest causes are a
- // error that fails to guard against nil or a nil pointer for a
- // value receiver, and in either case, "" is a nice result.
- if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
- enc.AddString(key, "")
- return
- }
-
- retErr = fmt.Errorf("PANIC=%v", rerr)
- }
- }()
-
- basic := err.Error()
- enc.AddString(key, basic)
-
- switch e := err.(type) {
- case errorGroup:
- return enc.AddArray(key+"Causes", errArray(e.Errors()))
- case fmt.Formatter:
- verbose := fmt.Sprintf("%+v", e)
- if verbose != basic {
- // This is a rich error type, like those produced by
- // github.com/pkg/errors.
- enc.AddString(key+"Verbose", verbose)
- }
- }
- return nil
-}
-
-type errorGroup interface {
- // Provides read-only access to the underlying list of errors, preferably
- // without causing any allocs.
- Errors() []error
-}
-
-// Note that errArray and errArrayElem are very similar to the version
-// implemented in the top-level error.go file. We can't re-use this because
-// that would require exporting errArray as part of the zapcore API.
-
-// Encodes a list of errors using the standard error encoding logic.
-type errArray []error
-
-func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
- for i := range errs {
- if errs[i] == nil {
- continue
- }
-
- el := newErrArrayElem(errs[i])
- arr.AppendObject(el)
- el.Free()
- }
- return nil
-}
-
-var _errArrayElemPool = sync.Pool{New: func() interface{} {
- return &errArrayElem{}
-}}
-
-// Encodes any error into a {"error": ...} re-using the same errors logic.
-//
-// May be passed in place of an array to build a single-element array.
-type errArrayElem struct{ err error }
-
-func newErrArrayElem(err error) *errArrayElem {
- e := _errArrayElemPool.Get().(*errArrayElem)
- e.err = err
- return e
-}
-
-func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error {
- return arr.AppendObject(e)
-}
-
-func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error {
- return encodeError("error", e.err, enc)
-}
-
-func (e *errArrayElem) Free() {
- e.err = nil
- _errArrayElemPool.Put(e)
-}
diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go
deleted file mode 100644
index 95bdb0a126..0000000000
--- a/vendor/go.uber.org/zap/zapcore/field.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import (
- "bytes"
- "fmt"
- "math"
- "reflect"
- "time"
-)
-
-// A FieldType indicates which member of the Field union struct should be used
-// and how it should be serialized.
-type FieldType uint8
-
-const (
- // UnknownType is the default field type. Attempting to add it to an encoder will panic.
- UnknownType FieldType = iota
- // ArrayMarshalerType indicates that the field carries an ArrayMarshaler.
- ArrayMarshalerType
- // ObjectMarshalerType indicates that the field carries an ObjectMarshaler.
- ObjectMarshalerType
- // BinaryType indicates that the field carries an opaque binary blob.
- BinaryType
- // BoolType indicates that the field carries a bool.
- BoolType
- // ByteStringType indicates that the field carries UTF-8 encoded bytes.
- ByteStringType
- // Complex128Type indicates that the field carries a complex128.
- Complex128Type
- // Complex64Type indicates that the field carries a complex128.
- Complex64Type
- // DurationType indicates that the field carries a time.Duration.
- DurationType
- // Float64Type indicates that the field carries a float64.
- Float64Type
- // Float32Type indicates that the field carries a float32.
- Float32Type
- // Int64Type indicates that the field carries an int64.
- Int64Type
- // Int32Type indicates that the field carries an int32.
- Int32Type
- // Int16Type indicates that the field carries an int16.
- Int16Type
- // Int8Type indicates that the field carries an int8.
- Int8Type
- // StringType indicates that the field carries a string.
- StringType
- // TimeType indicates that the field carries a time.Time that is
- // representable by a UnixNano() stored as an int64.
- TimeType
- // TimeFullType indicates that the field carries a time.Time stored as-is.
- TimeFullType
- // Uint64Type indicates that the field carries a uint64.
- Uint64Type
- // Uint32Type indicates that the field carries a uint32.
- Uint32Type
- // Uint16Type indicates that the field carries a uint16.
- Uint16Type
- // Uint8Type indicates that the field carries a uint8.
- Uint8Type
- // UintptrType indicates that the field carries a uintptr.
- UintptrType
- // ReflectType indicates that the field carries an interface{}, which should
- // be serialized using reflection.
- ReflectType
- // NamespaceType signals the beginning of an isolated namespace. All
- // subsequent fields should be added to the new namespace.
- NamespaceType
- // StringerType indicates that the field carries a fmt.Stringer.
- StringerType
- // ErrorType indicates that the field carries an error.
- ErrorType
- // SkipType indicates that the field is a no-op.
- SkipType
-
- // InlineMarshalerType indicates that the field carries an ObjectMarshaler
- // that should be inlined.
- InlineMarshalerType
-)
-
-// A Field is a marshaling operation used to add a key-value pair to a logger's
-// context. Most fields are lazily marshaled, so it's inexpensive to add fields
-// to disabled debug-level log statements.
-type Field struct {
- Key string
- Type FieldType
- Integer int64
- String string
- Interface interface{}
-}
-
-// AddTo exports a field through the ObjectEncoder interface. It's primarily
-// useful to library authors, and shouldn't be necessary in most applications.
-func (f Field) AddTo(enc ObjectEncoder) {
- var err error
-
- switch f.Type {
- case ArrayMarshalerType:
- err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler))
- case ObjectMarshalerType:
- err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler))
- case InlineMarshalerType:
- err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc)
- case BinaryType:
- enc.AddBinary(f.Key, f.Interface.([]byte))
- case BoolType:
- enc.AddBool(f.Key, f.Integer == 1)
- case ByteStringType:
- enc.AddByteString(f.Key, f.Interface.([]byte))
- case Complex128Type:
- enc.AddComplex128(f.Key, f.Interface.(complex128))
- case Complex64Type:
- enc.AddComplex64(f.Key, f.Interface.(complex64))
- case DurationType:
- enc.AddDuration(f.Key, time.Duration(f.Integer))
- case Float64Type:
- enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer)))
- case Float32Type:
- enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer)))
- case Int64Type:
- enc.AddInt64(f.Key, f.Integer)
- case Int32Type:
- enc.AddInt32(f.Key, int32(f.Integer))
- case Int16Type:
- enc.AddInt16(f.Key, int16(f.Integer))
- case Int8Type:
- enc.AddInt8(f.Key, int8(f.Integer))
- case StringType:
- enc.AddString(f.Key, f.String)
- case TimeType:
- if f.Interface != nil {
- enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location)))
- } else {
- // Fall back to UTC if location is nil.
- enc.AddTime(f.Key, time.Unix(0, f.Integer))
- }
- case TimeFullType:
- enc.AddTime(f.Key, f.Interface.(time.Time))
- case Uint64Type:
- enc.AddUint64(f.Key, uint64(f.Integer))
- case Uint32Type:
- enc.AddUint32(f.Key, uint32(f.Integer))
- case Uint16Type:
- enc.AddUint16(f.Key, uint16(f.Integer))
- case Uint8Type:
- enc.AddUint8(f.Key, uint8(f.Integer))
- case UintptrType:
- enc.AddUintptr(f.Key, uintptr(f.Integer))
- case ReflectType:
- err = enc.AddReflected(f.Key, f.Interface)
- case NamespaceType:
- enc.OpenNamespace(f.Key)
- case StringerType:
- err = encodeStringer(f.Key, f.Interface, enc)
- case ErrorType:
- err = encodeError(f.Key, f.Interface.(error), enc)
- case SkipType:
- break
- default:
- panic(fmt.Sprintf("unknown field type: %v", f))
- }
-
- if err != nil {
- enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error())
- }
-}
-
-// Equals returns whether two fields are equal. For non-primitive types such as
-// errors, marshalers, or reflect types, it uses reflect.DeepEqual.
-func (f Field) Equals(other Field) bool {
- if f.Type != other.Type {
- return false
- }
- if f.Key != other.Key {
- return false
- }
-
- switch f.Type {
- case BinaryType, ByteStringType:
- return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte))
- case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType:
- return reflect.DeepEqual(f.Interface, other.Interface)
- default:
- return f == other
- }
-}
-
-func addFields(enc ObjectEncoder, fields []Field) {
- for i := range fields {
- fields[i].AddTo(enc)
- }
-}
-
-func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) {
- // Try to capture panics (from nil references or otherwise) when calling
- // the String() method, similar to https://golang.org/src/fmt/print.go#L540
- defer func() {
- if err := recover(); err != nil {
- // If it's a nil pointer, just say "". The likeliest causes are a
- // Stringer that fails to guard against nil or a nil pointer for a
- // value receiver, and in either case, "" is a nice result.
- if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() {
- enc.AddString(key, "")
- return
- }
-
- retErr = fmt.Errorf("PANIC=%v", err)
- }
- }()
-
- enc.AddString(key, stringer.(fmt.Stringer).String())
- return nil
-}
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
deleted file mode 100644
index 198def9917..0000000000
--- a/vendor/go.uber.org/zap/zapcore/hook.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import "go.uber.org/multierr"
-
-type hooked struct {
- Core
- funcs []func(Entry) error
-}
-
-var (
- _ Core = (*hooked)(nil)
- _ leveledEnabler = (*hooked)(nil)
-)
-
-// RegisterHooks wraps a Core and runs a collection of user-defined callback
-// hooks each time a message is logged. Execution of the callbacks is blocking.
-//
-// This offers users an easy way to register simple callbacks (e.g., metrics
-// collection) without implementing the full Core interface.
-func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
- funcs := append([]func(Entry) error{}, hooks...)
- return &hooked{
- Core: core,
- funcs: funcs,
- }
-}
-
-func (h *hooked) Level() Level {
- return LevelOf(h.Core)
-}
-
-func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
- // Let the wrapped Core decide whether to log this message or not. This
- // also gives the downstream a chance to register itself directly with the
- // CheckedEntry.
- if downstream := h.Core.Check(ent, ce); downstream != nil {
- return downstream.AddCore(ent, h)
- }
- return ce
-}
-
-func (h *hooked) With(fields []Field) Core {
- return &hooked{
- Core: h.Core.With(fields),
- funcs: h.funcs,
- }
-}
-
-func (h *hooked) Write(ent Entry, _ []Field) error {
- // Since our downstream had a chance to register itself directly with the
- // CheckedMessage, we don't need to call it here.
- var err error
- for i := range h.funcs {
- err = multierr.Append(err, h.funcs[i](ent))
- }
- return err
-}
diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go
deleted file mode 100644
index 7a11237ae9..0000000000
--- a/vendor/go.uber.org/zap/zapcore/increase_level.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import "fmt"
-
-type levelFilterCore struct {
- core Core
- level LevelEnabler
-}
-
-var (
- _ Core = (*levelFilterCore)(nil)
- _ leveledEnabler = (*levelFilterCore)(nil)
-)
-
-// NewIncreaseLevelCore creates a core that can be used to increase the level of
-// an existing Core. It cannot be used to decrease the logging level, as it acts
-// as a filter before calling the underlying core. If level decreases the log level,
-// an error is returned.
-func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) {
- for l := _maxLevel; l >= _minLevel; l-- {
- if !core.Enabled(l) && level.Enabled(l) {
- return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l)
- }
- }
-
- return &levelFilterCore{core, level}, nil
-}
-
-func (c *levelFilterCore) Enabled(lvl Level) bool {
- return c.level.Enabled(lvl)
-}
-
-func (c *levelFilterCore) Level() Level {
- return LevelOf(c.level)
-}
-
-func (c *levelFilterCore) With(fields []Field) Core {
- return &levelFilterCore{c.core.With(fields), c.level}
-}
-
-func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
- if !c.Enabled(ent.Level) {
- return ce
- }
-
- return c.core.Check(ent, ce)
-}
-
-func (c *levelFilterCore) Write(ent Entry, fields []Field) error {
- return c.core.Write(ent, fields)
-}
-
-func (c *levelFilterCore) Sync() error {
- return c.core.Sync()
-}
diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go
deleted file mode 100644
index 3921c5cd33..0000000000
--- a/vendor/go.uber.org/zap/zapcore/json_encoder.go
+++ /dev/null
@@ -1,562 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import (
- "encoding/base64"
- "math"
- "sync"
- "time"
- "unicode/utf8"
-
- "go.uber.org/zap/buffer"
- "go.uber.org/zap/internal/bufferpool"
-)
-
-// For JSON-escaping; see jsonEncoder.safeAddString below.
-const _hex = "0123456789abcdef"
-
-var _jsonPool = sync.Pool{New: func() interface{} {
- return &jsonEncoder{}
-}}
-
-func getJSONEncoder() *jsonEncoder {
- return _jsonPool.Get().(*jsonEncoder)
-}
-
-func putJSONEncoder(enc *jsonEncoder) {
- if enc.reflectBuf != nil {
- enc.reflectBuf.Free()
- }
- enc.EncoderConfig = nil
- enc.buf = nil
- enc.spaced = false
- enc.openNamespaces = 0
- enc.reflectBuf = nil
- enc.reflectEnc = nil
- _jsonPool.Put(enc)
-}
-
-type jsonEncoder struct {
- *EncoderConfig
- buf *buffer.Buffer
- spaced bool // include spaces after colons and commas
- openNamespaces int
-
- // for encoding generic values by reflection
- reflectBuf *buffer.Buffer
- reflectEnc ReflectedEncoder
-}
-
-// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
-// appropriately escapes all field keys and values.
-//
-// Note that the encoder doesn't deduplicate keys, so it's possible to produce
-// a message like
-//
-// {"foo":"bar","foo":"baz"}
-//
-// This is permitted by the JSON specification, but not encouraged. Many
-// libraries will ignore duplicate key-value pairs (typically keeping the last
-// pair) when unmarshaling, but users should attempt to avoid adding duplicate
-// keys.
-func NewJSONEncoder(cfg EncoderConfig) Encoder {
- return newJSONEncoder(cfg, false)
-}
-
-func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
- if cfg.SkipLineEnding {
- cfg.LineEnding = ""
- } else if cfg.LineEnding == "" {
- cfg.LineEnding = DefaultLineEnding
- }
-
- // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default
- if cfg.NewReflectedEncoder == nil {
- cfg.NewReflectedEncoder = defaultReflectedEncoder
- }
-
- return &jsonEncoder{
- EncoderConfig: &cfg,
- buf: bufferpool.Get(),
- spaced: spaced,
- }
-}
-
-func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error {
- enc.addKey(key)
- return enc.AppendArray(arr)
-}
-
-func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error {
- enc.addKey(key)
- return enc.AppendObject(obj)
-}
-
-func (enc *jsonEncoder) AddBinary(key string, val []byte) {
- enc.AddString(key, base64.StdEncoding.EncodeToString(val))
-}
-
-func (enc *jsonEncoder) AddByteString(key string, val []byte) {
- enc.addKey(key)
- enc.AppendByteString(val)
-}
-
-func (enc *jsonEncoder) AddBool(key string, val bool) {
- enc.addKey(key)
- enc.AppendBool(val)
-}
-
-func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
- enc.addKey(key)
- enc.AppendComplex128(val)
-}
-
-func (enc *jsonEncoder) AddComplex64(key string, val complex64) {
- enc.addKey(key)
- enc.AppendComplex64(val)
-}
-
-func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
- enc.addKey(key)
- enc.AppendDuration(val)
-}
-
-func (enc *jsonEncoder) AddFloat64(key string, val float64) {
- enc.addKey(key)
- enc.AppendFloat64(val)
-}
-
-func (enc *jsonEncoder) AddFloat32(key string, val float32) {
- enc.addKey(key)
- enc.AppendFloat32(val)
-}
-
-func (enc *jsonEncoder) AddInt64(key string, val int64) {
- enc.addKey(key)
- enc.AppendInt64(val)
-}
-
-func (enc *jsonEncoder) resetReflectBuf() {
- if enc.reflectBuf == nil {
- enc.reflectBuf = bufferpool.Get()
- enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf)
- } else {
- enc.reflectBuf.Reset()
- }
-}
-
-var nullLiteralBytes = []byte("null")
-
-// Only invoke the standard JSON encoder if there is actually something to
-// encode; otherwise write JSON null literal directly.
-func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) {
- if obj == nil {
- return nullLiteralBytes, nil
- }
- enc.resetReflectBuf()
- if err := enc.reflectEnc.Encode(obj); err != nil {
- return nil, err
- }
- enc.reflectBuf.TrimNewline()
- return enc.reflectBuf.Bytes(), nil
-}
-
-func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error {
- valueBytes, err := enc.encodeReflected(obj)
- if err != nil {
- return err
- }
- enc.addKey(key)
- _, err = enc.buf.Write(valueBytes)
- return err
-}
-
-func (enc *jsonEncoder) OpenNamespace(key string) {
- enc.addKey(key)
- enc.buf.AppendByte('{')
- enc.openNamespaces++
-}
-
-func (enc *jsonEncoder) AddString(key, val string) {
- enc.addKey(key)
- enc.AppendString(val)
-}
-
-func (enc *jsonEncoder) AddTime(key string, val time.Time) {
- enc.addKey(key)
- enc.AppendTime(val)
-}
-
-func (enc *jsonEncoder) AddUint64(key string, val uint64) {
- enc.addKey(key)
- enc.AppendUint64(val)
-}
-
-func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
- enc.addElementSeparator()
- enc.buf.AppendByte('[')
- err := arr.MarshalLogArray(enc)
- enc.buf.AppendByte(']')
- return err
-}
-
-func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
- // Close ONLY new openNamespaces that are created during
- // AppendObject().
- old := enc.openNamespaces
- enc.openNamespaces = 0
- enc.addElementSeparator()
- enc.buf.AppendByte('{')
- err := obj.MarshalLogObject(enc)
- enc.buf.AppendByte('}')
- enc.closeOpenNamespaces()
- enc.openNamespaces = old
- return err
-}
-
-func (enc *jsonEncoder) AppendBool(val bool) {
- enc.addElementSeparator()
- enc.buf.AppendBool(val)
-}
-
-func (enc *jsonEncoder) AppendByteString(val []byte) {
- enc.addElementSeparator()
- enc.buf.AppendByte('"')
- enc.safeAddByteString(val)
- enc.buf.AppendByte('"')
-}
-
-// appendComplex appends the encoded form of the provided complex128 value.
-// precision specifies the encoding precision for the real and imaginary
-// components of the complex number.
-func (enc *jsonEncoder) appendComplex(val complex128, precision int) {
- enc.addElementSeparator()
- // Cast to a platform-independent, fixed-size type.
- r, i := float64(real(val)), float64(imag(val))
- enc.buf.AppendByte('"')
- // Because we're always in a quoted string, we can use strconv without
- // special-casing NaN and +/-Inf.
- enc.buf.AppendFloat(r, precision)
- // If imaginary part is less than 0, minus (-) sign is added by default
- // by AppendFloat.
- if i >= 0 {
- enc.buf.AppendByte('+')
- }
- enc.buf.AppendFloat(i, precision)
- enc.buf.AppendByte('i')
- enc.buf.AppendByte('"')
-}
-
-func (enc *jsonEncoder) AppendDuration(val time.Duration) {
- cur := enc.buf.Len()
- if e := enc.EncodeDuration; e != nil {
- e(val, enc)
- }
- if cur == enc.buf.Len() {
- // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep
- // JSON valid.
- enc.AppendInt64(int64(val))
- }
-}
-
-func (enc *jsonEncoder) AppendInt64(val int64) {
- enc.addElementSeparator()
- enc.buf.AppendInt(val)
-}
-
-func (enc *jsonEncoder) AppendReflected(val interface{}) error {
- valueBytes, err := enc.encodeReflected(val)
- if err != nil {
- return err
- }
- enc.addElementSeparator()
- _, err = enc.buf.Write(valueBytes)
- return err
-}
-
-func (enc *jsonEncoder) AppendString(val string) {
- enc.addElementSeparator()
- enc.buf.AppendByte('"')
- enc.safeAddString(val)
- enc.buf.AppendByte('"')
-}
-
-func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) {
- enc.addElementSeparator()
- enc.buf.AppendByte('"')
- enc.buf.AppendTime(time, layout)
- enc.buf.AppendByte('"')
-}
-
-func (enc *jsonEncoder) AppendTime(val time.Time) {
- cur := enc.buf.Len()
- if e := enc.EncodeTime; e != nil {
- e(val, enc)
- }
- if cur == enc.buf.Len() {
- // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep
- // output JSON valid.
- enc.AppendInt64(val.UnixNano())
- }
-}
-
-func (enc *jsonEncoder) AppendUint64(val uint64) {
- enc.addElementSeparator()
- enc.buf.AppendUint(val)
-}
-
-func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) }
-func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) }
-func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
-func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
-func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
-
-func (enc *jsonEncoder) Clone() Encoder {
- clone := enc.clone()
- clone.buf.Write(enc.buf.Bytes())
- return clone
-}
-
-func (enc *jsonEncoder) clone() *jsonEncoder {
- clone := getJSONEncoder()
- clone.EncoderConfig = enc.EncoderConfig
- clone.spaced = enc.spaced
- clone.openNamespaces = enc.openNamespaces
- clone.buf = bufferpool.Get()
- return clone
-}
-
-func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
- final := enc.clone()
- final.buf.AppendByte('{')
-
- if final.LevelKey != "" && final.EncodeLevel != nil {
- final.addKey(final.LevelKey)
- cur := final.buf.Len()
- final.EncodeLevel(ent.Level, final)
- if cur == final.buf.Len() {
- // User-supplied EncodeLevel was a no-op. Fall back to strings to keep
- // output JSON valid.
- final.AppendString(ent.Level.String())
- }
- }
- if final.TimeKey != "" {
- final.AddTime(final.TimeKey, ent.Time)
- }
- if ent.LoggerName != "" && final.NameKey != "" {
- final.addKey(final.NameKey)
- cur := final.buf.Len()
- nameEncoder := final.EncodeName
-
- // if no name encoder provided, fall back to FullNameEncoder for backwards
- // compatibility
- if nameEncoder == nil {
- nameEncoder = FullNameEncoder
- }
-
- nameEncoder(ent.LoggerName, final)
- if cur == final.buf.Len() {
- // User-supplied EncodeName was a no-op. Fall back to strings to
- // keep output JSON valid.
- final.AppendString(ent.LoggerName)
- }
- }
- if ent.Caller.Defined {
- if final.CallerKey != "" {
- final.addKey(final.CallerKey)
- cur := final.buf.Len()
- final.EncodeCaller(ent.Caller, final)
- if cur == final.buf.Len() {
- // User-supplied EncodeCaller was a no-op. Fall back to strings to
- // keep output JSON valid.
- final.AppendString(ent.Caller.String())
- }
- }
- if final.FunctionKey != "" {
- final.addKey(final.FunctionKey)
- final.AppendString(ent.Caller.Function)
- }
- }
- if final.MessageKey != "" {
- final.addKey(enc.MessageKey)
- final.AppendString(ent.Message)
- }
- if enc.buf.Len() > 0 {
- final.addElementSeparator()
- final.buf.Write(enc.buf.Bytes())
- }
- addFields(final, fields)
- final.closeOpenNamespaces()
- if ent.Stack != "" && final.StacktraceKey != "" {
- final.AddString(final.StacktraceKey, ent.Stack)
- }
- final.buf.AppendByte('}')
- final.buf.AppendString(final.LineEnding)
-
- ret := final.buf
- putJSONEncoder(final)
- return ret, nil
-}
-
-func (enc *jsonEncoder) truncate() {
- enc.buf.Reset()
-}
-
-func (enc *jsonEncoder) closeOpenNamespaces() {
- for i := 0; i < enc.openNamespaces; i++ {
- enc.buf.AppendByte('}')
- }
- enc.openNamespaces = 0
-}
-
-func (enc *jsonEncoder) addKey(key string) {
- enc.addElementSeparator()
- enc.buf.AppendByte('"')
- enc.safeAddString(key)
- enc.buf.AppendByte('"')
- enc.buf.AppendByte(':')
- if enc.spaced {
- enc.buf.AppendByte(' ')
- }
-}
-
-func (enc *jsonEncoder) addElementSeparator() {
- last := enc.buf.Len() - 1
- if last < 0 {
- return
- }
- switch enc.buf.Bytes()[last] {
- case '{', '[', ':', ',', ' ':
- return
- default:
- enc.buf.AppendByte(',')
- if enc.spaced {
- enc.buf.AppendByte(' ')
- }
- }
-}
-
-func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
- enc.addElementSeparator()
- switch {
- case math.IsNaN(val):
- enc.buf.AppendString(`"NaN"`)
- case math.IsInf(val, 1):
- enc.buf.AppendString(`"+Inf"`)
- case math.IsInf(val, -1):
- enc.buf.AppendString(`"-Inf"`)
- default:
- enc.buf.AppendFloat(val, bitSize)
- }
-}
-
-// safeAddString JSON-escapes a string and appends it to the internal buffer.
-// Unlike the standard library's encoder, it doesn't attempt to protect the
-// user from browser vulnerabilities or JSONP-related problems.
-func (enc *jsonEncoder) safeAddString(s string) {
- for i := 0; i < len(s); {
- if enc.tryAddRuneSelf(s[i]) {
- i++
- continue
- }
- r, size := utf8.DecodeRuneInString(s[i:])
- if enc.tryAddRuneError(r, size) {
- i++
- continue
- }
- enc.buf.AppendString(s[i : i+size])
- i += size
- }
-}
-
-// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
-func (enc *jsonEncoder) safeAddByteString(s []byte) {
- for i := 0; i < len(s); {
- if enc.tryAddRuneSelf(s[i]) {
- i++
- continue
- }
- r, size := utf8.DecodeRune(s[i:])
- if enc.tryAddRuneError(r, size) {
- i++
- continue
- }
- enc.buf.Write(s[i : i+size])
- i += size
- }
-}
-
-// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte.
-func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
- if b >= utf8.RuneSelf {
- return false
- }
- if 0x20 <= b && b != '\\' && b != '"' {
- enc.buf.AppendByte(b)
- return true
- }
- switch b {
- case '\\', '"':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte(b)
- case '\n':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('n')
- case '\r':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('r')
- case '\t':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('t')
- default:
- // Encode bytes < 0x20, except for the escape sequences above.
- enc.buf.AppendString(`\u00`)
- enc.buf.AppendByte(_hex[b>>4])
- enc.buf.AppendByte(_hex[b&0xF])
- }
- return true
-}
-
-func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
- if r == utf8.RuneError && size == 1 {
- enc.buf.AppendString(`\ufffd`)
- return true
- }
- return false
-}
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
deleted file mode 100644
index e01a241316..0000000000
--- a/vendor/go.uber.org/zap/zapcore/level.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import (
- "bytes"
- "errors"
- "fmt"
-)
-
-var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level")
-
-// A Level is a logging priority. Higher levels are more important.
-type Level int8
-
-const (
- // DebugLevel logs are typically voluminous, and are usually disabled in
- // production.
- DebugLevel Level = iota - 1
- // InfoLevel is the default logging priority.
- InfoLevel
- // WarnLevel logs are more important than Info, but don't need individual
- // human review.
- WarnLevel
- // ErrorLevel logs are high-priority. If an application is running smoothly,
- // it shouldn't generate any error-level logs.
- ErrorLevel
- // DPanicLevel logs are particularly important errors. In development the
- // logger panics after writing the message.
- DPanicLevel
- // PanicLevel logs a message, then panics.
- PanicLevel
- // FatalLevel logs a message, then calls os.Exit(1).
- FatalLevel
-
- _minLevel = DebugLevel
- _maxLevel = FatalLevel
-
- // InvalidLevel is an invalid value for Level.
- //
- // Core implementations may panic if they see messages of this level.
- InvalidLevel = _maxLevel + 1
-)
-
-// ParseLevel parses a level based on the lower-case or all-caps ASCII
-// representation of the log level. If the provided ASCII representation is
-// invalid an error is returned.
-//
-// This is particularly useful when dealing with text input to configure log
-// levels.
-func ParseLevel(text string) (Level, error) {
- var level Level
- err := level.UnmarshalText([]byte(text))
- return level, err
-}
-
-type leveledEnabler interface {
- LevelEnabler
-
- Level() Level
-}
-
-// LevelOf reports the minimum enabled log level for the given LevelEnabler
-// from Zap's supported log levels, or [InvalidLevel] if none of them are
-// enabled.
-//
-// A LevelEnabler may implement a 'Level() Level' method to override the
-// behavior of this function.
-//
-// func (c *core) Level() Level {
-// return c.currentLevel
-// }
-//
-// It is recommended that [Core] implementations that wrap other cores use
-// LevelOf to retrieve the level of the wrapped core. For example,
-//
-// func (c *coreWrapper) Level() Level {
-// return zapcore.LevelOf(c.wrappedCore)
-// }
-func LevelOf(enab LevelEnabler) Level {
- if lvler, ok := enab.(leveledEnabler); ok {
- return lvler.Level()
- }
-
- for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
- if enab.Enabled(lvl) {
- return lvl
- }
- }
-
- return InvalidLevel
-}
-
-// String returns a lower-case ASCII representation of the log level.
-func (l Level) String() string {
- switch l {
- case DebugLevel:
- return "debug"
- case InfoLevel:
- return "info"
- case WarnLevel:
- return "warn"
- case ErrorLevel:
- return "error"
- case DPanicLevel:
- return "dpanic"
- case PanicLevel:
- return "panic"
- case FatalLevel:
- return "fatal"
- default:
- return fmt.Sprintf("Level(%d)", l)
- }
-}
-
-// CapitalString returns an all-caps ASCII representation of the log level.
-func (l Level) CapitalString() string {
- // Printing levels in all-caps is common enough that we should export this
- // functionality.
- switch l {
- case DebugLevel:
- return "DEBUG"
- case InfoLevel:
- return "INFO"
- case WarnLevel:
- return "WARN"
- case ErrorLevel:
- return "ERROR"
- case DPanicLevel:
- return "DPANIC"
- case PanicLevel:
- return "PANIC"
- case FatalLevel:
- return "FATAL"
- default:
- return fmt.Sprintf("LEVEL(%d)", l)
- }
-}
-
-// MarshalText marshals the Level to text. Note that the text representation
-// drops the -Level suffix (see example).
-func (l Level) MarshalText() ([]byte, error) {
- return []byte(l.String()), nil
-}
-
-// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText
-// expects the text representation of a Level to drop the -Level suffix (see
-// example).
-//
-// In particular, this makes it easy to configure logging levels using YAML,
-// TOML, or JSON files.
-func (l *Level) UnmarshalText(text []byte) error {
- if l == nil {
- return errUnmarshalNilLevel
- }
- if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) {
- return fmt.Errorf("unrecognized level: %q", text)
- }
- return nil
-}
-
-func (l *Level) unmarshalText(text []byte) bool {
- switch string(text) {
- case "debug", "DEBUG":
- *l = DebugLevel
- case "info", "INFO", "": // make the zero value useful
- *l = InfoLevel
- case "warn", "WARN":
- *l = WarnLevel
- case "error", "ERROR":
- *l = ErrorLevel
- case "dpanic", "DPANIC":
- *l = DPanicLevel
- case "panic", "PANIC":
- *l = PanicLevel
- case "fatal", "FATAL":
- *l = FatalLevel
- default:
- return false
- }
- return true
-}
-
-// Set sets the level for the flag.Value interface.
-func (l *Level) Set(s string) error {
- return l.UnmarshalText([]byte(s))
-}
-
-// Get gets the level for the flag.Getter interface.
-func (l *Level) Get() interface{} {
- return *l
-}
-
-// Enabled returns true if the given level is at or above this level.
-func (l Level) Enabled(lvl Level) bool {
- return lvl >= l
-}
-
-// LevelEnabler decides whether a given logging level is enabled when logging a
-// message.
-//
-// Enablers are intended to be used to implement deterministic filters;
-// concerns like sampling are better implemented as a Core.
-//
-// Each concrete Level value implements a static LevelEnabler which returns
-// true for itself and all higher logging levels. For example WarnLevel.Enabled()
-// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and
-// FatalLevel, but return false for InfoLevel and DebugLevel.
-type LevelEnabler interface {
- Enabled(Level) bool
-}
diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go
deleted file mode 100644
index 7af8dadcb3..0000000000
--- a/vendor/go.uber.org/zap/zapcore/level_strings.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import "go.uber.org/zap/internal/color"
-
-var (
- _levelToColor = map[Level]color.Color{
- DebugLevel: color.Magenta,
- InfoLevel: color.Blue,
- WarnLevel: color.Yellow,
- ErrorLevel: color.Red,
- DPanicLevel: color.Red,
- PanicLevel: color.Red,
- FatalLevel: color.Red,
- }
- _unknownLevelColor = color.Red
-
- _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor))
- _levelToCapitalColorString = make(map[Level]string, len(_levelToColor))
-)
-
-func init() {
- for level, color := range _levelToColor {
- _levelToLowercaseColorString[level] = color.Add(level.String())
- _levelToCapitalColorString[level] = color.Add(level.CapitalString())
- }
-}
diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go
deleted file mode 100644
index c3c55ba0d9..0000000000
--- a/vendor/go.uber.org/zap/zapcore/marshaler.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-// ObjectMarshaler allows user-defined types to efficiently add themselves to the
-// logging context, and to selectively omit information which shouldn't be
-// included in logs (e.g., passwords).
-//
-// Note: ObjectMarshaler is only used when zap.Object is used or when
-// passed directly to zap.Any. It is not used when reflection-based
-// encoding is used.
-type ObjectMarshaler interface {
- MarshalLogObject(ObjectEncoder) error
-}
-
-// ObjectMarshalerFunc is a type adapter that turns a function into an
-// ObjectMarshaler.
-type ObjectMarshalerFunc func(ObjectEncoder) error
-
-// MarshalLogObject calls the underlying function.
-func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error {
- return f(enc)
-}
-
-// ArrayMarshaler allows user-defined types to efficiently add themselves to the
-// logging context, and to selectively omit information which shouldn't be
-// included in logs (e.g., passwords).
-//
-// Note: ArrayMarshaler is only used when zap.Array is used or when
-// passed directly to zap.Any. It is not used when reflection-based
-// encoding is used.
-type ArrayMarshaler interface {
- MarshalLogArray(ArrayEncoder) error
-}
-
-// ArrayMarshalerFunc is a type adapter that turns a function into an
-// ArrayMarshaler.
-type ArrayMarshalerFunc func(ArrayEncoder) error
-
-// MarshalLogArray calls the underlying function.
-func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error {
- return f(enc)
-}
diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go
deleted file mode 100644
index dfead0829d..0000000000
--- a/vendor/go.uber.org/zap/zapcore/memory_encoder.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import "time"
-
-// MapObjectEncoder is an ObjectEncoder backed by a simple
-// map[string]interface{}. It's not fast enough for production use, but it's
-// helpful in tests.
-type MapObjectEncoder struct {
- // Fields contains the entire encoded log context.
- Fields map[string]interface{}
- // cur is a pointer to the namespace we're currently writing to.
- cur map[string]interface{}
-}
-
-// NewMapObjectEncoder creates a new map-backed ObjectEncoder.
-func NewMapObjectEncoder() *MapObjectEncoder {
- m := make(map[string]interface{})
- return &MapObjectEncoder{
- Fields: m,
- cur: m,
- }
-}
-
-// AddArray implements ObjectEncoder.
-func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error {
- arr := &sliceArrayEncoder{elems: make([]interface{}, 0)}
- err := v.MarshalLogArray(arr)
- m.cur[key] = arr.elems
- return err
-}
-
-// AddObject implements ObjectEncoder.
-func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error {
- newMap := NewMapObjectEncoder()
- m.cur[k] = newMap.Fields
- return v.MarshalLogObject(newMap)
-}
-
-// AddBinary implements ObjectEncoder.
-func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v }
-
-// AddByteString implements ObjectEncoder.
-func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) }
-
-// AddBool implements ObjectEncoder.
-func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v }
-
-// AddDuration implements ObjectEncoder.
-func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v }
-
-// AddComplex128 implements ObjectEncoder.
-func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v }
-
-// AddComplex64 implements ObjectEncoder.
-func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v }
-
-// AddFloat64 implements ObjectEncoder.
-func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v }
-
-// AddFloat32 implements ObjectEncoder.
-func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v }
-
-// AddInt implements ObjectEncoder.
-func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v }
-
-// AddInt64 implements ObjectEncoder.
-func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v }
-
-// AddInt32 implements ObjectEncoder.
-func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v }
-
-// AddInt16 implements ObjectEncoder.
-func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v }
-
-// AddInt8 implements ObjectEncoder.
-func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v }
-
-// AddString implements ObjectEncoder.
-func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v }
-
-// AddTime implements ObjectEncoder.
-func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v }
-
-// AddUint implements ObjectEncoder.
-func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v }
-
-// AddUint64 implements ObjectEncoder.
-func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v }
-
-// AddUint32 implements ObjectEncoder.
-func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v }
-
-// AddUint16 implements ObjectEncoder.
-func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v }
-
-// AddUint8 implements ObjectEncoder.
-func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v }
-
-// AddUintptr implements ObjectEncoder.
-func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v }
-
-// AddReflected implements ObjectEncoder.
-func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error {
- m.cur[k] = v
- return nil
-}
-
-// OpenNamespace implements ObjectEncoder.
-func (m *MapObjectEncoder) OpenNamespace(k string) {
- ns := make(map[string]interface{})
- m.cur[k] = ns
- m.cur = ns
-}
-
-// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like
-// the MapObjectEncoder, it's not designed for production use.
-type sliceArrayEncoder struct {
- elems []interface{}
-}
-
-func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error {
- enc := &sliceArrayEncoder{}
- err := v.MarshalLogArray(enc)
- s.elems = append(s.elems, enc.elems)
- return err
-}
-
-func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error {
- m := NewMapObjectEncoder()
- err := v.MarshalLogObject(m)
- s.elems = append(s.elems, m.Fields)
- return err
-}
-
-func (s *sliceArrayEncoder) AppendReflected(v interface{}) error {
- s.elems = append(s.elems, v)
- return nil
-}
-
-func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) }
-func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) }
-func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) }
diff --git a/vendor/go.uber.org/zap/zapcore/reflected_encoder.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
deleted file mode 100644
index 8746360eca..0000000000
--- a/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import (
- "encoding/json"
- "io"
-)
-
-// ReflectedEncoder serializes log fields that can't be serialized with Zap's
-// JSON encoder. These have the ReflectType field type.
-// Use EncoderConfig.NewReflectedEncoder to set this.
-type ReflectedEncoder interface {
- // Encode encodes and writes to the underlying data stream.
- Encode(interface{}) error
-}
-
-func defaultReflectedEncoder(w io.Writer) ReflectedEncoder {
- enc := json.NewEncoder(w)
- // For consistency with our custom JSON encoder.
- enc.SetEscapeHTML(false)
- return enc
-}
diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go
deleted file mode 100644
index dc518055a4..0000000000
--- a/vendor/go.uber.org/zap/zapcore/sampler.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright (c) 2016-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import (
- "time"
-
- "go.uber.org/atomic"
-)
-
-const (
- _numLevels = _maxLevel - _minLevel + 1
- _countersPerLevel = 4096
-)
-
-type counter struct {
- resetAt atomic.Int64
- counter atomic.Uint64
-}
-
-type counters [_numLevels][_countersPerLevel]counter
-
-func newCounters() *counters {
- return &counters{}
-}
-
-func (cs *counters) get(lvl Level, key string) *counter {
- i := lvl - _minLevel
- j := fnv32a(key) % _countersPerLevel
- return &cs[i][j]
-}
-
-// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc
-func fnv32a(s string) uint32 {
- const (
- offset32 = 2166136261
- prime32 = 16777619
- )
- hash := uint32(offset32)
- for i := 0; i < len(s); i++ {
- hash ^= uint32(s[i])
- hash *= prime32
- }
- return hash
-}
-
-func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 {
- tn := t.UnixNano()
- resetAfter := c.resetAt.Load()
- if resetAfter > tn {
- return c.counter.Inc()
- }
-
- c.counter.Store(1)
-
- newResetAfter := tn + tick.Nanoseconds()
- if !c.resetAt.CAS(resetAfter, newResetAfter) {
- // We raced with another goroutine trying to reset, and it also reset
- // the counter to 1, so we need to reincrement the counter.
- return c.counter.Inc()
- }
-
- return 1
-}
-
-// SamplingDecision is a decision represented as a bit field made by sampler.
-// More decisions may be added in the future.
-type SamplingDecision uint32
-
-const (
- // LogDropped indicates that the Sampler dropped a log entry.
- LogDropped SamplingDecision = 1 << iota
- // LogSampled indicates that the Sampler sampled a log entry.
- LogSampled
-)
-
-// optionFunc wraps a func so it satisfies the SamplerOption interface.
-type optionFunc func(*sampler)
-
-func (f optionFunc) apply(s *sampler) {
- f(s)
-}
-
-// SamplerOption configures a Sampler.
-type SamplerOption interface {
- apply(*sampler)
-}
-
-// nopSamplingHook is the default hook used by sampler.
-func nopSamplingHook(Entry, SamplingDecision) {}
-
-// SamplerHook registers a function which will be called when Sampler makes a
-// decision.
-//
-// This hook may be used to get visibility into the performance of the sampler.
-// For example, use it to track metrics of dropped versus sampled logs.
-//
-// var dropped atomic.Int64
-// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
-// if dec&zapcore.LogDropped > 0 {
-// dropped.Inc()
-// }
-// })
-func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
- return optionFunc(func(s *sampler) {
- s.hook = hook
- })
-}
-
-// NewSamplerWithOptions creates a Core that samples incoming entries, which
-// caps the CPU and I/O load of logging while attempting to preserve a
-// representative subset of your logs.
-//
-// Zap samples by logging the first N entries with a given level and message
-// each tick. If more Entries with the same level and message are seen during
-// the same interval, every Mth message is logged and the rest are dropped.
-//
-// For example,
-//
-// core = NewSamplerWithOptions(core, time.Second, 10, 5)
-//
-// This will log the first 10 log entries with the same level and message
-// in a one second interval as-is. Following that, it will allow through
-// every 5th log entry with the same level and message in that interval.
-//
-// If thereafter is zero, the Core will drop all log entries after the first N
-// in that interval.
-//
-// Sampler can be configured to report sampling decisions with the SamplerHook
-// option.
-//
-// Keep in mind that Zap's sampling implementation is optimized for speed over
-// absolute precision; under load, each tick may be slightly over- or
-// under-sampled.
-func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
- s := &sampler{
- Core: core,
- tick: tick,
- counts: newCounters(),
- first: uint64(first),
- thereafter: uint64(thereafter),
- hook: nopSamplingHook,
- }
- for _, opt := range opts {
- opt.apply(s)
- }
-
- return s
-}
-
-type sampler struct {
- Core
-
- counts *counters
- tick time.Duration
- first, thereafter uint64
- hook func(Entry, SamplingDecision)
-}
-
-var (
- _ Core = (*sampler)(nil)
- _ leveledEnabler = (*sampler)(nil)
-)
-
-// NewSampler creates a Core that samples incoming entries, which
-// caps the CPU and I/O load of logging while attempting to preserve a
-// representative subset of your logs.
-//
-// Zap samples by logging the first N entries with a given level and message
-// each tick. If more Entries with the same level and message are seen during
-// the same interval, every Mth message is logged and the rest are dropped.
-//
-// Keep in mind that zap's sampling implementation is optimized for speed over
-// absolute precision; under load, each tick may be slightly over- or
-// under-sampled.
-//
-// Deprecated: use NewSamplerWithOptions.
-func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
- return NewSamplerWithOptions(core, tick, first, thereafter)
-}
-
-func (s *sampler) Level() Level {
- return LevelOf(s.Core)
-}
-
-func (s *sampler) With(fields []Field) Core {
- return &sampler{
- Core: s.Core.With(fields),
- tick: s.tick,
- counts: s.counts,
- first: s.first,
- thereafter: s.thereafter,
- hook: s.hook,
- }
-}
-
-func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
- if !s.Enabled(ent.Level) {
- return ce
- }
-
- if ent.Level >= _minLevel && ent.Level <= _maxLevel {
- counter := s.counts.get(ent.Level, ent.Message)
- n := counter.IncCheckReset(ent.Time, s.tick)
- if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) {
- s.hook(ent, LogDropped)
- return ce
- }
- s.hook(ent, LogSampled)
- }
- return s.Core.Check(ent, ce)
-}
diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go
deleted file mode 100644
index 9bb32f0557..0000000000
--- a/vendor/go.uber.org/zap/zapcore/tee.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (c) 2016-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import "go.uber.org/multierr"
-
-type multiCore []Core
-
-var (
- _ leveledEnabler = multiCore(nil)
- _ Core = multiCore(nil)
-)
-
-// NewTee creates a Core that duplicates log entries into two or more
-// underlying Cores.
-//
-// Calling it with a single Core returns the input unchanged, and calling
-// it with no input returns a no-op Core.
-func NewTee(cores ...Core) Core {
- switch len(cores) {
- case 0:
- return NewNopCore()
- case 1:
- return cores[0]
- default:
- return multiCore(cores)
- }
-}
-
-func (mc multiCore) With(fields []Field) Core {
- clone := make(multiCore, len(mc))
- for i := range mc {
- clone[i] = mc[i].With(fields)
- }
- return clone
-}
-
-func (mc multiCore) Level() Level {
- minLvl := _maxLevel // mc is never empty
- for i := range mc {
- if lvl := LevelOf(mc[i]); lvl < minLvl {
- minLvl = lvl
- }
- }
- return minLvl
-}
-
-func (mc multiCore) Enabled(lvl Level) bool {
- for i := range mc {
- if mc[i].Enabled(lvl) {
- return true
- }
- }
- return false
-}
-
-func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
- for i := range mc {
- ce = mc[i].Check(ent, ce)
- }
- return ce
-}
-
-func (mc multiCore) Write(ent Entry, fields []Field) error {
- var err error
- for i := range mc {
- err = multierr.Append(err, mc[i].Write(ent, fields))
- }
- return err
-}
-
-func (mc multiCore) Sync() error {
- var err error
- for i := range mc {
- err = multierr.Append(err, mc[i].Sync())
- }
- return err
-}
diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go
deleted file mode 100644
index d4a1af3d07..0000000000
--- a/vendor/go.uber.org/zap/zapcore/write_syncer.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import (
- "io"
- "sync"
-
- "go.uber.org/multierr"
-)
-
-// A WriteSyncer is an io.Writer that can also flush any buffered data. Note
-// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer.
-type WriteSyncer interface {
- io.Writer
- Sync() error
-}
-
-// AddSync converts an io.Writer to a WriteSyncer. It attempts to be
-// intelligent: if the concrete type of the io.Writer implements WriteSyncer,
-// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync.
-func AddSync(w io.Writer) WriteSyncer {
- switch w := w.(type) {
- case WriteSyncer:
- return w
- default:
- return writerWrapper{w}
- }
-}
-
-type lockedWriteSyncer struct {
- sync.Mutex
- ws WriteSyncer
-}
-
-// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In
-// particular, *os.Files must be locked before use.
-func Lock(ws WriteSyncer) WriteSyncer {
- if _, ok := ws.(*lockedWriteSyncer); ok {
- // no need to layer on another lock
- return ws
- }
- return &lockedWriteSyncer{ws: ws}
-}
-
-func (s *lockedWriteSyncer) Write(bs []byte) (int, error) {
- s.Lock()
- n, err := s.ws.Write(bs)
- s.Unlock()
- return n, err
-}
-
-func (s *lockedWriteSyncer) Sync() error {
- s.Lock()
- err := s.ws.Sync()
- s.Unlock()
- return err
-}
-
-type writerWrapper struct {
- io.Writer
-}
-
-func (w writerWrapper) Sync() error {
- return nil
-}
-
-type multiWriteSyncer []WriteSyncer
-
-// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes
-// and sync calls, much like io.MultiWriter.
-func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer {
- if len(ws) == 1 {
- return ws[0]
- }
- return multiWriteSyncer(ws)
-}
-
-// See https://golang.org/src/io/multi.go
-// When not all underlying syncers write the same number of bytes,
-// the smallest number is returned even though Write() is called on
-// all of them.
-func (ws multiWriteSyncer) Write(p []byte) (int, error) {
- var writeErr error
- nWritten := 0
- for _, w := range ws {
- n, err := w.Write(p)
- writeErr = multierr.Append(writeErr, err)
- if nWritten == 0 && n != 0 {
- nWritten = n
- } else if n < nWritten {
- nWritten = n
- }
- }
- return nWritten, writeErr
-}
-
-func (ws multiWriteSyncer) Sync() error {
- var err error
- for _, w := range ws {
- err = multierr.Append(err, w.Sync())
- }
- return err
-}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index f965579f7d..ac90a2631c 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -1266,6 +1266,27 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return res, nil
}
+ cancelRequest := func(cs *clientStream, err error) error {
+ cs.cc.mu.Lock()
+ defer cs.cc.mu.Unlock()
+ cs.abortStreamLocked(err)
+ if cs.ID != 0 {
+ // This request may have failed because of a problem with the connection,
+ // or for some unrelated reason. (For example, the user might have canceled
+ // the request without waiting for a response.) Mark the connection as
+ // not reusable, since trying to reuse a dead connection is worse than
+ // unnecessarily creating a new one.
+ //
+ // If cs.ID is 0, then the request was never allocated a stream ID and
+ // whatever went wrong was unrelated to the connection. We might have
+ // timed out waiting for a stream slot when StrictMaxConcurrentStreams
+ // is set, for example, in which case retrying on a different connection
+ // will not help.
+ cs.cc.doNotReuse = true
+ }
+ return err
+ }
+
for {
select {
case <-cs.respHeaderRecv:
@@ -1280,15 +1301,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return handleResponseHeaders()
default:
waitDone()
- return nil, cs.abortErr
+ return nil, cancelRequest(cs, cs.abortErr)
}
case <-ctx.Done():
- err := ctx.Err()
- cs.abortStream(err)
- return nil, err
+ return nil, cancelRequest(cs, ctx.Err())
case <-cs.reqCancel:
- cs.abortStream(errRequestCanceled)
- return nil, errRequestCanceled
+ return nil, cancelRequest(cs, errRequestCanceled)
}
}
}
diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go
index 97db2340ec..84fcc32b63 100644
--- a/vendor/golang.org/x/net/internal/socks/socks.go
+++ b/vendor/golang.org/x/net/internal/socks/socks.go
@@ -289,7 +289,7 @@ func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter,
case AuthMethodNotRequired:
return nil
case AuthMethodUsernamePassword:
- if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 {
+ if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) > 255 {
return errors.New("invalid username/password")
}
b := []byte{authUsernamePasswordVersion}
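The socks.go change relaxes the username/password check so that an empty password is accepted; an empty username is still rejected and both fields remain capped at 255 bytes. The internal/socks package cannot be imported by user code, so the hedged illustration below goes through the public golang.org/x/net/proxy wrapper, with hypothetical proxy and target addresses:

    package main

    import (
        "fmt"

        "golang.org/x/net/proxy"
    )

    func main() {
        // An empty Password is now accepted by the vendored authenticator,
        // provided the username is non-empty. Addresses here are made up.
        auth := &proxy.Auth{User: "build-bot", Password: ""}
        dialer, err := proxy.SOCKS5("tcp", "proxy.internal:1080", auth, proxy.Direct)
        if err != nil {
            panic(err)
        }
        conn, err := dialer.Dial("tcp", "registry.internal:443")
        if err != nil {
            fmt.Println("dial failed:", err)
            return
        }
        conn.Close()
    }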
diff --git a/vendor/golang.org/x/text/internal/language/common.go b/vendor/golang.org/x/text/internal/language/common.go
deleted file mode 100644
index cdfdb74971..0000000000
--- a/vendor/golang.org/x/text/internal/language/common.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
-
-package language
-
-// This file contains code common to the maketables.go and the package code.
-
-// AliasType is the type of an alias in AliasMap.
-type AliasType int8
-
-const (
- Deprecated AliasType = iota
- Macro
- Legacy
-
- AliasTypeUnknown AliasType = -1
-)
diff --git a/vendor/golang.org/x/text/internal/language/compact.go b/vendor/golang.org/x/text/internal/language/compact.go
deleted file mode 100644
index 46a0015074..0000000000
--- a/vendor/golang.org/x/text/internal/language/compact.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package language
-
-// CompactCoreInfo is a compact integer with the three core tags encoded.
-type CompactCoreInfo uint32
-
-// GetCompactCore generates a uint32 value that is guaranteed to be unique for
-// different language, region, and script values.
-func GetCompactCore(t Tag) (cci CompactCoreInfo, ok bool) {
- if t.LangID > langNoIndexOffset {
- return 0, false
- }
- cci |= CompactCoreInfo(t.LangID) << (8 + 12)
- cci |= CompactCoreInfo(t.ScriptID) << 12
- cci |= CompactCoreInfo(t.RegionID)
- return cci, true
-}
-
-// Tag generates a tag from c.
-func (c CompactCoreInfo) Tag() Tag {
- return Tag{
- LangID: Language(c >> 20),
- RegionID: Region(c & 0x3ff),
- ScriptID: Script(c>>12) & 0xff,
- }
-}
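GetCompactCore in the file removed above packs a tag's language, script, and region indices into one uint32: the language index above bit 20, the script index in bits 12-19, and the region index in the low bits (masked with 0x3ff when unpacking). A standalone sketch of that layout, with hypothetical index values:

    package main

    import "fmt"

    // pack mirrors the bit layout of the removed CompactCoreInfo:
    // lang<<20 | script<<12 | region.
    func pack(lang, script, region uint32) uint32 {
        return lang<<(8+12) | script<<12 | region
    }

    func main() {
        cci := pack(0x13, 0x05, 0x1a2) // hypothetical small indices
        fmt.Printf("lang=%#x script=%#x region=%#x\n",
            cci>>20, (cci>>12)&0xff, cci&0x3ff)
    }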
diff --git a/vendor/golang.org/x/text/internal/language/compact/compact.go b/vendor/golang.org/x/text/internal/language/compact/compact.go
deleted file mode 100644
index 1b36935ef7..0000000000
--- a/vendor/golang.org/x/text/internal/language/compact/compact.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package compact defines a compact representation of language tags.
-//
-// Common language tags (at least all for which locale information is defined
-// in CLDR) are assigned a unique index. Each Tag is associated with such an
-// ID for selecting language-related resources (such as translations) as well
-// as one for selecting regional defaults (currency, number formatting, etc.)
-//
-// It may want to export this functionality at some point, but at this point
-// this is only available for use within x/text.
-package compact // import "golang.org/x/text/internal/language/compact"
-
-import (
- "sort"
- "strings"
-
- "golang.org/x/text/internal/language"
-)
-
-// ID is an integer identifying a single tag.
-type ID uint16
-
-func getCoreIndex(t language.Tag) (id ID, ok bool) {
- cci, ok := language.GetCompactCore(t)
- if !ok {
- return 0, false
- }
- i := sort.Search(len(coreTags), func(i int) bool {
- return cci <= coreTags[i]
- })
- if i == len(coreTags) || coreTags[i] != cci {
- return 0, false
- }
- return ID(i), true
-}
-
-// Parent returns the ID of the parent or the root ID if id is already the root.
-func (id ID) Parent() ID {
- return parents[id]
-}
-
-// Tag converts id to an internal language Tag.
-func (id ID) Tag() language.Tag {
- if int(id) >= len(coreTags) {
- return specialTags[int(id)-len(coreTags)]
- }
- return coreTags[id].Tag()
-}
-
-var specialTags []language.Tag
-
-func init() {
- tags := strings.Split(specialTagsStr, " ")
- specialTags = make([]language.Tag, len(tags))
- for i, t := range tags {
- specialTags[i] = language.MustParse(t)
- }
-}
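getCoreIndex in the deleted compact.go resolves a packed core value by binary-searching the sorted coreTags table with sort.Search and then checking for an exact hit. The same lookup pattern, shown standalone on a hypothetical sorted slice:

    package main

    import (
        "fmt"
        "sort"
    )

    // lookup follows the sort.Search-then-verify pattern of the removed
    // getCoreIndex: find the insertion point, then confirm an exact match.
    func lookup(tags []uint32, v uint32) (int, bool) {
        i := sort.Search(len(tags), func(i int) bool { return v <= tags[i] })
        if i == len(tags) || tags[i] != v {
            return 0, false
        }
        return i, true
    }

    func main() {
        tags := []uint32{0x01600000, 0x01c00000, 0x02100000} // hypothetical table
        fmt.Println(lookup(tags, 0x01c00000)) // 1 true
        fmt.Println(lookup(tags, 0x02700000)) // 0 false
    }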
diff --git a/vendor/golang.org/x/text/internal/language/compact/language.go b/vendor/golang.org/x/text/internal/language/compact/language.go
deleted file mode 100644
index 8c1b6666fb..0000000000
--- a/vendor/golang.org/x/text/internal/language/compact/language.go
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:generate go run gen.go gen_index.go -output tables.go
-//go:generate go run gen_parents.go
-
-package compact
-
-// TODO: Remove above NOTE after:
-// - verifying that tables are dropped correctly (most notably matcher tables).
-
-import (
- "strings"
-
- "golang.org/x/text/internal/language"
-)
-
-// Tag represents a BCP 47 language tag. It is used to specify an instance of a
-// specific language or locale. All language tag values are guaranteed to be
-// well-formed.
-type Tag struct {
- // NOTE: exported tags will become part of the public API.
- language ID
- locale ID
- full fullTag // always a language.Tag for now.
-}
-
-const _und = 0
-
-type fullTag interface {
- IsRoot() bool
- Parent() language.Tag
-}
-
-// Make a compact Tag from a fully specified internal language Tag.
-func Make(t language.Tag) (tag Tag) {
- if region := t.TypeForKey("rg"); len(region) == 6 && region[2:] == "zzzz" {
- if r, err := language.ParseRegion(region[:2]); err == nil {
- tFull := t
- t, _ = t.SetTypeForKey("rg", "")
- // TODO: should we not consider "va" for the language tag?
- var exact1, exact2 bool
- tag.language, exact1 = FromTag(t)
- t.RegionID = r
- tag.locale, exact2 = FromTag(t)
- if !exact1 || !exact2 {
- tag.full = tFull
- }
- return tag
- }
- }
- lang, ok := FromTag(t)
- tag.language = lang
- tag.locale = lang
- if !ok {
- tag.full = t
- }
- return tag
-}
-
-// Tag returns an internal language Tag version of this tag.
-func (t Tag) Tag() language.Tag {
- if t.full != nil {
- return t.full.(language.Tag)
- }
- tag := t.language.Tag()
- if t.language != t.locale {
- loc := t.locale.Tag()
- tag, _ = tag.SetTypeForKey("rg", strings.ToLower(loc.RegionID.String())+"zzzz")
- }
- return tag
-}
-
-// IsCompact reports whether this tag is fully defined in terms of ID.
-func (t *Tag) IsCompact() bool {
- return t.full == nil
-}
-
-// MayHaveVariants reports whether a tag may have variants. If it returns false
-// it is guaranteed the tag does not have variants.
-func (t Tag) MayHaveVariants() bool {
- return t.full != nil || int(t.language) >= len(coreTags)
-}
-
-// MayHaveExtensions reports whether a tag may have extensions. If it returns
-// false it is guaranteed the tag does not have them.
-func (t Tag) MayHaveExtensions() bool {
- return t.full != nil ||
- int(t.language) >= len(coreTags) ||
- t.language != t.locale
-}
-
-// IsRoot returns true if t is equal to language "und".
-func (t Tag) IsRoot() bool {
- if t.full != nil {
- return t.full.IsRoot()
- }
- return t.language == _und
-}
-
-// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
-// specific language are substituted with fields from the parent language.
-// The parent for a language may change for newer versions of CLDR.
-func (t Tag) Parent() Tag {
- if t.full != nil {
- return Make(t.full.Parent())
- }
- if t.language != t.locale {
- // Simulate stripping -u-rg-xxxxxx
- return Tag{language: t.language, locale: t.language}
- }
- // TODO: use parent lookup table once cycle from internal package is
- // removed. Probably by internalizing the table and declaring this fast
- // enough.
- // lang := compactID(internal.Parent(uint16(t.language)))
- lang, _ := FromTag(t.language.Tag().Parent())
- return Tag{language: lang, locale: lang}
-}
-
-// nextToken returns token t and the rest of the string.
-func nextToken(s string) (t, tail string) {
- p := strings.Index(s[1:], "-")
- if p == -1 {
- return s[1:], ""
- }
- p++
- return s[1:p], s[p:]
-}
-
-// LanguageID returns an index, where 0 <= index < NumCompactTags, for tags
-// for which data exists in the text repository.The index will change over time
-// and should not be stored in persistent storage. If t does not match a compact
-// index, exact will be false and the compact index will be returned for the
-// first match after repeatedly taking the Parent of t.
-func LanguageID(t Tag) (id ID, exact bool) {
- return t.language, t.full == nil
-}
-
-// RegionalID returns the ID for the regional variant of this tag. This index is
-// used to indicate region-specific overrides, such as default currency, default
-// calendar and week data, default time cycle, and default measurement system
-// and unit preferences.
-//
-// For instance, the tag en-GB-u-rg-uszzzz specifies British English with US
-// settings for currency, number formatting, etc. The CompactIndex for this tag
-// will be that for en-GB, while the RegionalID will be the one corresponding to
-// en-US.
-func RegionalID(t Tag) (id ID, exact bool) {
- return t.locale, t.full == nil
-}
-
-// LanguageTag returns t stripped of regional variant indicators.
-//
-// At the moment this means it is stripped of a regional and variant subtag "rg"
-// and "va" in the "u" extension.
-func (t Tag) LanguageTag() Tag {
- if t.full == nil {
- return Tag{language: t.language, locale: t.language}
- }
- tt := t.Tag()
- tt.SetTypeForKey("rg", "")
- tt.SetTypeForKey("va", "")
- return Make(tt)
-}
-
-// RegionalTag returns the regional variant of the tag.
-//
-// At the moment this means that the region is set from the regional subtag
-// "rg" in the "u" extension.
-func (t Tag) RegionalTag() Tag {
- rt := Tag{language: t.locale, locale: t.locale}
- if t.full == nil {
- return rt
- }
- b := language.Builder{}
- tag := t.Tag()
- // tag, _ = tag.SetTypeForKey("rg", "")
- b.SetTag(t.locale.Tag())
- if v := tag.Variants(); v != "" {
- for _, v := range strings.Split(v, "-") {
- b.AddVariant(v)
- }
- }
- for _, e := range tag.Extensions() {
- b.AddExt(e)
- }
- return t
-}
-
-// FromTag reports closest matching ID for an internal language Tag.
-func FromTag(t language.Tag) (id ID, exact bool) {
- // TODO: perhaps give more frequent tags a lower index.
- // TODO: we could make the indexes stable. This will excluded some
- // possibilities for optimization, so don't do this quite yet.
- exact = true
-
- b, s, r := t.Raw()
- if t.HasString() {
- if t.IsPrivateUse() {
- // We have no entries for user-defined tags.
- return 0, false
- }
- hasExtra := false
- if t.HasVariants() {
- if t.HasExtensions() {
- build := language.Builder{}
- build.SetTag(language.Tag{LangID: b, ScriptID: s, RegionID: r})
- build.AddVariant(t.Variants())
- exact = false
- t = build.Make()
- }
- hasExtra = true
- } else if _, ok := t.Extension('u'); ok {
- // TODO: va may mean something else. Consider not considering it.
- // Strip all but the 'va' entry.
- old := t
- variant := t.TypeForKey("va")
- t = language.Tag{LangID: b, ScriptID: s, RegionID: r}
- if variant != "" {
- t, _ = t.SetTypeForKey("va", variant)
- hasExtra = true
- }
- exact = old == t
- } else {
- exact = false
- }
- if hasExtra {
- // We have some variants.
- for i, s := range specialTags {
- if s == t {
- return ID(i + len(coreTags)), exact
- }
- }
- exact = false
- }
- }
- if x, ok := getCoreIndex(t); ok {
- return x, exact
- }
- exact = false
- if r != 0 && s == 0 {
- // Deal with cases where an extra script is inserted for the region.
- t, _ := t.Maximize()
- if x, ok := getCoreIndex(t); ok {
- return x, exact
- }
- }
- for t = t.Parent(); t != root; t = t.Parent() {
- // No variants specified: just compare core components.
- // The key has the form lllssrrr, where l, s, and r are nibbles for
- // respectively the langID, scriptID, and regionID.
- if x, ok := getCoreIndex(t); ok {
- return x, exact
- }
- }
- return 0, exact
-}
-
-var root = language.Tag{}
diff --git a/vendor/golang.org/x/text/internal/language/compact/parents.go b/vendor/golang.org/x/text/internal/language/compact/parents.go
deleted file mode 100644
index 8d810723c7..0000000000
--- a/vendor/golang.org/x/text/internal/language/compact/parents.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
-
-package compact
-
-// parents maps a compact index of a tag to the compact index of the parent of
-// this tag.
-var parents = []ID{ // 775 elements
- // Entry 0 - 3F
- 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0004, 0x0000, 0x0006,
- 0x0000, 0x0008, 0x0000, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a,
- 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a,
- 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a,
- 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x0000,
- 0x0000, 0x0028, 0x0000, 0x002a, 0x0000, 0x002c, 0x0000, 0x0000,
- 0x002f, 0x002e, 0x002e, 0x0000, 0x0033, 0x0000, 0x0035, 0x0000,
- 0x0037, 0x0000, 0x0039, 0x0000, 0x003b, 0x0000, 0x0000, 0x003e,
- // Entry 40 - 7F
- 0x0000, 0x0040, 0x0040, 0x0000, 0x0043, 0x0043, 0x0000, 0x0046,
- 0x0000, 0x0048, 0x0000, 0x0000, 0x004b, 0x004a, 0x004a, 0x0000,
- 0x004f, 0x004f, 0x004f, 0x004f, 0x0000, 0x0054, 0x0054, 0x0000,
- 0x0057, 0x0000, 0x0059, 0x0000, 0x005b, 0x0000, 0x005d, 0x005d,
- 0x0000, 0x0060, 0x0000, 0x0062, 0x0000, 0x0064, 0x0000, 0x0066,
- 0x0066, 0x0000, 0x0069, 0x0000, 0x006b, 0x006b, 0x006b, 0x006b,
- 0x006b, 0x006b, 0x006b, 0x0000, 0x0073, 0x0000, 0x0075, 0x0000,
- 0x0077, 0x0000, 0x0000, 0x007a, 0x0000, 0x007c, 0x0000, 0x007e,
- // Entry 80 - BF
- 0x0000, 0x0080, 0x0080, 0x0000, 0x0083, 0x0083, 0x0000, 0x0086,
- 0x0087, 0x0087, 0x0087, 0x0086, 0x0088, 0x0087, 0x0087, 0x0087,
- 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0088,
- 0x0087, 0x0087, 0x0087, 0x0087, 0x0088, 0x0087, 0x0088, 0x0087,
- 0x0087, 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
- 0x0087, 0x0087, 0x0087, 0x0086, 0x0087, 0x0087, 0x0087, 0x0087,
- 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
- 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0086, 0x0087, 0x0086,
- // Entry C0 - FF
- 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
- 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
- 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0088, 0x0087,
- 0x0087, 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
- 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0086, 0x0086, 0x0087,
- 0x0087, 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0000,
- 0x00ef, 0x0000, 0x00f1, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2,
- 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f1, 0x00f2, 0x00f1, 0x00f1,
- // Entry 100 - 13F
- 0x00f2, 0x00f2, 0x00f1, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f1,
- 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x0000, 0x010e,
- 0x0000, 0x0110, 0x0000, 0x0112, 0x0000, 0x0114, 0x0114, 0x0000,
- 0x0117, 0x0117, 0x0117, 0x0117, 0x0000, 0x011c, 0x0000, 0x011e,
- 0x0000, 0x0120, 0x0120, 0x0000, 0x0123, 0x0123, 0x0123, 0x0123,
- 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
- 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
- 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
- // Entry 140 - 17F
- 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
- 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
- 0x0123, 0x0123, 0x0000, 0x0152, 0x0000, 0x0154, 0x0000, 0x0156,
- 0x0000, 0x0158, 0x0000, 0x015a, 0x0000, 0x015c, 0x015c, 0x015c,
- 0x0000, 0x0160, 0x0000, 0x0000, 0x0163, 0x0000, 0x0165, 0x0000,
- 0x0167, 0x0167, 0x0167, 0x0000, 0x016b, 0x0000, 0x016d, 0x0000,
- 0x016f, 0x0000, 0x0171, 0x0171, 0x0000, 0x0174, 0x0000, 0x0176,
- 0x0000, 0x0178, 0x0000, 0x017a, 0x0000, 0x017c, 0x0000, 0x017e,
- // Entry 180 - 1BF
- 0x0000, 0x0000, 0x0000, 0x0182, 0x0000, 0x0184, 0x0184, 0x0184,
- 0x0184, 0x0000, 0x0000, 0x0000, 0x018b, 0x0000, 0x0000, 0x018e,
- 0x0000, 0x0000, 0x0191, 0x0000, 0x0000, 0x0000, 0x0195, 0x0000,
- 0x0197, 0x0000, 0x0000, 0x019a, 0x0000, 0x0000, 0x019d, 0x0000,
- 0x019f, 0x0000, 0x01a1, 0x0000, 0x01a3, 0x0000, 0x01a5, 0x0000,
- 0x01a7, 0x0000, 0x01a9, 0x0000, 0x01ab, 0x0000, 0x01ad, 0x0000,
- 0x01af, 0x0000, 0x01b1, 0x01b1, 0x0000, 0x01b4, 0x0000, 0x01b6,
- 0x0000, 0x01b8, 0x0000, 0x01ba, 0x0000, 0x01bc, 0x0000, 0x0000,
- // Entry 1C0 - 1FF
- 0x01bf, 0x0000, 0x01c1, 0x0000, 0x01c3, 0x0000, 0x01c5, 0x0000,
- 0x01c7, 0x0000, 0x01c9, 0x0000, 0x01cb, 0x01cb, 0x01cb, 0x01cb,
- 0x0000, 0x01d0, 0x0000, 0x01d2, 0x01d2, 0x0000, 0x01d5, 0x0000,
- 0x01d7, 0x0000, 0x01d9, 0x0000, 0x01db, 0x0000, 0x01dd, 0x0000,
- 0x01df, 0x01df, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6,
- 0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x0000, 0x01ee,
- 0x0000, 0x01f0, 0x0000, 0x0000, 0x01f3, 0x0000, 0x01f5, 0x01f5,
- 0x01f5, 0x0000, 0x01f9, 0x0000, 0x01fb, 0x0000, 0x01fd, 0x0000,
- // Entry 200 - 23F
- 0x01ff, 0x0000, 0x0000, 0x0202, 0x0000, 0x0204, 0x0204, 0x0000,
- 0x0207, 0x0000, 0x0209, 0x0209, 0x0000, 0x020c, 0x020c, 0x0000,
- 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x0000,
- 0x0217, 0x0000, 0x0219, 0x0000, 0x021b, 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0221, 0x0000, 0x0000, 0x0224, 0x0000, 0x0226,
- 0x0226, 0x0000, 0x0229, 0x0000, 0x022b, 0x022b, 0x0000, 0x0000,
- 0x022f, 0x022e, 0x022e, 0x0000, 0x0000, 0x0234, 0x0000, 0x0236,
- 0x0000, 0x0238, 0x0000, 0x0244, 0x023a, 0x0244, 0x0244, 0x0244,
- // Entry 240 - 27F
- 0x0244, 0x0244, 0x0244, 0x0244, 0x023a, 0x0244, 0x0244, 0x0000,
- 0x0247, 0x0247, 0x0247, 0x0000, 0x024b, 0x0000, 0x024d, 0x0000,
- 0x024f, 0x024f, 0x0000, 0x0252, 0x0000, 0x0254, 0x0254, 0x0254,
- 0x0254, 0x0254, 0x0254, 0x0000, 0x025b, 0x0000, 0x025d, 0x0000,
- 0x025f, 0x0000, 0x0261, 0x0000, 0x0263, 0x0000, 0x0265, 0x0000,
- 0x0000, 0x0268, 0x0268, 0x0268, 0x0000, 0x026c, 0x0000, 0x026e,
- 0x0000, 0x0270, 0x0000, 0x0000, 0x0000, 0x0274, 0x0273, 0x0273,
- 0x0000, 0x0278, 0x0000, 0x027a, 0x0000, 0x027c, 0x0000, 0x0000,
- // Entry 280 - 2BF
- 0x0000, 0x0000, 0x0281, 0x0000, 0x0000, 0x0284, 0x0000, 0x0286,
- 0x0286, 0x0286, 0x0286, 0x0000, 0x028b, 0x028b, 0x028b, 0x0000,
- 0x028f, 0x028f, 0x028f, 0x028f, 0x028f, 0x0000, 0x0295, 0x0295,
- 0x0295, 0x0295, 0x0000, 0x0000, 0x0000, 0x0000, 0x029d, 0x029d,
- 0x029d, 0x0000, 0x02a1, 0x02a1, 0x02a1, 0x02a1, 0x0000, 0x0000,
- 0x02a7, 0x02a7, 0x02a7, 0x02a7, 0x0000, 0x02ac, 0x0000, 0x02ae,
- 0x02ae, 0x0000, 0x02b1, 0x0000, 0x02b3, 0x0000, 0x02b5, 0x02b5,
- 0x0000, 0x0000, 0x02b9, 0x0000, 0x0000, 0x0000, 0x02bd, 0x0000,
- // Entry 2C0 - 2FF
- 0x02bf, 0x02bf, 0x0000, 0x0000, 0x02c3, 0x0000, 0x02c5, 0x0000,
- 0x02c7, 0x0000, 0x02c9, 0x0000, 0x02cb, 0x0000, 0x02cd, 0x02cd,
- 0x0000, 0x0000, 0x02d1, 0x0000, 0x02d3, 0x02d0, 0x02d0, 0x0000,
- 0x0000, 0x02d8, 0x02d7, 0x02d7, 0x0000, 0x0000, 0x02dd, 0x0000,
- 0x02df, 0x0000, 0x02e1, 0x0000, 0x0000, 0x02e4, 0x0000, 0x02e6,
- 0x0000, 0x0000, 0x02e9, 0x0000, 0x02eb, 0x0000, 0x02ed, 0x0000,
- 0x02ef, 0x02ef, 0x0000, 0x0000, 0x02f3, 0x02f2, 0x02f2, 0x0000,
- 0x02f7, 0x0000, 0x02f9, 0x02f9, 0x02f9, 0x02f9, 0x02f9, 0x0000,
- // Entry 300 - 33F
- 0x02ff, 0x0300, 0x02ff, 0x0000, 0x0303, 0x0051, 0x00e6,
-} // Size: 1574 bytes
-
-// Total table size 1574 bytes (1KiB); checksum: 895AAF0B
diff --git a/vendor/golang.org/x/text/internal/language/compact/tables.go b/vendor/golang.org/x/text/internal/language/compact/tables.go
deleted file mode 100644
index 32af9de599..0000000000
--- a/vendor/golang.org/x/text/internal/language/compact/tables.go
+++ /dev/null
@@ -1,1015 +0,0 @@
-// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
-
-package compact
-
-import "golang.org/x/text/internal/language"
-
-// CLDRVersion is the CLDR version from which the tables in this package are derived.
-const CLDRVersion = "32"
-
-// NumCompactTags is the number of common tags. The maximum tag is
-// NumCompactTags-1.
-const NumCompactTags = 775
-const (
- undIndex ID = 0
- afIndex ID = 1
- afNAIndex ID = 2
- afZAIndex ID = 3
- agqIndex ID = 4
- agqCMIndex ID = 5
- akIndex ID = 6
- akGHIndex ID = 7
- amIndex ID = 8
- amETIndex ID = 9
- arIndex ID = 10
- ar001Index ID = 11
- arAEIndex ID = 12
- arBHIndex ID = 13
- arDJIndex ID = 14
- arDZIndex ID = 15
- arEGIndex ID = 16
- arEHIndex ID = 17
- arERIndex ID = 18
- arILIndex ID = 19
- arIQIndex ID = 20
- arJOIndex ID = 21
- arKMIndex ID = 22
- arKWIndex ID = 23
- arLBIndex ID = 24
- arLYIndex ID = 25
- arMAIndex ID = 26
- arMRIndex ID = 27
- arOMIndex ID = 28
- arPSIndex ID = 29
- arQAIndex ID = 30
- arSAIndex ID = 31
- arSDIndex ID = 32
- arSOIndex ID = 33
- arSSIndex ID = 34
- arSYIndex ID = 35
- arTDIndex ID = 36
- arTNIndex ID = 37
- arYEIndex ID = 38
- arsIndex ID = 39
- asIndex ID = 40
- asINIndex ID = 41
- asaIndex ID = 42
- asaTZIndex ID = 43
- astIndex ID = 44
- astESIndex ID = 45
- azIndex ID = 46
- azCyrlIndex ID = 47
- azCyrlAZIndex ID = 48
- azLatnIndex ID = 49
- azLatnAZIndex ID = 50
- basIndex ID = 51
- basCMIndex ID = 52
- beIndex ID = 53
- beBYIndex ID = 54
- bemIndex ID = 55
- bemZMIndex ID = 56
- bezIndex ID = 57
- bezTZIndex ID = 58
- bgIndex ID = 59
- bgBGIndex ID = 60
- bhIndex ID = 61
- bmIndex ID = 62
- bmMLIndex ID = 63
- bnIndex ID = 64
- bnBDIndex ID = 65
- bnINIndex ID = 66
- boIndex ID = 67
- boCNIndex ID = 68
- boINIndex ID = 69
- brIndex ID = 70
- brFRIndex ID = 71
- brxIndex ID = 72
- brxINIndex ID = 73
- bsIndex ID = 74
- bsCyrlIndex ID = 75
- bsCyrlBAIndex ID = 76
- bsLatnIndex ID = 77
- bsLatnBAIndex ID = 78
- caIndex ID = 79
- caADIndex ID = 80
- caESIndex ID = 81
- caFRIndex ID = 82
- caITIndex ID = 83
- ccpIndex ID = 84
- ccpBDIndex ID = 85
- ccpINIndex ID = 86
- ceIndex ID = 87
- ceRUIndex ID = 88
- cggIndex ID = 89
- cggUGIndex ID = 90
- chrIndex ID = 91
- chrUSIndex ID = 92
- ckbIndex ID = 93
- ckbIQIndex ID = 94
- ckbIRIndex ID = 95
- csIndex ID = 96
- csCZIndex ID = 97
- cuIndex ID = 98
- cuRUIndex ID = 99
- cyIndex ID = 100
- cyGBIndex ID = 101
- daIndex ID = 102
- daDKIndex ID = 103
- daGLIndex ID = 104
- davIndex ID = 105
- davKEIndex ID = 106
- deIndex ID = 107
- deATIndex ID = 108
- deBEIndex ID = 109
- deCHIndex ID = 110
- deDEIndex ID = 111
- deITIndex ID = 112
- deLIIndex ID = 113
- deLUIndex ID = 114
- djeIndex ID = 115
- djeNEIndex ID = 116
- dsbIndex ID = 117
- dsbDEIndex ID = 118
- duaIndex ID = 119
- duaCMIndex ID = 120
- dvIndex ID = 121
- dyoIndex ID = 122
- dyoSNIndex ID = 123
- dzIndex ID = 124
- dzBTIndex ID = 125
- ebuIndex ID = 126
- ebuKEIndex ID = 127
- eeIndex ID = 128
- eeGHIndex ID = 129
- eeTGIndex ID = 130
- elIndex ID = 131
- elCYIndex ID = 132
- elGRIndex ID = 133
- enIndex ID = 134
- en001Index ID = 135
- en150Index ID = 136
- enAGIndex ID = 137
- enAIIndex ID = 138
- enASIndex ID = 139
- enATIndex ID = 140
- enAUIndex ID = 141
- enBBIndex ID = 142
- enBEIndex ID = 143
- enBIIndex ID = 144
- enBMIndex ID = 145
- enBSIndex ID = 146
- enBWIndex ID = 147
- enBZIndex ID = 148
- enCAIndex ID = 149
- enCCIndex ID = 150
- enCHIndex ID = 151
- enCKIndex ID = 152
- enCMIndex ID = 153
- enCXIndex ID = 154
- enCYIndex ID = 155
- enDEIndex ID = 156
- enDGIndex ID = 157
- enDKIndex ID = 158
- enDMIndex ID = 159
- enERIndex ID = 160
- enFIIndex ID = 161
- enFJIndex ID = 162
- enFKIndex ID = 163
- enFMIndex ID = 164
- enGBIndex ID = 165
- enGDIndex ID = 166
- enGGIndex ID = 167
- enGHIndex ID = 168
- enGIIndex ID = 169
- enGMIndex ID = 170
- enGUIndex ID = 171
- enGYIndex ID = 172
- enHKIndex ID = 173
- enIEIndex ID = 174
- enILIndex ID = 175
- enIMIndex ID = 176
- enINIndex ID = 177
- enIOIndex ID = 178
- enJEIndex ID = 179
- enJMIndex ID = 180
- enKEIndex ID = 181
- enKIIndex ID = 182
- enKNIndex ID = 183
- enKYIndex ID = 184
- enLCIndex ID = 185
- enLRIndex ID = 186
- enLSIndex ID = 187
- enMGIndex ID = 188
- enMHIndex ID = 189
- enMOIndex ID = 190
- enMPIndex ID = 191
- enMSIndex ID = 192
- enMTIndex ID = 193
- enMUIndex ID = 194
- enMWIndex ID = 195
- enMYIndex ID = 196
- enNAIndex ID = 197
- enNFIndex ID = 198
- enNGIndex ID = 199
- enNLIndex ID = 200
- enNRIndex ID = 201
- enNUIndex ID = 202
- enNZIndex ID = 203
- enPGIndex ID = 204
- enPHIndex ID = 205
- enPKIndex ID = 206
- enPNIndex ID = 207
- enPRIndex ID = 208
- enPWIndex ID = 209
- enRWIndex ID = 210
- enSBIndex ID = 211
- enSCIndex ID = 212
- enSDIndex ID = 213
- enSEIndex ID = 214
- enSGIndex ID = 215
- enSHIndex ID = 216
- enSIIndex ID = 217
- enSLIndex ID = 218
- enSSIndex ID = 219
- enSXIndex ID = 220
- enSZIndex ID = 221
- enTCIndex ID = 222
- enTKIndex ID = 223
- enTOIndex ID = 224
- enTTIndex ID = 225
- enTVIndex ID = 226
- enTZIndex ID = 227
- enUGIndex ID = 228
- enUMIndex ID = 229
- enUSIndex ID = 230
- enVCIndex ID = 231
- enVGIndex ID = 232
- enVIIndex ID = 233
- enVUIndex ID = 234
- enWSIndex ID = 235
- enZAIndex ID = 236
- enZMIndex ID = 237
- enZWIndex ID = 238
- eoIndex ID = 239
- eo001Index ID = 240
- esIndex ID = 241
- es419Index ID = 242
- esARIndex ID = 243
- esBOIndex ID = 244
- esBRIndex ID = 245
- esBZIndex ID = 246
- esCLIndex ID = 247
- esCOIndex ID = 248
- esCRIndex ID = 249
- esCUIndex ID = 250
- esDOIndex ID = 251
- esEAIndex ID = 252
- esECIndex ID = 253
- esESIndex ID = 254
- esGQIndex ID = 255
- esGTIndex ID = 256
- esHNIndex ID = 257
- esICIndex ID = 258
- esMXIndex ID = 259
- esNIIndex ID = 260
- esPAIndex ID = 261
- esPEIndex ID = 262
- esPHIndex ID = 263
- esPRIndex ID = 264
- esPYIndex ID = 265
- esSVIndex ID = 266
- esUSIndex ID = 267
- esUYIndex ID = 268
- esVEIndex ID = 269
- etIndex ID = 270
- etEEIndex ID = 271
- euIndex ID = 272
- euESIndex ID = 273
- ewoIndex ID = 274
- ewoCMIndex ID = 275
- faIndex ID = 276
- faAFIndex ID = 277
- faIRIndex ID = 278
- ffIndex ID = 279
- ffCMIndex ID = 280
- ffGNIndex ID = 281
- ffMRIndex ID = 282
- ffSNIndex ID = 283
- fiIndex ID = 284
- fiFIIndex ID = 285
- filIndex ID = 286
- filPHIndex ID = 287
- foIndex ID = 288
- foDKIndex ID = 289
- foFOIndex ID = 290
- frIndex ID = 291
- frBEIndex ID = 292
- frBFIndex ID = 293
- frBIIndex ID = 294
- frBJIndex ID = 295
- frBLIndex ID = 296
- frCAIndex ID = 297
- frCDIndex ID = 298
- frCFIndex ID = 299
- frCGIndex ID = 300
- frCHIndex ID = 301
- frCIIndex ID = 302
- frCMIndex ID = 303
- frDJIndex ID = 304
- frDZIndex ID = 305
- frFRIndex ID = 306
- frGAIndex ID = 307
- frGFIndex ID = 308
- frGNIndex ID = 309
- frGPIndex ID = 310
- frGQIndex ID = 311
- frHTIndex ID = 312
- frKMIndex ID = 313
- frLUIndex ID = 314
- frMAIndex ID = 315
- frMCIndex ID = 316
- frMFIndex ID = 317
- frMGIndex ID = 318
- frMLIndex ID = 319
- frMQIndex ID = 320
- frMRIndex ID = 321
- frMUIndex ID = 322
- frNCIndex ID = 323
- frNEIndex ID = 324
- frPFIndex ID = 325
- frPMIndex ID = 326
- frREIndex ID = 327
- frRWIndex ID = 328
- frSCIndex ID = 329
- frSNIndex ID = 330
- frSYIndex ID = 331
- frTDIndex ID = 332
- frTGIndex ID = 333
- frTNIndex ID = 334
- frVUIndex ID = 335
- frWFIndex ID = 336
- frYTIndex ID = 337
- furIndex ID = 338
- furITIndex ID = 339
- fyIndex ID = 340
- fyNLIndex ID = 341
- gaIndex ID = 342
- gaIEIndex ID = 343
- gdIndex ID = 344
- gdGBIndex ID = 345
- glIndex ID = 346
- glESIndex ID = 347
- gswIndex ID = 348
- gswCHIndex ID = 349
- gswFRIndex ID = 350
- gswLIIndex ID = 351
- guIndex ID = 352
- guINIndex ID = 353
- guwIndex ID = 354
- guzIndex ID = 355
- guzKEIndex ID = 356
- gvIndex ID = 357
- gvIMIndex ID = 358
- haIndex ID = 359
- haGHIndex ID = 360
- haNEIndex ID = 361
- haNGIndex ID = 362
- hawIndex ID = 363
- hawUSIndex ID = 364
- heIndex ID = 365
- heILIndex ID = 366
- hiIndex ID = 367
- hiINIndex ID = 368
- hrIndex ID = 369
- hrBAIndex ID = 370
- hrHRIndex ID = 371
- hsbIndex ID = 372
- hsbDEIndex ID = 373
- huIndex ID = 374
- huHUIndex ID = 375
- hyIndex ID = 376
- hyAMIndex ID = 377
- idIndex ID = 378
- idIDIndex ID = 379
- igIndex ID = 380
- igNGIndex ID = 381
- iiIndex ID = 382
- iiCNIndex ID = 383
- inIndex ID = 384
- ioIndex ID = 385
- isIndex ID = 386
- isISIndex ID = 387
- itIndex ID = 388
- itCHIndex ID = 389
- itITIndex ID = 390
- itSMIndex ID = 391
- itVAIndex ID = 392
- iuIndex ID = 393
- iwIndex ID = 394
- jaIndex ID = 395
- jaJPIndex ID = 396
- jboIndex ID = 397
- jgoIndex ID = 398
- jgoCMIndex ID = 399
- jiIndex ID = 400
- jmcIndex ID = 401
- jmcTZIndex ID = 402
- jvIndex ID = 403
- jwIndex ID = 404
- kaIndex ID = 405
- kaGEIndex ID = 406
- kabIndex ID = 407
- kabDZIndex ID = 408
- kajIndex ID = 409
- kamIndex ID = 410
- kamKEIndex ID = 411
- kcgIndex ID = 412
- kdeIndex ID = 413
- kdeTZIndex ID = 414
- keaIndex ID = 415
- keaCVIndex ID = 416
- khqIndex ID = 417
- khqMLIndex ID = 418
- kiIndex ID = 419
- kiKEIndex ID = 420
- kkIndex ID = 421
- kkKZIndex ID = 422
- kkjIndex ID = 423
- kkjCMIndex ID = 424
- klIndex ID = 425
- klGLIndex ID = 426
- klnIndex ID = 427
- klnKEIndex ID = 428
- kmIndex ID = 429
- kmKHIndex ID = 430
- knIndex ID = 431
- knINIndex ID = 432
- koIndex ID = 433
- koKPIndex ID = 434
- koKRIndex ID = 435
- kokIndex ID = 436
- kokINIndex ID = 437
- ksIndex ID = 438
- ksINIndex ID = 439
- ksbIndex ID = 440
- ksbTZIndex ID = 441
- ksfIndex ID = 442
- ksfCMIndex ID = 443
- kshIndex ID = 444
- kshDEIndex ID = 445
- kuIndex ID = 446
- kwIndex ID = 447
- kwGBIndex ID = 448
- kyIndex ID = 449
- kyKGIndex ID = 450
- lagIndex ID = 451
- lagTZIndex ID = 452
- lbIndex ID = 453
- lbLUIndex ID = 454
- lgIndex ID = 455
- lgUGIndex ID = 456
- lktIndex ID = 457
- lktUSIndex ID = 458
- lnIndex ID = 459
- lnAOIndex ID = 460
- lnCDIndex ID = 461
- lnCFIndex ID = 462
- lnCGIndex ID = 463
- loIndex ID = 464
- loLAIndex ID = 465
- lrcIndex ID = 466
- lrcIQIndex ID = 467
- lrcIRIndex ID = 468
- ltIndex ID = 469
- ltLTIndex ID = 470
- luIndex ID = 471
- luCDIndex ID = 472
- luoIndex ID = 473
- luoKEIndex ID = 474
- luyIndex ID = 475
- luyKEIndex ID = 476
- lvIndex ID = 477
- lvLVIndex ID = 478
- masIndex ID = 479
- masKEIndex ID = 480
- masTZIndex ID = 481
- merIndex ID = 482
- merKEIndex ID = 483
- mfeIndex ID = 484
- mfeMUIndex ID = 485
- mgIndex ID = 486
- mgMGIndex ID = 487
- mghIndex ID = 488
- mghMZIndex ID = 489
- mgoIndex ID = 490
- mgoCMIndex ID = 491
- mkIndex ID = 492
- mkMKIndex ID = 493
- mlIndex ID = 494
- mlINIndex ID = 495
- mnIndex ID = 496
- mnMNIndex ID = 497
- moIndex ID = 498
- mrIndex ID = 499
- mrINIndex ID = 500
- msIndex ID = 501
- msBNIndex ID = 502
- msMYIndex ID = 503
- msSGIndex ID = 504
- mtIndex ID = 505
- mtMTIndex ID = 506
- muaIndex ID = 507
- muaCMIndex ID = 508
- myIndex ID = 509
- myMMIndex ID = 510
- mznIndex ID = 511
- mznIRIndex ID = 512
- nahIndex ID = 513
- naqIndex ID = 514
- naqNAIndex ID = 515
- nbIndex ID = 516
- nbNOIndex ID = 517
- nbSJIndex ID = 518
- ndIndex ID = 519
- ndZWIndex ID = 520
- ndsIndex ID = 521
- ndsDEIndex ID = 522
- ndsNLIndex ID = 523
- neIndex ID = 524
- neINIndex ID = 525
- neNPIndex ID = 526
- nlIndex ID = 527
- nlAWIndex ID = 528
- nlBEIndex ID = 529
- nlBQIndex ID = 530
- nlCWIndex ID = 531
- nlNLIndex ID = 532
- nlSRIndex ID = 533
- nlSXIndex ID = 534
- nmgIndex ID = 535
- nmgCMIndex ID = 536
- nnIndex ID = 537
- nnNOIndex ID = 538
- nnhIndex ID = 539
- nnhCMIndex ID = 540
- noIndex ID = 541
- nqoIndex ID = 542
- nrIndex ID = 543
- nsoIndex ID = 544
- nusIndex ID = 545
- nusSSIndex ID = 546
- nyIndex ID = 547
- nynIndex ID = 548
- nynUGIndex ID = 549
- omIndex ID = 550
- omETIndex ID = 551
- omKEIndex ID = 552
- orIndex ID = 553
- orINIndex ID = 554
- osIndex ID = 555
- osGEIndex ID = 556
- osRUIndex ID = 557
- paIndex ID = 558
- paArabIndex ID = 559
- paArabPKIndex ID = 560
- paGuruIndex ID = 561
- paGuruINIndex ID = 562
- papIndex ID = 563
- plIndex ID = 564
- plPLIndex ID = 565
- prgIndex ID = 566
- prg001Index ID = 567
- psIndex ID = 568
- psAFIndex ID = 569
- ptIndex ID = 570
- ptAOIndex ID = 571
- ptBRIndex ID = 572
- ptCHIndex ID = 573
- ptCVIndex ID = 574
- ptGQIndex ID = 575
- ptGWIndex ID = 576
- ptLUIndex ID = 577
- ptMOIndex ID = 578
- ptMZIndex ID = 579
- ptPTIndex ID = 580
- ptSTIndex ID = 581
- ptTLIndex ID = 582
- quIndex ID = 583
- quBOIndex ID = 584
- quECIndex ID = 585
- quPEIndex ID = 586
- rmIndex ID = 587
- rmCHIndex ID = 588
- rnIndex ID = 589
- rnBIIndex ID = 590
- roIndex ID = 591
- roMDIndex ID = 592
- roROIndex ID = 593
- rofIndex ID = 594
- rofTZIndex ID = 595
- ruIndex ID = 596
- ruBYIndex ID = 597
- ruKGIndex ID = 598
- ruKZIndex ID = 599
- ruMDIndex ID = 600
- ruRUIndex ID = 601
- ruUAIndex ID = 602
- rwIndex ID = 603
- rwRWIndex ID = 604
- rwkIndex ID = 605
- rwkTZIndex ID = 606
- sahIndex ID = 607
- sahRUIndex ID = 608
- saqIndex ID = 609
- saqKEIndex ID = 610
- sbpIndex ID = 611
- sbpTZIndex ID = 612
- sdIndex ID = 613
- sdPKIndex ID = 614
- sdhIndex ID = 615
- seIndex ID = 616
- seFIIndex ID = 617
- seNOIndex ID = 618
- seSEIndex ID = 619
- sehIndex ID = 620
- sehMZIndex ID = 621
- sesIndex ID = 622
- sesMLIndex ID = 623
- sgIndex ID = 624
- sgCFIndex ID = 625
- shIndex ID = 626
- shiIndex ID = 627
- shiLatnIndex ID = 628
- shiLatnMAIndex ID = 629
- shiTfngIndex ID = 630
- shiTfngMAIndex ID = 631
- siIndex ID = 632
- siLKIndex ID = 633
- skIndex ID = 634
- skSKIndex ID = 635
- slIndex ID = 636
- slSIIndex ID = 637
- smaIndex ID = 638
- smiIndex ID = 639
- smjIndex ID = 640
- smnIndex ID = 641
- smnFIIndex ID = 642
- smsIndex ID = 643
- snIndex ID = 644
- snZWIndex ID = 645
- soIndex ID = 646
- soDJIndex ID = 647
- soETIndex ID = 648
- soKEIndex ID = 649
- soSOIndex ID = 650
- sqIndex ID = 651
- sqALIndex ID = 652
- sqMKIndex ID = 653
- sqXKIndex ID = 654
- srIndex ID = 655
- srCyrlIndex ID = 656
- srCyrlBAIndex ID = 657
- srCyrlMEIndex ID = 658
- srCyrlRSIndex ID = 659
- srCyrlXKIndex ID = 660
- srLatnIndex ID = 661
- srLatnBAIndex ID = 662
- srLatnMEIndex ID = 663
- srLatnRSIndex ID = 664
- srLatnXKIndex ID = 665
- ssIndex ID = 666
- ssyIndex ID = 667
- stIndex ID = 668
- svIndex ID = 669
- svAXIndex ID = 670
- svFIIndex ID = 671
- svSEIndex ID = 672
- swIndex ID = 673
- swCDIndex ID = 674
- swKEIndex ID = 675
- swTZIndex ID = 676
- swUGIndex ID = 677
- syrIndex ID = 678
- taIndex ID = 679
- taINIndex ID = 680
- taLKIndex ID = 681
- taMYIndex ID = 682
- taSGIndex ID = 683
- teIndex ID = 684
- teINIndex ID = 685
- teoIndex ID = 686
- teoKEIndex ID = 687
- teoUGIndex ID = 688
- tgIndex ID = 689
- tgTJIndex ID = 690
- thIndex ID = 691
- thTHIndex ID = 692
- tiIndex ID = 693
- tiERIndex ID = 694
- tiETIndex ID = 695
- tigIndex ID = 696
- tkIndex ID = 697
- tkTMIndex ID = 698
- tlIndex ID = 699
- tnIndex ID = 700
- toIndex ID = 701
- toTOIndex ID = 702
- trIndex ID = 703
- trCYIndex ID = 704
- trTRIndex ID = 705
- tsIndex ID = 706
- ttIndex ID = 707
- ttRUIndex ID = 708
- twqIndex ID = 709
- twqNEIndex ID = 710
- tzmIndex ID = 711
- tzmMAIndex ID = 712
- ugIndex ID = 713
- ugCNIndex ID = 714
- ukIndex ID = 715
- ukUAIndex ID = 716
- urIndex ID = 717
- urINIndex ID = 718
- urPKIndex ID = 719
- uzIndex ID = 720
- uzArabIndex ID = 721
- uzArabAFIndex ID = 722
- uzCyrlIndex ID = 723
- uzCyrlUZIndex ID = 724
- uzLatnIndex ID = 725
- uzLatnUZIndex ID = 726
- vaiIndex ID = 727
- vaiLatnIndex ID = 728
- vaiLatnLRIndex ID = 729
- vaiVaiiIndex ID = 730
- vaiVaiiLRIndex ID = 731
- veIndex ID = 732
- viIndex ID = 733
- viVNIndex ID = 734
- voIndex ID = 735
- vo001Index ID = 736
- vunIndex ID = 737
- vunTZIndex ID = 738
- waIndex ID = 739
- waeIndex ID = 740
- waeCHIndex ID = 741
- woIndex ID = 742
- woSNIndex ID = 743
- xhIndex ID = 744
- xogIndex ID = 745
- xogUGIndex ID = 746
- yavIndex ID = 747
- yavCMIndex ID = 748
- yiIndex ID = 749
- yi001Index ID = 750
- yoIndex ID = 751
- yoBJIndex ID = 752
- yoNGIndex ID = 753
- yueIndex ID = 754
- yueHansIndex ID = 755
- yueHansCNIndex ID = 756
- yueHantIndex ID = 757
- yueHantHKIndex ID = 758
- zghIndex ID = 759
- zghMAIndex ID = 760
- zhIndex ID = 761
- zhHansIndex ID = 762
- zhHansCNIndex ID = 763
- zhHansHKIndex ID = 764
- zhHansMOIndex ID = 765
- zhHansSGIndex ID = 766
- zhHantIndex ID = 767
- zhHantHKIndex ID = 768
- zhHantMOIndex ID = 769
- zhHantTWIndex ID = 770
- zuIndex ID = 771
- zuZAIndex ID = 772
- caESvalenciaIndex ID = 773
- enUSuvaposixIndex ID = 774
-)
-
-var coreTags = []language.CompactCoreInfo{ // 773 elements
- // Entry 0 - 1F
- 0x00000000, 0x01600000, 0x016000d2, 0x01600161,
- 0x01c00000, 0x01c00052, 0x02100000, 0x02100080,
- 0x02700000, 0x0270006f, 0x03a00000, 0x03a00001,
- 0x03a00023, 0x03a00039, 0x03a00062, 0x03a00067,
- 0x03a0006b, 0x03a0006c, 0x03a0006d, 0x03a00097,
- 0x03a0009b, 0x03a000a1, 0x03a000a8, 0x03a000ac,
- 0x03a000b0, 0x03a000b9, 0x03a000ba, 0x03a000c9,
- 0x03a000e1, 0x03a000ed, 0x03a000f3, 0x03a00108,
- // Entry 20 - 3F
- 0x03a0010b, 0x03a00115, 0x03a00117, 0x03a0011c,
- 0x03a00120, 0x03a00128, 0x03a0015e, 0x04000000,
- 0x04300000, 0x04300099, 0x04400000, 0x0440012f,
- 0x04800000, 0x0480006e, 0x05800000, 0x05820000,
- 0x05820032, 0x0585a000, 0x0585a032, 0x05e00000,
- 0x05e00052, 0x07100000, 0x07100047, 0x07500000,
- 0x07500162, 0x07900000, 0x0790012f, 0x07e00000,
- 0x07e00038, 0x08200000, 0x0a000000, 0x0a0000c3,
- // Entry 40 - 5F
- 0x0a500000, 0x0a500035, 0x0a500099, 0x0a900000,
- 0x0a900053, 0x0a900099, 0x0b200000, 0x0b200078,
- 0x0b500000, 0x0b500099, 0x0b700000, 0x0b720000,
- 0x0b720033, 0x0b75a000, 0x0b75a033, 0x0d700000,
- 0x0d700022, 0x0d70006e, 0x0d700078, 0x0d70009e,
- 0x0db00000, 0x0db00035, 0x0db00099, 0x0dc00000,
- 0x0dc00106, 0x0df00000, 0x0df00131, 0x0e500000,
- 0x0e500135, 0x0e900000, 0x0e90009b, 0x0e90009c,
- // Entry 60 - 7F
- 0x0fa00000, 0x0fa0005e, 0x0fe00000, 0x0fe00106,
- 0x10000000, 0x1000007b, 0x10100000, 0x10100063,
- 0x10100082, 0x10800000, 0x108000a4, 0x10d00000,
- 0x10d0002e, 0x10d00036, 0x10d0004e, 0x10d00060,
- 0x10d0009e, 0x10d000b2, 0x10d000b7, 0x11700000,
- 0x117000d4, 0x11f00000, 0x11f00060, 0x12400000,
- 0x12400052, 0x12800000, 0x12b00000, 0x12b00114,
- 0x12d00000, 0x12d00043, 0x12f00000, 0x12f000a4,
- // Entry 80 - 9F
- 0x13000000, 0x13000080, 0x13000122, 0x13600000,
- 0x1360005d, 0x13600087, 0x13900000, 0x13900001,
- 0x1390001a, 0x13900025, 0x13900026, 0x1390002d,
- 0x1390002e, 0x1390002f, 0x13900034, 0x13900036,
- 0x1390003a, 0x1390003d, 0x13900042, 0x13900046,
- 0x13900048, 0x13900049, 0x1390004a, 0x1390004e,
- 0x13900050, 0x13900052, 0x1390005c, 0x1390005d,
- 0x13900060, 0x13900061, 0x13900063, 0x13900064,
- // Entry A0 - BF
- 0x1390006d, 0x13900072, 0x13900073, 0x13900074,
- 0x13900075, 0x1390007b, 0x1390007c, 0x1390007f,
- 0x13900080, 0x13900081, 0x13900083, 0x1390008a,
- 0x1390008c, 0x1390008d, 0x13900096, 0x13900097,
- 0x13900098, 0x13900099, 0x1390009a, 0x1390009f,
- 0x139000a0, 0x139000a4, 0x139000a7, 0x139000a9,
- 0x139000ad, 0x139000b1, 0x139000b4, 0x139000b5,
- 0x139000bf, 0x139000c0, 0x139000c6, 0x139000c7,
- // Entry C0 - DF
- 0x139000ca, 0x139000cb, 0x139000cc, 0x139000ce,
- 0x139000d0, 0x139000d2, 0x139000d5, 0x139000d6,
- 0x139000d9, 0x139000dd, 0x139000df, 0x139000e0,
- 0x139000e6, 0x139000e7, 0x139000e8, 0x139000eb,
- 0x139000ec, 0x139000f0, 0x13900107, 0x13900109,
- 0x1390010a, 0x1390010b, 0x1390010c, 0x1390010d,
- 0x1390010e, 0x1390010f, 0x13900112, 0x13900117,
- 0x1390011b, 0x1390011d, 0x1390011f, 0x13900125,
- // Entry E0 - FF
- 0x13900129, 0x1390012c, 0x1390012d, 0x1390012f,
- 0x13900131, 0x13900133, 0x13900135, 0x13900139,
- 0x1390013c, 0x1390013d, 0x1390013f, 0x13900142,
- 0x13900161, 0x13900162, 0x13900164, 0x13c00000,
- 0x13c00001, 0x13e00000, 0x13e0001f, 0x13e0002c,
- 0x13e0003f, 0x13e00041, 0x13e00048, 0x13e00051,
- 0x13e00054, 0x13e00056, 0x13e00059, 0x13e00065,
- 0x13e00068, 0x13e00069, 0x13e0006e, 0x13e00086,
- // Entry 100 - 11F
- 0x13e00089, 0x13e0008f, 0x13e00094, 0x13e000cf,
- 0x13e000d8, 0x13e000e2, 0x13e000e4, 0x13e000e7,
- 0x13e000ec, 0x13e000f1, 0x13e0011a, 0x13e00135,
- 0x13e00136, 0x13e0013b, 0x14000000, 0x1400006a,
- 0x14500000, 0x1450006e, 0x14600000, 0x14600052,
- 0x14800000, 0x14800024, 0x1480009c, 0x14e00000,
- 0x14e00052, 0x14e00084, 0x14e000c9, 0x14e00114,
- 0x15100000, 0x15100072, 0x15300000, 0x153000e7,
- // Entry 120 - 13F
- 0x15800000, 0x15800063, 0x15800076, 0x15e00000,
- 0x15e00036, 0x15e00037, 0x15e0003a, 0x15e0003b,
- 0x15e0003c, 0x15e00049, 0x15e0004b, 0x15e0004c,
- 0x15e0004d, 0x15e0004e, 0x15e0004f, 0x15e00052,
- 0x15e00062, 0x15e00067, 0x15e00078, 0x15e0007a,
- 0x15e0007e, 0x15e00084, 0x15e00085, 0x15e00086,
- 0x15e00091, 0x15e000a8, 0x15e000b7, 0x15e000ba,
- 0x15e000bb, 0x15e000be, 0x15e000bf, 0x15e000c3,
- // Entry 140 - 15F
- 0x15e000c8, 0x15e000c9, 0x15e000cc, 0x15e000d3,
- 0x15e000d4, 0x15e000e5, 0x15e000ea, 0x15e00102,
- 0x15e00107, 0x15e0010a, 0x15e00114, 0x15e0011c,
- 0x15e00120, 0x15e00122, 0x15e00128, 0x15e0013f,
- 0x15e00140, 0x15e0015f, 0x16900000, 0x1690009e,
- 0x16d00000, 0x16d000d9, 0x16e00000, 0x16e00096,
- 0x17e00000, 0x17e0007b, 0x19000000, 0x1900006e,
- 0x1a300000, 0x1a30004e, 0x1a300078, 0x1a3000b2,
- // Entry 160 - 17F
- 0x1a400000, 0x1a400099, 0x1a900000, 0x1ab00000,
- 0x1ab000a4, 0x1ac00000, 0x1ac00098, 0x1b400000,
- 0x1b400080, 0x1b4000d4, 0x1b4000d6, 0x1b800000,
- 0x1b800135, 0x1bc00000, 0x1bc00097, 0x1be00000,
- 0x1be00099, 0x1d100000, 0x1d100033, 0x1d100090,
- 0x1d200000, 0x1d200060, 0x1d500000, 0x1d500092,
- 0x1d700000, 0x1d700028, 0x1e100000, 0x1e100095,
- 0x1e700000, 0x1e7000d6, 0x1ea00000, 0x1ea00053,
- // Entry 180 - 19F
- 0x1f300000, 0x1f500000, 0x1f800000, 0x1f80009d,
- 0x1f900000, 0x1f90004e, 0x1f90009e, 0x1f900113,
- 0x1f900138, 0x1fa00000, 0x1fb00000, 0x20000000,
- 0x200000a2, 0x20300000, 0x20700000, 0x20700052,
- 0x20800000, 0x20a00000, 0x20a0012f, 0x20e00000,
- 0x20f00000, 0x21000000, 0x2100007d, 0x21200000,
- 0x21200067, 0x21600000, 0x21700000, 0x217000a4,
- 0x21f00000, 0x22300000, 0x2230012f, 0x22700000,
- // Entry 1A0 - 1BF
- 0x2270005a, 0x23400000, 0x234000c3, 0x23900000,
- 0x239000a4, 0x24200000, 0x242000ae, 0x24400000,
- 0x24400052, 0x24500000, 0x24500082, 0x24600000,
- 0x246000a4, 0x24a00000, 0x24a000a6, 0x25100000,
- 0x25100099, 0x25400000, 0x254000aa, 0x254000ab,
- 0x25600000, 0x25600099, 0x26a00000, 0x26a00099,
- 0x26b00000, 0x26b0012f, 0x26d00000, 0x26d00052,
- 0x26e00000, 0x26e00060, 0x27400000, 0x28100000,
- // Entry 1C0 - 1DF
- 0x2810007b, 0x28a00000, 0x28a000a5, 0x29100000,
- 0x2910012f, 0x29500000, 0x295000b7, 0x2a300000,
- 0x2a300131, 0x2af00000, 0x2af00135, 0x2b500000,
- 0x2b50002a, 0x2b50004b, 0x2b50004c, 0x2b50004d,
- 0x2b800000, 0x2b8000af, 0x2bf00000, 0x2bf0009b,
- 0x2bf0009c, 0x2c000000, 0x2c0000b6, 0x2c200000,
- 0x2c20004b, 0x2c400000, 0x2c4000a4, 0x2c500000,
- 0x2c5000a4, 0x2c700000, 0x2c7000b8, 0x2d100000,
- // Entry 1E0 - 1FF
- 0x2d1000a4, 0x2d10012f, 0x2e900000, 0x2e9000a4,
- 0x2ed00000, 0x2ed000cc, 0x2f100000, 0x2f1000bf,
- 0x2f200000, 0x2f2000d1, 0x2f400000, 0x2f400052,
- 0x2ff00000, 0x2ff000c2, 0x30400000, 0x30400099,
- 0x30b00000, 0x30b000c5, 0x31000000, 0x31b00000,
- 0x31b00099, 0x31f00000, 0x31f0003e, 0x31f000d0,
- 0x31f0010d, 0x32000000, 0x320000cb, 0x32500000,
- 0x32500052, 0x33100000, 0x331000c4, 0x33a00000,
- // Entry 200 - 21F
- 0x33a0009c, 0x34100000, 0x34500000, 0x345000d2,
- 0x34700000, 0x347000da, 0x34700110, 0x34e00000,
- 0x34e00164, 0x35000000, 0x35000060, 0x350000d9,
- 0x35100000, 0x35100099, 0x351000db, 0x36700000,
- 0x36700030, 0x36700036, 0x36700040, 0x3670005b,
- 0x367000d9, 0x36700116, 0x3670011b, 0x36800000,
- 0x36800052, 0x36a00000, 0x36a000da, 0x36c00000,
- 0x36c00052, 0x36f00000, 0x37500000, 0x37600000,
- // Entry 220 - 23F
- 0x37a00000, 0x38000000, 0x38000117, 0x38700000,
- 0x38900000, 0x38900131, 0x39000000, 0x3900006f,
- 0x390000a4, 0x39500000, 0x39500099, 0x39800000,
- 0x3980007d, 0x39800106, 0x39d00000, 0x39d05000,
- 0x39d050e8, 0x39d36000, 0x39d36099, 0x3a100000,
- 0x3b300000, 0x3b3000e9, 0x3bd00000, 0x3bd00001,
- 0x3be00000, 0x3be00024, 0x3c000000, 0x3c00002a,
- 0x3c000041, 0x3c00004e, 0x3c00005a, 0x3c000086,
- // Entry 240 - 25F
- 0x3c00008b, 0x3c0000b7, 0x3c0000c6, 0x3c0000d1,
- 0x3c0000ee, 0x3c000118, 0x3c000126, 0x3c400000,
- 0x3c40003f, 0x3c400069, 0x3c4000e4, 0x3d400000,
- 0x3d40004e, 0x3d900000, 0x3d90003a, 0x3dc00000,
- 0x3dc000bc, 0x3dc00104, 0x3de00000, 0x3de0012f,
- 0x3e200000, 0x3e200047, 0x3e2000a5, 0x3e2000ae,
- 0x3e2000bc, 0x3e200106, 0x3e200130, 0x3e500000,
- 0x3e500107, 0x3e600000, 0x3e60012f, 0x3eb00000,
- // Entry 260 - 27F
- 0x3eb00106, 0x3ec00000, 0x3ec000a4, 0x3f300000,
- 0x3f30012f, 0x3fa00000, 0x3fa000e8, 0x3fc00000,
- 0x3fd00000, 0x3fd00072, 0x3fd000da, 0x3fd0010c,
- 0x3ff00000, 0x3ff000d1, 0x40100000, 0x401000c3,
- 0x40200000, 0x4020004c, 0x40700000, 0x40800000,
- 0x4085a000, 0x4085a0ba, 0x408e8000, 0x408e80ba,
- 0x40c00000, 0x40c000b3, 0x41200000, 0x41200111,
- 0x41600000, 0x4160010f, 0x41c00000, 0x41d00000,
- // Entry 280 - 29F
- 0x41e00000, 0x41f00000, 0x41f00072, 0x42200000,
- 0x42300000, 0x42300164, 0x42900000, 0x42900062,
- 0x4290006f, 0x429000a4, 0x42900115, 0x43100000,
- 0x43100027, 0x431000c2, 0x4310014d, 0x43200000,
- 0x43220000, 0x43220033, 0x432200bd, 0x43220105,
- 0x4322014d, 0x4325a000, 0x4325a033, 0x4325a0bd,
- 0x4325a105, 0x4325a14d, 0x43700000, 0x43a00000,
- 0x43b00000, 0x44400000, 0x44400031, 0x44400072,
- // Entry 2A0 - 2BF
- 0x4440010c, 0x44500000, 0x4450004b, 0x445000a4,
- 0x4450012f, 0x44500131, 0x44e00000, 0x45000000,
- 0x45000099, 0x450000b3, 0x450000d0, 0x4500010d,
- 0x46100000, 0x46100099, 0x46400000, 0x464000a4,
- 0x46400131, 0x46700000, 0x46700124, 0x46b00000,
- 0x46b00123, 0x46f00000, 0x46f0006d, 0x46f0006f,
- 0x47100000, 0x47600000, 0x47600127, 0x47a00000,
- 0x48000000, 0x48200000, 0x48200129, 0x48a00000,
- // Entry 2C0 - 2DF
- 0x48a0005d, 0x48a0012b, 0x48e00000, 0x49400000,
- 0x49400106, 0x4a400000, 0x4a4000d4, 0x4a900000,
- 0x4a9000ba, 0x4ac00000, 0x4ac00053, 0x4ae00000,
- 0x4ae00130, 0x4b400000, 0x4b400099, 0x4b4000e8,
- 0x4bc00000, 0x4bc05000, 0x4bc05024, 0x4bc20000,
- 0x4bc20137, 0x4bc5a000, 0x4bc5a137, 0x4be00000,
- 0x4be5a000, 0x4be5a0b4, 0x4bef1000, 0x4bef10b4,
- 0x4c000000, 0x4c300000, 0x4c30013e, 0x4c900000,
- // Entry 2E0 - 2FF
- 0x4c900001, 0x4cc00000, 0x4cc0012f, 0x4ce00000,
- 0x4cf00000, 0x4cf0004e, 0x4e500000, 0x4e500114,
- 0x4f200000, 0x4fb00000, 0x4fb00131, 0x50900000,
- 0x50900052, 0x51200000, 0x51200001, 0x51800000,
- 0x5180003b, 0x518000d6, 0x51f00000, 0x51f3b000,
- 0x51f3b053, 0x51f3c000, 0x51f3c08d, 0x52800000,
- 0x528000ba, 0x52900000, 0x5293b000, 0x5293b053,
- 0x5293b08d, 0x5293b0c6, 0x5293b10d, 0x5293c000,
- // Entry 300 - 31F
- 0x5293c08d, 0x5293c0c6, 0x5293c12e, 0x52f00000,
- 0x52f00161,
-} // Size: 3116 bytes
-
-const specialTagsStr string = "ca-ES-valencia en-US-u-va-posix"
-
-// Total table size 3147 bytes (3KiB); checksum: 6772C83C
diff --git a/vendor/golang.org/x/text/internal/language/compact/tags.go b/vendor/golang.org/x/text/internal/language/compact/tags.go
deleted file mode 100644
index ca135d295a..0000000000
--- a/vendor/golang.org/x/text/internal/language/compact/tags.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package compact
-
-var (
- und = Tag{}
-
- Und Tag = Tag{}
-
- Afrikaans Tag = Tag{language: afIndex, locale: afIndex}
- Amharic Tag = Tag{language: amIndex, locale: amIndex}
- Arabic Tag = Tag{language: arIndex, locale: arIndex}
- ModernStandardArabic Tag = Tag{language: ar001Index, locale: ar001Index}
- Azerbaijani Tag = Tag{language: azIndex, locale: azIndex}
- Bulgarian Tag = Tag{language: bgIndex, locale: bgIndex}
- Bengali Tag = Tag{language: bnIndex, locale: bnIndex}
- Catalan Tag = Tag{language: caIndex, locale: caIndex}
- Czech Tag = Tag{language: csIndex, locale: csIndex}
- Danish Tag = Tag{language: daIndex, locale: daIndex}
- German Tag = Tag{language: deIndex, locale: deIndex}
- Greek Tag = Tag{language: elIndex, locale: elIndex}
- English Tag = Tag{language: enIndex, locale: enIndex}
- AmericanEnglish Tag = Tag{language: enUSIndex, locale: enUSIndex}
- BritishEnglish Tag = Tag{language: enGBIndex, locale: enGBIndex}
- Spanish Tag = Tag{language: esIndex, locale: esIndex}
- EuropeanSpanish Tag = Tag{language: esESIndex, locale: esESIndex}
- LatinAmericanSpanish Tag = Tag{language: es419Index, locale: es419Index}
- Estonian Tag = Tag{language: etIndex, locale: etIndex}
- Persian Tag = Tag{language: faIndex, locale: faIndex}
- Finnish Tag = Tag{language: fiIndex, locale: fiIndex}
- Filipino Tag = Tag{language: filIndex, locale: filIndex}
- French Tag = Tag{language: frIndex, locale: frIndex}
- CanadianFrench Tag = Tag{language: frCAIndex, locale: frCAIndex}
- Gujarati Tag = Tag{language: guIndex, locale: guIndex}
- Hebrew Tag = Tag{language: heIndex, locale: heIndex}
- Hindi Tag = Tag{language: hiIndex, locale: hiIndex}
- Croatian Tag = Tag{language: hrIndex, locale: hrIndex}
- Hungarian Tag = Tag{language: huIndex, locale: huIndex}
- Armenian Tag = Tag{language: hyIndex, locale: hyIndex}
- Indonesian Tag = Tag{language: idIndex, locale: idIndex}
- Icelandic Tag = Tag{language: isIndex, locale: isIndex}
- Italian Tag = Tag{language: itIndex, locale: itIndex}
- Japanese Tag = Tag{language: jaIndex, locale: jaIndex}
- Georgian Tag = Tag{language: kaIndex, locale: kaIndex}
- Kazakh Tag = Tag{language: kkIndex, locale: kkIndex}
- Khmer Tag = Tag{language: kmIndex, locale: kmIndex}
- Kannada Tag = Tag{language: knIndex, locale: knIndex}
- Korean Tag = Tag{language: koIndex, locale: koIndex}
- Kirghiz Tag = Tag{language: kyIndex, locale: kyIndex}
- Lao Tag = Tag{language: loIndex, locale: loIndex}
- Lithuanian Tag = Tag{language: ltIndex, locale: ltIndex}
- Latvian Tag = Tag{language: lvIndex, locale: lvIndex}
- Macedonian Tag = Tag{language: mkIndex, locale: mkIndex}
- Malayalam Tag = Tag{language: mlIndex, locale: mlIndex}
- Mongolian Tag = Tag{language: mnIndex, locale: mnIndex}
- Marathi Tag = Tag{language: mrIndex, locale: mrIndex}
- Malay Tag = Tag{language: msIndex, locale: msIndex}
- Burmese Tag = Tag{language: myIndex, locale: myIndex}
- Nepali Tag = Tag{language: neIndex, locale: neIndex}
- Dutch Tag = Tag{language: nlIndex, locale: nlIndex}
- Norwegian Tag = Tag{language: noIndex, locale: noIndex}
- Punjabi Tag = Tag{language: paIndex, locale: paIndex}
- Polish Tag = Tag{language: plIndex, locale: plIndex}
- Portuguese Tag = Tag{language: ptIndex, locale: ptIndex}
- BrazilianPortuguese Tag = Tag{language: ptBRIndex, locale: ptBRIndex}
- EuropeanPortuguese Tag = Tag{language: ptPTIndex, locale: ptPTIndex}
- Romanian Tag = Tag{language: roIndex, locale: roIndex}
- Russian Tag = Tag{language: ruIndex, locale: ruIndex}
- Sinhala Tag = Tag{language: siIndex, locale: siIndex}
- Slovak Tag = Tag{language: skIndex, locale: skIndex}
- Slovenian Tag = Tag{language: slIndex, locale: slIndex}
- Albanian Tag = Tag{language: sqIndex, locale: sqIndex}
- Serbian Tag = Tag{language: srIndex, locale: srIndex}
- SerbianLatin Tag = Tag{language: srLatnIndex, locale: srLatnIndex}
- Swedish Tag = Tag{language: svIndex, locale: svIndex}
- Swahili Tag = Tag{language: swIndex, locale: swIndex}
- Tamil Tag = Tag{language: taIndex, locale: taIndex}
- Telugu Tag = Tag{language: teIndex, locale: teIndex}
- Thai Tag = Tag{language: thIndex, locale: thIndex}
- Turkish Tag = Tag{language: trIndex, locale: trIndex}
- Ukrainian Tag = Tag{language: ukIndex, locale: ukIndex}
- Urdu Tag = Tag{language: urIndex, locale: urIndex}
- Uzbek Tag = Tag{language: uzIndex, locale: uzIndex}
- Vietnamese Tag = Tag{language: viIndex, locale: viIndex}
- Chinese Tag = Tag{language: zhIndex, locale: zhIndex}
- SimplifiedChinese Tag = Tag{language: zhHansIndex, locale: zhHansIndex}
- TraditionalChinese Tag = Tag{language: zhHantIndex, locale: zhHantIndex}
- Zulu Tag = Tag{language: zuIndex, locale: zuIndex}
-)
diff --git a/vendor/golang.org/x/text/internal/language/compose.go b/vendor/golang.org/x/text/internal/language/compose.go
deleted file mode 100644
index 4ae78e0fa5..0000000000
--- a/vendor/golang.org/x/text/internal/language/compose.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package language
-
-import (
- "sort"
- "strings"
-)
-
-// A Builder allows constructing a Tag from individual components.
-// Its main user is Compose in the top-level language package.
-type Builder struct {
- Tag Tag
-
- private string // the x extension
- variants []string
- extensions []string
-}
-
-// Make returns a new Tag from the current settings.
-func (b *Builder) Make() Tag {
- t := b.Tag
-
- if len(b.extensions) > 0 || len(b.variants) > 0 {
- sort.Sort(sortVariants(b.variants))
- sort.Strings(b.extensions)
-
- if b.private != "" {
- b.extensions = append(b.extensions, b.private)
- }
- n := maxCoreSize + tokenLen(b.variants...) + tokenLen(b.extensions...)
- buf := make([]byte, n)
- p := t.genCoreBytes(buf)
- t.pVariant = byte(p)
- p += appendTokens(buf[p:], b.variants...)
- t.pExt = uint16(p)
- p += appendTokens(buf[p:], b.extensions...)
- t.str = string(buf[:p])
- // We may not always need to remake the string, but when or when not
- // to do so is rather tricky.
- scan := makeScanner(buf[:p])
- t, _ = parse(&scan, "")
- return t
-
- } else if b.private != "" {
- t.str = b.private
- t.RemakeString()
- }
- return t
-}
-
-// SetTag copies all the settings from a given Tag. Any previously set values
-// are discarded.
-func (b *Builder) SetTag(t Tag) {
- b.Tag.LangID = t.LangID
- b.Tag.RegionID = t.RegionID
- b.Tag.ScriptID = t.ScriptID
- // TODO: optimize
- b.variants = b.variants[:0]
- if variants := t.Variants(); variants != "" {
- for _, vr := range strings.Split(variants[1:], "-") {
- b.variants = append(b.variants, vr)
- }
- }
- b.extensions, b.private = b.extensions[:0], ""
- for _, e := range t.Extensions() {
- b.AddExt(e)
- }
-}
-
-// AddExt adds extension e to the tag. e must be a valid extension as returned
-// by Tag.Extension. If the extension already exists, it will be discarded,
-// except for a -u extension, where non-existing key-type pairs will added.
-func (b *Builder) AddExt(e string) {
- if e[0] == 'x' {
- if b.private == "" {
- b.private = e
- }
- return
- }
- for i, s := range b.extensions {
- if s[0] == e[0] {
- if e[0] == 'u' {
- b.extensions[i] += e[1:]
- }
- return
- }
- }
- b.extensions = append(b.extensions, e)
-}
-
-// SetExt sets the extension e to the tag. e must be a valid extension as
-// returned by Tag.Extension. If the extension already exists, it will be
-// overwritten, except for a -u extension, where the individual key-type pairs
-// will be set.
-func (b *Builder) SetExt(e string) {
- if e[0] == 'x' {
- b.private = e
- return
- }
- for i, s := range b.extensions {
- if s[0] == e[0] {
- if e[0] == 'u' {
- b.extensions[i] = e + s[1:]
- } else {
- b.extensions[i] = e
- }
- return
- }
- }
- b.extensions = append(b.extensions, e)
-}
-
-// AddVariant adds any number of variants.
-func (b *Builder) AddVariant(v ...string) {
- for _, v := range v {
- if v != "" {
- b.variants = append(b.variants, v)
- }
- }
-}
-
-// ClearVariants removes any variants previously added, including those
-// copied from a Tag in SetTag.
-func (b *Builder) ClearVariants() {
- b.variants = b.variants[:0]
-}
-
-// ClearExtensions removes any extensions previously added, including those
-// copied from a Tag in SetTag.
-func (b *Builder) ClearExtensions() {
- b.private = ""
- b.extensions = b.extensions[:0]
-}
-
-func tokenLen(token ...string) (n int) {
- for _, t := range token {
- n += len(t) + 1
- }
- return
-}
-
-func appendTokens(b []byte, token ...string) int {
- p := 0
- for _, t := range token {
- b[p] = '-'
- copy(b[p+1:], t)
- p += 1 + len(t)
- }
- return p
-}
-
-type sortVariants []string
-
-func (s sortVariants) Len() int {
- return len(s)
-}
-
-func (s sortVariants) Swap(i, j int) {
- s[j], s[i] = s[i], s[j]
-}
-
-func (s sortVariants) Less(i, j int) bool {
- return variantIndex[s[i]] < variantIndex[s[j]]
-}
diff --git a/vendor/golang.org/x/text/internal/language/coverage.go b/vendor/golang.org/x/text/internal/language/coverage.go
deleted file mode 100644
index 9b20b88feb..0000000000
--- a/vendor/golang.org/x/text/internal/language/coverage.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package language
-
-// BaseLanguages returns the list of all supported base languages. It generates
-// the list by traversing the internal structures.
-func BaseLanguages() []Language {
- base := make([]Language, 0, NumLanguages)
- for i := 0; i < langNoIndexOffset; i++ {
- // We included "und" already for the value 0.
- if i != nonCanonicalUnd {
- base = append(base, Language(i))
- }
- }
- i := langNoIndexOffset
- for _, v := range langNoIndex {
- for k := 0; k < 8; k++ {
- if v&1 == 1 {
- base = append(base, Language(i))
- }
- v >>= 1
- i++
- }
- }
- return base
-}
diff --git a/vendor/golang.org/x/text/internal/language/language.go b/vendor/golang.org/x/text/internal/language/language.go
deleted file mode 100644
index 09d41c7367..0000000000
--- a/vendor/golang.org/x/text/internal/language/language.go
+++ /dev/null
@@ -1,627 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:generate go run gen.go gen_common.go -output tables.go
-
-package language // import "golang.org/x/text/internal/language"
-
-// TODO: Remove above NOTE after:
-// - verifying that tables are dropped correctly (most notably matcher tables).
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-const (
- // maxCoreSize is the maximum size of a BCP 47 tag without variants and
- // extensions. Equals max lang (3) + script (4) + max reg (3) + 2 dashes.
- maxCoreSize = 12
-
- // max99thPercentileSize is a somewhat arbitrary buffer size that presumably
- // is large enough to hold at least 99% of the BCP 47 tags.
- max99thPercentileSize = 32
-
- // maxSimpleUExtensionSize is the maximum size of a -u extension with one
- // key-type pair. Equals len("-u-") + key (2) + dash + max value (8).
- maxSimpleUExtensionSize = 14
-)
-
-// Tag represents a BCP 47 language tag. It is used to specify an instance of a
-// specific language or locale. All language tag values are guaranteed to be
-// well-formed. The zero value of Tag is Und.
-type Tag struct {
- // TODO: the following fields have the form TagTypeID. This name is chosen
- // to allow refactoring the public package without conflicting with its
- // Base, Script, and Region methods. Once the transition is fully completed
- // the ID can be stripped from the name.
-
- LangID Language
- RegionID Region
- // TODO: we will soon run out of positions for ScriptID. Idea: instead of
- // storing lang, region, and ScriptID codes, store only the compact index and
- // have a lookup table from this code to its expansion. This greatly speeds
- // up table lookup and speeds up common variant cases.
- // This will also immediately free up 3 extra bytes. Also, the pVariant
- // field can now be moved to the lookup table, as the compact index uniquely
- // determines the offset of a possible variant.
- ScriptID Script
- pVariant byte // offset in str, includes preceding '-'
- pExt uint16 // offset of first extension, includes preceding '-'
-
- // str is the string representation of the Tag. It will only be used if the
- // tag has variants or extensions.
- str string
-}
-
-// Make is a convenience wrapper for Parse that omits the error.
-// In case of an error, a sensible default is returned.
-func Make(s string) Tag {
- t, _ := Parse(s)
- return t
-}
-
-// Raw returns the raw base language, script and region, without making an
-// attempt to infer their values.
-// TODO: consider removing
-func (t Tag) Raw() (b Language, s Script, r Region) {
- return t.LangID, t.ScriptID, t.RegionID
-}
-
-// equalTags compares language, script and region subtags only.
-func (t Tag) equalTags(a Tag) bool {
- return t.LangID == a.LangID && t.ScriptID == a.ScriptID && t.RegionID == a.RegionID
-}
-
-// IsRoot returns true if t is equal to language "und".
-func (t Tag) IsRoot() bool {
- if int(t.pVariant) < len(t.str) {
- return false
- }
- return t.equalTags(Und)
-}
-
-// IsPrivateUse reports whether the Tag consists solely of a private use
-// tag.
-func (t Tag) IsPrivateUse() bool {
- return t.str != "" && t.pVariant == 0
-}
-
-// RemakeString is used to update t.str in case lang, script or region changed.
-// It is assumed that pExt and pVariant still point to the start of the
-// respective parts.
-func (t *Tag) RemakeString() {
- if t.str == "" {
- return
- }
- extra := t.str[t.pVariant:]
- if t.pVariant > 0 {
- extra = extra[1:]
- }
- if t.equalTags(Und) && strings.HasPrefix(extra, "x-") {
- t.str = extra
- t.pVariant = 0
- t.pExt = 0
- return
- }
- var buf [max99thPercentileSize]byte // avoid extra memory allocation in most cases.
- b := buf[:t.genCoreBytes(buf[:])]
- if extra != "" {
- diff := len(b) - int(t.pVariant)
- b = append(b, '-')
- b = append(b, extra...)
- t.pVariant = uint8(int(t.pVariant) + diff)
- t.pExt = uint16(int(t.pExt) + diff)
- } else {
- t.pVariant = uint8(len(b))
- t.pExt = uint16(len(b))
- }
- t.str = string(b)
-}
-
-// genCoreBytes writes a string for the base languages, script and region tags
-// to the given buffer and returns the number of bytes written. It will never
-// write more than maxCoreSize bytes.
-func (t *Tag) genCoreBytes(buf []byte) int {
- n := t.LangID.StringToBuf(buf[:])
- if t.ScriptID != 0 {
- n += copy(buf[n:], "-")
- n += copy(buf[n:], t.ScriptID.String())
- }
- if t.RegionID != 0 {
- n += copy(buf[n:], "-")
- n += copy(buf[n:], t.RegionID.String())
- }
- return n
-}
-
-// String returns the canonical string representation of the language tag.
-func (t Tag) String() string {
- if t.str != "" {
- return t.str
- }
- if t.ScriptID == 0 && t.RegionID == 0 {
- return t.LangID.String()
- }
- buf := [maxCoreSize]byte{}
- return string(buf[:t.genCoreBytes(buf[:])])
-}
-
-// MarshalText implements encoding.TextMarshaler.
-func (t Tag) MarshalText() (text []byte, err error) {
- if t.str != "" {
- text = append(text, t.str...)
- } else if t.ScriptID == 0 && t.RegionID == 0 {
- text = append(text, t.LangID.String()...)
- } else {
- buf := [maxCoreSize]byte{}
- text = buf[:t.genCoreBytes(buf[:])]
- }
- return text, nil
-}
-
-// UnmarshalText implements encoding.TextUnmarshaler.
-func (t *Tag) UnmarshalText(text []byte) error {
- tag, err := Parse(string(text))
- *t = tag
- return err
-}
-
-// Variants returns the part of the tag holding all variants or the empty string
-// if there are no variants defined.
-func (t Tag) Variants() string {
- if t.pVariant == 0 {
- return ""
- }
- return t.str[t.pVariant:t.pExt]
-}
-
-// VariantOrPrivateUseTags returns variants or private use tags.
-func (t Tag) VariantOrPrivateUseTags() string {
- if t.pExt > 0 {
- return t.str[t.pVariant:t.pExt]
- }
- return t.str[t.pVariant:]
-}
-
-// HasString reports whether this tag defines more than just the raw
-// components.
-func (t Tag) HasString() bool {
- return t.str != ""
-}
-
-// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
-// specific language are substituted with fields from the parent language.
-// The parent for a language may change for newer versions of CLDR.
-func (t Tag) Parent() Tag {
- if t.str != "" {
- // Strip the variants and extensions.
- b, s, r := t.Raw()
- t = Tag{LangID: b, ScriptID: s, RegionID: r}
- if t.RegionID == 0 && t.ScriptID != 0 && t.LangID != 0 {
- base, _ := addTags(Tag{LangID: t.LangID})
- if base.ScriptID == t.ScriptID {
- return Tag{LangID: t.LangID}
- }
- }
- return t
- }
- if t.LangID != 0 {
- if t.RegionID != 0 {
- maxScript := t.ScriptID
- if maxScript == 0 {
- max, _ := addTags(t)
- maxScript = max.ScriptID
- }
-
- for i := range parents {
- if Language(parents[i].lang) == t.LangID && Script(parents[i].maxScript) == maxScript {
- for _, r := range parents[i].fromRegion {
- if Region(r) == t.RegionID {
- return Tag{
- LangID: t.LangID,
- ScriptID: Script(parents[i].script),
- RegionID: Region(parents[i].toRegion),
- }
- }
- }
- }
- }
-
- // Strip the script if it is the default one.
- base, _ := addTags(Tag{LangID: t.LangID})
- if base.ScriptID != maxScript {
- return Tag{LangID: t.LangID, ScriptID: maxScript}
- }
- return Tag{LangID: t.LangID}
- } else if t.ScriptID != 0 {
- // The parent for a base-script pair with a non-default script is
- // "und" instead of the base language.
- base, _ := addTags(Tag{LangID: t.LangID})
- if base.ScriptID != t.ScriptID {
- return Und
- }
- return Tag{LangID: t.LangID}
- }
- }
- return Und
-}
-
-// ParseExtension parses s as an extension and returns it on success.
-func ParseExtension(s string) (ext string, err error) {
- defer func() {
- if recover() != nil {
- ext = ""
- err = ErrSyntax
- }
- }()
-
- scan := makeScannerString(s)
- var end int
- if n := len(scan.token); n != 1 {
- return "", ErrSyntax
- }
- scan.toLower(0, len(scan.b))
- end = parseExtension(&scan)
- if end != len(s) {
- return "", ErrSyntax
- }
- return string(scan.b), nil
-}
-
-// HasVariants reports whether t has variants.
-func (t Tag) HasVariants() bool {
- return uint16(t.pVariant) < t.pExt
-}
-
-// HasExtensions reports whether t has extensions.
-func (t Tag) HasExtensions() bool {
- return int(t.pExt) < len(t.str)
-}
-
-// Extension returns the extension of type x for tag t. It will return
-// false for ok if t does not have the requested extension. The returned
-// extension will be invalid in this case.
-func (t Tag) Extension(x byte) (ext string, ok bool) {
- for i := int(t.pExt); i < len(t.str)-1; {
- var ext string
- i, ext = getExtension(t.str, i)
- if ext[0] == x {
- return ext, true
- }
- }
- return "", false
-}
-
-// Extensions returns all extensions of t.
-func (t Tag) Extensions() []string {
- e := []string{}
- for i := int(t.pExt); i < len(t.str)-1; {
- var ext string
- i, ext = getExtension(t.str, i)
- e = append(e, ext)
- }
- return e
-}
-
-// TypeForKey returns the type associated with the given key, where key and type
-// are of the allowed values defined for the Unicode locale extension ('u') in
-// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
-// TypeForKey will traverse the inheritance chain to get the correct value.
-//
-// If there are multiple types associated with a key, only the first will be
-// returned. If there is no type associated with a key, it returns the empty
-// string.
-func (t Tag) TypeForKey(key string) string {
- if _, start, end, _ := t.findTypeForKey(key); end != start {
- s := t.str[start:end]
- if p := strings.IndexByte(s, '-'); p >= 0 {
- s = s[:p]
- }
- return s
- }
- return ""
-}
-
-var (
- errPrivateUse = errors.New("cannot set a key on a private use tag")
- errInvalidArguments = errors.New("invalid key or type")
-)
-
-// SetTypeForKey returns a new Tag with the key set to type, where key and type
-// are of the allowed values defined for the Unicode locale extension ('u') in
-// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
-// An empty value removes an existing pair with the same key.
-func (t Tag) SetTypeForKey(key, value string) (Tag, error) {
- if t.IsPrivateUse() {
- return t, errPrivateUse
- }
- if len(key) != 2 {
- return t, errInvalidArguments
- }
-
- // Remove the setting if value is "".
- if value == "" {
- start, sep, end, _ := t.findTypeForKey(key)
- if start != sep {
- // Remove a possible empty extension.
- switch {
- case t.str[start-2] != '-': // has previous elements.
- case end == len(t.str), // end of string
- end+2 < len(t.str) && t.str[end+2] == '-': // end of extension
- start -= 2
- }
- if start == int(t.pVariant) && end == len(t.str) {
- t.str = ""
- t.pVariant, t.pExt = 0, 0
- } else {
- t.str = fmt.Sprintf("%s%s", t.str[:start], t.str[end:])
- }
- }
- return t, nil
- }
-
- if len(value) < 3 || len(value) > 8 {
- return t, errInvalidArguments
- }
-
- var (
- buf [maxCoreSize + maxSimpleUExtensionSize]byte
- uStart int // start of the -u extension.
- )
-
- // Generate the tag string if needed.
- if t.str == "" {
- uStart = t.genCoreBytes(buf[:])
- buf[uStart] = '-'
- uStart++
- }
-
- // Create new key-type pair and parse it to verify.
- b := buf[uStart:]
- copy(b, "u-")
- copy(b[2:], key)
- b[4] = '-'
- b = b[:5+copy(b[5:], value)]
- scan := makeScanner(b)
- if parseExtensions(&scan); scan.err != nil {
- return t, scan.err
- }
-
- // Assemble the replacement string.
- if t.str == "" {
- t.pVariant, t.pExt = byte(uStart-1), uint16(uStart-1)
- t.str = string(buf[:uStart+len(b)])
- } else {
- s := t.str
- start, sep, end, hasExt := t.findTypeForKey(key)
- if start == sep {
- if hasExt {
- b = b[2:]
- }
- t.str = fmt.Sprintf("%s-%s%s", s[:sep], b, s[end:])
- } else {
- t.str = fmt.Sprintf("%s-%s%s", s[:start+3], value, s[end:])
- }
- }
- return t, nil
-}
-
-// findTypeForKey returns the start and end position for the type corresponding
-// to key or the point at which to insert the key-value pair if the type
-// wasn't found. The hasExt return value reports whether an -u extension was present.
-// Note: the extensions are typically very small and are likely to contain
-// only one key-type pair.
-func (t Tag) findTypeForKey(key string) (start, sep, end int, hasExt bool) {
- p := int(t.pExt)
- if len(key) != 2 || p == len(t.str) || p == 0 {
- return p, p, p, false
- }
- s := t.str
-
- // Find the correct extension.
- for p++; s[p] != 'u'; p++ {
- if s[p] > 'u' {
- p--
- return p, p, p, false
- }
- if p = nextExtension(s, p); p == len(s) {
- return len(s), len(s), len(s), false
- }
- }
- // Proceed to the hyphen following the extension name.
- p++
-
- // curKey is the key currently being processed.
- curKey := ""
-
- // Iterate over keys until we get the end of a section.
- for {
- end = p
- for p++; p < len(s) && s[p] != '-'; p++ {
- }
- n := p - end - 1
- if n <= 2 && curKey == key {
- if sep < end {
- sep++
- }
- return start, sep, end, true
- }
- switch n {
- case 0, // invalid string
- 1: // next extension
- return end, end, end, true
- case 2:
- // next key
- curKey = s[end+1 : p]
- if curKey > key {
- return end, end, end, true
- }
- start = end
- sep = p
- }
- }
-}
-
-// ParseBase parses a 2- or 3-letter ISO 639 code.
-// It returns a ValueError if s is a well-formed but unknown language identifier
-// or another error if s is not well-formed.
-func ParseBase(s string) (l Language, err error) {
- defer func() {
- if recover() != nil {
- l = 0
- err = ErrSyntax
- }
- }()
-
- if n := len(s); n < 2 || 3 < n {
- return 0, ErrSyntax
- }
- var buf [3]byte
- return getLangID(buf[:copy(buf[:], s)])
-}
-
-// ParseScript parses a 4-letter ISO 15924 code.
-// It returns a ValueError if s is a well-formed but unknown script identifier
-// or another error if s is not well-formed.
-func ParseScript(s string) (scr Script, err error) {
- defer func() {
- if recover() != nil {
- scr = 0
- err = ErrSyntax
- }
- }()
-
- if len(s) != 4 {
- return 0, ErrSyntax
- }
- var buf [4]byte
- return getScriptID(script, buf[:copy(buf[:], s)])
-}
-
-// EncodeM49 returns the Region for the given UN M.49 code.
-// It returns an error if r is not a valid code.
-func EncodeM49(r int) (Region, error) {
- return getRegionM49(r)
-}
-
-// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code.
-// It returns a ValueError if s is a well-formed but unknown region identifier
-// or another error if s is not well-formed.
-func ParseRegion(s string) (r Region, err error) {
- defer func() {
- if recover() != nil {
- r = 0
- err = ErrSyntax
- }
- }()
-
- if n := len(s); n < 2 || 3 < n {
- return 0, ErrSyntax
- }
- var buf [3]byte
- return getRegionID(buf[:copy(buf[:], s)])
-}
-
-// IsCountry returns whether this region is a country or autonomous area. This
-// includes non-standard definitions from CLDR.
-func (r Region) IsCountry() bool {
- if r == 0 || r.IsGroup() || r.IsPrivateUse() && r != _XK {
- return false
- }
- return true
-}
-
-// IsGroup returns whether this region defines a collection of regions. This
-// includes non-standard definitions from CLDR.
-func (r Region) IsGroup() bool {
- if r == 0 {
- return false
- }
- return int(regionInclusion[r]) < len(regionContainment)
-}
-
-// Contains returns whether Region c is contained by Region r. It returns true
-// if c == r.
-func (r Region) Contains(c Region) bool {
- if r == c {
- return true
- }
- g := regionInclusion[r]
- if g >= nRegionGroups {
- return false
- }
- m := regionContainment[g]
-
- d := regionInclusion[c]
- b := regionInclusionBits[d]
-
- // A contained country may belong to multiple disjoint groups. Matching any
- // of these indicates containment. If the contained region is a group, it
- // must strictly be a subset.
- if d >= nRegionGroups {
- return b&m != 0
- }
- return b&^m == 0
-}
-
-var errNoTLD = errors.New("language: region is not a valid ccTLD")
-
-// TLD returns the country code top-level domain (ccTLD). UK is returned for GB.
-// In all other cases it returns either the region itself or an error.
-//
-// This method may return an error for a region for which there exists a
-// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The
-// region will already be canonicalized if it was obtained from a Tag that was
-// obtained using any of the default methods.
-func (r Region) TLD() (Region, error) {
- // See http://en.wikipedia.org/wiki/Country_code_top-level_domain for the
- // difference between ISO 3166-1 and IANA ccTLD.
- if r == _GB {
- r = _UK
- }
- if (r.typ() & ccTLD) == 0 {
- return 0, errNoTLD
- }
- return r, nil
-}
-
-// Canonicalize returns the region or a possible replacement if the region is
-// deprecated. It will not return a replacement for deprecated regions that
-// are split into multiple regions.
-func (r Region) Canonicalize() Region {
- if cr := normRegion(r); cr != 0 {
- return cr
- }
- return r
-}
-
-// Variant represents a registered variant of a language as defined by BCP 47.
-type Variant struct {
- ID uint8
- str string
-}
-
-// ParseVariant parses and returns a Variant. An error is returned if s is not
-// a valid variant.
-func ParseVariant(s string) (v Variant, err error) {
- defer func() {
- if recover() != nil {
- v = Variant{}
- err = ErrSyntax
- }
- }()
-
- s = strings.ToLower(s)
- if id, ok := variantIndex[s]; ok {
- return Variant{id, s}, nil
- }
- return Variant{}, NewValueError([]byte(s))
-}
-
-// String returns the string representation of the variant.
-func (v Variant) String() string {
- return v.str
-}
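
Most of language.go above backs methods that the public package re-exports. A short, hedged sketch of the corresponding public calls follows; the tag values are chosen for illustration only.

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	t := language.MustParse("de-CH-u-co-phonebk")

	// TypeForKey reads a key/type pair from the -u extension.
	fmt.Println(t.TypeForKey("co")) // phonebk

	// Extension returns the raw -u extension, if present.
	if e, ok := t.Extension('u'); ok {
		fmt.Println(e) // u-co-phonebk
	}

	// Parent strips variants and extensions, then follows the CLDR parent chain.
	fmt.Println(t.Parent()) // expected: de-CH
}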
diff --git a/vendor/golang.org/x/text/internal/language/lookup.go b/vendor/golang.org/x/text/internal/language/lookup.go
deleted file mode 100644
index 231b4fbdeb..0000000000
--- a/vendor/golang.org/x/text/internal/language/lookup.go
+++ /dev/null
@@ -1,412 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package language
-
-import (
- "bytes"
- "fmt"
- "sort"
- "strconv"
-
- "golang.org/x/text/internal/tag"
-)
-
-// findIndex tries to find the given tag in idx and returns a standardized error
-// if it could not be found.
-func findIndex(idx tag.Index, key []byte, form string) (index int, err error) {
- if !tag.FixCase(form, key) {
- return 0, ErrSyntax
- }
- i := idx.Index(key)
- if i == -1 {
- return 0, NewValueError(key)
- }
- return i, nil
-}
-
-func searchUint(imap []uint16, key uint16) int {
- return sort.Search(len(imap), func(i int) bool {
- return imap[i] >= key
- })
-}
-
-type Language uint16
-
-// getLangID returns the langID of s if s is a canonical subtag
-// or langUnknown if s is not a canonical subtag.
-func getLangID(s []byte) (Language, error) {
- if len(s) == 2 {
- return getLangISO2(s)
- }
- return getLangISO3(s)
-}
-
-// TODO language normalization as well as the AliasMaps could be moved to the
-// higher level package, but it is a bit tricky to separate the generation.
-
-func (id Language) Canonicalize() (Language, AliasType) {
- return normLang(id)
-}
-
-// normLang returns the mapped langID of id according to mapping m.
-func normLang(id Language) (Language, AliasType) {
- k := sort.Search(len(AliasMap), func(i int) bool {
- return AliasMap[i].From >= uint16(id)
- })
- if k < len(AliasMap) && AliasMap[k].From == uint16(id) {
- return Language(AliasMap[k].To), AliasTypes[k]
- }
- return id, AliasTypeUnknown
-}
-
-// getLangISO2 returns the langID for the given 2-letter ISO language code
-// or unknownLang if this does not exist.
-func getLangISO2(s []byte) (Language, error) {
- if !tag.FixCase("zz", s) {
- return 0, ErrSyntax
- }
- if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 {
- return Language(i), nil
- }
- return 0, NewValueError(s)
-}
-
-const base = 'z' - 'a' + 1
-
-func strToInt(s []byte) uint {
- v := uint(0)
- for i := 0; i < len(s); i++ {
- v *= base
- v += uint(s[i] - 'a')
- }
- return v
-}
-
-// converts the given integer to the original ASCII string passed to strToInt.
-// len(s) must match the number of characters obtained.
-func intToStr(v uint, s []byte) {
- for i := len(s) - 1; i >= 0; i-- {
- s[i] = byte(v%base) + 'a'
- v /= base
- }
-}
-
-// getLangISO3 returns the langID for the given 3-letter ISO language code
-// or unknownLang if this does not exist.
-func getLangISO3(s []byte) (Language, error) {
- if tag.FixCase("und", s) {
- // first try to match canonical 3-letter entries
- for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) {
- if e := lang.Elem(i); e[3] == 0 && e[2] == s[2] {
- // We treat "und" as special and always translate it to "unspecified".
- // Note that ZZ and Zzzz are private use and are not treated as
- // unspecified by default.
- id := Language(i)
- if id == nonCanonicalUnd {
- return 0, nil
- }
- return id, nil
- }
- }
- if i := altLangISO3.Index(s); i != -1 {
- return Language(altLangIndex[altLangISO3.Elem(i)[3]]), nil
- }
- n := strToInt(s)
- if langNoIndex[n/8]&(1<<(n%8)) != 0 {
- return Language(n) + langNoIndexOffset, nil
- }
- // Check for non-canonical uses of ISO3.
- for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) {
- if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] {
- return Language(i), nil
- }
- }
- return 0, NewValueError(s)
- }
- return 0, ErrSyntax
-}
-
-// StringToBuf writes the string to b and returns the number of bytes
-// written. cap(b) must be >= 3.
-func (id Language) StringToBuf(b []byte) int {
- if id >= langNoIndexOffset {
- intToStr(uint(id)-langNoIndexOffset, b[:3])
- return 3
- } else if id == 0 {
- return copy(b, "und")
- }
- l := lang[id<<2:]
- if l[3] == 0 {
- return copy(b, l[:3])
- }
- return copy(b, l[:2])
-}
-
-// String returns the BCP 47 representation of the langID.
-// Use b as variable name, instead of id, to ensure the variable
-// used is consistent with that of Base in which this type is embedded.
-func (b Language) String() string {
- if b == 0 {
- return "und"
- } else if b >= langNoIndexOffset {
- b -= langNoIndexOffset
- buf := [3]byte{}
- intToStr(uint(b), buf[:])
- return string(buf[:])
- }
- l := lang.Elem(int(b))
- if l[3] == 0 {
- return l[:3]
- }
- return l[:2]
-}
-
-// ISO3 returns the ISO 639-3 language code.
-func (b Language) ISO3() string {
- if b == 0 || b >= langNoIndexOffset {
- return b.String()
- }
- l := lang.Elem(int(b))
- if l[3] == 0 {
- return l[:3]
- } else if l[2] == 0 {
- return altLangISO3.Elem(int(l[3]))[:3]
- }
- // This allocation will only happen for 3-letter ISO codes
- // that are non-canonical BCP 47 language identifiers.
- return l[0:1] + l[2:4]
-}
-
-// IsPrivateUse reports whether this language code is reserved for private use.
-func (b Language) IsPrivateUse() bool {
- return langPrivateStart <= b && b <= langPrivateEnd
-}
-
-// SuppressScript returns the script marked as SuppressScript in the IANA
-// language tag repository, or 0 if there is no such script.
-func (b Language) SuppressScript() Script {
- if b < langNoIndexOffset {
- return Script(suppressScript[b])
- }
- return 0
-}
-
-type Region uint16
-
-// getRegionID returns the region id for s if s is a valid 2-letter region code
-// or unknownRegion.
-func getRegionID(s []byte) (Region, error) {
- if len(s) == 3 {
- if isAlpha(s[0]) {
- return getRegionISO3(s)
- }
- if i, err := strconv.ParseUint(string(s), 10, 10); err == nil {
- return getRegionM49(int(i))
- }
- }
- return getRegionISO2(s)
-}
-
-// getRegionISO2 returns the regionID for the given 2-letter ISO country code
-// or unknownRegion if this does not exist.
-func getRegionISO2(s []byte) (Region, error) {
- i, err := findIndex(regionISO, s, "ZZ")
- if err != nil {
- return 0, err
- }
- return Region(i) + isoRegionOffset, nil
-}
-
-// getRegionISO3 returns the regionID for the given 3-letter ISO country code
-// or unknownRegion if this does not exist.
-func getRegionISO3(s []byte) (Region, error) {
- if tag.FixCase("ZZZ", s) {
- for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) {
- if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] {
- return Region(i) + isoRegionOffset, nil
- }
- }
- for i := 0; i < len(altRegionISO3); i += 3 {
- if tag.Compare(altRegionISO3[i:i+3], s) == 0 {
- return Region(altRegionIDs[i/3]), nil
- }
- }
- return 0, NewValueError(s)
- }
- return 0, ErrSyntax
-}
-
-func getRegionM49(n int) (Region, error) {
- if 0 < n && n <= 999 {
- const (
- searchBits = 7
- regionBits = 9
- regionMask = 1<<regionBits - 1
- )
- idx := n >> searchBits
- buf := fromM49[m49Index[idx]:m49Index[idx+1]]
- val := uint16(n) << regionBits // we rely on bits shifting out
- i := sort.Search(len(buf), func(i int) bool {
- return buf[i] >= val
- })
- if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val {
- return Region(r & regionMask), nil
- }
- }
- var e ValueError
- fmt.Fprint(bytes.NewBuffer([]byte(e.v[:])), n)
- return 0, e
-}
-
-// normRegion returns a region if r is deprecated or 0 otherwise.
-// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ).
-// TODO: consider mapping split up regions to new most populous one (like CLDR).
-func normRegion(r Region) Region {
- m := regionOldMap
- k := sort.Search(len(m), func(i int) bool {
- return m[i].From >= uint16(r)
- })
- if k < len(m) && m[k].From == uint16(r) {
- return Region(m[k].To)
- }
- return 0
-}
-
-const (
- iso3166UserAssigned = 1 << iota
- ccTLD
- bcp47Region
-)
-
-func (r Region) typ() byte {
- return regionTypes[r]
-}
-
-// String returns the BCP 47 representation for the region.
-// It returns "ZZ" for an unspecified region.
-func (r Region) String() string {
- if r < isoRegionOffset {
- if r == 0 {
- return "ZZ"
- }
- return fmt.Sprintf("%03d", r.M49())
- }
- r -= isoRegionOffset
- return regionISO.Elem(int(r))[:2]
-}
-
-// ISO3 returns the 3-letter ISO code of r.
-// Note that not all regions have a 3-letter ISO code.
-// In such cases this method returns "ZZZ".
-func (r Region) ISO3() string {
- if r < isoRegionOffset {
- return "ZZZ"
- }
- r -= isoRegionOffset
- reg := regionISO.Elem(int(r))
- switch reg[2] {
- case 0:
- return altRegionISO3[reg[3]:][:3]
- case ' ':
- return "ZZZ"
- }
- return reg[0:1] + reg[2:4]
-}
-
-// M49 returns the UN M.49 encoding of r, or 0 if this encoding
-// is not defined for r.
-func (r Region) M49() int {
- return int(m49[r])
-}
-
-// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This
-// may include private-use tags that are assigned by CLDR and used in this
-// implementation. So IsPrivateUse and IsCountry can be simultaneously true.
-func (r Region) IsPrivateUse() bool {
- return r.typ()&iso3166UserAssigned != 0
-}
-
-type Script uint16
-
-// getScriptID returns the script id for string s. It assumes that s
-// is of the format [A-Z][a-z]{3}.
-func getScriptID(idx tag.Index, s []byte) (Script, error) {
- i, err := findIndex(idx, s, "Zzzz")
- return Script(i), err
-}
-
-// String returns the script code in title case.
-// It returns "Zzzz" for an unspecified script.
-func (s Script) String() string {
- if s == 0 {
- return "Zzzz"
- }
- return script.Elem(int(s))
-}
-
-// IsPrivateUse reports whether this script code is reserved for private use.
-func (s Script) IsPrivateUse() bool {
- return _Qaaa <= s && s <= _Qabx
-}
-
-const (
- maxAltTaglen = len("en-US-POSIX")
- maxLen = maxAltTaglen
-)
-
-var (
- // grandfatheredMap holds a mapping from legacy and grandfathered tags to
- // their base language or index to more elaborate tag.
- grandfatheredMap = map[[maxLen]byte]int16{
- [maxLen]byte{'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban
- [maxLen]byte{'i', '-', 'a', 'm', 'i'}: _ami, // i-ami
- [maxLen]byte{'i', '-', 'b', 'n', 'n'}: _bnn, // i-bnn
- [maxLen]byte{'i', '-', 'h', 'a', 'k'}: _hak, // i-hak
- [maxLen]byte{'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}: _tlh, // i-klingon
- [maxLen]byte{'i', '-', 'l', 'u', 'x'}: _lb, // i-lux
- [maxLen]byte{'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}: _nv, // i-navajo
- [maxLen]byte{'i', '-', 'p', 'w', 'n'}: _pwn, // i-pwn
- [maxLen]byte{'i', '-', 't', 'a', 'o'}: _tao, // i-tao
- [maxLen]byte{'i', '-', 't', 'a', 'y'}: _tay, // i-tay
- [maxLen]byte{'i', '-', 't', 's', 'u'}: _tsu, // i-tsu
- [maxLen]byte{'n', 'o', '-', 'b', 'o', 'k'}: _nb, // no-bok
- [maxLen]byte{'n', 'o', '-', 'n', 'y', 'n'}: _nn, // no-nyn
- [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}: _sfb, // sgn-BE-FR
- [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}: _vgt, // sgn-BE-NL
- [maxLen]byte{'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}: _sgg, // sgn-CH-DE
- [maxLen]byte{'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}: _cmn, // zh-guoyu
- [maxLen]byte{'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}: _hak, // zh-hakka
- [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan
- [maxLen]byte{'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}: _hsn, // zh-xiang
-
- // Grandfathered tags with no modern replacement will be converted as
- // follows:
- [maxLen]byte{'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish
- [maxLen]byte{'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}: -2, // en-GB-oed
- [maxLen]byte{'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}: -3, // i-default
- [maxLen]byte{'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}: -4, // i-enochian
- [maxLen]byte{'i', '-', 'm', 'i', 'n', 'g', 'o'}: -5, // i-mingo
- [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n'}: -6, // zh-min
-
- // CLDR-specific tag.
- [maxLen]byte{'r', 'o', 'o', 't'}: 0, // root
- [maxLen]byte{'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX"
- }
-
- altTagIndex = [...]uint8{0, 17, 31, 45, 61, 74, 86, 102}
-
- altTags = "xtg-x-cel-gaulishen-GB-oxendicten-x-i-defaultund-x-i-enochiansee-x-i-mingonan-x-zh-minen-US-u-va-posix"
-)
-
-func grandfathered(s [maxAltTaglen]byte) (t Tag, ok bool) {
- if v, ok := grandfatheredMap[s]; ok {
- if v < 0 {
- return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true
- }
- t.LangID = Language(v)
- return t, true
- }
- return t, false
-}
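
The lookup tables above resolve ISO 639/15924/3166 subtags, UN M.49 numbers, and the grandfathered tags listed in grandfatheredMap. The public parse helpers exercise the same paths; a brief sketch with illustrative codes:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	b, _ := language.ParseBase("deu")    // 3-letter ISO 639 code, canonicalized to "de"
	s, _ := language.ParseScript("Cyrl") // ISO 15924
	r, _ := language.ParseRegion("419")  // UN M.49 code for Latin America
	fmt.Println(b, s, r)                 // de Cyrl 419

	// Grandfathered tags are mapped to their modern equivalents during parsing.
	fmt.Println(language.Make("i-klingon")) // tlh
}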
diff --git a/vendor/golang.org/x/text/internal/language/match.go b/vendor/golang.org/x/text/internal/language/match.go
deleted file mode 100644
index 75a2dbca76..0000000000
--- a/vendor/golang.org/x/text/internal/language/match.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package language
-
-import "errors"
-
-type scriptRegionFlags uint8
-
-const (
- isList = 1 << iota
- scriptInFrom
- regionInFrom
-)
-
-func (t *Tag) setUndefinedLang(id Language) {
- if t.LangID == 0 {
- t.LangID = id
- }
-}
-
-func (t *Tag) setUndefinedScript(id Script) {
- if t.ScriptID == 0 {
- t.ScriptID = id
- }
-}
-
-func (t *Tag) setUndefinedRegion(id Region) {
- if t.RegionID == 0 || t.RegionID.Contains(id) {
- t.RegionID = id
- }
-}
-
-// ErrMissingLikelyTagsData indicates no information was available
-// to compute likely values of missing tags.
-var ErrMissingLikelyTagsData = errors.New("missing likely tags data")
-
-// addLikelySubtags sets subtags to their most likely value, given the locale.
-// In most cases this means setting fields for unknown values, but in some
-// cases it may alter a value. It returns an ErrMissingLikelyTagsData error
-// if the given locale cannot be expanded.
-func (t Tag) addLikelySubtags() (Tag, error) {
- id, err := addTags(t)
- if err != nil {
- return t, err
- } else if id.equalTags(t) {
- return t, nil
- }
- id.RemakeString()
- return id, nil
-}
-
-// specializeRegion attempts to specialize a group region.
-func specializeRegion(t *Tag) bool {
- if i := regionInclusion[t.RegionID]; i < nRegionGroups {
- x := likelyRegionGroup[i]
- if Language(x.lang) == t.LangID && Script(x.script) == t.ScriptID {
- t.RegionID = Region(x.region)
- }
- return true
- }
- return false
-}
-
-// Maximize returns a new tag with missing tags filled in.
-func (t Tag) Maximize() (Tag, error) {
- return addTags(t)
-}
-
-func addTags(t Tag) (Tag, error) {
- // We leave private use identifiers alone.
- if t.IsPrivateUse() {
- return t, nil
- }
- if t.ScriptID != 0 && t.RegionID != 0 {
- if t.LangID != 0 {
- // already fully specified
- specializeRegion(&t)
- return t, nil
- }
- // Search matches for und-script-region. Note that for these cases
- // region will never be a group so there is no need to check for this.
- list := likelyRegion[t.RegionID : t.RegionID+1]
- if x := list[0]; x.flags&isList != 0 {
- list = likelyRegionList[x.lang : x.lang+uint16(x.script)]
- }
- for _, x := range list {
- // Deviating from the spec. See match_test.go for details.
- if Script(x.script) == t.ScriptID {
- t.setUndefinedLang(Language(x.lang))
- return t, nil
- }
- }
- }
- if t.LangID != 0 {
- // Search matches for lang-script and lang-region, where lang != und.
- if t.LangID < langNoIndexOffset {
- x := likelyLang[t.LangID]
- if x.flags&isList != 0 {
- list := likelyLangList[x.region : x.region+uint16(x.script)]
- if t.ScriptID != 0 {
- for _, x := range list {
- if Script(x.script) == t.ScriptID && x.flags&scriptInFrom != 0 {
- t.setUndefinedRegion(Region(x.region))
- return t, nil
- }
- }
- } else if t.RegionID != 0 {
- count := 0
- goodScript := true
- tt := t
- for _, x := range list {
- // We visit all entries for which the script was not
- // defined, including the ones where the region was not
- // defined. This allows for proper disambiguation within
- // regions.
- if x.flags&scriptInFrom == 0 && t.RegionID.Contains(Region(x.region)) {
- tt.RegionID = Region(x.region)
- tt.setUndefinedScript(Script(x.script))
- goodScript = goodScript && tt.ScriptID == Script(x.script)
- count++
- }
- }
- if count == 1 {
- return tt, nil
- }
- // Even if we fail to find a unique Region, we might have
- // an unambiguous script.
- if goodScript {
- t.ScriptID = tt.ScriptID
- }
- }
- }
- }
- } else {
- // Search matches for und-script.
- if t.ScriptID != 0 {
- x := likelyScript[t.ScriptID]
- if x.region != 0 {
- t.setUndefinedRegion(Region(x.region))
- t.setUndefinedLang(Language(x.lang))
- return t, nil
- }
- }
- // Search matches for und-region. If und-script-region exists, it would
- // have been found earlier.
- if t.RegionID != 0 {
- if i := regionInclusion[t.RegionID]; i < nRegionGroups {
- x := likelyRegionGroup[i]
- if x.region != 0 {
- t.setUndefinedLang(Language(x.lang))
- t.setUndefinedScript(Script(x.script))
- t.RegionID = Region(x.region)
- }
- } else {
- x := likelyRegion[t.RegionID]
- if x.flags&isList != 0 {
- x = likelyRegionList[x.lang]
- }
- if x.script != 0 && x.flags != scriptInFrom {
- t.setUndefinedLang(Language(x.lang))
- t.setUndefinedScript(Script(x.script))
- return t, nil
- }
- }
- }
- }
-
- // Search matches for lang.
- if t.LangID < langNoIndexOffset {
- x := likelyLang[t.LangID]
- if x.flags&isList != 0 {
- x = likelyLangList[x.region]
- }
- if x.region != 0 {
- t.setUndefinedScript(Script(x.script))
- t.setUndefinedRegion(Region(x.region))
- }
- specializeRegion(&t)
- if t.LangID == 0 {
- t.LangID = _en // default language
- }
- return t, nil
- }
- return t, ErrMissingLikelyTagsData
-}
-
-func (t *Tag) setTagsFrom(id Tag) {
- t.LangID = id.LangID
- t.ScriptID = id.ScriptID
- t.RegionID = id.RegionID
-}
-
-// minimize removes the region or script subtags from t such that
-// t.addLikelySubtags() == t.minimize().addLikelySubtags().
-func (t Tag) minimize() (Tag, error) {
- t, err := minimizeTags(t)
- if err != nil {
- return t, err
- }
- t.RemakeString()
- return t, nil
-}
-
-// minimizeTags mimics the behavior of the ICU 51 C implementation.
-func minimizeTags(t Tag) (Tag, error) {
- if t.equalTags(Und) {
- return t, nil
- }
- max, err := addTags(t)
- if err != nil {
- return t, err
- }
- for _, id := range [...]Tag{
- {LangID: t.LangID},
- {LangID: t.LangID, RegionID: t.RegionID},
- {LangID: t.LangID, ScriptID: t.ScriptID},
- } {
- if x, err := addTags(id); err == nil && max.equalTags(x) {
- t.setTagsFrom(id)
- break
- }
- }
- return t, nil
-}
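
The likely-subtag maximization and minimization above is what drives matching in the public package. A hedged sketch of how it surfaces through language.NewMatcher; the supported set is illustrative and the exact confidence value may differ.

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	supported := []language.Tag{
		language.English,              // index 0
		language.MustParse("pt-BR"),   // index 1
		language.MustParse("zh-Hant"), // index 2
	}
	m := language.NewMatcher(supported)

	// zh-TW maximizes to zh-Hant-TW internally, so it should match zh-Hant.
	_, index, conf := m.Match(language.MustParse("zh-TW"))
	fmt.Println(supported[index], conf) // expected: zh-Hant with high confidence
}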
diff --git a/vendor/golang.org/x/text/internal/language/parse.go b/vendor/golang.org/x/text/internal/language/parse.go
deleted file mode 100644
index aad1e0acf7..0000000000
--- a/vendor/golang.org/x/text/internal/language/parse.go
+++ /dev/null
@@ -1,608 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package language
-
-import (
- "bytes"
- "errors"
- "fmt"
- "sort"
-
- "golang.org/x/text/internal/tag"
-)
-
-// isAlpha returns true if the byte is not a digit.
-// b must be an ASCII letter or digit.
-func isAlpha(b byte) bool {
- return b > '9'
-}
-
-// isAlphaNum returns true if the string contains only ASCII letters or digits.
-func isAlphaNum(s []byte) bool {
- for _, c := range s {
- if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9') {
- return false
- }
- }
- return true
-}
-
-// ErrSyntax is returned by any of the parsing functions when the
-// input is not well-formed, according to BCP 47.
-// TODO: return the position at which the syntax error occurred?
-var ErrSyntax = errors.New("language: tag is not well-formed")
-
-// ErrDuplicateKey is returned when a tag contains the same key twice with
-// different values in the -u section.
-var ErrDuplicateKey = errors.New("language: different values for same key in -u extension")
-
-// ValueError is returned by any of the parsing functions when the
-// input is well-formed but the respective subtag is not recognized
-// as a valid value.
-type ValueError struct {
- v [8]byte
-}
-
-// NewValueError creates a new ValueError.
-func NewValueError(tag []byte) ValueError {
- var e ValueError
- copy(e.v[:], tag)
- return e
-}
-
-func (e ValueError) tag() []byte {
- n := bytes.IndexByte(e.v[:], 0)
- if n == -1 {
- n = 8
- }
- return e.v[:n]
-}
-
-// Error implements the error interface.
-func (e ValueError) Error() string {
- return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag())
-}
-
-// Subtag returns the subtag for which the error occurred.
-func (e ValueError) Subtag() string {
- return string(e.tag())
-}
-
-// scanner is used to scan BCP 47 tokens, which are separated by _ or -.
-type scanner struct {
- b []byte
- bytes [max99thPercentileSize]byte
- token []byte
- start int // start position of the current token
- end int // end position of the current token
- next int // next point for scan
- err error
- done bool
-}
-
-func makeScannerString(s string) scanner {
- scan := scanner{}
- if len(s) <= len(scan.bytes) {
- scan.b = scan.bytes[:copy(scan.bytes[:], s)]
- } else {
- scan.b = []byte(s)
- }
- scan.init()
- return scan
-}
-
-// makeScanner returns a scanner using b as the input buffer.
-// b is not copied and may be modified by the scanner routines.
-func makeScanner(b []byte) scanner {
- scan := scanner{b: b}
- scan.init()
- return scan
-}
-
-func (s *scanner) init() {
- for i, c := range s.b {
- if c == '_' {
- s.b[i] = '-'
- }
- }
- s.scan()
-}
-
-// toLower converts the string between start and end to lower case.
-func (s *scanner) toLower(start, end int) {
- for i := start; i < end; i++ {
- c := s.b[i]
- if 'A' <= c && c <= 'Z' {
- s.b[i] += 'a' - 'A'
- }
- }
-}
-
-func (s *scanner) setError(e error) {
- if s.err == nil || (e == ErrSyntax && s.err != ErrSyntax) {
- s.err = e
- }
-}
-
-// resizeRange shrinks or grows the array at position oldStart such that
-// a new string of size newSize can fit between oldStart and oldEnd.
-// Sets the scan point to after the resized range.
-func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) {
- s.start = oldStart
- if end := oldStart + newSize; end != oldEnd {
- diff := end - oldEnd
- var b []byte
- if n := len(s.b) + diff; n > cap(s.b) {
- b = make([]byte, n)
- copy(b, s.b[:oldStart])
- } else {
- b = s.b[:n]
- }
- copy(b[end:], s.b[oldEnd:])
- s.b = b
- s.next = end + (s.next - s.end)
- s.end = end
- }
-}
-
-// replace replaces the current token with repl.
-func (s *scanner) replace(repl string) {
- s.resizeRange(s.start, s.end, len(repl))
- copy(s.b[s.start:], repl)
-}
-
-// gobble removes the current token from the input.
-// Caller must call scan after calling gobble.
-func (s *scanner) gobble(e error) {
- s.setError(e)
- if s.start == 0 {
- s.b = s.b[:+copy(s.b, s.b[s.next:])]
- s.end = 0
- } else {
- s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])]
- s.end = s.start - 1
- }
- s.next = s.start
-}
-
-// deleteRange removes the given range from s.b before the current token.
-func (s *scanner) deleteRange(start, end int) {
- s.b = s.b[:start+copy(s.b[start:], s.b[end:])]
- diff := end - start
- s.next -= diff
- s.start -= diff
- s.end -= diff
-}
-
-// scan parses the next token of a BCP 47 string. Tokens that are larger
-// than 8 characters or include non-alphanumeric characters result in an error
-// and are gobbled and removed from the output.
-// It returns the end position of the last token consumed.
-func (s *scanner) scan() (end int) {
- end = s.end
- s.token = nil
- for s.start = s.next; s.next < len(s.b); {
- i := bytes.IndexByte(s.b[s.next:], '-')
- if i == -1 {
- s.end = len(s.b)
- s.next = len(s.b)
- i = s.end - s.start
- } else {
- s.end = s.next + i
- s.next = s.end + 1
- }
- token := s.b[s.start:s.end]
- if i < 1 || i > 8 || !isAlphaNum(token) {
- s.gobble(ErrSyntax)
- continue
- }
- s.token = token
- return end
- }
- if n := len(s.b); n > 0 && s.b[n-1] == '-' {
- s.setError(ErrSyntax)
- s.b = s.b[:len(s.b)-1]
- }
- s.done = true
- return end
-}
-
-// acceptMinSize parses multiple tokens of the given size or greater.
-// It returns the end position of the last token consumed.
-func (s *scanner) acceptMinSize(min int) (end int) {
- end = s.end
- s.scan()
- for ; len(s.token) >= min; s.scan() {
- end = s.end
- }
- return end
-}
-
-// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
-// failed it returns an error and any part of the tag that could be parsed.
-// If parsing succeeded but an unknown value was found, it returns
-// ValueError. The Tag returned in this case is just stripped of the unknown
-// value. All other values are preserved. It accepts tags in the BCP 47 format
-// and extensions to this standard defined in
-// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
-func Parse(s string) (t Tag, err error) {
- // TODO: consider supporting old-style locale key-value pairs.
- if s == "" {
- return Und, ErrSyntax
- }
- defer func() {
- if recover() != nil {
- t = Und
- err = ErrSyntax
- return
- }
- }()
- if len(s) <= maxAltTaglen {
- b := [maxAltTaglen]byte{}
- for i, c := range s {
- // Generating invalid UTF-8 is okay as it won't match.
- if 'A' <= c && c <= 'Z' {
- c += 'a' - 'A'
- } else if c == '_' {
- c = '-'
- }
- b[i] = byte(c)
- }
- if t, ok := grandfathered(b); ok {
- return t, nil
- }
- }
- scan := makeScannerString(s)
- return parse(&scan, s)
-}
-
-func parse(scan *scanner, s string) (t Tag, err error) {
- t = Und
- var end int
- if n := len(scan.token); n <= 1 {
- scan.toLower(0, len(scan.b))
- if n == 0 || scan.token[0] != 'x' {
- return t, ErrSyntax
- }
- end = parseExtensions(scan)
- } else if n >= 4 {
- return Und, ErrSyntax
- } else { // the usual case
- t, end = parseTag(scan, true)
- if n := len(scan.token); n == 1 {
- t.pExt = uint16(end)
- end = parseExtensions(scan)
- } else if end < len(scan.b) {
- scan.setError(ErrSyntax)
- scan.b = scan.b[:end]
- }
- }
- if int(t.pVariant) < len(scan.b) {
- if end < len(s) {
- s = s[:end]
- }
- if len(s) > 0 && tag.Compare(s, scan.b) == 0 {
- t.str = s
- } else {
- t.str = string(scan.b)
- }
- } else {
- t.pVariant, t.pExt = 0, 0
- }
- return t, scan.err
-}
-
-// parseTag parses language, script, region and variants.
-// It returns a Tag and the end position in the input that was parsed.
-// If doNorm is true, then <lang>-<extlang> will be normalized to <extlang>.
-func parseTag(scan *scanner, doNorm bool) (t Tag, end int) {
- var e error
- // TODO: set an error if an unknown lang, script or region is encountered.
- t.LangID, e = getLangID(scan.token)
- scan.setError(e)
- scan.replace(t.LangID.String())
- langStart := scan.start
- end = scan.scan()
- for len(scan.token) == 3 && isAlpha(scan.token[0]) {
- // From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent
- // to a tag of the form <extlang>.
- if doNorm {
- lang, e := getLangID(scan.token)
- if lang != 0 {
- t.LangID = lang
- langStr := lang.String()
- copy(scan.b[langStart:], langStr)
- scan.b[langStart+len(langStr)] = '-'
- scan.start = langStart + len(langStr) + 1
- }
- scan.gobble(e)
- }
- end = scan.scan()
- }
- if len(scan.token) == 4 && isAlpha(scan.token[0]) {
- t.ScriptID, e = getScriptID(script, scan.token)
- if t.ScriptID == 0 {
- scan.gobble(e)
- }
- end = scan.scan()
- }
- if n := len(scan.token); n >= 2 && n <= 3 {
- t.RegionID, e = getRegionID(scan.token)
- if t.RegionID == 0 {
- scan.gobble(e)
- } else {
- scan.replace(t.RegionID.String())
- }
- end = scan.scan()
- }
- scan.toLower(scan.start, len(scan.b))
- t.pVariant = byte(end)
- end = parseVariants(scan, end, t)
- t.pExt = uint16(end)
- return t, end
-}
-
-var separator = []byte{'-'}
-
-// parseVariants scans tokens as long as each token is a valid variant string.
-// Duplicate variants are removed.
-func parseVariants(scan *scanner, end int, t Tag) int {
- start := scan.start
- varIDBuf := [4]uint8{}
- variantBuf := [4][]byte{}
- varID := varIDBuf[:0]
- variant := variantBuf[:0]
- last := -1
- needSort := false
- for ; len(scan.token) >= 4; scan.scan() {
- // TODO: measure the impact of needing this conversion and redesign
- // the data structure if there is an issue.
- v, ok := variantIndex[string(scan.token)]
- if !ok {
- // unknown variant
- // TODO: allow user-defined variants?
- scan.gobble(NewValueError(scan.token))
- continue
- }
- varID = append(varID, v)
- variant = append(variant, scan.token)
- if !needSort {
- if last < int(v) {
- last = int(v)
- } else {
- needSort = true
- // There are no legal combinations of more than 7 variants
- // (and this is by no means a useful sequence).
- const maxVariants = 8
- if len(varID) > maxVariants {
- break
- }
- }
- }
- end = scan.end
- }
- if needSort {
- sort.Sort(variantsSort{varID, variant})
- k, l := 0, -1
- for i, v := range varID {
- w := int(v)
- if l == w {
- // Remove duplicates.
- continue
- }
- varID[k] = varID[i]
- variant[k] = variant[i]
- k++
- l = w
- }
- if str := bytes.Join(variant[:k], separator); len(str) == 0 {
- end = start - 1
- } else {
- scan.resizeRange(start, end, len(str))
- copy(scan.b[scan.start:], str)
- end = scan.end
- }
- }
- return end
-}
-
-type variantsSort struct {
- i []uint8
- v [][]byte
-}
-
-func (s variantsSort) Len() int {
- return len(s.i)
-}
-
-func (s variantsSort) Swap(i, j int) {
- s.i[i], s.i[j] = s.i[j], s.i[i]
- s.v[i], s.v[j] = s.v[j], s.v[i]
-}
-
-func (s variantsSort) Less(i, j int) bool {
- return s.i[i] < s.i[j]
-}
-
-type bytesSort struct {
- b [][]byte
- n int // first n bytes to compare
-}
-
-func (b bytesSort) Len() int {
- return len(b.b)
-}
-
-func (b bytesSort) Swap(i, j int) {
- b.b[i], b.b[j] = b.b[j], b.b[i]
-}
-
-func (b bytesSort) Less(i, j int) bool {
- for k := 0; k < b.n; k++ {
- if b.b[i][k] == b.b[j][k] {
- continue
- }
- return b.b[i][k] < b.b[j][k]
- }
- return false
-}
-
-// parseExtensions parses and normalizes the extensions in the buffer.
-// It returns the last position of scan.b that is part of any extension.
-// It also trims scan.b to remove excess parts accordingly.
-func parseExtensions(scan *scanner) int {
- start := scan.start
- exts := [][]byte{}
- private := []byte{}
- end := scan.end
- for len(scan.token) == 1 {
- extStart := scan.start
- ext := scan.token[0]
- end = parseExtension(scan)
- extension := scan.b[extStart:end]
- if len(extension) < 3 || (ext != 'x' && len(extension) < 4) {
- scan.setError(ErrSyntax)
- end = extStart
- continue
- } else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) {
- scan.b = scan.b[:end]
- return end
- } else if ext == 'x' {
- private = extension
- break
- }
- exts = append(exts, extension)
- }
- sort.Sort(bytesSort{exts, 1})
- if len(private) > 0 {
- exts = append(exts, private)
- }
- scan.b = scan.b[:start]
- if len(exts) > 0 {
- scan.b = append(scan.b, bytes.Join(exts, separator)...)
- } else if start > 0 {
- // Strip trailing '-'.
- scan.b = scan.b[:start-1]
- }
- return end
-}
-
-// parseExtension parses a single extension and returns the position of
-// the extension end.
-func parseExtension(scan *scanner) int {
- start, end := scan.start, scan.end
- switch scan.token[0] {
- case 'u': // https://www.ietf.org/rfc/rfc6067.txt
- attrStart := end
- scan.scan()
- for last := []byte{}; len(scan.token) > 2; scan.scan() {
- if bytes.Compare(scan.token, last) != -1 {
- // Attributes are unsorted. Start over from scratch.
- p := attrStart + 1
- scan.next = p
- attrs := [][]byte{}
- for scan.scan(); len(scan.token) > 2; scan.scan() {
- attrs = append(attrs, scan.token)
- end = scan.end
- }
- sort.Sort(bytesSort{attrs, 3})
- copy(scan.b[p:], bytes.Join(attrs, separator))
- break
- }
- last = scan.token
- end = scan.end
- }
- // Scan key-type sequences. A key is of length 2 and may be followed
- // by 0 or more "type" subtags from 3 to the maximum of 8 letters.
- var last, key []byte
- for attrEnd := end; len(scan.token) == 2; last = key {
- key = scan.token
- end = scan.end
- for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() {
- end = scan.end
- }
- // TODO: check key value validity
- if bytes.Compare(key, last) != 1 || scan.err != nil {
- // We have an invalid key or the keys are not sorted.
- // Start scanning keys from scratch and reorder.
- p := attrEnd + 1
- scan.next = p
- keys := [][]byte{}
- for scan.scan(); len(scan.token) == 2; {
- keyStart := scan.start
- end = scan.end
- for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() {
- end = scan.end
- }
- keys = append(keys, scan.b[keyStart:end])
- }
- sort.Stable(bytesSort{keys, 2})
- if n := len(keys); n > 0 {
- k := 0
- for i := 1; i < n; i++ {
- if !bytes.Equal(keys[k][:2], keys[i][:2]) {
- k++
- keys[k] = keys[i]
- } else if !bytes.Equal(keys[k], keys[i]) {
- scan.setError(ErrDuplicateKey)
- }
- }
- keys = keys[:k+1]
- }
- reordered := bytes.Join(keys, separator)
- if e := p + len(reordered); e < end {
- scan.deleteRange(e, end)
- end = e
- }
- copy(scan.b[p:], reordered)
- break
- }
- }
- case 't': // https://www.ietf.org/rfc/rfc6497.txt
- scan.scan()
- if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) {
- _, end = parseTag(scan, false)
- scan.toLower(start, end)
- }
- for len(scan.token) == 2 && !isAlpha(scan.token[1]) {
- end = scan.acceptMinSize(3)
- }
- case 'x':
- end = scan.acceptMinSize(1)
- default:
- end = scan.acceptMinSize(2)
- }
- return end
-}
-
-// getExtension returns the name, body and end position of the extension.
-func getExtension(s string, p int) (end int, ext string) {
- if s[p] == '-' {
- p++
- }
- if s[p] == 'x' {
- return len(s), s[p:]
- }
- end = nextExtension(s, p)
- return end, s[p:end]
-}
-
-// nextExtension finds the next extension within the string, searching
-// for the -<char>- pattern from position p.
-// In the vast majority of cases, language tags will have at most
-// one extension and extensions tend to be small.
-func nextExtension(s string, p int) int {
- for n := len(s) - 3; p < n; {
- if s[p] == '-' {
- if s[p+2] == '-' {
- return p
- }
- p += 3
- } else {
- p++
- }
- }
- return len(s)
-}
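
parse.go above implements the scanner and normalization behind Parse. A short sketch of the observable behavior through the public entry points; inputs are illustrative.

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Underscores are accepted as separators and normalized to hyphens,
	// mirroring the scanner's init step above.
	fmt.Println(language.Make("sr_Latn_RS")) // sr-Latn-RS

	// Private-use subtags pass through untouched.
	t, err := language.Parse("en-US-x-twain")
	fmt.Println(t, err) // en-US-x-twain <nil>

	// Ill-formed input is rejected with a non-nil error.
	if _, err := language.Parse("not a tag"); err != nil {
		fmt.Println("rejected:", err)
	}
}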
diff --git a/vendor/golang.org/x/text/internal/language/tables.go b/vendor/golang.org/x/text/internal/language/tables.go
deleted file mode 100644
index fb6b58378b..0000000000
--- a/vendor/golang.org/x/text/internal/language/tables.go
+++ /dev/null
@@ -1,3472 +0,0 @@
-// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
-
-package language
-
-import "golang.org/x/text/internal/tag"
-
-// CLDRVersion is the CLDR version from which the tables in this package are derived.
-const CLDRVersion = "32"
-
-const NumLanguages = 8752
-
-const NumScripts = 258
-
-const NumRegions = 357
-
-type FromTo struct {
- From uint16
- To uint16
-}
-
-const nonCanonicalUnd = 1201
-const (
- _af = 22
- _am = 39
- _ar = 58
- _az = 88
- _bg = 126
- _bn = 165
- _ca = 215
- _cs = 250
- _da = 257
- _de = 269
- _el = 310
- _en = 313
- _es = 318
- _et = 320
- _fa = 328
- _fi = 337
- _fil = 339
- _fr = 350
- _gu = 420
- _he = 444
- _hi = 446
- _hr = 465
- _hu = 469
- _hy = 471
- _id = 481
- _is = 504
- _it = 505
- _ja = 512
- _ka = 528
- _kk = 578
- _km = 586
- _kn = 593
- _ko = 596
- _ky = 650
- _lo = 696
- _lt = 704
- _lv = 711
- _mk = 767
- _ml = 772
- _mn = 779
- _mo = 784
- _mr = 795
- _ms = 799
- _mul = 806
- _my = 817
- _nb = 839
- _ne = 849
- _nl = 871
- _no = 879
- _pa = 925
- _pl = 947
- _pt = 960
- _ro = 988
- _ru = 994
- _sh = 1031
- _si = 1036
- _sk = 1042
- _sl = 1046
- _sq = 1073
- _sr = 1074
- _sv = 1092
- _sw = 1093
- _ta = 1104
- _te = 1121
- _th = 1131
- _tl = 1146
- _tn = 1152
- _tr = 1162
- _uk = 1198
- _ur = 1204
- _uz = 1212
- _vi = 1219
- _zh = 1321
- _zu = 1327
- _jbo = 515
- _ami = 1650
- _bnn = 2357
- _hak = 438
- _tlh = 14467
- _lb = 661
- _nv = 899
- _pwn = 12055
- _tao = 14188
- _tay = 14198
- _tsu = 14662
- _nn = 874
- _sfb = 13629
- _vgt = 15701
- _sgg = 13660
- _cmn = 3007
- _nan = 835
- _hsn = 467
-)
-
-const langPrivateStart = 0x2f72
-
-const langPrivateEnd = 0x3179
-
-// lang holds an alphabetically sorted list of ISO-639 language identifiers.
-// All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag.
-// For 2-byte language identifiers, the two successive bytes have the following meaning:
- // - if the first letter of the 2- and 3-letter ISO codes is the same:
- // the second and third letter of the 3-letter ISO code.
- // - otherwise: a 0 and an index into altLangISO3, right-shifted by 2 bits.
-//
-// For 3-byte language identifiers the 4th byte is 0.
-const lang tag.Index = "" + // Size: 5324 bytes
- "---\x00aaaraai\x00aak\x00aau\x00abbkabi\x00abq\x00abr\x00abt\x00aby\x00a" +
- "cd\x00ace\x00ach\x00ada\x00ade\x00adj\x00ady\x00adz\x00aeveaeb\x00aey" +
- "\x00affragc\x00agd\x00agg\x00agm\x00ago\x00agq\x00aha\x00ahl\x00aho\x00a" +
- "jg\x00akkaakk\x00ala\x00ali\x00aln\x00alt\x00ammhamm\x00amn\x00amo\x00am" +
- "p\x00anrganc\x00ank\x00ann\x00any\x00aoj\x00aom\x00aoz\x00apc\x00apd\x00" +
- "ape\x00apr\x00aps\x00apz\x00arraarc\x00arh\x00arn\x00aro\x00arq\x00ars" +
- "\x00ary\x00arz\x00assmasa\x00ase\x00asg\x00aso\x00ast\x00ata\x00atg\x00a" +
- "tj\x00auy\x00avvaavl\x00avn\x00avt\x00avu\x00awa\x00awb\x00awo\x00awx" +
- "\x00ayymayb\x00azzebaakbal\x00ban\x00bap\x00bar\x00bas\x00bav\x00bax\x00" +
- "bba\x00bbb\x00bbc\x00bbd\x00bbj\x00bbp\x00bbr\x00bcf\x00bch\x00bci\x00bc" +
- "m\x00bcn\x00bco\x00bcq\x00bcu\x00bdd\x00beelbef\x00beh\x00bej\x00bem\x00" +
- "bet\x00bew\x00bex\x00bez\x00bfd\x00bfq\x00bft\x00bfy\x00bgulbgc\x00bgn" +
- "\x00bgx\x00bhihbhb\x00bhg\x00bhi\x00bhk\x00bhl\x00bho\x00bhy\x00biisbib" +
- "\x00big\x00bik\x00bim\x00bin\x00bio\x00biq\x00bjh\x00bji\x00bjj\x00bjn" +
- "\x00bjo\x00bjr\x00bjt\x00bjz\x00bkc\x00bkm\x00bkq\x00bku\x00bkv\x00blt" +
- "\x00bmambmh\x00bmk\x00bmq\x00bmu\x00bnenbng\x00bnm\x00bnp\x00boodboj\x00" +
- "bom\x00bon\x00bpy\x00bqc\x00bqi\x00bqp\x00bqv\x00brrebra\x00brh\x00brx" +
- "\x00brz\x00bsosbsj\x00bsq\x00bss\x00bst\x00bto\x00btt\x00btv\x00bua\x00b" +
- "uc\x00bud\x00bug\x00buk\x00bum\x00buo\x00bus\x00buu\x00bvb\x00bwd\x00bwr" +
- "\x00bxh\x00bye\x00byn\x00byr\x00bys\x00byv\x00byx\x00bza\x00bze\x00bzf" +
- "\x00bzh\x00bzw\x00caatcan\x00cbj\x00cch\x00ccp\x00ceheceb\x00cfa\x00cgg" +
- "\x00chhachk\x00chm\x00cho\x00chp\x00chr\x00cja\x00cjm\x00cjv\x00ckb\x00c" +
- "kl\x00cko\x00cky\x00cla\x00cme\x00cmg\x00cooscop\x00cps\x00crrecrh\x00cr" +
- "j\x00crk\x00crl\x00crm\x00crs\x00csescsb\x00csw\x00ctd\x00cuhucvhvcyymda" +
- "andad\x00daf\x00dag\x00dah\x00dak\x00dar\x00dav\x00dbd\x00dbq\x00dcc\x00" +
- "ddn\x00deeuded\x00den\x00dga\x00dgh\x00dgi\x00dgl\x00dgr\x00dgz\x00dia" +
- "\x00dje\x00dnj\x00dob\x00doi\x00dop\x00dow\x00dri\x00drs\x00dsb\x00dtm" +
- "\x00dtp\x00dts\x00dty\x00dua\x00duc\x00dud\x00dug\x00dvivdva\x00dww\x00d" +
- "yo\x00dyu\x00dzzodzg\x00ebu\x00eeweefi\x00egl\x00egy\x00eka\x00eky\x00el" +
- "llema\x00emi\x00enngenn\x00enq\x00eopoeri\x00es\x00\x05esu\x00etstetr" +
- "\x00ett\x00etu\x00etx\x00euusewo\x00ext\x00faasfaa\x00fab\x00fag\x00fai" +
- "\x00fan\x00ffulffi\x00ffm\x00fiinfia\x00fil\x00fit\x00fjijflr\x00fmp\x00" +
- "foaofod\x00fon\x00for\x00fpe\x00fqs\x00frrafrc\x00frp\x00frr\x00frs\x00f" +
- "ub\x00fud\x00fue\x00fuf\x00fuh\x00fuq\x00fur\x00fuv\x00fuy\x00fvr\x00fyr" +
- "ygalegaa\x00gaf\x00gag\x00gah\x00gaj\x00gam\x00gan\x00gaw\x00gay\x00gba" +
- "\x00gbf\x00gbm\x00gby\x00gbz\x00gcr\x00gdlagde\x00gdn\x00gdr\x00geb\x00g" +
- "ej\x00gel\x00gez\x00gfk\x00ggn\x00ghs\x00gil\x00gim\x00gjk\x00gjn\x00gju" +
- "\x00gkn\x00gkp\x00gllgglk\x00gmm\x00gmv\x00gnrngnd\x00gng\x00god\x00gof" +
- "\x00goi\x00gom\x00gon\x00gor\x00gos\x00got\x00grb\x00grc\x00grt\x00grw" +
- "\x00gsw\x00guujgub\x00guc\x00gud\x00gur\x00guw\x00gux\x00guz\x00gvlvgvf" +
- "\x00gvr\x00gvs\x00gwc\x00gwi\x00gwt\x00gyi\x00haauhag\x00hak\x00ham\x00h" +
- "aw\x00haz\x00hbb\x00hdy\x00heebhhy\x00hiinhia\x00hif\x00hig\x00hih\x00hi" +
- "l\x00hla\x00hlu\x00hmd\x00hmt\x00hnd\x00hne\x00hnj\x00hnn\x00hno\x00homo" +
- "hoc\x00hoj\x00hot\x00hrrvhsb\x00hsn\x00htathuunhui\x00hyyehzerianaian" +
- "\x00iar\x00iba\x00ibb\x00iby\x00ica\x00ich\x00idndidd\x00idi\x00idu\x00i" +
- "eleife\x00igboigb\x00ige\x00iiiiijj\x00ikpkikk\x00ikt\x00ikw\x00ikx\x00i" +
- "lo\x00imo\x00inndinh\x00iodoiou\x00iri\x00isslittaiukuiw\x00\x03iwm\x00i" +
- "ws\x00izh\x00izi\x00japnjab\x00jam\x00jbo\x00jbu\x00jen\x00jgk\x00jgo" +
- "\x00ji\x00\x06jib\x00jmc\x00jml\x00jra\x00jut\x00jvavjwavkaatkaa\x00kab" +
- "\x00kac\x00kad\x00kai\x00kaj\x00kam\x00kao\x00kbd\x00kbm\x00kbp\x00kbq" +
- "\x00kbx\x00kby\x00kcg\x00kck\x00kcl\x00kct\x00kde\x00kdh\x00kdl\x00kdt" +
- "\x00kea\x00ken\x00kez\x00kfo\x00kfr\x00kfy\x00kgonkge\x00kgf\x00kgp\x00k" +
- "ha\x00khb\x00khn\x00khq\x00khs\x00kht\x00khw\x00khz\x00kiikkij\x00kiu" +
- "\x00kiw\x00kjuakjd\x00kjg\x00kjs\x00kjy\x00kkazkkc\x00kkj\x00klalkln\x00" +
- "klq\x00klt\x00klx\x00kmhmkmb\x00kmh\x00kmo\x00kms\x00kmu\x00kmw\x00knank" +
- "nf\x00knp\x00koorkoi\x00kok\x00kol\x00kos\x00koz\x00kpe\x00kpf\x00kpo" +
- "\x00kpr\x00kpx\x00kqb\x00kqf\x00kqs\x00kqy\x00kraukrc\x00kri\x00krj\x00k" +
- "rl\x00krs\x00kru\x00ksasksb\x00ksd\x00ksf\x00ksh\x00ksj\x00ksr\x00ktb" +
- "\x00ktm\x00kto\x00kuurkub\x00kud\x00kue\x00kuj\x00kum\x00kun\x00kup\x00k" +
- "us\x00kvomkvg\x00kvr\x00kvx\x00kw\x00\x01kwj\x00kwo\x00kxa\x00kxc\x00kxm" +
- "\x00kxp\x00kxw\x00kxz\x00kyirkye\x00kyx\x00kzr\x00laatlab\x00lad\x00lag" +
- "\x00lah\x00laj\x00las\x00lbtzlbe\x00lbu\x00lbw\x00lcm\x00lcp\x00ldb\x00l" +
- "ed\x00lee\x00lem\x00lep\x00leq\x00leu\x00lez\x00lguglgg\x00liimlia\x00li" +
- "d\x00lif\x00lig\x00lih\x00lij\x00lis\x00ljp\x00lki\x00lkt\x00lle\x00lln" +
- "\x00lmn\x00lmo\x00lmp\x00lninlns\x00lnu\x00loaoloj\x00lok\x00lol\x00lor" +
- "\x00los\x00loz\x00lrc\x00ltitltg\x00luublua\x00luo\x00luy\x00luz\x00lvav" +
- "lwl\x00lzh\x00lzz\x00mad\x00maf\x00mag\x00mai\x00mak\x00man\x00mas\x00ma" +
- "w\x00maz\x00mbh\x00mbo\x00mbq\x00mbu\x00mbw\x00mci\x00mcp\x00mcq\x00mcr" +
- "\x00mcu\x00mda\x00mde\x00mdf\x00mdh\x00mdj\x00mdr\x00mdx\x00med\x00mee" +
- "\x00mek\x00men\x00mer\x00met\x00meu\x00mfa\x00mfe\x00mfn\x00mfo\x00mfq" +
- "\x00mglgmgh\x00mgl\x00mgo\x00mgp\x00mgy\x00mhahmhi\x00mhl\x00mirimif\x00" +
- "min\x00mis\x00miw\x00mkkdmki\x00mkl\x00mkp\x00mkw\x00mlalmle\x00mlp\x00m" +
- "ls\x00mmo\x00mmu\x00mmx\x00mnonmna\x00mnf\x00mni\x00mnw\x00moolmoa\x00mo" +
- "e\x00moh\x00mos\x00mox\x00mpp\x00mps\x00mpt\x00mpx\x00mql\x00mrarmrd\x00" +
- "mrj\x00mro\x00mssamtltmtc\x00mtf\x00mti\x00mtr\x00mua\x00mul\x00mur\x00m" +
- "us\x00mva\x00mvn\x00mvy\x00mwk\x00mwr\x00mwv\x00mxc\x00mxm\x00myyamyk" +
- "\x00mym\x00myv\x00myw\x00myx\x00myz\x00mzk\x00mzm\x00mzn\x00mzp\x00mzw" +
- "\x00mzz\x00naaunac\x00naf\x00nah\x00nak\x00nan\x00nap\x00naq\x00nas\x00n" +
- "bobnca\x00nce\x00ncf\x00nch\x00nco\x00ncu\x00nddendc\x00nds\x00neepneb" +
- "\x00new\x00nex\x00nfr\x00ngdonga\x00ngb\x00ngl\x00nhb\x00nhe\x00nhw\x00n" +
- "if\x00nii\x00nij\x00nin\x00niu\x00niy\x00niz\x00njo\x00nkg\x00nko\x00nll" +
- "dnmg\x00nmz\x00nnnonnf\x00nnh\x00nnk\x00nnm\x00noornod\x00noe\x00non\x00" +
- "nop\x00nou\x00nqo\x00nrblnrb\x00nsk\x00nsn\x00nso\x00nss\x00ntm\x00ntr" +
- "\x00nui\x00nup\x00nus\x00nuv\x00nux\x00nvavnwb\x00nxq\x00nxr\x00nyyanym" +
- "\x00nyn\x00nzi\x00occiogc\x00ojjiokr\x00okv\x00omrmong\x00onn\x00ons\x00" +
- "opm\x00orrioro\x00oru\x00osssosa\x00ota\x00otk\x00ozm\x00paanpag\x00pal" +
- "\x00pam\x00pap\x00pau\x00pbi\x00pcd\x00pcm\x00pdc\x00pdt\x00ped\x00peo" +
- "\x00pex\x00pfl\x00phl\x00phn\x00pilipil\x00pip\x00pka\x00pko\x00plolpla" +
- "\x00pms\x00png\x00pnn\x00pnt\x00pon\x00ppo\x00pra\x00prd\x00prg\x00psusp" +
- "ss\x00ptorptp\x00puu\x00pwa\x00quuequc\x00qug\x00rai\x00raj\x00rao\x00rc" +
- "f\x00rej\x00rel\x00res\x00rgn\x00rhg\x00ria\x00rif\x00rjs\x00rkt\x00rmoh" +
- "rmf\x00rmo\x00rmt\x00rmu\x00rnunrna\x00rng\x00roonrob\x00rof\x00roo\x00r" +
- "ro\x00rtm\x00ruusrue\x00rug\x00rw\x00\x04rwk\x00rwo\x00ryu\x00saansaf" +
- "\x00sah\x00saq\x00sas\x00sat\x00sav\x00saz\x00sba\x00sbe\x00sbp\x00scrds" +
- "ck\x00scl\x00scn\x00sco\x00scs\x00sdndsdc\x00sdh\x00semesef\x00seh\x00se" +
- "i\x00ses\x00sgagsga\x00sgs\x00sgw\x00sgz\x00sh\x00\x02shi\x00shk\x00shn" +
- "\x00shu\x00siinsid\x00sig\x00sil\x00sim\x00sjr\x00sklkskc\x00skr\x00sks" +
- "\x00sllvsld\x00sli\x00sll\x00sly\x00smmosma\x00smi\x00smj\x00smn\x00smp" +
- "\x00smq\x00sms\x00snnasnc\x00snk\x00snp\x00snx\x00sny\x00soomsok\x00soq" +
- "\x00sou\x00soy\x00spd\x00spl\x00sps\x00sqqisrrpsrb\x00srn\x00srr\x00srx" +
- "\x00ssswssd\x00ssg\x00ssy\x00stotstk\x00stq\x00suunsua\x00sue\x00suk\x00" +
- "sur\x00sus\x00svweswwaswb\x00swc\x00swg\x00swp\x00swv\x00sxn\x00sxw\x00s" +
- "yl\x00syr\x00szl\x00taamtaj\x00tal\x00tan\x00taq\x00tbc\x00tbd\x00tbf" +
- "\x00tbg\x00tbo\x00tbw\x00tbz\x00tci\x00tcy\x00tdd\x00tdg\x00tdh\x00teelt" +
- "ed\x00tem\x00teo\x00tet\x00tfi\x00tggktgc\x00tgo\x00tgu\x00thhathl\x00th" +
- "q\x00thr\x00tiirtif\x00tig\x00tik\x00tim\x00tio\x00tiv\x00tkuktkl\x00tkr" +
- "\x00tkt\x00tlgltlf\x00tlx\x00tly\x00tmh\x00tmy\x00tnsntnh\x00toontof\x00" +
- "tog\x00toq\x00tpi\x00tpm\x00tpz\x00tqo\x00trurtru\x00trv\x00trw\x00tssot" +
- "sd\x00tsf\x00tsg\x00tsj\x00tsw\x00ttatttd\x00tte\x00ttj\x00ttr\x00tts" +
- "\x00ttt\x00tuh\x00tul\x00tum\x00tuq\x00tvd\x00tvl\x00tvu\x00twwitwh\x00t" +
- "wq\x00txg\x00tyahtya\x00tyv\x00tzm\x00ubu\x00udm\x00ugiguga\x00ukkruli" +
- "\x00umb\x00und\x00unr\x00unx\x00urrduri\x00urt\x00urw\x00usa\x00utr\x00u" +
- "vh\x00uvl\x00uzzbvag\x00vai\x00van\x00veenvec\x00vep\x00viievic\x00viv" +
- "\x00vls\x00vmf\x00vmw\x00voolvot\x00vro\x00vun\x00vut\x00walnwae\x00waj" +
- "\x00wal\x00wan\x00war\x00wbp\x00wbq\x00wbr\x00wci\x00wer\x00wgi\x00whg" +
- "\x00wib\x00wiu\x00wiv\x00wja\x00wji\x00wls\x00wmo\x00wnc\x00wni\x00wnu" +
- "\x00woolwob\x00wos\x00wrs\x00wsk\x00wtm\x00wuu\x00wuv\x00wwa\x00xav\x00x" +
- "bi\x00xcr\x00xes\x00xhhoxla\x00xlc\x00xld\x00xmf\x00xmn\x00xmr\x00xna" +
- "\x00xnr\x00xog\x00xon\x00xpr\x00xrb\x00xsa\x00xsi\x00xsm\x00xsr\x00xwe" +
- "\x00yam\x00yao\x00yap\x00yas\x00yat\x00yav\x00yay\x00yaz\x00yba\x00ybb" +
- "\x00yby\x00yer\x00ygr\x00ygw\x00yiidyko\x00yle\x00ylg\x00yll\x00yml\x00y" +
- "ooryon\x00yrb\x00yre\x00yrl\x00yss\x00yua\x00yue\x00yuj\x00yut\x00yuw" +
- "\x00zahazag\x00zbl\x00zdj\x00zea\x00zgh\x00zhhozhx\x00zia\x00zlm\x00zmi" +
- "\x00zne\x00zuulzxx\x00zza\x00\xff\xff\xff\xff"
-
-const langNoIndexOffset = 1330
-
-// langNoIndex is a bit vector of all 3-letter language codes that are not used as an index
-// in lookup tables. The language ids for these language codes are derived directly
-// from the letters and are not consecutive.
-// Size: 2197 bytes, 2197 elements
-var langNoIndex = [2197]uint8{
- // Entry 0 - 3F
- 0xff, 0xf8, 0xed, 0xfe, 0xeb, 0xd3, 0x3b, 0xd2,
- 0xfb, 0xbf, 0x7a, 0xfa, 0x37, 0x1d, 0x3c, 0x57,
- 0x6e, 0x97, 0x73, 0x38, 0xfb, 0xea, 0xbf, 0x70,
- 0xad, 0x03, 0xff, 0xff, 0xcf, 0x05, 0x84, 0x62,
- 0xe9, 0xbf, 0xfd, 0xbf, 0xbf, 0xf7, 0xfd, 0x77,
- 0x0f, 0xff, 0xef, 0x6f, 0xff, 0xfb, 0xdf, 0xe2,
- 0xc9, 0xf8, 0x7f, 0x7e, 0x4d, 0xbc, 0x0a, 0x6a,
- 0x7c, 0xea, 0xe3, 0xfa, 0x7a, 0xbf, 0x67, 0xff,
- // Entry 40 - 7F
- 0xff, 0xff, 0xff, 0xdf, 0x2a, 0x54, 0x91, 0xc0,
- 0x5d, 0xe3, 0x97, 0x14, 0x07, 0x20, 0xdd, 0xed,
- 0x9f, 0x3f, 0xc9, 0x21, 0xf8, 0x3f, 0x94, 0x35,
- 0x7c, 0x5f, 0xff, 0x5f, 0x8e, 0x6e, 0xdf, 0xff,
- 0xff, 0xff, 0x55, 0x7c, 0xd3, 0xfd, 0xbf, 0xb5,
- 0x7b, 0xdf, 0x7f, 0xf7, 0xca, 0xfe, 0xdb, 0xa3,
- 0xa8, 0xff, 0x1f, 0x67, 0x7d, 0xeb, 0xef, 0xce,
- 0xff, 0xff, 0x9f, 0xff, 0xb7, 0xef, 0xfe, 0xcf,
- // Entry 80 - BF
- 0xdb, 0xff, 0xf3, 0xcd, 0xfb, 0x6f, 0xff, 0xff,
- 0xbb, 0xee, 0xf7, 0xbd, 0xdb, 0xff, 0x5f, 0xf7,
- 0xfd, 0xf2, 0xfd, 0xff, 0x5e, 0x2f, 0x3b, 0xba,
- 0x7e, 0xff, 0xff, 0xfe, 0xf7, 0xff, 0xdd, 0xff,
- 0xfd, 0xdf, 0xfb, 0xfe, 0x9d, 0xb4, 0xd3, 0xff,
- 0xef, 0xff, 0xdf, 0xf7, 0x7f, 0xb7, 0xfd, 0xd5,
- 0xa5, 0x77, 0x40, 0xff, 0x9c, 0xc1, 0x41, 0x2c,
- 0x08, 0x21, 0x41, 0x00, 0x50, 0x40, 0x00, 0x80,
- // Entry C0 - FF
- 0xfb, 0x4a, 0xf2, 0x9f, 0xb4, 0x42, 0x41, 0x96,
- 0x1b, 0x14, 0x08, 0xf3, 0x2b, 0xe7, 0x17, 0x56,
- 0x05, 0x7d, 0x0e, 0x1c, 0x37, 0x7b, 0xf3, 0xef,
- 0x97, 0xff, 0x5d, 0x38, 0x64, 0x08, 0x00, 0x10,
- 0xbc, 0x85, 0xaf, 0xdf, 0xff, 0xff, 0x7b, 0x35,
- 0x3e, 0xc7, 0xc7, 0xdf, 0xff, 0x01, 0x81, 0x00,
- 0xb0, 0x05, 0x80, 0x00, 0x00, 0x00, 0x00, 0x03,
- 0x40, 0x00, 0x40, 0x92, 0x21, 0x50, 0xb1, 0x5d,
- // Entry 100 - 13F
- 0xfd, 0xdc, 0xbe, 0x5e, 0x00, 0x00, 0x02, 0x64,
- 0x0d, 0x19, 0x41, 0xdf, 0x79, 0x22, 0x00, 0x00,
- 0x00, 0x5e, 0x64, 0xdc, 0x24, 0xe5, 0xd9, 0xe3,
- 0xfe, 0xff, 0xfd, 0xcb, 0x9f, 0x14, 0x41, 0x0c,
- 0x86, 0x00, 0xd1, 0x00, 0xf0, 0xc7, 0x67, 0x5f,
- 0x56, 0x99, 0x5e, 0xb5, 0x6c, 0xaf, 0x03, 0x00,
- 0x02, 0x00, 0x00, 0x00, 0xc0, 0x37, 0xda, 0x56,
- 0x90, 0x69, 0x01, 0x2c, 0x96, 0x69, 0x20, 0xfb,
- // Entry 140 - 17F
- 0xff, 0x3f, 0x00, 0x00, 0x00, 0x01, 0x0c, 0x16,
- 0x03, 0x00, 0x00, 0xb0, 0x14, 0x03, 0x50, 0x06,
- 0x0a, 0x00, 0x01, 0x00, 0x00, 0x10, 0x11, 0x09,
- 0x00, 0x00, 0x60, 0x10, 0x00, 0x00, 0x00, 0x10,
- 0x00, 0x00, 0x44, 0x00, 0x00, 0x10, 0x00, 0x04,
- 0x08, 0x00, 0x00, 0x05, 0x00, 0x80, 0x28, 0x04,
- 0x00, 0x00, 0x40, 0xd5, 0x2d, 0x00, 0x64, 0x35,
- 0x24, 0x52, 0xf4, 0xd5, 0xbf, 0x62, 0xc9, 0x03,
- // Entry 180 - 1BF
- 0x00, 0x80, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x04, 0x13, 0x39, 0x01, 0xdd, 0x57, 0x98,
- 0x21, 0x18, 0x81, 0x00, 0x00, 0x01, 0x40, 0x82,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x40, 0x00, 0x44, 0x00, 0x00, 0x80, 0xea,
- 0xa9, 0x39, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
- // Entry 1C0 - 1FF
- 0x00, 0x03, 0x28, 0x05, 0x00, 0x00, 0x00, 0x00,
- 0x04, 0x20, 0x04, 0xa6, 0x00, 0x04, 0x00, 0x00,
- 0x81, 0x50, 0x00, 0x00, 0x00, 0x11, 0x84, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x55,
- 0x02, 0x10, 0x08, 0x04, 0x00, 0x00, 0x00, 0x40,
- 0x30, 0x83, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x1e, 0xcd, 0xbf, 0x7a, 0xbf,
- // Entry 200 - 23F
- 0xdf, 0xc3, 0x83, 0x82, 0xc0, 0xfb, 0x57, 0x27,
- 0xed, 0x55, 0xe7, 0x01, 0x00, 0x20, 0xb2, 0xc5,
- 0xa4, 0x45, 0x25, 0x9b, 0x02, 0xdf, 0xe1, 0xdf,
- 0x03, 0x44, 0x08, 0x90, 0x01, 0x04, 0x81, 0xe3,
- 0x92, 0x54, 0xdb, 0x28, 0xd3, 0x5f, 0xfe, 0x6d,
- 0x79, 0xed, 0x1c, 0x7d, 0x04, 0x08, 0x00, 0x01,
- 0x21, 0x12, 0x64, 0x5f, 0xdd, 0x0e, 0x85, 0x4f,
- 0x40, 0x40, 0x00, 0x04, 0xf1, 0xfd, 0x3d, 0x54,
- // Entry 240 - 27F
- 0xe8, 0x03, 0xb4, 0x27, 0x23, 0x0d, 0x00, 0x00,
- 0x20, 0x7b, 0x78, 0x02, 0x07, 0x84, 0x00, 0xf0,
- 0xbb, 0x7e, 0x5a, 0x00, 0x18, 0x04, 0x81, 0x00,
- 0x00, 0x00, 0x80, 0x10, 0x90, 0x1c, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0x00, 0x04,
- 0x08, 0xa0, 0x70, 0xa5, 0x0c, 0x40, 0x00, 0x00,
- 0x91, 0x24, 0x04, 0x68, 0x00, 0x20, 0x70, 0xff,
- 0x7b, 0x7f, 0x70, 0x00, 0x05, 0x9b, 0xdd, 0x66,
- // Entry 280 - 2BF
- 0x03, 0x00, 0x11, 0x00, 0x00, 0x00, 0x40, 0x05,
- 0xb5, 0xb6, 0x80, 0x08, 0x04, 0x00, 0x04, 0x51,
- 0xe2, 0xef, 0xfd, 0x3f, 0x05, 0x09, 0x08, 0x05,
- 0x40, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,
- 0x0c, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x60,
- 0xe7, 0x48, 0x00, 0x81, 0x20, 0xc0, 0x05, 0x80,
- 0x03, 0x00, 0x00, 0x00, 0x8c, 0x50, 0x40, 0x04,
- 0x84, 0x47, 0x84, 0x40, 0x20, 0x10, 0x00, 0x20,
- // Entry 2C0 - 2FF
- 0x02, 0x50, 0x80, 0x11, 0x00, 0x91, 0x6c, 0xe2,
- 0x50, 0x27, 0x1d, 0x11, 0x29, 0x06, 0x59, 0xe9,
- 0x33, 0x08, 0x00, 0x20, 0x04, 0x40, 0x10, 0x00,
- 0x00, 0x00, 0x50, 0x44, 0x92, 0x49, 0xd6, 0x5d,
- 0xa7, 0x81, 0x47, 0x97, 0xfb, 0x00, 0x10, 0x00,
- 0x08, 0x00, 0x80, 0x00, 0x40, 0x04, 0x00, 0x01,
- 0x02, 0x00, 0x01, 0x40, 0x80, 0x00, 0x00, 0x08,
- 0xd8, 0xeb, 0xf6, 0x39, 0xc4, 0x8d, 0x12, 0x00,
- // Entry 300 - 33F
- 0x00, 0x0c, 0x04, 0x01, 0x20, 0x20, 0xdd, 0xa0,
- 0x01, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
- 0x04, 0x10, 0xd0, 0x9d, 0x95, 0x13, 0x04, 0x80,
- 0x00, 0x01, 0xd0, 0x16, 0x40, 0x00, 0x10, 0xb0,
- 0x10, 0x62, 0x4c, 0xd2, 0x02, 0x01, 0x4a, 0x00,
- 0x46, 0x04, 0x00, 0x08, 0x02, 0x00, 0x20, 0x80,
- 0x00, 0x80, 0x06, 0x00, 0x08, 0x00, 0x00, 0x00,
- 0x00, 0xf0, 0xd8, 0x6f, 0x15, 0x02, 0x08, 0x00,
- // Entry 340 - 37F
- 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01,
- 0x00, 0x10, 0x00, 0x00, 0x00, 0xf0, 0x84, 0xe3,
- 0xdd, 0xbf, 0xf9, 0xf9, 0x3b, 0x7f, 0x7f, 0xdb,
- 0xfd, 0xfc, 0xfe, 0xdf, 0xff, 0xfd, 0xff, 0xf6,
- 0xfb, 0xfc, 0xf7, 0x1f, 0xff, 0xb3, 0x6c, 0xff,
- 0xd9, 0xad, 0xdf, 0xfe, 0xef, 0xba, 0xdf, 0xff,
- 0xff, 0xff, 0xb7, 0xdd, 0x7d, 0xbf, 0xab, 0x7f,
- 0xfd, 0xfd, 0xdf, 0x2f, 0x9c, 0xdf, 0xf3, 0x6f,
- // Entry 380 - 3BF
- 0xdf, 0xdd, 0xff, 0xfb, 0xee, 0xd2, 0xab, 0x5f,
- 0xd5, 0xdf, 0x7f, 0xff, 0xeb, 0xff, 0xe4, 0x4d,
- 0xf9, 0xff, 0xfe, 0xf7, 0xfd, 0xdf, 0xfb, 0xbf,
- 0xee, 0xdb, 0x6f, 0xef, 0xff, 0x7f, 0xff, 0xff,
- 0xf7, 0x5f, 0xd3, 0x3b, 0xfd, 0xd9, 0xdf, 0xeb,
- 0xbc, 0x08, 0x05, 0x24, 0xff, 0x07, 0x70, 0xfe,
- 0xe6, 0x5e, 0x00, 0x08, 0x00, 0x83, 0x3d, 0x1b,
- 0x06, 0xe6, 0x72, 0x60, 0xd1, 0x3c, 0x7f, 0x44,
- // Entry 3C0 - 3FF
- 0x02, 0x30, 0x9f, 0x7a, 0x16, 0xbd, 0x7f, 0x57,
- 0xf2, 0xff, 0x31, 0xff, 0xf2, 0x1e, 0x90, 0xf7,
- 0xf1, 0xf9, 0x45, 0x80, 0x01, 0x02, 0x00, 0x00,
- 0x40, 0x54, 0x9f, 0x8a, 0xdb, 0xf9, 0x2e, 0x11,
- 0x86, 0x51, 0xc0, 0xf3, 0xfb, 0x47, 0x40, 0x01,
- 0x05, 0xd1, 0x50, 0x5c, 0x00, 0x40, 0x00, 0x10,
- 0x04, 0x02, 0x00, 0x00, 0x0a, 0x00, 0x17, 0xd2,
- 0xb9, 0xfd, 0xfc, 0xba, 0xfe, 0xef, 0xc7, 0xbe,
- // Entry 400 - 43F
- 0x53, 0x6f, 0xdf, 0xe7, 0xdb, 0x65, 0xbb, 0x7f,
- 0xfa, 0xff, 0x77, 0xf3, 0xef, 0xbf, 0xfd, 0xf7,
- 0xdf, 0xdf, 0x9b, 0x7f, 0xff, 0xff, 0x7f, 0x6f,
- 0xf7, 0xfb, 0xeb, 0xdf, 0xbc, 0xff, 0xbf, 0x6b,
- 0x7b, 0xfb, 0xff, 0xce, 0x76, 0xbd, 0xf7, 0xf7,
- 0xdf, 0xdc, 0xf7, 0xf7, 0xff, 0xdf, 0xf3, 0xfe,
- 0xef, 0xff, 0xff, 0xff, 0xb6, 0x7f, 0x7f, 0xde,
- 0xf7, 0xb9, 0xeb, 0x77, 0xff, 0xfb, 0xbf, 0xdf,
- // Entry 440 - 47F
- 0xfd, 0xfe, 0xfb, 0xff, 0xfe, 0xeb, 0x1f, 0x7d,
- 0x2f, 0xfd, 0xb6, 0xb5, 0xa5, 0xfc, 0xff, 0xfd,
- 0x7f, 0x4e, 0xbf, 0x8f, 0xae, 0xff, 0xee, 0xdf,
- 0x7f, 0xf7, 0x73, 0x02, 0x02, 0x04, 0xfc, 0xf7,
- 0xff, 0xb7, 0xd7, 0xef, 0xfe, 0xcd, 0xf5, 0xce,
- 0xe2, 0x8e, 0xe7, 0xbf, 0xb7, 0xff, 0x56, 0xfd,
- 0xcd, 0xff, 0xfb, 0xff, 0xdf, 0xd7, 0xea, 0xff,
- 0xe5, 0x5f, 0x6d, 0x0f, 0xa7, 0x51, 0x06, 0xc4,
- // Entry 480 - 4BF
- 0x93, 0x50, 0x5d, 0xaf, 0xa6, 0xff, 0x99, 0xfb,
- 0x63, 0x1d, 0x53, 0xff, 0xef, 0xb7, 0x35, 0x20,
- 0x14, 0x00, 0x55, 0x51, 0x82, 0x65, 0xf5, 0x41,
- 0xe2, 0xff, 0xfc, 0xdf, 0x02, 0x05, 0xc5, 0x05,
- 0x00, 0x22, 0x00, 0x74, 0x69, 0x10, 0x08, 0x05,
- 0x41, 0x00, 0x01, 0x06, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x51, 0x20, 0x05, 0x04, 0x01, 0x00, 0x00,
- 0x06, 0x01, 0x20, 0x00, 0x18, 0x01, 0x92, 0xf1,
- // Entry 4C0 - 4FF
- 0xfd, 0x47, 0x69, 0x06, 0x95, 0x06, 0x57, 0xed,
- 0xfb, 0x4d, 0x1c, 0x6b, 0x83, 0x04, 0x62, 0x40,
- 0x00, 0x11, 0x42, 0x00, 0x00, 0x00, 0x54, 0x83,
- 0xb8, 0x4f, 0x10, 0x8e, 0x89, 0x46, 0xde, 0xf7,
- 0x13, 0x31, 0x00, 0x20, 0x00, 0x00, 0x00, 0x90,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x10, 0x00,
- 0x01, 0x00, 0x00, 0xf0, 0x5b, 0xf4, 0xbe, 0x3d,
- 0xbe, 0xcf, 0xf7, 0xaf, 0x42, 0x04, 0x84, 0x41,
- // Entry 500 - 53F
- 0x30, 0xff, 0x79, 0x72, 0x04, 0x00, 0x00, 0x49,
- 0x2d, 0x14, 0x27, 0x57, 0xed, 0xf1, 0x3f, 0xe7,
- 0x3f, 0x00, 0x00, 0x02, 0xc6, 0xa0, 0x1e, 0xf8,
- 0xbb, 0xff, 0xfd, 0xfb, 0xb7, 0xfd, 0xe7, 0xf7,
- 0xfd, 0xfc, 0xd5, 0xed, 0x47, 0xf4, 0x7e, 0x10,
- 0x01, 0x01, 0x84, 0x6d, 0xff, 0xf7, 0xdd, 0xf9,
- 0x5b, 0x05, 0x86, 0xed, 0xf5, 0x77, 0xbd, 0x3c,
- 0x00, 0x00, 0x00, 0x42, 0x71, 0x42, 0x00, 0x40,
- // Entry 540 - 57F
- 0x00, 0x00, 0x01, 0x43, 0x19, 0x00, 0x08, 0x00,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- // Entry 580 - 5BF
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xab, 0xbd, 0xe7, 0x57, 0xee, 0x13, 0x5d,
- 0x09, 0xc1, 0x40, 0x21, 0xfa, 0x17, 0x01, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0xf0, 0xce, 0xfb, 0xbf,
- 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
- 0x00, 0x30, 0x15, 0xa3, 0x10, 0x00, 0x00, 0x00,
- 0x11, 0x04, 0x16, 0x00, 0x00, 0x02, 0x00, 0x81,
- 0xa3, 0x01, 0x50, 0x00, 0x00, 0x83, 0x11, 0x40,
- // Entry 5C0 - 5FF
- 0x00, 0x00, 0x00, 0xf0, 0xdd, 0x7b, 0x3e, 0x02,
- 0xaa, 0x10, 0x5d, 0x98, 0x52, 0x00, 0x80, 0x20,
- 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x02, 0x02,
- 0x19, 0x00, 0x10, 0x02, 0x10, 0x61, 0x5a, 0x9d,
- 0x31, 0x00, 0x00, 0x00, 0x01, 0x18, 0x02, 0x20,
- 0x00, 0x00, 0x01, 0x00, 0x42, 0x00, 0x20, 0x00,
- 0x00, 0x1f, 0xdf, 0xd2, 0xb9, 0xff, 0xfd, 0x3f,
- 0x1f, 0x98, 0xcf, 0x9c, 0xff, 0xaf, 0x5f, 0xfe,
- // Entry 600 - 63F
- 0x7b, 0x4b, 0x40, 0x10, 0xe1, 0xfd, 0xaf, 0xd9,
- 0xb7, 0xf6, 0xfb, 0xb3, 0xc7, 0xff, 0x6f, 0xf1,
- 0x73, 0xb1, 0x7f, 0x9f, 0x7f, 0xbd, 0xfc, 0xb7,
- 0xee, 0x1c, 0xfa, 0xcb, 0xef, 0xdd, 0xf9, 0xbd,
- 0x6e, 0xae, 0x55, 0xfd, 0x6e, 0x81, 0x76, 0x9f,
- 0xd4, 0x77, 0xf5, 0x7d, 0xfb, 0xff, 0xeb, 0xfe,
- 0xbe, 0x5f, 0x46, 0x5b, 0xe9, 0x5f, 0x50, 0x18,
- 0x02, 0xfa, 0xf7, 0x9d, 0x15, 0x97, 0x05, 0x0f,
- // Entry 640 - 67F
- 0x75, 0xc4, 0x7d, 0x81, 0x92, 0xf5, 0x57, 0x6c,
- 0xff, 0xe4, 0xef, 0x6f, 0xff, 0xfc, 0xdd, 0xde,
- 0xfc, 0xfd, 0x76, 0x5f, 0x7a, 0x3f, 0x00, 0x98,
- 0x02, 0xfb, 0xa3, 0xef, 0xf3, 0xd6, 0xf2, 0xff,
- 0xb9, 0xda, 0x7d, 0xd0, 0x3e, 0x15, 0x7b, 0xb4,
- 0xf5, 0x3e, 0xff, 0xff, 0xf1, 0xf7, 0xff, 0xe7,
- 0x5f, 0xff, 0xff, 0x9e, 0xdb, 0xf6, 0xd7, 0xb9,
- 0xef, 0x27, 0x80, 0xbb, 0xc5, 0xff, 0xff, 0xe3,
- // Entry 680 - 6BF
- 0x97, 0x9d, 0xbf, 0x9f, 0xf7, 0xc7, 0xfd, 0x37,
- 0xce, 0x7f, 0x04, 0x1d, 0x73, 0x7f, 0xf8, 0xda,
- 0x5d, 0xce, 0x7d, 0x06, 0xb9, 0xea, 0x79, 0xa0,
- 0x1a, 0x20, 0x00, 0x30, 0x02, 0x04, 0x24, 0x08,
- 0x04, 0x00, 0x00, 0x40, 0xd4, 0x02, 0x04, 0x00,
- 0x00, 0x04, 0x00, 0x04, 0x00, 0x20, 0x01, 0x06,
- 0x50, 0x00, 0x08, 0x00, 0x00, 0x00, 0x24, 0x00,
- 0x04, 0x00, 0x10, 0xdc, 0x58, 0xd7, 0x0d, 0x0f,
- // Entry 6C0 - 6FF
- 0x14, 0x4d, 0xf1, 0x16, 0x44, 0xd5, 0x42, 0x08,
- 0x40, 0x00, 0x00, 0x40, 0x00, 0x08, 0x00, 0x00,
- 0x00, 0xdc, 0xfb, 0xcb, 0x0e, 0x58, 0x48, 0x41,
- 0x24, 0x20, 0x04, 0x00, 0x30, 0x12, 0x40, 0x00,
- 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x00, 0x00, 0x00, 0x80, 0x10, 0x10, 0xab,
- 0x6d, 0x93, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x80, 0x80, 0x25, 0x00, 0x00,
- // Entry 700 - 73F
- 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00,
- 0x80, 0x86, 0xc2, 0x00, 0x00, 0x00, 0x00, 0x01,
- 0xff, 0x18, 0x02, 0x00, 0x02, 0xf0, 0xfd, 0x79,
- 0x3b, 0x00, 0x25, 0x00, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00,
- 0x03, 0x00, 0x09, 0x20, 0x00, 0x00, 0x01, 0x00,
- 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- // Entry 740 - 77F
- 0x00, 0x00, 0x00, 0xef, 0xd5, 0xfd, 0xcf, 0x7e,
- 0xb0, 0x11, 0x00, 0x00, 0x00, 0x92, 0x01, 0x44,
- 0xcd, 0xf9, 0x5c, 0x00, 0x01, 0x00, 0x30, 0x04,
- 0x04, 0x55, 0x00, 0x01, 0x04, 0xf4, 0x3f, 0x4a,
- 0x01, 0x00, 0x00, 0xb0, 0x80, 0x20, 0x55, 0x75,
- 0x97, 0x7c, 0xdf, 0x31, 0xcc, 0x68, 0xd1, 0x03,
- 0xd5, 0x57, 0x27, 0x14, 0x01, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x2c, 0xf7, 0xcb, 0x1f, 0x14, 0x60,
- // Entry 780 - 7BF
- 0x03, 0x68, 0x01, 0x10, 0x8b, 0x38, 0x8a, 0x01,
- 0x00, 0x00, 0x20, 0x00, 0x24, 0x44, 0x00, 0x00,
- 0x10, 0x03, 0x11, 0x02, 0x01, 0x00, 0x00, 0xf0,
- 0xf5, 0xff, 0xd5, 0x97, 0xbc, 0x70, 0xd6, 0x78,
- 0x78, 0x15, 0x50, 0x01, 0xa4, 0x84, 0xa9, 0x41,
- 0x00, 0x00, 0x00, 0x6b, 0x39, 0x52, 0x74, 0x00,
- 0xe8, 0x30, 0x90, 0x6a, 0x92, 0x00, 0x00, 0x02,
- 0xff, 0xef, 0xff, 0x4b, 0x85, 0x53, 0xf4, 0xed,
- // Entry 7C0 - 7FF
- 0xdd, 0xbf, 0xf2, 0x5d, 0xc7, 0x0c, 0xd5, 0x42,
- 0xfc, 0xff, 0xf7, 0x1f, 0x00, 0x80, 0x40, 0x56,
- 0xcc, 0x16, 0x9e, 0xea, 0x35, 0x7d, 0xef, 0xff,
- 0xbd, 0xa4, 0xaf, 0x01, 0x44, 0x18, 0x01, 0x4d,
- 0x4e, 0x4a, 0x08, 0x50, 0x28, 0x30, 0xe0, 0x80,
- 0x10, 0x20, 0x24, 0x00, 0xff, 0x2f, 0xd3, 0x60,
- 0xfe, 0x01, 0x02, 0x88, 0x0a, 0x40, 0x16, 0x01,
- 0x01, 0x15, 0x2b, 0x3c, 0x01, 0x00, 0x00, 0x10,
- // Entry 800 - 83F
- 0x90, 0x49, 0x41, 0x02, 0x02, 0x01, 0xe1, 0xbf,
- 0xbf, 0x03, 0x00, 0x00, 0x10, 0xd4, 0xa3, 0xd1,
- 0x40, 0x9c, 0x44, 0xdf, 0xf5, 0x8f, 0x66, 0xb3,
- 0x55, 0x20, 0xd4, 0xc1, 0xd8, 0x30, 0x3d, 0x80,
- 0x00, 0x00, 0x00, 0x04, 0xd4, 0x11, 0xc5, 0x84,
- 0x2f, 0x50, 0x00, 0x22, 0x50, 0x6e, 0xbd, 0x93,
- 0x07, 0x00, 0x20, 0x10, 0x84, 0xb2, 0x45, 0x10,
- 0x06, 0x44, 0x00, 0x00, 0x12, 0x02, 0x11, 0x00,
- // Entry 840 - 87F
- 0xf0, 0xfb, 0xfd, 0x7f, 0x05, 0x00, 0x16, 0x81,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x02,
- 0x00, 0x00, 0x00, 0x00, 0x03, 0x30, 0x02, 0x28,
- 0x84, 0x00, 0x21, 0xc0, 0x23, 0x24, 0x00, 0x00,
- 0x00, 0xcb, 0xe4, 0x3a, 0x46, 0x88, 0x14, 0xf1,
- 0xef, 0xff, 0x7f, 0x12, 0x01, 0x01, 0x84, 0x50,
- 0x07, 0xfc, 0xff, 0xff, 0x0f, 0x01, 0x00, 0x40,
- 0x10, 0x38, 0x01, 0x01, 0x1c, 0x12, 0x40, 0xe1,
- // Entry 880 - 8BF
- 0x76, 0x16, 0x08, 0x03, 0x10, 0x00, 0x00, 0x00,
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x24,
- 0x0a, 0x00, 0x80, 0x00, 0x00,
-}
-
-// altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives
-// to 2-letter language codes that cannot be derived using the method described above.
-// Each 3-letter code is followed by its 1-byte langID.
-const altLangISO3 tag.Index = "---\x00cor\x00hbs\x01heb\x02kin\x03spa\x04yid\x05\xff\xff\xff\xff"
-
-// altLangIndex is used to convert indexes in altLangISO3 to langIDs.
-// Size: 12 bytes, 6 elements
-var altLangIndex = [6]uint16{
- 0x0281, 0x0407, 0x01fb, 0x03e5, 0x013e, 0x0208,
-}
-
-// AliasMap maps langIDs to their suggested replacements.
-// Size: 716 bytes, 179 elements
-var AliasMap = [179]FromTo{
- 0: {From: 0x82, To: 0x88},
- 1: {From: 0x187, To: 0x1ae},
- 2: {From: 0x1f3, To: 0x1e1},
- 3: {From: 0x1fb, To: 0x1bc},
- 4: {From: 0x208, To: 0x512},
- 5: {From: 0x20f, To: 0x20e},
- 6: {From: 0x310, To: 0x3dc},
- 7: {From: 0x347, To: 0x36f},
- 8: {From: 0x407, To: 0x432},
- 9: {From: 0x47a, To: 0x153},
- 10: {From: 0x490, To: 0x451},
- 11: {From: 0x4a2, To: 0x21},
- 12: {From: 0x53e, To: 0x544},
- 13: {From: 0x58f, To: 0x12d},
- 14: {From: 0x630, To: 0x1eb1},
- 15: {From: 0x651, To: 0x431},
- 16: {From: 0x662, To: 0x431},
- 17: {From: 0x6ed, To: 0x3a},
- 18: {From: 0x6f8, To: 0x1d7},
- 19: {From: 0x709, To: 0x3625},
- 20: {From: 0x73e, To: 0x21a1},
- 21: {From: 0x7b3, To: 0x56},
- 22: {From: 0x7b9, To: 0x299b},
- 23: {From: 0x7c5, To: 0x58},
- 24: {From: 0x7e6, To: 0x145},
- 25: {From: 0x80c, To: 0x5a},
- 26: {From: 0x815, To: 0x8d},
- 27: {From: 0x87e, To: 0x810},
- 28: {From: 0x8a8, To: 0x8b7},
- 29: {From: 0x8c3, To: 0xee3},
- 30: {From: 0x8fa, To: 0x1dc},
- 31: {From: 0x9ef, To: 0x331},
- 32: {From: 0xa36, To: 0x2c5},
- 33: {From: 0xa3d, To: 0xbf},
- 34: {From: 0xabe, To: 0x3322},
- 35: {From: 0xb38, To: 0x529},
- 36: {From: 0xb75, To: 0x265a},
- 37: {From: 0xb7e, To: 0xbc3},
- 38: {From: 0xb9b, To: 0x44e},
- 39: {From: 0xbbc, To: 0x4229},
- 40: {From: 0xbbf, To: 0x529},
- 41: {From: 0xbfe, To: 0x2da7},
- 42: {From: 0xc2e, To: 0x3181},
- 43: {From: 0xcb9, To: 0xf3},
- 44: {From: 0xd08, To: 0xfa},
- 45: {From: 0xdc8, To: 0x11a},
- 46: {From: 0xdd7, To: 0x32d},
- 47: {From: 0xdf8, To: 0xdfb},
- 48: {From: 0xdfe, To: 0x531},
- 49: {From: 0xe01, To: 0xdf3},
- 50: {From: 0xedf, To: 0x205a},
- 51: {From: 0xee9, To: 0x222e},
- 52: {From: 0xeee, To: 0x2e9a},
- 53: {From: 0xf39, To: 0x367},
- 54: {From: 0x10d0, To: 0x140},
- 55: {From: 0x1104, To: 0x2d0},
- 56: {From: 0x11a0, To: 0x1ec},
- 57: {From: 0x1279, To: 0x21},
- 58: {From: 0x1424, To: 0x15e},
- 59: {From: 0x1470, To: 0x14e},
- 60: {From: 0x151f, To: 0xd9b},
- 61: {From: 0x1523, To: 0x390},
- 62: {From: 0x1532, To: 0x19f},
- 63: {From: 0x1580, To: 0x210},
- 64: {From: 0x1583, To: 0x10d},
- 65: {From: 0x15a3, To: 0x3caf},
- 66: {From: 0x1630, To: 0x222e},
- 67: {From: 0x166a, To: 0x19b},
- 68: {From: 0x16c8, To: 0x136},
- 69: {From: 0x1700, To: 0x29f8},
- 70: {From: 0x1718, To: 0x194},
- 71: {From: 0x1727, To: 0xf3f},
- 72: {From: 0x177a, To: 0x178},
- 73: {From: 0x1809, To: 0x17b6},
- 74: {From: 0x1816, To: 0x18f3},
- 75: {From: 0x188a, To: 0x436},
- 76: {From: 0x1979, To: 0x1d01},
- 77: {From: 0x1a74, To: 0x2bb0},
- 78: {From: 0x1a8a, To: 0x1f8},
- 79: {From: 0x1b5a, To: 0x1fa},
- 80: {From: 0x1b86, To: 0x1515},
- 81: {From: 0x1d64, To: 0x2c9b},
- 82: {From: 0x2038, To: 0x37b1},
- 83: {From: 0x203d, To: 0x20dd},
- 84: {From: 0x205a, To: 0x30b},
- 85: {From: 0x20e3, To: 0x274},
- 86: {From: 0x20ee, To: 0x263},
- 87: {From: 0x20f2, To: 0x22d},
- 88: {From: 0x20f9, To: 0x256},
- 89: {From: 0x210f, To: 0x21eb},
- 90: {From: 0x2135, To: 0x27d},
- 91: {From: 0x2160, To: 0x913},
- 92: {From: 0x2199, To: 0x121},
- 93: {From: 0x21ce, To: 0x1561},
- 94: {From: 0x21e6, To: 0x504},
- 95: {From: 0x21f4, To: 0x49f},
- 96: {From: 0x21fb, To: 0x269},
- 97: {From: 0x222d, To: 0x121},
- 98: {From: 0x2237, To: 0x121},
- 99: {From: 0x2262, To: 0x92a},
- 100: {From: 0x2316, To: 0x3226},
- 101: {From: 0x236a, To: 0x2835},
- 102: {From: 0x2382, To: 0x3365},
- 103: {From: 0x2472, To: 0x2c7},
- 104: {From: 0x24e4, To: 0x2ff},
- 105: {From: 0x24f0, To: 0x2fa},
- 106: {From: 0x24fa, To: 0x31f},
- 107: {From: 0x2550, To: 0xb5b},
- 108: {From: 0x25a9, To: 0xe2},
- 109: {From: 0x263e, To: 0x2d0},
- 110: {From: 0x26c9, To: 0x26b4},
- 111: {From: 0x26f9, To: 0x3c8},
- 112: {From: 0x2727, To: 0x3caf},
- 113: {From: 0x2755, To: 0x6a4},
- 114: {From: 0x2765, To: 0x26b4},
- 115: {From: 0x2789, To: 0x4358},
- 116: {From: 0x27c9, To: 0x2001},
- 117: {From: 0x28ea, To: 0x27b1},
- 118: {From: 0x28ef, To: 0x2837},
- 119: {From: 0x2914, To: 0x351},
- 120: {From: 0x2986, To: 0x2da7},
- 121: {From: 0x29f0, To: 0x96b},
- 122: {From: 0x2b1a, To: 0x38d},
- 123: {From: 0x2bfc, To: 0x395},
- 124: {From: 0x2c3f, To: 0x3caf},
- 125: {From: 0x2ce1, To: 0x2201},
- 126: {From: 0x2cfc, To: 0x3be},
- 127: {From: 0x2d13, To: 0x597},
- 128: {From: 0x2d47, To: 0x148},
- 129: {From: 0x2d48, To: 0x148},
- 130: {From: 0x2dff, To: 0x2f1},
- 131: {From: 0x2e08, To: 0x19cc},
- 132: {From: 0x2e1a, To: 0x2d95},
- 133: {From: 0x2e21, To: 0x292},
- 134: {From: 0x2e54, To: 0x7d},
- 135: {From: 0x2e65, To: 0x2282},
- 136: {From: 0x2ea0, To: 0x2e9b},
- 137: {From: 0x2eef, To: 0x2ed7},
- 138: {From: 0x3193, To: 0x3c4},
- 139: {From: 0x3366, To: 0x338e},
- 140: {From: 0x342a, To: 0x3dc},
- 141: {From: 0x34ee, To: 0x18d0},
- 142: {From: 0x35c8, To: 0x2c9b},
- 143: {From: 0x35e6, To: 0x412},
- 144: {From: 0x3658, To: 0x246},
- 145: {From: 0x3676, To: 0x3f4},
- 146: {From: 0x36fd, To: 0x445},
- 147: {From: 0x37c0, To: 0x121},
- 148: {From: 0x3816, To: 0x38f2},
- 149: {From: 0x382a, To: 0x2b48},
- 150: {From: 0x382b, To: 0x2c9b},
- 151: {From: 0x382f, To: 0xa9},
- 152: {From: 0x3832, To: 0x3228},
- 153: {From: 0x386c, To: 0x39a6},
- 154: {From: 0x3892, To: 0x3fc0},
- 155: {From: 0x38a5, To: 0x39d7},
- 156: {From: 0x38b4, To: 0x1fa4},
- 157: {From: 0x38b5, To: 0x2e9a},
- 158: {From: 0x395c, To: 0x47e},
- 159: {From: 0x3b4e, To: 0xd91},
- 160: {From: 0x3b78, To: 0x137},
- 161: {From: 0x3c99, To: 0x4bc},
- 162: {From: 0x3fbd, To: 0x100},
- 163: {From: 0x4208, To: 0xa91},
- 164: {From: 0x42be, To: 0x573},
- 165: {From: 0x42f9, To: 0x3f60},
- 166: {From: 0x4378, To: 0x25a},
- 167: {From: 0x43b8, To: 0xe6c},
- 168: {From: 0x43cd, To: 0x10f},
- 169: {From: 0x44af, To: 0x3322},
- 170: {From: 0x44e3, To: 0x512},
- 171: {From: 0x45ca, To: 0x2409},
- 172: {From: 0x45dd, To: 0x26dc},
- 173: {From: 0x4610, To: 0x48ae},
- 174: {From: 0x46ae, To: 0x46a0},
- 175: {From: 0x473e, To: 0x4745},
- 176: {From: 0x4817, To: 0x3503},
- 177: {From: 0x4916, To: 0x31f},
- 178: {From: 0x49a7, To: 0x523},
-}
-
-// Size: 179 bytes, 179 elements
-var AliasTypes = [179]AliasType{
- // Entry 0 - 3F
- 1, 0, 0, 0, 0, 0, 0, 1, 2, 2, 0, 1, 0, 0, 1, 2,
- 1, 1, 2, 0, 0, 1, 0, 1, 2, 1, 1, 0, 0, 0, 0, 2,
- 1, 1, 0, 2, 0, 0, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1,
- 1, 0, 0, 0, 0, 2, 1, 1, 1, 1, 2, 1, 0, 1, 1, 2,
- // Entry 40 - 7F
- 2, 0, 0, 1, 2, 0, 1, 0, 1, 1, 1, 1, 0, 0, 2, 1,
- 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0,
- 0, 0, 0, 1, 0, 0, 0, 1, 2, 2, 2, 0, 1, 1, 0, 1,
- 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0,
- // Entry 80 - BF
- 2, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 2, 0, 0, 2,
- 1, 1, 1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 1,
- 0, 1, 2, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1,
- 0, 1, 1,
-}
-
-const (
- _Latn = 90
- _Hani = 57
- _Hans = 59
- _Hant = 60
- _Qaaa = 147
- _Qaai = 155
- _Qabx = 196
- _Zinh = 252
- _Zyyy = 257
- _Zzzz = 258
-)
-
-// script is an alphabetically sorted list of ISO 15924 codes. The index
-// of the script in the string, divided by 4, is the internal scriptID.
-const script tag.Index = "" + // Size: 1040 bytes
- "----AdlmAfakAghbAhomArabAranArmiArmnAvstBaliBamuBassBatkBengBhksBlisBopo" +
- "BrahBraiBugiBuhdCakmCansCariChamCherChrsCirtCoptCpmnCprtCyrlCyrsDevaDiak" +
- "DogrDsrtDuplEgydEgyhEgypElbaElymEthiGeokGeorGlagGongGonmGothGranGrekGujr" +
- "GuruHanbHangHaniHanoHansHantHatrHebrHiraHluwHmngHmnpHrktHungIndsItalJamo" +
- "JavaJpanJurcKaliKanaKharKhmrKhojKitlKitsKndaKoreKpelKthiLanaLaooLatfLatg" +
- "LatnLekeLepcLimbLinaLinbLisuLomaLyciLydiMahjMakaMandManiMarcMayaMedfMend" +
- "MercMeroMlymModiMongMoonMrooMteiMultMymrNandNarbNbatNewaNkdbNkgbNkooNshu" +
- "OgamOlckOrkhOryaOsgeOsmaOugrPalmPaucPcunPelmPermPhagPhliPhlpPhlvPhnxPiqd" +
- "PlrdPrtiPsinQaaaQaabQaacQaadQaaeQaafQaagQaahQaaiQaajQaakQaalQaamQaanQaao" +
- "QaapQaaqQaarQaasQaatQaauQaavQaawQaaxQaayQaazQabaQabbQabcQabdQabeQabfQabg" +
- "QabhQabiQabjQabkQablQabmQabnQaboQabpQabqQabrQabsQabtQabuQabvQabwQabxRanj" +
- "RjngRohgRoroRunrSamrSaraSarbSaurSgnwShawShrdShuiSiddSindSinhSogdSogoSora" +
- "SoyoSundSyloSyrcSyreSyrjSyrnTagbTakrTaleTaluTamlTangTavtTeluTengTfngTglg" +
- "ThaaThaiTibtTirhTnsaTotoUgarVaiiVispVithWaraWchoWoleXpeoXsuxYeziYiiiZanb" +
- "ZinhZmthZsyeZsymZxxxZyyyZzzz\xff\xff\xff\xff"
-
-// suppressScript is an index from langID to the dominant script for that language,
-// if it exists. If a script is given, it should be suppressed from the language tag.
-// Size: 1330 bytes, 1330 elements
-var suppressScript = [1330]uint8{
- // Entry 0 - 3F
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2c,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00,
- // Entry 40 - 7F
- 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
- // Entry 80 - BF
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- // Entry C0 - FF
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00,
- // Entry 100 - 13F
- 0x5a, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xea, 0x00, 0x00, 0x00, 0x00, 0xec, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00,
- 0x00, 0x5a, 0x00, 0x00, 0x5a, 0x00, 0x5a, 0x00,
- // Entry 140 - 17F
- 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00,
- 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00,
- 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00,
- 0x00, 0x5a, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x5a, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- // Entry 180 - 1BF
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x5a, 0x35, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x22, 0x00,
- // Entry 1C0 - 1FF
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x5a, 0x5a, 0x00, 0x5a, 0x5a, 0x00, 0x08,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00,
- 0x5a, 0x5a, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00,
- // Entry 200 - 23F
- 0x49, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x2e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- // Entry 240 - 27F
- 0x00, 0x00, 0x20, 0x00, 0x00, 0x5a, 0x00, 0x00,
- 0x00, 0x00, 0x4e, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x52, 0x00, 0x00, 0x53, 0x00, 0x22, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- // Entry 280 - 2BF
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00,
- 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- // Entry 2C0 - 2FF
- 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,
- // Entry 300 - 33F
- 0x00, 0x00, 0x00, 0x00, 0x6e, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x5a,
- 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x75, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00,
- // Entry 340 - 37F
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00,
- 0x5a, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a,
- 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5a,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x5a, 0x00,
- 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00,
- // Entry 380 - 3BF
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x5a, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00,
- // Entry 3C0 - 3FF
- 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00,
- 0x00, 0x5a, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x20, 0x00, 0x00, 0x5a, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- // Entry 400 - 43F
- 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00,
- 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a,
- 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00,
- // Entry 440 - 47F
- 0x00, 0x00, 0x00, 0x00, 0x5a, 0x5a, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xe3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0xe6, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xeb, 0x00, 0x00, 0x00, 0x2c,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a,
- 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00,
- // Entry 480 - 4BF
- 0x5a, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00,
- 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x5a, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- // Entry 4C0 - 4FF
- 0x5a, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- // Entry 500 - 53F
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a,
- 0x00, 0x00,
-}
-
-const (
- _001 = 1
- _419 = 31
- _BR = 65
- _CA = 73
- _ES = 110
- _GB = 123
- _MD = 188
- _PT = 238
- _UK = 306
- _US = 309
- _ZZ = 357
- _XA = 323
- _XC = 325
- _XK = 333
-)
-
-// isoRegionOffset needs to be added to the index of regionISO to obtain the regionID
-// for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for
-// the UN.M49 codes used for groups.)
-const isoRegionOffset = 32
-
-// regionTypes defines the status of a region for various standards.
-// Size: 358 bytes, 358 elements
-var regionTypes = [358]uint8{
- // Entry 0 - 3F
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x05, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- // Entry 40 - 7F
- 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, 0x04,
- 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x04,
- 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06,
- 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00,
- 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- // Entry 80 - BF
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x00, 0x04, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- // Entry C0 - FF
- 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00,
- 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x04, 0x06,
- 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00,
- 0x06, 0x06, 0x00, 0x06, 0x05, 0x05, 0x05, 0x05,
- 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
- // Entry 100 - 13F
- 0x05, 0x05, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x04, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x02, 0x06, 0x04, 0x06, 0x06, 0x06,
- 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06,
- // Entry 140 - 17F
- 0x06, 0x00, 0x06, 0x05, 0x05, 0x05, 0x05, 0x05,
- 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
- 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
- 0x05, 0x05, 0x05, 0x05, 0x05, 0x04, 0x06, 0x06,
- 0x04, 0x06, 0x06, 0x04, 0x06, 0x05,
-}
-
-// regionISO holds a list of alphabetically sorted 2-letter ISO region codes.
-// Each 2-letter code is followed by two bytes with the following meaning:
-// - [A-Z]{2}: the first letter of the 2-letter code plus these two
-// letters form the 3-letter ISO code.
-// - 0, n: index into altRegionISO3.
-const regionISO tag.Index = "" + // Size: 1308 bytes
- "AAAAACSCADNDAEREAFFGAGTGAIIAALLBAMRMANNTAOGOAQTAARRGASSMATUTAUUSAWBWAXLA" +
- "AZZEBAIHBBRBBDGDBEELBFFABGGRBHHRBIDIBJENBLLMBMMUBNRNBOOLBQESBRRABSHSBTTN" +
- "BUURBVVTBWWABYLRBZLZCAANCCCKCDODCFAFCGOGCHHECIIVCKOKCLHLCMMRCNHNCOOLCPPT" +
- "CRRICS\x00\x00CTTECUUBCVPVCWUWCXXRCYYPCZZEDDDRDEEUDGGADJJIDKNKDMMADOOMDY" +
- "HYDZZAEA ECCUEESTEGGYEHSHERRIESSPETTHEU\x00\x03EZ FIINFJJIFKLKFMSMFORO" +
- "FQ\x00\x18FRRAFXXXGAABGBBRGDRDGEEOGFUFGGGYGHHAGIIBGLRLGMMBGNINGPLPGQNQGR" +
- "RCGS\x00\x06GTTMGUUMGWNBGYUYHKKGHMMDHNNDHRRVHTTIHUUNHVVOIC IDDNIERLILSR" +
- "IMMNINNDIOOTIQRQIRRNISSLITTAJEEYJMAMJOORJPPNJTTNKEENKGGZKHHMKIIRKM\x00" +
- "\x09KNNAKP\x00\x0cKRORKWWTKY\x00\x0fKZAZLAAOLBBNLCCALIIELKKALRBRLSSOLTTU" +
- "LUUXLVVALYBYMAARMCCOMDDAMENEMFAFMGDGMHHLMIIDMKKDMLLIMMMRMNNGMOACMPNPMQTQ" +
- "MRRTMSSRMTLTMUUSMVDVMWWIMXEXMYYSMZOZNAAMNCCLNEERNFFKNGGANHHBNIICNLLDNOOR" +
- "NPPLNQ\x00\x1eNRRUNTTZNUIUNZZLOMMNPAANPCCIPEERPFYFPGNGPHHLPKAKPLOLPM\x00" +
- "\x12PNCNPRRIPSSEPTRTPUUSPWLWPYRYPZCZQAATQMMMQNNNQOOOQPPPQQQQQRRRQSSSQTTT" +
- "QU\x00\x03QVVVQWWWQXXXQYYYQZZZREEURHHOROOURS\x00\x15RUUSRWWASAAUSBLBSCYC" +
- "SDDNSEWESGGPSHHNSIVNSJJMSKVKSLLESMMRSNENSOOMSRURSSSDSTTPSUUNSVLVSXXMSYYR" +
- "SZWZTAAATCCATDCDTF\x00\x18TGGOTHHATJJKTKKLTLLSTMKMTNUNTOONTPMPTRURTTTOTV" +
- "UVTWWNTZZAUAKRUGGAUK UMMIUN USSAUYRYUZZBVAATVCCTVDDRVEENVGGBVIIRVNNMVU" +
- "UTWFLFWKAKWSSMXAAAXBBBXCCCXDDDXEEEXFFFXGGGXHHHXIIIXJJJXKKKXLLLXMMMXNNNXO" +
- "OOXPPPXQQQXRRRXSSSXTTTXUUUXVVVXWWWXXXXXYYYXZZZYDMDYEEMYT\x00\x1bYUUGZAAF" +
- "ZMMBZRARZWWEZZZZ\xff\xff\xff\xff"
-
-// altRegionISO3 holds a list of 3-letter region codes that cannot be
-// mapped to 2-letter codes using the default algorithm. This is a short list.
-const altRegionISO3 string = "SCGQUUSGSCOMPRKCYMSPMSRBATFMYTATN"
-
-// altRegionIDs holds a list of regionIDs whose positions match those of the
-// 3-letter ISO codes in altRegionISO3.
-// Size: 22 bytes, 11 elements
-var altRegionIDs = [11]uint16{
- 0x0057, 0x0070, 0x0088, 0x00a8, 0x00aa, 0x00ad, 0x00ea, 0x0105,
- 0x0121, 0x015f, 0x00dc,
-}
-
-// Size: 80 bytes, 20 elements
-var regionOldMap = [20]FromTo{
- 0: {From: 0x44, To: 0xc4},
- 1: {From: 0x58, To: 0xa7},
- 2: {From: 0x5f, To: 0x60},
- 3: {From: 0x66, To: 0x3b},
- 4: {From: 0x79, To: 0x78},
- 5: {From: 0x93, To: 0x37},
- 6: {From: 0xa3, To: 0x133},
- 7: {From: 0xc1, To: 0x133},
- 8: {From: 0xd7, To: 0x13f},
- 9: {From: 0xdc, To: 0x2b},
- 10: {From: 0xef, To: 0x133},
- 11: {From: 0xf2, To: 0xe2},
- 12: {From: 0xfc, To: 0x70},
- 13: {From: 0x103, To: 0x164},
- 14: {From: 0x12a, To: 0x126},
- 15: {From: 0x132, To: 0x7b},
- 16: {From: 0x13a, To: 0x13e},
- 17: {From: 0x141, To: 0x133},
- 18: {From: 0x15d, To: 0x15e},
- 19: {From: 0x163, To: 0x4b},
-}
-
-// m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are
-// codes indicating collections of regions.
-// Size: 716 bytes, 358 elements
-var m49 = [358]int16{
- // Entry 0 - 3F
- 0, 1, 2, 3, 5, 9, 11, 13,
- 14, 15, 17, 18, 19, 21, 29, 30,
- 34, 35, 39, 53, 54, 57, 61, 142,
- 143, 145, 150, 151, 154, 155, 202, 419,
- 958, 0, 20, 784, 4, 28, 660, 8,
- 51, 530, 24, 10, 32, 16, 40, 36,
- 533, 248, 31, 70, 52, 50, 56, 854,
- 100, 48, 108, 204, 652, 60, 96, 68,
- // Entry 40 - 7F
- 535, 76, 44, 64, 104, 74, 72, 112,
- 84, 124, 166, 180, 140, 178, 756, 384,
- 184, 152, 120, 156, 170, 0, 188, 891,
- 296, 192, 132, 531, 162, 196, 203, 278,
- 276, 0, 262, 208, 212, 214, 204, 12,
- 0, 218, 233, 818, 732, 232, 724, 231,
- 967, 0, 246, 242, 238, 583, 234, 0,
- 250, 249, 266, 826, 308, 268, 254, 831,
- // Entry 80 - BF
- 288, 292, 304, 270, 324, 312, 226, 300,
- 239, 320, 316, 624, 328, 344, 334, 340,
- 191, 332, 348, 854, 0, 360, 372, 376,
- 833, 356, 86, 368, 364, 352, 380, 832,
- 388, 400, 392, 581, 404, 417, 116, 296,
- 174, 659, 408, 410, 414, 136, 398, 418,
- 422, 662, 438, 144, 430, 426, 440, 442,
- 428, 434, 504, 492, 498, 499, 663, 450,
- // Entry C0 - FF
- 584, 581, 807, 466, 104, 496, 446, 580,
- 474, 478, 500, 470, 480, 462, 454, 484,
- 458, 508, 516, 540, 562, 574, 566, 548,
- 558, 528, 578, 524, 10, 520, 536, 570,
- 554, 512, 591, 0, 604, 258, 598, 608,
- 586, 616, 666, 612, 630, 275, 620, 581,
- 585, 600, 591, 634, 959, 960, 961, 962,
- 963, 964, 965, 966, 967, 968, 969, 970,
- // Entry 100 - 13F
- 971, 972, 638, 716, 642, 688, 643, 646,
- 682, 90, 690, 729, 752, 702, 654, 705,
- 744, 703, 694, 674, 686, 706, 740, 728,
- 678, 810, 222, 534, 760, 748, 0, 796,
- 148, 260, 768, 764, 762, 772, 626, 795,
- 788, 776, 626, 792, 780, 798, 158, 834,
- 804, 800, 826, 581, 0, 840, 858, 860,
- 336, 670, 704, 862, 92, 850, 704, 548,
- // Entry 140 - 17F
- 876, 581, 882, 973, 974, 975, 976, 977,
- 978, 979, 980, 981, 982, 983, 984, 985,
- 986, 987, 988, 989, 990, 991, 992, 993,
- 994, 995, 996, 997, 998, 720, 887, 175,
- 891, 710, 894, 180, 716, 999,
-}
-
-// m49Index gives indexes into fromM49 based on the three most significant bits
-// of a 10-bit UN.M49 code. To search for a UN.M49 code in fromM49, search in
-//
-// fromM49[m49Index[msb3(code)]:m49Index[msb3(code)+1]]
-//
-// for an entry where the first 7 bits match the 7 lsb of the UN.M49 code.
-// The region code is stored in the 9 lsb of the indexed value.
-// Size: 18 bytes, 9 elements
-var m49Index = [9]int16{
- 0, 59, 108, 143, 181, 220, 259, 291,
- 333,
-}
-
-// fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.
-// Size: 666 bytes, 333 elements
-var fromM49 = [333]uint16{
- // Entry 0 - 3F
- 0x0201, 0x0402, 0x0603, 0x0824, 0x0a04, 0x1027, 0x1205, 0x142b,
- 0x1606, 0x1867, 0x1a07, 0x1c08, 0x1e09, 0x202d, 0x220a, 0x240b,
- 0x260c, 0x2822, 0x2a0d, 0x302a, 0x3825, 0x3a0e, 0x3c0f, 0x3e32,
- 0x402c, 0x4410, 0x4611, 0x482f, 0x4e12, 0x502e, 0x5842, 0x6039,
- 0x6435, 0x6628, 0x6834, 0x6a13, 0x6c14, 0x7036, 0x7215, 0x783d,
- 0x7a16, 0x8043, 0x883f, 0x8c33, 0x9046, 0x9445, 0x9841, 0xa848,
- 0xac9a, 0xb509, 0xb93c, 0xc03e, 0xc838, 0xd0c4, 0xd83a, 0xe047,
- 0xe8a6, 0xf052, 0xf849, 0x085a, 0x10ad, 0x184c, 0x1c17, 0x1e18,
- // Entry 40 - 7F
- 0x20b3, 0x2219, 0x2920, 0x2c1a, 0x2e1b, 0x3051, 0x341c, 0x361d,
- 0x3853, 0x3d2e, 0x445c, 0x4c4a, 0x5454, 0x5ca8, 0x5f5f, 0x644d,
- 0x684b, 0x7050, 0x7856, 0x7e90, 0x8059, 0x885d, 0x941e, 0x965e,
- 0x983b, 0xa063, 0xa864, 0xac65, 0xb469, 0xbd1a, 0xc486, 0xcc6f,
- 0xce6f, 0xd06d, 0xd26a, 0xd476, 0xdc74, 0xde88, 0xe473, 0xec72,
- 0xf031, 0xf279, 0xf478, 0xfc7e, 0x04e5, 0x0921, 0x0c62, 0x147a,
- 0x187d, 0x1c83, 0x26ed, 0x2860, 0x2c5f, 0x3060, 0x4080, 0x4881,
- 0x50a7, 0x5887, 0x6082, 0x687c, 0x7085, 0x788a, 0x8089, 0x8884,
- // Entry 80 - BF
- 0x908c, 0x9891, 0x9c8e, 0xa138, 0xa88f, 0xb08d, 0xb892, 0xc09d,
- 0xc899, 0xd095, 0xd89c, 0xe09b, 0xe896, 0xf097, 0xf89e, 0x004f,
- 0x08a0, 0x10a2, 0x1cae, 0x20a1, 0x28a4, 0x30aa, 0x34ab, 0x3cac,
- 0x42a5, 0x44af, 0x461f, 0x4cb0, 0x54b5, 0x58b8, 0x5cb4, 0x64b9,
- 0x6cb2, 0x70b6, 0x74b7, 0x7cc6, 0x84bf, 0x8cce, 0x94d0, 0x9ccd,
- 0xa4c3, 0xaccb, 0xb4c8, 0xbcc9, 0xc0cc, 0xc8cf, 0xd8bb, 0xe0c5,
- 0xe4bc, 0xe6bd, 0xe8ca, 0xf0ba, 0xf8d1, 0x00e1, 0x08d2, 0x10dd,
- 0x18db, 0x20d9, 0x2429, 0x265b, 0x2a30, 0x2d1b, 0x2e40, 0x30de,
- // Entry C0 - FF
- 0x38d3, 0x493f, 0x54e0, 0x5cd8, 0x64d4, 0x6cd6, 0x74df, 0x7cd5,
- 0x84da, 0x88c7, 0x8b33, 0x8e75, 0x90c0, 0x92f0, 0x94e8, 0x9ee2,
- 0xace6, 0xb0f1, 0xb8e4, 0xc0e7, 0xc8eb, 0xd0e9, 0xd8ee, 0xe08b,
- 0xe526, 0xecec, 0xf4f3, 0xfd02, 0x0504, 0x0706, 0x0d07, 0x183c,
- 0x1d0e, 0x26a9, 0x2826, 0x2cb1, 0x2ebe, 0x34ea, 0x3d39, 0x4513,
- 0x4d18, 0x5508, 0x5d14, 0x6105, 0x650a, 0x6d12, 0x7d0d, 0x7f11,
- 0x813e, 0x830f, 0x8515, 0x8d61, 0x9964, 0xa15d, 0xa86e, 0xb117,
- 0xb30b, 0xb86c, 0xc10b, 0xc916, 0xd110, 0xd91d, 0xe10c, 0xe84e,
- // Entry 100 - 13F
- 0xf11c, 0xf524, 0xf923, 0x0122, 0x0925, 0x1129, 0x192c, 0x2023,
- 0x2928, 0x312b, 0x3727, 0x391f, 0x3d2d, 0x4131, 0x4930, 0x4ec2,
- 0x5519, 0x646b, 0x747b, 0x7e7f, 0x809f, 0x8298, 0x852f, 0x9135,
- 0xa53d, 0xac37, 0xb536, 0xb937, 0xbd3b, 0xd940, 0xe542, 0xed5e,
- 0xef5e, 0xf657, 0xfd62, 0x7c20, 0x7ef4, 0x80f5, 0x82f6, 0x84f7,
- 0x86f8, 0x88f9, 0x8afa, 0x8cfb, 0x8e70, 0x90fd, 0x92fe, 0x94ff,
- 0x9700, 0x9901, 0x9b43, 0x9d44, 0x9f45, 0xa146, 0xa347, 0xa548,
- 0xa749, 0xa94a, 0xab4b, 0xad4c, 0xaf4d, 0xb14e, 0xb34f, 0xb550,
- // Entry 140 - 17F
- 0xb751, 0xb952, 0xbb53, 0xbd54, 0xbf55, 0xc156, 0xc357, 0xc558,
- 0xc759, 0xc95a, 0xcb5b, 0xcd5c, 0xcf65,
-}
-
-// Size: 2014 bytes
-var variantIndex = map[string]uint8{
- "1606nict": 0x0,
- "1694acad": 0x1,
- "1901": 0x2,
- "1959acad": 0x3,
- "1994": 0x61,
- "1996": 0x4,
- "abl1943": 0x5,
- "akuapem": 0x6,
- "alalc97": 0x63,
- "aluku": 0x7,
- "ao1990": 0x8,
- "aranes": 0x9,
- "arevela": 0xa,
- "arevmda": 0xb,
- "arkaika": 0xc,
- "asante": 0xd,
- "auvern": 0xe,
- "baku1926": 0xf,
- "balanka": 0x10,
- "barla": 0x11,
- "basiceng": 0x12,
- "bauddha": 0x13,
- "biscayan": 0x14,
- "biske": 0x5c,
- "bohoric": 0x15,
- "boont": 0x16,
- "bornholm": 0x17,
- "cisaup": 0x18,
- "colb1945": 0x19,
- "cornu": 0x1a,
- "creiss": 0x1b,
- "dajnko": 0x1c,
- "ekavsk": 0x1d,
- "emodeng": 0x1e,
- "fonipa": 0x64,
- "fonkirsh": 0x65,
- "fonnapa": 0x66,
- "fonupa": 0x67,
- "fonxsamp": 0x68,
- "gascon": 0x1f,
- "grclass": 0x20,
- "grital": 0x21,
- "grmistr": 0x22,
- "hepburn": 0x23,
- "heploc": 0x62,
- "hognorsk": 0x24,
- "hsistemo": 0x25,
- "ijekavsk": 0x26,
- "itihasa": 0x27,
- "ivanchov": 0x28,
- "jauer": 0x29,
- "jyutping": 0x2a,
- "kkcor": 0x2b,
- "kociewie": 0x2c,
- "kscor": 0x2d,
- "laukika": 0x2e,
- "lemosin": 0x2f,
- "lengadoc": 0x30,
- "lipaw": 0x5d,
- "luna1918": 0x31,
- "metelko": 0x32,
- "monoton": 0x33,
- "ndyuka": 0x34,
- "nedis": 0x35,
- "newfound": 0x36,
- "nicard": 0x37,
- "njiva": 0x5e,
- "nulik": 0x38,
- "osojs": 0x5f,
- "oxendict": 0x39,
- "pahawh2": 0x3a,
- "pahawh3": 0x3b,
- "pahawh4": 0x3c,
- "pamaka": 0x3d,
- "peano": 0x3e,
- "petr1708": 0x3f,
- "pinyin": 0x40,
- "polyton": 0x41,
- "provenc": 0x42,
- "puter": 0x43,
- "rigik": 0x44,
- "rozaj": 0x45,
- "rumgr": 0x46,
- "scotland": 0x47,
- "scouse": 0x48,
- "simple": 0x69,
- "solba": 0x60,
- "sotav": 0x49,
- "spanglis": 0x4a,
- "surmiran": 0x4b,
- "sursilv": 0x4c,
- "sutsilv": 0x4d,
- "tarask": 0x4e,
- "tongyong": 0x4f,
- "tunumiit": 0x50,
- "uccor": 0x51,
- "ucrcor": 0x52,
- "ulster": 0x53,
- "unifon": 0x54,
- "vaidika": 0x55,
- "valencia": 0x56,
- "vallader": 0x57,
- "vecdruka": 0x58,
- "vivaraup": 0x59,
- "wadegile": 0x5a,
- "xsistemo": 0x5b,
-}
-
-// variantNumSpecialized is the number of specialized variants in variants.
-const variantNumSpecialized = 99
-
-// nRegionGroups is the number of region groups.
-const nRegionGroups = 33
-
-type likelyLangRegion struct {
- lang uint16
- region uint16
-}
-
-// likelyScript is a lookup table, indexed by scriptID, for the most likely
-// languages and regions given a script.
-// Size: 1040 bytes, 260 elements
-var likelyScript = [260]likelyLangRegion{
- 1: {lang: 0x14e, region: 0x84},
- 3: {lang: 0x2a2, region: 0x106},
- 4: {lang: 0x1f, region: 0x99},
- 5: {lang: 0x3a, region: 0x6b},
- 7: {lang: 0x3b, region: 0x9c},
- 8: {lang: 0x1d7, region: 0x28},
- 9: {lang: 0x13, region: 0x9c},
- 10: {lang: 0x5b, region: 0x95},
- 11: {lang: 0x60, region: 0x52},
- 12: {lang: 0xb9, region: 0xb4},
- 13: {lang: 0x63, region: 0x95},
- 14: {lang: 0xa5, region: 0x35},
- 15: {lang: 0x3e9, region: 0x99},
- 17: {lang: 0x529, region: 0x12e},
- 18: {lang: 0x3b1, region: 0x99},
- 19: {lang: 0x15e, region: 0x78},
- 20: {lang: 0xc2, region: 0x95},
- 21: {lang: 0x9d, region: 0xe7},
- 22: {lang: 0xdb, region: 0x35},
- 23: {lang: 0xf3, region: 0x49},
- 24: {lang: 0x4f0, region: 0x12b},
- 25: {lang: 0xe7, region: 0x13e},
- 26: {lang: 0xe5, region: 0x135},
- 29: {lang: 0xf1, region: 0x6b},
- 31: {lang: 0x1a0, region: 0x5d},
- 32: {lang: 0x3e2, region: 0x106},
- 34: {lang: 0x1be, region: 0x99},
- 38: {lang: 0x15e, region: 0x78},
- 41: {lang: 0x133, region: 0x6b},
- 42: {lang: 0x431, region: 0x27},
- 44: {lang: 0x27, region: 0x6f},
- 46: {lang: 0x210, region: 0x7d},
- 47: {lang: 0xfe, region: 0x38},
- 49: {lang: 0x19b, region: 0x99},
- 50: {lang: 0x19e, region: 0x130},
- 51: {lang: 0x3e9, region: 0x99},
- 52: {lang: 0x136, region: 0x87},
- 53: {lang: 0x1a4, region: 0x99},
- 54: {lang: 0x39d, region: 0x99},
- 55: {lang: 0x529, region: 0x12e},
- 56: {lang: 0x254, region: 0xab},
- 57: {lang: 0x529, region: 0x53},
- 58: {lang: 0x1cb, region: 0xe7},
- 59: {lang: 0x529, region: 0x53},
- 60: {lang: 0x529, region: 0x12e},
- 61: {lang: 0x2fd, region: 0x9b},
- 62: {lang: 0x1bc, region: 0x97},
- 63: {lang: 0x200, region: 0xa2},
- 64: {lang: 0x1c5, region: 0x12b},
- 65: {lang: 0x1ca, region: 0xaf},
- 68: {lang: 0x1d5, region: 0x92},
- 70: {lang: 0x142, region: 0x9e},
- 71: {lang: 0x254, region: 0xab},
- 72: {lang: 0x20e, region: 0x95},
- 73: {lang: 0x200, region: 0xa2},
- 75: {lang: 0x135, region: 0xc4},
- 76: {lang: 0x200, region: 0xa2},
- 77: {lang: 0x3bb, region: 0xe8},
- 78: {lang: 0x24a, region: 0xa6},
- 79: {lang: 0x3fa, region: 0x99},
- 82: {lang: 0x251, region: 0x99},
- 83: {lang: 0x254, region: 0xab},
- 85: {lang: 0x88, region: 0x99},
- 86: {lang: 0x370, region: 0x123},
- 87: {lang: 0x2b8, region: 0xaf},
- 92: {lang: 0x29f, region: 0x99},
- 93: {lang: 0x2a8, region: 0x99},
- 94: {lang: 0x28f, region: 0x87},
- 95: {lang: 0x1a0, region: 0x87},
- 96: {lang: 0x2ac, region: 0x53},
- 98: {lang: 0x4f4, region: 0x12b},
- 99: {lang: 0x4f5, region: 0x12b},
- 100: {lang: 0x1be, region: 0x99},
- 102: {lang: 0x337, region: 0x9c},
- 103: {lang: 0x4f7, region: 0x53},
- 104: {lang: 0xa9, region: 0x53},
- 107: {lang: 0x2e8, region: 0x112},
- 108: {lang: 0x4f8, region: 0x10b},
- 109: {lang: 0x4f8, region: 0x10b},
- 110: {lang: 0x304, region: 0x99},
- 111: {lang: 0x31b, region: 0x99},
- 112: {lang: 0x30b, region: 0x53},
- 114: {lang: 0x31e, region: 0x35},
- 115: {lang: 0x30e, region: 0x99},
- 116: {lang: 0x414, region: 0xe8},
- 117: {lang: 0x331, region: 0xc4},
- 119: {lang: 0x4f9, region: 0x108},
- 120: {lang: 0x3b, region: 0xa1},
- 121: {lang: 0x353, region: 0xdb},
- 124: {lang: 0x2d0, region: 0x84},
- 125: {lang: 0x52a, region: 0x53},
- 126: {lang: 0x403, region: 0x96},
- 127: {lang: 0x3ee, region: 0x99},
- 128: {lang: 0x39b, region: 0xc5},
- 129: {lang: 0x395, region: 0x99},
- 130: {lang: 0x399, region: 0x135},
- 131: {lang: 0x429, region: 0x115},
- 133: {lang: 0x3b, region: 0x11c},
- 134: {lang: 0xfd, region: 0xc4},
- 137: {lang: 0x27d, region: 0x106},
- 138: {lang: 0x2c9, region: 0x53},
- 139: {lang: 0x39f, region: 0x9c},
- 140: {lang: 0x39f, region: 0x53},
- 142: {lang: 0x3ad, region: 0xb0},
- 144: {lang: 0x1c6, region: 0x53},
- 145: {lang: 0x4fd, region: 0x9c},
- 198: {lang: 0x3cb, region: 0x95},
- 201: {lang: 0x372, region: 0x10c},
- 202: {lang: 0x420, region: 0x97},
- 204: {lang: 0x4ff, region: 0x15e},
- 205: {lang: 0x3f0, region: 0x99},
- 206: {lang: 0x45, region: 0x135},
- 207: {lang: 0x139, region: 0x7b},
- 208: {lang: 0x3e9, region: 0x99},
- 210: {lang: 0x3e9, region: 0x99},
- 211: {lang: 0x3fa, region: 0x99},
- 212: {lang: 0x40c, region: 0xb3},
- 215: {lang: 0x433, region: 0x99},
- 216: {lang: 0xef, region: 0xc5},
- 217: {lang: 0x43e, region: 0x95},
- 218: {lang: 0x44d, region: 0x35},
- 219: {lang: 0x44e, region: 0x9b},
- 223: {lang: 0x45a, region: 0xe7},
- 224: {lang: 0x11a, region: 0x99},
- 225: {lang: 0x45e, region: 0x53},
- 226: {lang: 0x232, region: 0x53},
- 227: {lang: 0x450, region: 0x99},
- 228: {lang: 0x4a5, region: 0x53},
- 229: {lang: 0x9f, region: 0x13e},
- 230: {lang: 0x461, region: 0x99},
- 232: {lang: 0x528, region: 0xba},
- 233: {lang: 0x153, region: 0xe7},
- 234: {lang: 0x128, region: 0xcd},
- 235: {lang: 0x46b, region: 0x123},
- 236: {lang: 0xa9, region: 0x53},
- 237: {lang: 0x2ce, region: 0x99},
- 240: {lang: 0x4ad, region: 0x11c},
- 241: {lang: 0x4be, region: 0xb4},
- 244: {lang: 0x1ce, region: 0x99},
- 247: {lang: 0x3a9, region: 0x9c},
- 248: {lang: 0x22, region: 0x9b},
- 250: {lang: 0x1ea, region: 0x53},
- 251: {lang: 0xef, region: 0xc5},
-}
-
-type likelyScriptRegion struct {
- region uint16
- script uint16
- flags uint8
-}
-
-// likelyLang is a lookup table, indexed by langID, for the most likely
-// scripts and regions given incomplete information. If more entries exist for a
-// given language, region and script are the index and size respectively
-// of the list in likelyLangList.
-// Size: 7980 bytes, 1330 elements
-var likelyLang = [1330]likelyScriptRegion{
- 0: {region: 0x135, script: 0x5a, flags: 0x0},
- 1: {region: 0x6f, script: 0x5a, flags: 0x0},
- 2: {region: 0x165, script: 0x5a, flags: 0x0},
- 3: {region: 0x165, script: 0x5a, flags: 0x0},
- 4: {region: 0x165, script: 0x5a, flags: 0x0},
- 5: {region: 0x7d, script: 0x20, flags: 0x0},
- 6: {region: 0x165, script: 0x5a, flags: 0x0},
- 7: {region: 0x165, script: 0x20, flags: 0x0},
- 8: {region: 0x80, script: 0x5a, flags: 0x0},
- 9: {region: 0x165, script: 0x5a, flags: 0x0},
- 10: {region: 0x165, script: 0x5a, flags: 0x0},
- 11: {region: 0x165, script: 0x5a, flags: 0x0},
- 12: {region: 0x95, script: 0x5a, flags: 0x0},
- 13: {region: 0x131, script: 0x5a, flags: 0x0},
- 14: {region: 0x80, script: 0x5a, flags: 0x0},
- 15: {region: 0x165, script: 0x5a, flags: 0x0},
- 16: {region: 0x165, script: 0x5a, flags: 0x0},
- 17: {region: 0x106, script: 0x20, flags: 0x0},
- 18: {region: 0x165, script: 0x5a, flags: 0x0},
- 19: {region: 0x9c, script: 0x9, flags: 0x0},
- 20: {region: 0x128, script: 0x5, flags: 0x0},
- 21: {region: 0x165, script: 0x5a, flags: 0x0},
- 22: {region: 0x161, script: 0x5a, flags: 0x0},
- 23: {region: 0x165, script: 0x5a, flags: 0x0},
- 24: {region: 0x165, script: 0x5a, flags: 0x0},
- 25: {region: 0x165, script: 0x5a, flags: 0x0},
- 26: {region: 0x165, script: 0x5a, flags: 0x0},
- 27: {region: 0x165, script: 0x5a, flags: 0x0},
- 28: {region: 0x52, script: 0x5a, flags: 0x0},
- 29: {region: 0x165, script: 0x5a, flags: 0x0},
- 30: {region: 0x165, script: 0x5a, flags: 0x0},
- 31: {region: 0x99, script: 0x4, flags: 0x0},
- 32: {region: 0x165, script: 0x5a, flags: 0x0},
- 33: {region: 0x80, script: 0x5a, flags: 0x0},
- 34: {region: 0x9b, script: 0xf8, flags: 0x0},
- 35: {region: 0x165, script: 0x5a, flags: 0x0},
- 36: {region: 0x165, script: 0x5a, flags: 0x0},
- 37: {region: 0x14d, script: 0x5a, flags: 0x0},
- 38: {region: 0x106, script: 0x20, flags: 0x0},
- 39: {region: 0x6f, script: 0x2c, flags: 0x0},
- 40: {region: 0x165, script: 0x5a, flags: 0x0},
- 41: {region: 0x165, script: 0x5a, flags: 0x0},
- 42: {region: 0xd6, script: 0x5a, flags: 0x0},
- 43: {region: 0x165, script: 0x5a, flags: 0x0},
- 45: {region: 0x165, script: 0x5a, flags: 0x0},
- 46: {region: 0x165, script: 0x5a, flags: 0x0},
- 47: {region: 0x165, script: 0x5a, flags: 0x0},
- 48: {region: 0x165, script: 0x5a, flags: 0x0},
- 49: {region: 0x165, script: 0x5a, flags: 0x0},
- 50: {region: 0x165, script: 0x5a, flags: 0x0},
- 51: {region: 0x95, script: 0x5a, flags: 0x0},
- 52: {region: 0x165, script: 0x5, flags: 0x0},
- 53: {region: 0x122, script: 0x5, flags: 0x0},
- 54: {region: 0x165, script: 0x5a, flags: 0x0},
- 55: {region: 0x165, script: 0x5a, flags: 0x0},
- 56: {region: 0x165, script: 0x5a, flags: 0x0},
- 57: {region: 0x165, script: 0x5a, flags: 0x0},
- 58: {region: 0x6b, script: 0x5, flags: 0x0},
- 59: {region: 0x0, script: 0x3, flags: 0x1},
- 60: {region: 0x165, script: 0x5a, flags: 0x0},
- 61: {region: 0x51, script: 0x5a, flags: 0x0},
- 62: {region: 0x3f, script: 0x5a, flags: 0x0},
- 63: {region: 0x67, script: 0x5, flags: 0x0},
- 65: {region: 0xba, script: 0x5, flags: 0x0},
- 66: {region: 0x6b, script: 0x5, flags: 0x0},
- 67: {region: 0x99, script: 0xe, flags: 0x0},
- 68: {region: 0x12f, script: 0x5a, flags: 0x0},
- 69: {region: 0x135, script: 0xce, flags: 0x0},
- 70: {region: 0x165, script: 0x5a, flags: 0x0},
- 71: {region: 0x165, script: 0x5a, flags: 0x0},
- 72: {region: 0x6e, script: 0x5a, flags: 0x0},
- 73: {region: 0x165, script: 0x5a, flags: 0x0},
- 74: {region: 0x165, script: 0x5a, flags: 0x0},
- 75: {region: 0x49, script: 0x5a, flags: 0x0},
- 76: {region: 0x165, script: 0x5a, flags: 0x0},
- 77: {region: 0x106, script: 0x20, flags: 0x0},
- 78: {region: 0x165, script: 0x5, flags: 0x0},
- 79: {region: 0x165, script: 0x5a, flags: 0x0},
- 80: {region: 0x165, script: 0x5a, flags: 0x0},
- 81: {region: 0x165, script: 0x5a, flags: 0x0},
- 82: {region: 0x99, script: 0x22, flags: 0x0},
- 83: {region: 0x165, script: 0x5a, flags: 0x0},
- 84: {region: 0x165, script: 0x5a, flags: 0x0},
- 85: {region: 0x165, script: 0x5a, flags: 0x0},
- 86: {region: 0x3f, script: 0x5a, flags: 0x0},
- 87: {region: 0x165, script: 0x5a, flags: 0x0},
- 88: {region: 0x3, script: 0x5, flags: 0x1},
- 89: {region: 0x106, script: 0x20, flags: 0x0},
- 90: {region: 0xe8, script: 0x5, flags: 0x0},
- 91: {region: 0x95, script: 0x5a, flags: 0x0},
- 92: {region: 0xdb, script: 0x22, flags: 0x0},
- 93: {region: 0x2e, script: 0x5a, flags: 0x0},
- 94: {region: 0x52, script: 0x5a, flags: 0x0},
- 95: {region: 0x165, script: 0x5a, flags: 0x0},
- 96: {region: 0x52, script: 0xb, flags: 0x0},
- 97: {region: 0x165, script: 0x5a, flags: 0x0},
- 98: {region: 0x165, script: 0x5a, flags: 0x0},
- 99: {region: 0x95, script: 0x5a, flags: 0x0},
- 100: {region: 0x165, script: 0x5a, flags: 0x0},
- 101: {region: 0x52, script: 0x5a, flags: 0x0},
- 102: {region: 0x165, script: 0x5a, flags: 0x0},
- 103: {region: 0x165, script: 0x5a, flags: 0x0},
- 104: {region: 0x165, script: 0x5a, flags: 0x0},
- 105: {region: 0x165, script: 0x5a, flags: 0x0},
- 106: {region: 0x4f, script: 0x5a, flags: 0x0},
- 107: {region: 0x165, script: 0x5a, flags: 0x0},
- 108: {region: 0x165, script: 0x5a, flags: 0x0},
- 109: {region: 0x165, script: 0x5a, flags: 0x0},
- 110: {region: 0x165, script: 0x2c, flags: 0x0},
- 111: {region: 0x165, script: 0x5a, flags: 0x0},
- 112: {region: 0x165, script: 0x5a, flags: 0x0},
- 113: {region: 0x47, script: 0x20, flags: 0x0},
- 114: {region: 0x165, script: 0x5a, flags: 0x0},
- 115: {region: 0x165, script: 0x5a, flags: 0x0},
- 116: {region: 0x10b, script: 0x5, flags: 0x0},
- 117: {region: 0x162, script: 0x5a, flags: 0x0},
- 118: {region: 0x165, script: 0x5a, flags: 0x0},
- 119: {region: 0x95, script: 0x5a, flags: 0x0},
- 120: {region: 0x165, script: 0x5a, flags: 0x0},
- 121: {region: 0x12f, script: 0x5a, flags: 0x0},
- 122: {region: 0x52, script: 0x5a, flags: 0x0},
- 123: {region: 0x99, script: 0xe3, flags: 0x0},
- 124: {region: 0xe8, script: 0x5, flags: 0x0},
- 125: {region: 0x99, script: 0x22, flags: 0x0},
- 126: {region: 0x38, script: 0x20, flags: 0x0},
- 127: {region: 0x99, script: 0x22, flags: 0x0},
- 128: {region: 0xe8, script: 0x5, flags: 0x0},
- 129: {region: 0x12b, script: 0x34, flags: 0x0},
- 131: {region: 0x99, script: 0x22, flags: 0x0},
- 132: {region: 0x165, script: 0x5a, flags: 0x0},
- 133: {region: 0x99, script: 0x22, flags: 0x0},
- 134: {region: 0xe7, script: 0x5a, flags: 0x0},
- 135: {region: 0x165, script: 0x5a, flags: 0x0},
- 136: {region: 0x99, script: 0x22, flags: 0x0},
- 137: {region: 0x165, script: 0x5a, flags: 0x0},
- 138: {region: 0x13f, script: 0x5a, flags: 0x0},
- 139: {region: 0x165, script: 0x5a, flags: 0x0},
- 140: {region: 0x165, script: 0x5a, flags: 0x0},
- 141: {region: 0xe7, script: 0x5a, flags: 0x0},
- 142: {region: 0x165, script: 0x5a, flags: 0x0},
- 143: {region: 0xd6, script: 0x5a, flags: 0x0},
- 144: {region: 0x165, script: 0x5a, flags: 0x0},
- 145: {region: 0x165, script: 0x5a, flags: 0x0},
- 146: {region: 0x165, script: 0x5a, flags: 0x0},
- 147: {region: 0x165, script: 0x2c, flags: 0x0},
- 148: {region: 0x99, script: 0x22, flags: 0x0},
- 149: {region: 0x95, script: 0x5a, flags: 0x0},
- 150: {region: 0x165, script: 0x5a, flags: 0x0},
- 151: {region: 0x165, script: 0x5a, flags: 0x0},
- 152: {region: 0x114, script: 0x5a, flags: 0x0},
- 153: {region: 0x165, script: 0x5a, flags: 0x0},
- 154: {region: 0x165, script: 0x5a, flags: 0x0},
- 155: {region: 0x52, script: 0x5a, flags: 0x0},
- 156: {region: 0x165, script: 0x5a, flags: 0x0},
- 157: {region: 0xe7, script: 0x5a, flags: 0x0},
- 158: {region: 0x165, script: 0x5a, flags: 0x0},
- 159: {region: 0x13e, script: 0xe5, flags: 0x0},
- 160: {region: 0xc3, script: 0x5a, flags: 0x0},
- 161: {region: 0x165, script: 0x5a, flags: 0x0},
- 162: {region: 0x165, script: 0x5a, flags: 0x0},
- 163: {region: 0xc3, script: 0x5a, flags: 0x0},
- 164: {region: 0x165, script: 0x5a, flags: 0x0},
- 165: {region: 0x35, script: 0xe, flags: 0x0},
- 166: {region: 0x165, script: 0x5a, flags: 0x0},
- 167: {region: 0x165, script: 0x5a, flags: 0x0},
- 168: {region: 0x165, script: 0x5a, flags: 0x0},
- 169: {region: 0x53, script: 0xec, flags: 0x0},
- 170: {region: 0x165, script: 0x5a, flags: 0x0},
- 171: {region: 0x165, script: 0x5a, flags: 0x0},
- 172: {region: 0x165, script: 0x5a, flags: 0x0},
- 173: {region: 0x99, script: 0xe, flags: 0x0},
- 174: {region: 0x165, script: 0x5a, flags: 0x0},
- 175: {region: 0x9c, script: 0x5, flags: 0x0},
- 176: {region: 0x165, script: 0x5a, flags: 0x0},
- 177: {region: 0x4f, script: 0x5a, flags: 0x0},
- 178: {region: 0x78, script: 0x5a, flags: 0x0},
- 179: {region: 0x99, script: 0x22, flags: 0x0},
- 180: {region: 0xe8, script: 0x5, flags: 0x0},
- 181: {region: 0x99, script: 0x22, flags: 0x0},
- 182: {region: 0x165, script: 0x5a, flags: 0x0},
- 183: {region: 0x33, script: 0x5a, flags: 0x0},
- 184: {region: 0x165, script: 0x5a, flags: 0x0},
- 185: {region: 0xb4, script: 0xc, flags: 0x0},
- 186: {region: 0x52, script: 0x5a, flags: 0x0},
- 187: {region: 0x165, script: 0x2c, flags: 0x0},
- 188: {region: 0xe7, script: 0x5a, flags: 0x0},
- 189: {region: 0x165, script: 0x5a, flags: 0x0},
- 190: {region: 0xe8, script: 0x22, flags: 0x0},
- 191: {region: 0x106, script: 0x20, flags: 0x0},
- 192: {region: 0x15f, script: 0x5a, flags: 0x0},
- 193: {region: 0x165, script: 0x5a, flags: 0x0},
- 194: {region: 0x95, script: 0x5a, flags: 0x0},
- 195: {region: 0x165, script: 0x5a, flags: 0x0},
- 196: {region: 0x52, script: 0x5a, flags: 0x0},
- 197: {region: 0x165, script: 0x5a, flags: 0x0},
- 198: {region: 0x165, script: 0x5a, flags: 0x0},
- 199: {region: 0x165, script: 0x5a, flags: 0x0},
- 200: {region: 0x86, script: 0x5a, flags: 0x0},
- 201: {region: 0x165, script: 0x5a, flags: 0x0},
- 202: {region: 0x165, script: 0x5a, flags: 0x0},
- 203: {region: 0x165, script: 0x5a, flags: 0x0},
- 204: {region: 0x165, script: 0x5a, flags: 0x0},
- 205: {region: 0x6d, script: 0x2c, flags: 0x0},
- 206: {region: 0x165, script: 0x5a, flags: 0x0},
- 207: {region: 0x165, script: 0x5a, flags: 0x0},
- 208: {region: 0x52, script: 0x5a, flags: 0x0},
- 209: {region: 0x165, script: 0x5a, flags: 0x0},
- 210: {region: 0x165, script: 0x5a, flags: 0x0},
- 211: {region: 0xc3, script: 0x5a, flags: 0x0},
- 212: {region: 0x165, script: 0x5a, flags: 0x0},
- 213: {region: 0x165, script: 0x5a, flags: 0x0},
- 214: {region: 0x165, script: 0x5a, flags: 0x0},
- 215: {region: 0x6e, script: 0x5a, flags: 0x0},
- 216: {region: 0x165, script: 0x5a, flags: 0x0},
- 217: {region: 0x165, script: 0x5a, flags: 0x0},
- 218: {region: 0xd6, script: 0x5a, flags: 0x0},
- 219: {region: 0x35, script: 0x16, flags: 0x0},
- 220: {region: 0x106, script: 0x20, flags: 0x0},
- 221: {region: 0xe7, script: 0x5a, flags: 0x0},
- 222: {region: 0x165, script: 0x5a, flags: 0x0},
- 223: {region: 0x131, script: 0x5a, flags: 0x0},
- 224: {region: 0x8a, script: 0x5a, flags: 0x0},
- 225: {region: 0x75, script: 0x5a, flags: 0x0},
- 226: {region: 0x106, script: 0x20, flags: 0x0},
- 227: {region: 0x135, script: 0x5a, flags: 0x0},
- 228: {region: 0x49, script: 0x5a, flags: 0x0},
- 229: {region: 0x135, script: 0x1a, flags: 0x0},
- 230: {region: 0xa6, script: 0x5, flags: 0x0},
- 231: {region: 0x13e, script: 0x19, flags: 0x0},
- 232: {region: 0x165, script: 0x5a, flags: 0x0},
- 233: {region: 0x9b, script: 0x5, flags: 0x0},
- 234: {region: 0x165, script: 0x5a, flags: 0x0},
- 235: {region: 0x165, script: 0x5a, flags: 0x0},
- 236: {region: 0x165, script: 0x5a, flags: 0x0},
- 237: {region: 0x165, script: 0x5a, flags: 0x0},
- 238: {region: 0x165, script: 0x5a, flags: 0x0},
- 239: {region: 0xc5, script: 0xd8, flags: 0x0},
- 240: {region: 0x78, script: 0x5a, flags: 0x0},
- 241: {region: 0x6b, script: 0x1d, flags: 0x0},
- 242: {region: 0xe7, script: 0x5a, flags: 0x0},
- 243: {region: 0x49, script: 0x17, flags: 0x0},
- 244: {region: 0x130, script: 0x20, flags: 0x0},
- 245: {region: 0x49, script: 0x17, flags: 0x0},
- 246: {region: 0x49, script: 0x17, flags: 0x0},
- 247: {region: 0x49, script: 0x17, flags: 0x0},
- 248: {region: 0x49, script: 0x17, flags: 0x0},
- 249: {region: 0x10a, script: 0x5a, flags: 0x0},
- 250: {region: 0x5e, script: 0x5a, flags: 0x0},
- 251: {region: 0xe9, script: 0x5a, flags: 0x0},
- 252: {region: 0x49, script: 0x17, flags: 0x0},
- 253: {region: 0xc4, script: 0x86, flags: 0x0},
- 254: {region: 0x8, script: 0x2, flags: 0x1},
- 255: {region: 0x106, script: 0x20, flags: 0x0},
- 256: {region: 0x7b, script: 0x5a, flags: 0x0},
- 257: {region: 0x63, script: 0x5a, flags: 0x0},
- 258: {region: 0x165, script: 0x5a, flags: 0x0},
- 259: {region: 0x165, script: 0x5a, flags: 0x0},
- 260: {region: 0x165, script: 0x5a, flags: 0x0},
- 261: {region: 0x165, script: 0x5a, flags: 0x0},
- 262: {region: 0x135, script: 0x5a, flags: 0x0},
- 263: {region: 0x106, script: 0x20, flags: 0x0},
- 264: {region: 0xa4, script: 0x5a, flags: 0x0},
- 265: {region: 0x165, script: 0x5a, flags: 0x0},
- 266: {region: 0x165, script: 0x5a, flags: 0x0},
- 267: {region: 0x99, script: 0x5, flags: 0x0},
- 268: {region: 0x165, script: 0x5a, flags: 0x0},
- 269: {region: 0x60, script: 0x5a, flags: 0x0},
- 270: {region: 0x165, script: 0x5a, flags: 0x0},
- 271: {region: 0x49, script: 0x5a, flags: 0x0},
- 272: {region: 0x165, script: 0x5a, flags: 0x0},
- 273: {region: 0x165, script: 0x5a, flags: 0x0},
- 274: {region: 0x165, script: 0x5a, flags: 0x0},
- 275: {region: 0x165, script: 0x5, flags: 0x0},
- 276: {region: 0x49, script: 0x5a, flags: 0x0},
- 277: {region: 0x165, script: 0x5a, flags: 0x0},
- 278: {region: 0x165, script: 0x5a, flags: 0x0},
- 279: {region: 0xd4, script: 0x5a, flags: 0x0},
- 280: {region: 0x4f, script: 0x5a, flags: 0x0},
- 281: {region: 0x165, script: 0x5a, flags: 0x0},
- 282: {region: 0x99, script: 0x5, flags: 0x0},
- 283: {region: 0x165, script: 0x5a, flags: 0x0},
- 284: {region: 0x165, script: 0x5a, flags: 0x0},
- 285: {region: 0x165, script: 0x5a, flags: 0x0},
- 286: {region: 0x165, script: 0x2c, flags: 0x0},
- 287: {region: 0x60, script: 0x5a, flags: 0x0},
- 288: {region: 0xc3, script: 0x5a, flags: 0x0},
- 289: {region: 0xd0, script: 0x5a, flags: 0x0},
- 290: {region: 0x165, script: 0x5a, flags: 0x0},
- 291: {region: 0xdb, script: 0x22, flags: 0x0},
- 292: {region: 0x52, script: 0x5a, flags: 0x0},
- 293: {region: 0x165, script: 0x5a, flags: 0x0},
- 294: {region: 0x165, script: 0x5a, flags: 0x0},
- 295: {region: 0x165, script: 0x5a, flags: 0x0},
- 296: {region: 0xcd, script: 0xea, flags: 0x0},
- 297: {region: 0x165, script: 0x5a, flags: 0x0},
- 298: {region: 0x165, script: 0x5a, flags: 0x0},
- 299: {region: 0x114, script: 0x5a, flags: 0x0},
- 300: {region: 0x37, script: 0x5a, flags: 0x0},
- 301: {region: 0x43, script: 0xec, flags: 0x0},
- 302: {region: 0x165, script: 0x5a, flags: 0x0},
- 303: {region: 0xa4, script: 0x5a, flags: 0x0},
- 304: {region: 0x80, script: 0x5a, flags: 0x0},
- 305: {region: 0xd6, script: 0x5a, flags: 0x0},
- 306: {region: 0x9e, script: 0x5a, flags: 0x0},
- 307: {region: 0x6b, script: 0x29, flags: 0x0},
- 308: {region: 0x165, script: 0x5a, flags: 0x0},
- 309: {region: 0xc4, script: 0x4b, flags: 0x0},
- 310: {region: 0x87, script: 0x34, flags: 0x0},
- 311: {region: 0x165, script: 0x5a, flags: 0x0},
- 312: {region: 0x165, script: 0x5a, flags: 0x0},
- 313: {region: 0xa, script: 0x2, flags: 0x1},
- 314: {region: 0x165, script: 0x5a, flags: 0x0},
- 315: {region: 0x165, script: 0x5a, flags: 0x0},
- 316: {region: 0x1, script: 0x5a, flags: 0x0},
- 317: {region: 0x165, script: 0x5a, flags: 0x0},
- 318: {region: 0x6e, script: 0x5a, flags: 0x0},
- 319: {region: 0x135, script: 0x5a, flags: 0x0},
- 320: {region: 0x6a, script: 0x5a, flags: 0x0},
- 321: {region: 0x165, script: 0x5a, flags: 0x0},
- 322: {region: 0x9e, script: 0x46, flags: 0x0},
- 323: {region: 0x165, script: 0x5a, flags: 0x0},
- 324: {region: 0x165, script: 0x5a, flags: 0x0},
- 325: {region: 0x6e, script: 0x5a, flags: 0x0},
- 326: {region: 0x52, script: 0x5a, flags: 0x0},
- 327: {region: 0x6e, script: 0x5a, flags: 0x0},
- 328: {region: 0x9c, script: 0x5, flags: 0x0},
- 329: {region: 0x165, script: 0x5a, flags: 0x0},
- 330: {region: 0x165, script: 0x5a, flags: 0x0},
- 331: {region: 0x165, script: 0x5a, flags: 0x0},
- 332: {region: 0x165, script: 0x5a, flags: 0x0},
- 333: {region: 0x86, script: 0x5a, flags: 0x0},
- 334: {region: 0xc, script: 0x2, flags: 0x1},
- 335: {region: 0x165, script: 0x5a, flags: 0x0},
- 336: {region: 0xc3, script: 0x5a, flags: 0x0},
- 337: {region: 0x72, script: 0x5a, flags: 0x0},
- 338: {region: 0x10b, script: 0x5, flags: 0x0},
- 339: {region: 0xe7, script: 0x5a, flags: 0x0},
- 340: {region: 0x10c, script: 0x5a, flags: 0x0},
- 341: {region: 0x73, script: 0x5a, flags: 0x0},
- 342: {region: 0x165, script: 0x5a, flags: 0x0},
- 343: {region: 0x165, script: 0x5a, flags: 0x0},
- 344: {region: 0x76, script: 0x5a, flags: 0x0},
- 345: {region: 0x165, script: 0x5a, flags: 0x0},
- 346: {region: 0x3b, script: 0x5a, flags: 0x0},
- 347: {region: 0x165, script: 0x5a, flags: 0x0},
- 348: {region: 0x165, script: 0x5a, flags: 0x0},
- 349: {region: 0x165, script: 0x5a, flags: 0x0},
- 350: {region: 0x78, script: 0x5a, flags: 0x0},
- 351: {region: 0x135, script: 0x5a, flags: 0x0},
- 352: {region: 0x78, script: 0x5a, flags: 0x0},
- 353: {region: 0x60, script: 0x5a, flags: 0x0},
- 354: {region: 0x60, script: 0x5a, flags: 0x0},
- 355: {region: 0x52, script: 0x5, flags: 0x0},
- 356: {region: 0x140, script: 0x5a, flags: 0x0},
- 357: {region: 0x165, script: 0x5a, flags: 0x0},
- 358: {region: 0x84, script: 0x5a, flags: 0x0},
- 359: {region: 0x165, script: 0x5a, flags: 0x0},
- 360: {region: 0xd4, script: 0x5a, flags: 0x0},
- 361: {region: 0x9e, script: 0x5a, flags: 0x0},
- 362: {region: 0xd6, script: 0x5a, flags: 0x0},
- 363: {region: 0x165, script: 0x5a, flags: 0x0},
- 364: {region: 0x10b, script: 0x5a, flags: 0x0},
- 365: {region: 0xd9, script: 0x5a, flags: 0x0},
- 366: {region: 0x96, script: 0x5a, flags: 0x0},
- 367: {region: 0x80, script: 0x5a, flags: 0x0},
- 368: {region: 0x165, script: 0x5a, flags: 0x0},
- 369: {region: 0xbc, script: 0x5a, flags: 0x0},
- 370: {region: 0x165, script: 0x5a, flags: 0x0},
- 371: {region: 0x165, script: 0x5a, flags: 0x0},
- 372: {region: 0x165, script: 0x5a, flags: 0x0},
- 373: {region: 0x53, script: 0x3b, flags: 0x0},
- 374: {region: 0x165, script: 0x5a, flags: 0x0},
- 375: {region: 0x95, script: 0x5a, flags: 0x0},
- 376: {region: 0x165, script: 0x5a, flags: 0x0},
- 377: {region: 0x165, script: 0x5a, flags: 0x0},
- 378: {region: 0x99, script: 0x22, flags: 0x0},
- 379: {region: 0x165, script: 0x5a, flags: 0x0},
- 380: {region: 0x9c, script: 0x5, flags: 0x0},
- 381: {region: 0x7e, script: 0x5a, flags: 0x0},
- 382: {region: 0x7b, script: 0x5a, flags: 0x0},
- 383: {region: 0x165, script: 0x5a, flags: 0x0},
- 384: {region: 0x165, script: 0x5a, flags: 0x0},
- 385: {region: 0x165, script: 0x5a, flags: 0x0},
- 386: {region: 0x165, script: 0x5a, flags: 0x0},
- 387: {region: 0x165, script: 0x5a, flags: 0x0},
- 388: {region: 0x165, script: 0x5a, flags: 0x0},
- 389: {region: 0x6f, script: 0x2c, flags: 0x0},
- 390: {region: 0x165, script: 0x5a, flags: 0x0},
- 391: {region: 0xdb, script: 0x22, flags: 0x0},
- 392: {region: 0x165, script: 0x5a, flags: 0x0},
- 393: {region: 0xa7, script: 0x5a, flags: 0x0},
- 394: {region: 0x165, script: 0x5a, flags: 0x0},
- 395: {region: 0xe8, script: 0x5, flags: 0x0},
- 396: {region: 0x165, script: 0x5a, flags: 0x0},
- 397: {region: 0xe8, script: 0x5, flags: 0x0},
- 398: {region: 0x165, script: 0x5a, flags: 0x0},
- 399: {region: 0x165, script: 0x5a, flags: 0x0},
- 400: {region: 0x6e, script: 0x5a, flags: 0x0},
- 401: {region: 0x9c, script: 0x5, flags: 0x0},
- 402: {region: 0x165, script: 0x5a, flags: 0x0},
- 403: {region: 0x165, script: 0x2c, flags: 0x0},
- 404: {region: 0xf1, script: 0x5a, flags: 0x0},
- 405: {region: 0x165, script: 0x5a, flags: 0x0},
- 406: {region: 0x165, script: 0x5a, flags: 0x0},
- 407: {region: 0x165, script: 0x5a, flags: 0x0},
- 408: {region: 0x165, script: 0x2c, flags: 0x0},
- 409: {region: 0x165, script: 0x5a, flags: 0x0},
- 410: {region: 0x99, script: 0x22, flags: 0x0},
- 411: {region: 0x99, script: 0xe6, flags: 0x0},
- 412: {region: 0x95, script: 0x5a, flags: 0x0},
- 413: {region: 0xd9, script: 0x5a, flags: 0x0},
- 414: {region: 0x130, script: 0x32, flags: 0x0},
- 415: {region: 0x165, script: 0x5a, flags: 0x0},
- 416: {region: 0xe, script: 0x2, flags: 0x1},
- 417: {region: 0x99, script: 0xe, flags: 0x0},
- 418: {region: 0x165, script: 0x5a, flags: 0x0},
- 419: {region: 0x4e, script: 0x5a, flags: 0x0},
- 420: {region: 0x99, script: 0x35, flags: 0x0},
- 421: {region: 0x41, script: 0x5a, flags: 0x0},
- 422: {region: 0x54, script: 0x5a, flags: 0x0},
- 423: {region: 0x165, script: 0x5a, flags: 0x0},
- 424: {region: 0x80, script: 0x5a, flags: 0x0},
- 425: {region: 0x165, script: 0x5a, flags: 0x0},
- 426: {region: 0x165, script: 0x5a, flags: 0x0},
- 427: {region: 0xa4, script: 0x5a, flags: 0x0},
- 428: {region: 0x98, script: 0x5a, flags: 0x0},
- 429: {region: 0x165, script: 0x5a, flags: 0x0},
- 430: {region: 0xdb, script: 0x22, flags: 0x0},
- 431: {region: 0x165, script: 0x5a, flags: 0x0},
- 432: {region: 0x165, script: 0x5, flags: 0x0},
- 433: {region: 0x49, script: 0x5a, flags: 0x0},
- 434: {region: 0x165, script: 0x5, flags: 0x0},
- 435: {region: 0x165, script: 0x5a, flags: 0x0},
- 436: {region: 0x10, script: 0x3, flags: 0x1},
- 437: {region: 0x165, script: 0x5a, flags: 0x0},
- 438: {region: 0x53, script: 0x3b, flags: 0x0},
- 439: {region: 0x165, script: 0x5a, flags: 0x0},
- 440: {region: 0x135, script: 0x5a, flags: 0x0},
- 441: {region: 0x24, script: 0x5, flags: 0x0},
- 442: {region: 0x165, script: 0x5a, flags: 0x0},
- 443: {region: 0x165, script: 0x2c, flags: 0x0},
- 444: {region: 0x97, script: 0x3e, flags: 0x0},
- 445: {region: 0x165, script: 0x5a, flags: 0x0},
- 446: {region: 0x99, script: 0x22, flags: 0x0},
- 447: {region: 0x165, script: 0x5a, flags: 0x0},
- 448: {region: 0x73, script: 0x5a, flags: 0x0},
- 449: {region: 0x165, script: 0x5a, flags: 0x0},
- 450: {region: 0x165, script: 0x5a, flags: 0x0},
- 451: {region: 0xe7, script: 0x5a, flags: 0x0},
- 452: {region: 0x165, script: 0x5a, flags: 0x0},
- 453: {region: 0x12b, script: 0x40, flags: 0x0},
- 454: {region: 0x53, script: 0x90, flags: 0x0},
- 455: {region: 0x165, script: 0x5a, flags: 0x0},
- 456: {region: 0xe8, script: 0x5, flags: 0x0},
- 457: {region: 0x99, script: 0x22, flags: 0x0},
- 458: {region: 0xaf, script: 0x41, flags: 0x0},
- 459: {region: 0xe7, script: 0x5a, flags: 0x0},
- 460: {region: 0xe8, script: 0x5, flags: 0x0},
- 461: {region: 0xe6, script: 0x5a, flags: 0x0},
- 462: {region: 0x99, script: 0x22, flags: 0x0},
- 463: {region: 0x99, script: 0x22, flags: 0x0},
- 464: {region: 0x165, script: 0x5a, flags: 0x0},
- 465: {region: 0x90, script: 0x5a, flags: 0x0},
- 466: {region: 0x60, script: 0x5a, flags: 0x0},
- 467: {region: 0x53, script: 0x3b, flags: 0x0},
- 468: {region: 0x91, script: 0x5a, flags: 0x0},
- 469: {region: 0x92, script: 0x5a, flags: 0x0},
- 470: {region: 0x165, script: 0x5a, flags: 0x0},
- 471: {region: 0x28, script: 0x8, flags: 0x0},
- 472: {region: 0xd2, script: 0x5a, flags: 0x0},
- 473: {region: 0x78, script: 0x5a, flags: 0x0},
- 474: {region: 0x165, script: 0x5a, flags: 0x0},
- 475: {region: 0x165, script: 0x5a, flags: 0x0},
- 476: {region: 0xd0, script: 0x5a, flags: 0x0},
- 477: {region: 0xd6, script: 0x5a, flags: 0x0},
- 478: {region: 0x165, script: 0x5a, flags: 0x0},
- 479: {region: 0x165, script: 0x5a, flags: 0x0},
- 480: {region: 0x165, script: 0x5a, flags: 0x0},
- 481: {region: 0x95, script: 0x5a, flags: 0x0},
- 482: {region: 0x165, script: 0x5a, flags: 0x0},
- 483: {region: 0x165, script: 0x5a, flags: 0x0},
- 484: {region: 0x165, script: 0x5a, flags: 0x0},
- 486: {region: 0x122, script: 0x5a, flags: 0x0},
- 487: {region: 0xd6, script: 0x5a, flags: 0x0},
- 488: {region: 0x165, script: 0x5a, flags: 0x0},
- 489: {region: 0x165, script: 0x5a, flags: 0x0},
- 490: {region: 0x53, script: 0xfa, flags: 0x0},
- 491: {region: 0x165, script: 0x5a, flags: 0x0},
- 492: {region: 0x135, script: 0x5a, flags: 0x0},
- 493: {region: 0x165, script: 0x5a, flags: 0x0},
- 494: {region: 0x49, script: 0x5a, flags: 0x0},
- 495: {region: 0x165, script: 0x5a, flags: 0x0},
- 496: {region: 0x165, script: 0x5a, flags: 0x0},
- 497: {region: 0xe7, script: 0x5a, flags: 0x0},
- 498: {region: 0x165, script: 0x5a, flags: 0x0},
- 499: {region: 0x95, script: 0x5a, flags: 0x0},
- 500: {region: 0x106, script: 0x20, flags: 0x0},
- 501: {region: 0x1, script: 0x5a, flags: 0x0},
- 502: {region: 0x165, script: 0x5a, flags: 0x0},
- 503: {region: 0x165, script: 0x5a, flags: 0x0},
- 504: {region: 0x9d, script: 0x5a, flags: 0x0},
- 505: {region: 0x9e, script: 0x5a, flags: 0x0},
- 506: {region: 0x49, script: 0x17, flags: 0x0},
- 507: {region: 0x97, script: 0x3e, flags: 0x0},
- 508: {region: 0x165, script: 0x5a, flags: 0x0},
- 509: {region: 0x165, script: 0x5a, flags: 0x0},
- 510: {region: 0x106, script: 0x5a, flags: 0x0},
- 511: {region: 0x165, script: 0x5a, flags: 0x0},
- 512: {region: 0xa2, script: 0x49, flags: 0x0},
- 513: {region: 0x165, script: 0x5a, flags: 0x0},
- 514: {region: 0xa0, script: 0x5a, flags: 0x0},
- 515: {region: 0x1, script: 0x5a, flags: 0x0},
- 516: {region: 0x165, script: 0x5a, flags: 0x0},
- 517: {region: 0x165, script: 0x5a, flags: 0x0},
- 518: {region: 0x165, script: 0x5a, flags: 0x0},
- 519: {region: 0x52, script: 0x5a, flags: 0x0},
- 520: {region: 0x130, script: 0x3e, flags: 0x0},
- 521: {region: 0x165, script: 0x5a, flags: 0x0},
- 522: {region: 0x12f, script: 0x5a, flags: 0x0},
- 523: {region: 0xdb, script: 0x22, flags: 0x0},
- 524: {region: 0x165, script: 0x5a, flags: 0x0},
- 525: {region: 0x63, script: 0x5a, flags: 0x0},
- 526: {region: 0x95, script: 0x5a, flags: 0x0},
- 527: {region: 0x95, script: 0x5a, flags: 0x0},
- 528: {region: 0x7d, script: 0x2e, flags: 0x0},
- 529: {region: 0x137, script: 0x20, flags: 0x0},
- 530: {region: 0x67, script: 0x5a, flags: 0x0},
- 531: {region: 0xc4, script: 0x5a, flags: 0x0},
- 532: {region: 0x165, script: 0x5a, flags: 0x0},
- 533: {region: 0x165, script: 0x5a, flags: 0x0},
- 534: {region: 0xd6, script: 0x5a, flags: 0x0},
- 535: {region: 0xa4, script: 0x5a, flags: 0x0},
- 536: {region: 0xc3, script: 0x5a, flags: 0x0},
- 537: {region: 0x106, script: 0x20, flags: 0x0},
- 538: {region: 0x165, script: 0x5a, flags: 0x0},
- 539: {region: 0x165, script: 0x5a, flags: 0x0},
- 540: {region: 0x165, script: 0x5a, flags: 0x0},
- 541: {region: 0x165, script: 0x5a, flags: 0x0},
- 542: {region: 0xd4, script: 0x5, flags: 0x0},
- 543: {region: 0xd6, script: 0x5a, flags: 0x0},
- 544: {region: 0x164, script: 0x5a, flags: 0x0},
- 545: {region: 0x165, script: 0x5a, flags: 0x0},
- 546: {region: 0x165, script: 0x5a, flags: 0x0},
- 547: {region: 0x12f, script: 0x5a, flags: 0x0},
- 548: {region: 0x122, script: 0x5, flags: 0x0},
- 549: {region: 0x165, script: 0x5a, flags: 0x0},
- 550: {region: 0x123, script: 0xeb, flags: 0x0},
- 551: {region: 0x5a, script: 0x5a, flags: 0x0},
- 552: {region: 0x52, script: 0x5a, flags: 0x0},
- 553: {region: 0x165, script: 0x5a, flags: 0x0},
- 554: {region: 0x4f, script: 0x5a, flags: 0x0},
- 555: {region: 0x99, script: 0x22, flags: 0x0},
- 556: {region: 0x99, script: 0x22, flags: 0x0},
- 557: {region: 0x4b, script: 0x5a, flags: 0x0},
- 558: {region: 0x95, script: 0x5a, flags: 0x0},
- 559: {region: 0x165, script: 0x5a, flags: 0x0},
- 560: {region: 0x41, script: 0x5a, flags: 0x0},
- 561: {region: 0x99, script: 0x5a, flags: 0x0},
- 562: {region: 0x53, script: 0xe2, flags: 0x0},
- 563: {region: 0x99, script: 0x22, flags: 0x0},
- 564: {region: 0xc3, script: 0x5a, flags: 0x0},
- 565: {region: 0x165, script: 0x5a, flags: 0x0},
- 566: {region: 0x99, script: 0x75, flags: 0x0},
- 567: {region: 0xe8, script: 0x5, flags: 0x0},
- 568: {region: 0x165, script: 0x5a, flags: 0x0},
- 569: {region: 0xa4, script: 0x5a, flags: 0x0},
- 570: {region: 0x165, script: 0x5a, flags: 0x0},
- 571: {region: 0x12b, script: 0x5a, flags: 0x0},
- 572: {region: 0x165, script: 0x5a, flags: 0x0},
- 573: {region: 0xd2, script: 0x5a, flags: 0x0},
- 574: {region: 0x165, script: 0x5a, flags: 0x0},
- 575: {region: 0xaf, script: 0x57, flags: 0x0},
- 576: {region: 0x165, script: 0x5a, flags: 0x0},
- 577: {region: 0x165, script: 0x5a, flags: 0x0},
- 578: {region: 0x13, script: 0x6, flags: 0x1},
- 579: {region: 0x165, script: 0x5a, flags: 0x0},
- 580: {region: 0x52, script: 0x5a, flags: 0x0},
- 581: {region: 0x82, script: 0x5a, flags: 0x0},
- 582: {region: 0xa4, script: 0x5a, flags: 0x0},
- 583: {region: 0x165, script: 0x5a, flags: 0x0},
- 584: {region: 0x165, script: 0x5a, flags: 0x0},
- 585: {region: 0x165, script: 0x5a, flags: 0x0},
- 586: {region: 0xa6, script: 0x4e, flags: 0x0},
- 587: {region: 0x2a, script: 0x5a, flags: 0x0},
- 588: {region: 0x165, script: 0x5a, flags: 0x0},
- 589: {region: 0x165, script: 0x5a, flags: 0x0},
- 590: {region: 0x165, script: 0x5a, flags: 0x0},
- 591: {region: 0x165, script: 0x5a, flags: 0x0},
- 592: {region: 0x165, script: 0x5a, flags: 0x0},
- 593: {region: 0x99, script: 0x52, flags: 0x0},
- 594: {region: 0x8b, script: 0x5a, flags: 0x0},
- 595: {region: 0x165, script: 0x5a, flags: 0x0},
- 596: {region: 0xab, script: 0x53, flags: 0x0},
- 597: {region: 0x106, script: 0x20, flags: 0x0},
- 598: {region: 0x99, script: 0x22, flags: 0x0},
- 599: {region: 0x165, script: 0x5a, flags: 0x0},
- 600: {region: 0x75, script: 0x5a, flags: 0x0},
- 601: {region: 0x165, script: 0x5a, flags: 0x0},
- 602: {region: 0xb4, script: 0x5a, flags: 0x0},
- 603: {region: 0x165, script: 0x5a, flags: 0x0},
- 604: {region: 0x165, script: 0x5a, flags: 0x0},
- 605: {region: 0x165, script: 0x5a, flags: 0x0},
- 606: {region: 0x165, script: 0x5a, flags: 0x0},
- 607: {region: 0x165, script: 0x5a, flags: 0x0},
- 608: {region: 0x165, script: 0x5a, flags: 0x0},
- 609: {region: 0x165, script: 0x5a, flags: 0x0},
- 610: {region: 0x165, script: 0x2c, flags: 0x0},
- 611: {region: 0x165, script: 0x5a, flags: 0x0},
- 612: {region: 0x106, script: 0x20, flags: 0x0},
- 613: {region: 0x112, script: 0x5a, flags: 0x0},
- 614: {region: 0xe7, script: 0x5a, flags: 0x0},
- 615: {region: 0x106, script: 0x5a, flags: 0x0},
- 616: {region: 0x165, script: 0x5a, flags: 0x0},
- 617: {region: 0x99, script: 0x22, flags: 0x0},
- 618: {region: 0x99, script: 0x5, flags: 0x0},
- 619: {region: 0x12f, script: 0x5a, flags: 0x0},
- 620: {region: 0x165, script: 0x5a, flags: 0x0},
- 621: {region: 0x52, script: 0x5a, flags: 0x0},
- 622: {region: 0x60, script: 0x5a, flags: 0x0},
- 623: {region: 0x165, script: 0x5a, flags: 0x0},
- 624: {region: 0x165, script: 0x5a, flags: 0x0},
- 625: {region: 0x165, script: 0x2c, flags: 0x0},
- 626: {region: 0x165, script: 0x5a, flags: 0x0},
- 627: {region: 0x165, script: 0x5a, flags: 0x0},
- 628: {region: 0x19, script: 0x3, flags: 0x1},
- 629: {region: 0x165, script: 0x5a, flags: 0x0},
- 630: {region: 0x165, script: 0x5a, flags: 0x0},
- 631: {region: 0x165, script: 0x5a, flags: 0x0},
- 632: {region: 0x165, script: 0x5a, flags: 0x0},
- 633: {region: 0x106, script: 0x20, flags: 0x0},
- 634: {region: 0x165, script: 0x5a, flags: 0x0},
- 635: {region: 0x165, script: 0x5a, flags: 0x0},
- 636: {region: 0x165, script: 0x5a, flags: 0x0},
- 637: {region: 0x106, script: 0x20, flags: 0x0},
- 638: {region: 0x165, script: 0x5a, flags: 0x0},
- 639: {region: 0x95, script: 0x5a, flags: 0x0},
- 640: {region: 0xe8, script: 0x5, flags: 0x0},
- 641: {region: 0x7b, script: 0x5a, flags: 0x0},
- 642: {region: 0x165, script: 0x5a, flags: 0x0},
- 643: {region: 0x165, script: 0x5a, flags: 0x0},
- 644: {region: 0x165, script: 0x5a, flags: 0x0},
- 645: {region: 0x165, script: 0x2c, flags: 0x0},
- 646: {region: 0x123, script: 0xeb, flags: 0x0},
- 647: {region: 0xe8, script: 0x5, flags: 0x0},
- 648: {region: 0x165, script: 0x5a, flags: 0x0},
- 649: {region: 0x165, script: 0x5a, flags: 0x0},
- 650: {region: 0x1c, script: 0x5, flags: 0x1},
- 651: {region: 0x165, script: 0x5a, flags: 0x0},
- 652: {region: 0x165, script: 0x5a, flags: 0x0},
- 653: {region: 0x165, script: 0x5a, flags: 0x0},
- 654: {region: 0x138, script: 0x5a, flags: 0x0},
- 655: {region: 0x87, script: 0x5e, flags: 0x0},
- 656: {region: 0x97, script: 0x3e, flags: 0x0},
- 657: {region: 0x12f, script: 0x5a, flags: 0x0},
- 658: {region: 0xe8, script: 0x5, flags: 0x0},
- 659: {region: 0x131, script: 0x5a, flags: 0x0},
- 660: {region: 0x165, script: 0x5a, flags: 0x0},
- 661: {region: 0xb7, script: 0x5a, flags: 0x0},
- 662: {region: 0x106, script: 0x20, flags: 0x0},
- 663: {region: 0x165, script: 0x5a, flags: 0x0},
- 664: {region: 0x95, script: 0x5a, flags: 0x0},
- 665: {region: 0x165, script: 0x5a, flags: 0x0},
- 666: {region: 0x53, script: 0xeb, flags: 0x0},
- 667: {region: 0x165, script: 0x5a, flags: 0x0},
- 668: {region: 0x165, script: 0x5a, flags: 0x0},
- 669: {region: 0x165, script: 0x5a, flags: 0x0},
- 670: {region: 0x165, script: 0x5a, flags: 0x0},
- 671: {region: 0x99, script: 0x5c, flags: 0x0},
- 672: {region: 0x165, script: 0x5a, flags: 0x0},
- 673: {region: 0x165, script: 0x5a, flags: 0x0},
- 674: {region: 0x106, script: 0x20, flags: 0x0},
- 675: {region: 0x131, script: 0x5a, flags: 0x0},
- 676: {region: 0x165, script: 0x5a, flags: 0x0},
- 677: {region: 0xd9, script: 0x5a, flags: 0x0},
- 678: {region: 0x165, script: 0x5a, flags: 0x0},
- 679: {region: 0x165, script: 0x5a, flags: 0x0},
- 680: {region: 0x21, script: 0x2, flags: 0x1},
- 681: {region: 0x165, script: 0x5a, flags: 0x0},
- 682: {region: 0x165, script: 0x5a, flags: 0x0},
- 683: {region: 0x9e, script: 0x5a, flags: 0x0},
- 684: {region: 0x53, script: 0x60, flags: 0x0},
- 685: {region: 0x95, script: 0x5a, flags: 0x0},
- 686: {region: 0x9c, script: 0x5, flags: 0x0},
- 687: {region: 0x135, script: 0x5a, flags: 0x0},
- 688: {region: 0x165, script: 0x5a, flags: 0x0},
- 689: {region: 0x165, script: 0x5a, flags: 0x0},
- 690: {region: 0x99, script: 0xe6, flags: 0x0},
- 691: {region: 0x9e, script: 0x5a, flags: 0x0},
- 692: {region: 0x165, script: 0x5a, flags: 0x0},
- 693: {region: 0x4b, script: 0x5a, flags: 0x0},
- 694: {region: 0x165, script: 0x5a, flags: 0x0},
- 695: {region: 0x165, script: 0x5a, flags: 0x0},
- 696: {region: 0xaf, script: 0x57, flags: 0x0},
- 697: {region: 0x165, script: 0x5a, flags: 0x0},
- 698: {region: 0x165, script: 0x5a, flags: 0x0},
- 699: {region: 0x4b, script: 0x5a, flags: 0x0},
- 700: {region: 0x165, script: 0x5a, flags: 0x0},
- 701: {region: 0x165, script: 0x5a, flags: 0x0},
- 702: {region: 0x162, script: 0x5a, flags: 0x0},
- 703: {region: 0x9c, script: 0x5, flags: 0x0},
- 704: {region: 0xb6, script: 0x5a, flags: 0x0},
- 705: {region: 0xb8, script: 0x5a, flags: 0x0},
- 706: {region: 0x4b, script: 0x5a, flags: 0x0},
- 707: {region: 0x4b, script: 0x5a, flags: 0x0},
- 708: {region: 0xa4, script: 0x5a, flags: 0x0},
- 709: {region: 0xa4, script: 0x5a, flags: 0x0},
- 710: {region: 0x9c, script: 0x5, flags: 0x0},
- 711: {region: 0xb8, script: 0x5a, flags: 0x0},
- 712: {region: 0x123, script: 0xeb, flags: 0x0},
- 713: {region: 0x53, script: 0x3b, flags: 0x0},
- 714: {region: 0x12b, script: 0x5a, flags: 0x0},
- 715: {region: 0x95, script: 0x5a, flags: 0x0},
- 716: {region: 0x52, script: 0x5a, flags: 0x0},
- 717: {region: 0x99, script: 0x22, flags: 0x0},
- 718: {region: 0x99, script: 0x22, flags: 0x0},
- 719: {region: 0x95, script: 0x5a, flags: 0x0},
- 720: {region: 0x23, script: 0x3, flags: 0x1},
- 721: {region: 0xa4, script: 0x5a, flags: 0x0},
- 722: {region: 0x165, script: 0x5a, flags: 0x0},
- 723: {region: 0xcf, script: 0x5a, flags: 0x0},
- 724: {region: 0x165, script: 0x5a, flags: 0x0},
- 725: {region: 0x165, script: 0x5a, flags: 0x0},
- 726: {region: 0x165, script: 0x5a, flags: 0x0},
- 727: {region: 0x165, script: 0x5a, flags: 0x0},
- 728: {region: 0x165, script: 0x5a, flags: 0x0},
- 729: {region: 0x165, script: 0x5a, flags: 0x0},
- 730: {region: 0x165, script: 0x5a, flags: 0x0},
- 731: {region: 0x165, script: 0x5a, flags: 0x0},
- 732: {region: 0x165, script: 0x5a, flags: 0x0},
- 733: {region: 0x165, script: 0x5a, flags: 0x0},
- 734: {region: 0x165, script: 0x5a, flags: 0x0},
- 735: {region: 0x165, script: 0x5, flags: 0x0},
- 736: {region: 0x106, script: 0x20, flags: 0x0},
- 737: {region: 0xe7, script: 0x5a, flags: 0x0},
- 738: {region: 0x165, script: 0x5a, flags: 0x0},
- 739: {region: 0x95, script: 0x5a, flags: 0x0},
- 740: {region: 0x165, script: 0x2c, flags: 0x0},
- 741: {region: 0x165, script: 0x5a, flags: 0x0},
- 742: {region: 0x165, script: 0x5a, flags: 0x0},
- 743: {region: 0x165, script: 0x5a, flags: 0x0},
- 744: {region: 0x112, script: 0x5a, flags: 0x0},
- 745: {region: 0xa4, script: 0x5a, flags: 0x0},
- 746: {region: 0x165, script: 0x5a, flags: 0x0},
- 747: {region: 0x165, script: 0x5a, flags: 0x0},
- 748: {region: 0x123, script: 0x5, flags: 0x0},
- 749: {region: 0xcc, script: 0x5a, flags: 0x0},
- 750: {region: 0x165, script: 0x5a, flags: 0x0},
- 751: {region: 0x165, script: 0x5a, flags: 0x0},
- 752: {region: 0x165, script: 0x5a, flags: 0x0},
- 753: {region: 0xbf, script: 0x5a, flags: 0x0},
- 754: {region: 0xd1, script: 0x5a, flags: 0x0},
- 755: {region: 0x165, script: 0x5a, flags: 0x0},
- 756: {region: 0x52, script: 0x5a, flags: 0x0},
- 757: {region: 0xdb, script: 0x22, flags: 0x0},
- 758: {region: 0x12f, script: 0x5a, flags: 0x0},
- 759: {region: 0xc0, script: 0x5a, flags: 0x0},
- 760: {region: 0x165, script: 0x5a, flags: 0x0},
- 761: {region: 0x165, script: 0x5a, flags: 0x0},
- 762: {region: 0xe0, script: 0x5a, flags: 0x0},
- 763: {region: 0x165, script: 0x5a, flags: 0x0},
- 764: {region: 0x95, script: 0x5a, flags: 0x0},
- 765: {region: 0x9b, script: 0x3d, flags: 0x0},
- 766: {region: 0x165, script: 0x5a, flags: 0x0},
- 767: {region: 0xc2, script: 0x20, flags: 0x0},
- 768: {region: 0x165, script: 0x5, flags: 0x0},
- 769: {region: 0x165, script: 0x5a, flags: 0x0},
- 770: {region: 0x165, script: 0x5a, flags: 0x0},
- 771: {region: 0x165, script: 0x5a, flags: 0x0},
- 772: {region: 0x99, script: 0x6e, flags: 0x0},
- 773: {region: 0x165, script: 0x5a, flags: 0x0},
- 774: {region: 0x165, script: 0x5a, flags: 0x0},
- 775: {region: 0x10b, script: 0x5a, flags: 0x0},
- 776: {region: 0x165, script: 0x5a, flags: 0x0},
- 777: {region: 0x165, script: 0x5a, flags: 0x0},
- 778: {region: 0x165, script: 0x5a, flags: 0x0},
- 779: {region: 0x26, script: 0x3, flags: 0x1},
- 780: {region: 0x165, script: 0x5a, flags: 0x0},
- 781: {region: 0x165, script: 0x5a, flags: 0x0},
- 782: {region: 0x99, script: 0xe, flags: 0x0},
- 783: {region: 0xc4, script: 0x75, flags: 0x0},
- 785: {region: 0x165, script: 0x5a, flags: 0x0},
- 786: {region: 0x49, script: 0x5a, flags: 0x0},
- 787: {region: 0x49, script: 0x5a, flags: 0x0},
- 788: {region: 0x37, script: 0x5a, flags: 0x0},
- 789: {region: 0x165, script: 0x5a, flags: 0x0},
- 790: {region: 0x165, script: 0x5a, flags: 0x0},
- 791: {region: 0x165, script: 0x5a, flags: 0x0},
- 792: {region: 0x165, script: 0x5a, flags: 0x0},
- 793: {region: 0x165, script: 0x5a, flags: 0x0},
- 794: {region: 0x165, script: 0x5a, flags: 0x0},
- 795: {region: 0x99, script: 0x22, flags: 0x0},
- 796: {region: 0xdb, script: 0x22, flags: 0x0},
- 797: {region: 0x106, script: 0x20, flags: 0x0},
- 798: {region: 0x35, script: 0x72, flags: 0x0},
- 799: {region: 0x29, script: 0x3, flags: 0x1},
- 800: {region: 0xcb, script: 0x5a, flags: 0x0},
- 801: {region: 0x165, script: 0x5a, flags: 0x0},
- 802: {region: 0x165, script: 0x5a, flags: 0x0},
- 803: {region: 0x165, script: 0x5a, flags: 0x0},
- 804: {region: 0x99, script: 0x22, flags: 0x0},
- 805: {region: 0x52, script: 0x5a, flags: 0x0},
- 807: {region: 0x165, script: 0x5a, flags: 0x0},
- 808: {region: 0x135, script: 0x5a, flags: 0x0},
- 809: {region: 0x165, script: 0x5a, flags: 0x0},
- 810: {region: 0x165, script: 0x5a, flags: 0x0},
- 811: {region: 0xe8, script: 0x5, flags: 0x0},
- 812: {region: 0xc3, script: 0x5a, flags: 0x0},
- 813: {region: 0x99, script: 0x22, flags: 0x0},
- 814: {region: 0x95, script: 0x5a, flags: 0x0},
- 815: {region: 0x164, script: 0x5a, flags: 0x0},
- 816: {region: 0x165, script: 0x5a, flags: 0x0},
- 817: {region: 0xc4, script: 0x75, flags: 0x0},
- 818: {region: 0x165, script: 0x5a, flags: 0x0},
- 819: {region: 0x165, script: 0x2c, flags: 0x0},
- 820: {region: 0x106, script: 0x20, flags: 0x0},
- 821: {region: 0x165, script: 0x5a, flags: 0x0},
- 822: {region: 0x131, script: 0x5a, flags: 0x0},
- 823: {region: 0x9c, script: 0x66, flags: 0x0},
- 824: {region: 0x165, script: 0x5a, flags: 0x0},
- 825: {region: 0x165, script: 0x5a, flags: 0x0},
- 826: {region: 0x9c, script: 0x5, flags: 0x0},
- 827: {region: 0x165, script: 0x5a, flags: 0x0},
- 828: {region: 0x165, script: 0x5a, flags: 0x0},
- 829: {region: 0x165, script: 0x5a, flags: 0x0},
- 830: {region: 0xdd, script: 0x5a, flags: 0x0},
- 831: {region: 0x165, script: 0x5a, flags: 0x0},
- 832: {region: 0x165, script: 0x5a, flags: 0x0},
- 834: {region: 0x165, script: 0x5a, flags: 0x0},
- 835: {region: 0x53, script: 0x3b, flags: 0x0},
- 836: {region: 0x9e, script: 0x5a, flags: 0x0},
- 837: {region: 0xd2, script: 0x5a, flags: 0x0},
- 838: {region: 0x165, script: 0x5a, flags: 0x0},
- 839: {region: 0xda, script: 0x5a, flags: 0x0},
- 840: {region: 0x165, script: 0x5a, flags: 0x0},
- 841: {region: 0x165, script: 0x5a, flags: 0x0},
- 842: {region: 0x165, script: 0x5a, flags: 0x0},
- 843: {region: 0xcf, script: 0x5a, flags: 0x0},
- 844: {region: 0x165, script: 0x5a, flags: 0x0},
- 845: {region: 0x165, script: 0x5a, flags: 0x0},
- 846: {region: 0x164, script: 0x5a, flags: 0x0},
- 847: {region: 0xd1, script: 0x5a, flags: 0x0},
- 848: {region: 0x60, script: 0x5a, flags: 0x0},
- 849: {region: 0xdb, script: 0x22, flags: 0x0},
- 850: {region: 0x165, script: 0x5a, flags: 0x0},
- 851: {region: 0xdb, script: 0x22, flags: 0x0},
- 852: {region: 0x165, script: 0x5a, flags: 0x0},
- 853: {region: 0x165, script: 0x5a, flags: 0x0},
- 854: {region: 0xd2, script: 0x5a, flags: 0x0},
- 855: {region: 0x165, script: 0x5a, flags: 0x0},
- 856: {region: 0x165, script: 0x5a, flags: 0x0},
- 857: {region: 0xd1, script: 0x5a, flags: 0x0},
- 858: {region: 0x165, script: 0x5a, flags: 0x0},
- 859: {region: 0xcf, script: 0x5a, flags: 0x0},
- 860: {region: 0xcf, script: 0x5a, flags: 0x0},
- 861: {region: 0x165, script: 0x5a, flags: 0x0},
- 862: {region: 0x165, script: 0x5a, flags: 0x0},
- 863: {region: 0x95, script: 0x5a, flags: 0x0},
- 864: {region: 0x165, script: 0x5a, flags: 0x0},
- 865: {region: 0xdf, script: 0x5a, flags: 0x0},
- 866: {region: 0x165, script: 0x5a, flags: 0x0},
- 867: {region: 0x165, script: 0x5a, flags: 0x0},
- 868: {region: 0x99, script: 0x5a, flags: 0x0},
- 869: {region: 0x165, script: 0x5a, flags: 0x0},
- 870: {region: 0x165, script: 0x5a, flags: 0x0},
- 871: {region: 0xd9, script: 0x5a, flags: 0x0},
- 872: {region: 0x52, script: 0x5a, flags: 0x0},
- 873: {region: 0x165, script: 0x5a, flags: 0x0},
- 874: {region: 0xda, script: 0x5a, flags: 0x0},
- 875: {region: 0x165, script: 0x5a, flags: 0x0},
- 876: {region: 0x52, script: 0x5a, flags: 0x0},
- 877: {region: 0x165, script: 0x5a, flags: 0x0},
- 878: {region: 0x165, script: 0x5a, flags: 0x0},
- 879: {region: 0xda, script: 0x5a, flags: 0x0},
- 880: {region: 0x123, script: 0x56, flags: 0x0},
- 881: {region: 0x99, script: 0x22, flags: 0x0},
- 882: {region: 0x10c, script: 0xc9, flags: 0x0},
- 883: {region: 0x165, script: 0x5a, flags: 0x0},
- 884: {region: 0x165, script: 0x5a, flags: 0x0},
- 885: {region: 0x84, script: 0x7c, flags: 0x0},
- 886: {region: 0x161, script: 0x5a, flags: 0x0},
- 887: {region: 0x165, script: 0x5a, flags: 0x0},
- 888: {region: 0x49, script: 0x17, flags: 0x0},
- 889: {region: 0x165, script: 0x5a, flags: 0x0},
- 890: {region: 0x161, script: 0x5a, flags: 0x0},
- 891: {region: 0x165, script: 0x5a, flags: 0x0},
- 892: {region: 0x165, script: 0x5a, flags: 0x0},
- 893: {region: 0x165, script: 0x5a, flags: 0x0},
- 894: {region: 0x165, script: 0x5a, flags: 0x0},
- 895: {region: 0x165, script: 0x5a, flags: 0x0},
- 896: {region: 0x117, script: 0x5a, flags: 0x0},
- 897: {region: 0x165, script: 0x5a, flags: 0x0},
- 898: {region: 0x165, script: 0x5a, flags: 0x0},
- 899: {region: 0x135, script: 0x5a, flags: 0x0},
- 900: {region: 0x165, script: 0x5a, flags: 0x0},
- 901: {region: 0x53, script: 0x5a, flags: 0x0},
- 902: {region: 0x165, script: 0x5a, flags: 0x0},
- 903: {region: 0xce, script: 0x5a, flags: 0x0},
- 904: {region: 0x12f, script: 0x5a, flags: 0x0},
- 905: {region: 0x131, script: 0x5a, flags: 0x0},
- 906: {region: 0x80, script: 0x5a, flags: 0x0},
- 907: {region: 0x78, script: 0x5a, flags: 0x0},
- 908: {region: 0x165, script: 0x5a, flags: 0x0},
- 910: {region: 0x165, script: 0x5a, flags: 0x0},
- 911: {region: 0x165, script: 0x5a, flags: 0x0},
- 912: {region: 0x6f, script: 0x5a, flags: 0x0},
- 913: {region: 0x165, script: 0x5a, flags: 0x0},
- 914: {region: 0x165, script: 0x5a, flags: 0x0},
- 915: {region: 0x165, script: 0x5a, flags: 0x0},
- 916: {region: 0x165, script: 0x5a, flags: 0x0},
- 917: {region: 0x99, script: 0x81, flags: 0x0},
- 918: {region: 0x165, script: 0x5a, flags: 0x0},
- 919: {region: 0x165, script: 0x5, flags: 0x0},
- 920: {region: 0x7d, script: 0x20, flags: 0x0},
- 921: {region: 0x135, script: 0x82, flags: 0x0},
- 922: {region: 0x165, script: 0x5, flags: 0x0},
- 923: {region: 0xc5, script: 0x80, flags: 0x0},
- 924: {region: 0x165, script: 0x5a, flags: 0x0},
- 925: {region: 0x2c, script: 0x3, flags: 0x1},
- 926: {region: 0xe7, script: 0x5a, flags: 0x0},
- 927: {region: 0x2f, script: 0x2, flags: 0x1},
- 928: {region: 0xe7, script: 0x5a, flags: 0x0},
- 929: {region: 0x30, script: 0x5a, flags: 0x0},
- 930: {region: 0xf0, script: 0x5a, flags: 0x0},
- 931: {region: 0x165, script: 0x5a, flags: 0x0},
- 932: {region: 0x78, script: 0x5a, flags: 0x0},
- 933: {region: 0xd6, script: 0x5a, flags: 0x0},
- 934: {region: 0x135, script: 0x5a, flags: 0x0},
- 935: {region: 0x49, script: 0x5a, flags: 0x0},
- 936: {region: 0x165, script: 0x5a, flags: 0x0},
- 937: {region: 0x9c, script: 0xf7, flags: 0x0},
- 938: {region: 0x165, script: 0x5a, flags: 0x0},
- 939: {region: 0x60, script: 0x5a, flags: 0x0},
- 940: {region: 0x165, script: 0x5, flags: 0x0},
- 941: {region: 0xb0, script: 0x8e, flags: 0x0},
- 943: {region: 0x165, script: 0x5a, flags: 0x0},
- 944: {region: 0x165, script: 0x5a, flags: 0x0},
- 945: {region: 0x99, script: 0x12, flags: 0x0},
- 946: {region: 0xa4, script: 0x5a, flags: 0x0},
- 947: {region: 0xe9, script: 0x5a, flags: 0x0},
- 948: {region: 0x165, script: 0x5a, flags: 0x0},
- 949: {region: 0x9e, script: 0x5a, flags: 0x0},
- 950: {region: 0x165, script: 0x5a, flags: 0x0},
- 951: {region: 0x165, script: 0x5a, flags: 0x0},
- 952: {region: 0x87, script: 0x34, flags: 0x0},
- 953: {region: 0x75, script: 0x5a, flags: 0x0},
- 954: {region: 0x165, script: 0x5a, flags: 0x0},
- 955: {region: 0xe8, script: 0x4d, flags: 0x0},
- 956: {region: 0x9c, script: 0x5, flags: 0x0},
- 957: {region: 0x1, script: 0x5a, flags: 0x0},
- 958: {region: 0x24, script: 0x5, flags: 0x0},
- 959: {region: 0x165, script: 0x5a, flags: 0x0},
- 960: {region: 0x41, script: 0x5a, flags: 0x0},
- 961: {region: 0x165, script: 0x5a, flags: 0x0},
- 962: {region: 0x7a, script: 0x5a, flags: 0x0},
- 963: {region: 0x165, script: 0x5a, flags: 0x0},
- 964: {region: 0xe4, script: 0x5a, flags: 0x0},
- 965: {region: 0x89, script: 0x5a, flags: 0x0},
- 966: {region: 0x69, script: 0x5a, flags: 0x0},
- 967: {region: 0x165, script: 0x5a, flags: 0x0},
- 968: {region: 0x99, script: 0x22, flags: 0x0},
- 969: {region: 0x165, script: 0x5a, flags: 0x0},
- 970: {region: 0x102, script: 0x5a, flags: 0x0},
- 971: {region: 0x95, script: 0x5a, flags: 0x0},
- 972: {region: 0x165, script: 0x5a, flags: 0x0},
- 973: {region: 0x165, script: 0x5a, flags: 0x0},
- 974: {region: 0x9e, script: 0x5a, flags: 0x0},
- 975: {region: 0x165, script: 0x5, flags: 0x0},
- 976: {region: 0x99, script: 0x5a, flags: 0x0},
- 977: {region: 0x31, script: 0x2, flags: 0x1},
- 978: {region: 0xdb, script: 0x22, flags: 0x0},
- 979: {region: 0x35, script: 0xe, flags: 0x0},
- 980: {region: 0x4e, script: 0x5a, flags: 0x0},
- 981: {region: 0x72, script: 0x5a, flags: 0x0},
- 982: {region: 0x4e, script: 0x5a, flags: 0x0},
- 983: {region: 0x9c, script: 0x5, flags: 0x0},
- 984: {region: 0x10c, script: 0x5a, flags: 0x0},
- 985: {region: 0x3a, script: 0x5a, flags: 0x0},
- 986: {region: 0x165, script: 0x5a, flags: 0x0},
- 987: {region: 0xd1, script: 0x5a, flags: 0x0},
- 988: {region: 0x104, script: 0x5a, flags: 0x0},
- 989: {region: 0x95, script: 0x5a, flags: 0x0},
- 990: {region: 0x12f, script: 0x5a, flags: 0x0},
- 991: {region: 0x165, script: 0x5a, flags: 0x0},
- 992: {region: 0x165, script: 0x5a, flags: 0x0},
- 993: {region: 0x73, script: 0x5a, flags: 0x0},
- 994: {region: 0x106, script: 0x20, flags: 0x0},
- 995: {region: 0x130, script: 0x20, flags: 0x0},
- 996: {region: 0x109, script: 0x5a, flags: 0x0},
- 997: {region: 0x107, script: 0x5a, flags: 0x0},
- 998: {region: 0x12f, script: 0x5a, flags: 0x0},
- 999: {region: 0x165, script: 0x5a, flags: 0x0},
- 1000: {region: 0xa2, script: 0x4c, flags: 0x0},
- 1001: {region: 0x99, script: 0x22, flags: 0x0},
- 1002: {region: 0x80, script: 0x5a, flags: 0x0},
- 1003: {region: 0x106, script: 0x20, flags: 0x0},
- 1004: {region: 0xa4, script: 0x5a, flags: 0x0},
- 1005: {region: 0x95, script: 0x5a, flags: 0x0},
- 1006: {region: 0x99, script: 0x5a, flags: 0x0},
- 1007: {region: 0x114, script: 0x5a, flags: 0x0},
- 1008: {region: 0x99, script: 0xcd, flags: 0x0},
- 1009: {region: 0x165, script: 0x5a, flags: 0x0},
- 1010: {region: 0x165, script: 0x5a, flags: 0x0},
- 1011: {region: 0x12f, script: 0x5a, flags: 0x0},
- 1012: {region: 0x9e, script: 0x5a, flags: 0x0},
- 1013: {region: 0x99, script: 0x22, flags: 0x0},
- 1014: {region: 0x165, script: 0x5, flags: 0x0},
- 1015: {region: 0x9e, script: 0x5a, flags: 0x0},
- 1016: {region: 0x7b, script: 0x5a, flags: 0x0},
- 1017: {region: 0x49, script: 0x5a, flags: 0x0},
- 1018: {region: 0x33, script: 0x4, flags: 0x1},
- 1019: {region: 0x9e, script: 0x5a, flags: 0x0},
- 1020: {region: 0x9c, script: 0x5, flags: 0x0},
- 1021: {region: 0xda, script: 0x5a, flags: 0x0},
- 1022: {region: 0x4f, script: 0x5a, flags: 0x0},
- 1023: {region: 0xd1, script: 0x5a, flags: 0x0},
- 1024: {region: 0xcf, script: 0x5a, flags: 0x0},
- 1025: {region: 0xc3, script: 0x5a, flags: 0x0},
- 1026: {region: 0x4c, script: 0x5a, flags: 0x0},
- 1027: {region: 0x96, script: 0x7e, flags: 0x0},
- 1028: {region: 0xb6, script: 0x5a, flags: 0x0},
- 1029: {region: 0x165, script: 0x2c, flags: 0x0},
- 1030: {region: 0x165, script: 0x5a, flags: 0x0},
- 1032: {region: 0xba, script: 0xe8, flags: 0x0},
- 1033: {region: 0x165, script: 0x5a, flags: 0x0},
- 1034: {region: 0xc4, script: 0x75, flags: 0x0},
- 1035: {region: 0x165, script: 0x5, flags: 0x0},
- 1036: {region: 0xb3, script: 0xd4, flags: 0x0},
- 1037: {region: 0x6f, script: 0x5a, flags: 0x0},
- 1038: {region: 0x165, script: 0x5a, flags: 0x0},
- 1039: {region: 0x165, script: 0x5a, flags: 0x0},
- 1040: {region: 0x165, script: 0x5a, flags: 0x0},
- 1041: {region: 0x165, script: 0x5a, flags: 0x0},
- 1042: {region: 0x111, script: 0x5a, flags: 0x0},
- 1043: {region: 0x165, script: 0x5a, flags: 0x0},
- 1044: {region: 0xe8, script: 0x5, flags: 0x0},
- 1045: {region: 0x165, script: 0x5a, flags: 0x0},
- 1046: {region: 0x10f, script: 0x5a, flags: 0x0},
- 1047: {region: 0x165, script: 0x5a, flags: 0x0},
- 1048: {region: 0xe9, script: 0x5a, flags: 0x0},
- 1049: {region: 0x165, script: 0x5a, flags: 0x0},
- 1050: {region: 0x95, script: 0x5a, flags: 0x0},
- 1051: {region: 0x142, script: 0x5a, flags: 0x0},
- 1052: {region: 0x10c, script: 0x5a, flags: 0x0},
- 1054: {region: 0x10c, script: 0x5a, flags: 0x0},
- 1055: {region: 0x72, script: 0x5a, flags: 0x0},
- 1056: {region: 0x97, script: 0xca, flags: 0x0},
- 1057: {region: 0x165, script: 0x5a, flags: 0x0},
- 1058: {region: 0x72, script: 0x5a, flags: 0x0},
- 1059: {region: 0x164, script: 0x5a, flags: 0x0},
- 1060: {region: 0x165, script: 0x5a, flags: 0x0},
- 1061: {region: 0xc3, script: 0x5a, flags: 0x0},
- 1062: {region: 0x165, script: 0x5a, flags: 0x0},
- 1063: {region: 0x165, script: 0x5a, flags: 0x0},
- 1064: {region: 0x165, script: 0x5a, flags: 0x0},
- 1065: {region: 0x115, script: 0x5a, flags: 0x0},
- 1066: {region: 0x165, script: 0x5a, flags: 0x0},
- 1067: {region: 0x165, script: 0x5a, flags: 0x0},
- 1068: {region: 0x123, script: 0xeb, flags: 0x0},
- 1069: {region: 0x165, script: 0x5a, flags: 0x0},
- 1070: {region: 0x165, script: 0x5a, flags: 0x0},
- 1071: {region: 0x165, script: 0x5a, flags: 0x0},
- 1072: {region: 0x165, script: 0x5a, flags: 0x0},
- 1073: {region: 0x27, script: 0x5a, flags: 0x0},
- 1074: {region: 0x37, script: 0x5, flags: 0x1},
- 1075: {region: 0x99, script: 0xd7, flags: 0x0},
- 1076: {region: 0x116, script: 0x5a, flags: 0x0},
- 1077: {region: 0x114, script: 0x5a, flags: 0x0},
- 1078: {region: 0x99, script: 0x22, flags: 0x0},
- 1079: {region: 0x161, script: 0x5a, flags: 0x0},
- 1080: {region: 0x165, script: 0x5a, flags: 0x0},
- 1081: {region: 0x165, script: 0x5a, flags: 0x0},
- 1082: {region: 0x6d, script: 0x5a, flags: 0x0},
- 1083: {region: 0x161, script: 0x5a, flags: 0x0},
- 1084: {region: 0x165, script: 0x5a, flags: 0x0},
- 1085: {region: 0x60, script: 0x5a, flags: 0x0},
- 1086: {region: 0x95, script: 0x5a, flags: 0x0},
- 1087: {region: 0x165, script: 0x5a, flags: 0x0},
- 1088: {region: 0x165, script: 0x5a, flags: 0x0},
- 1089: {region: 0x12f, script: 0x5a, flags: 0x0},
- 1090: {region: 0x165, script: 0x5a, flags: 0x0},
- 1091: {region: 0x84, script: 0x5a, flags: 0x0},
- 1092: {region: 0x10c, script: 0x5a, flags: 0x0},
- 1093: {region: 0x12f, script: 0x5a, flags: 0x0},
- 1094: {region: 0x15f, script: 0x5, flags: 0x0},
- 1095: {region: 0x4b, script: 0x5a, flags: 0x0},
- 1096: {region: 0x60, script: 0x5a, flags: 0x0},
- 1097: {region: 0x165, script: 0x5a, flags: 0x0},
- 1098: {region: 0x99, script: 0x22, flags: 0x0},
- 1099: {region: 0x95, script: 0x5a, flags: 0x0},
- 1100: {region: 0x165, script: 0x5a, flags: 0x0},
- 1101: {region: 0x35, script: 0xe, flags: 0x0},
- 1102: {region: 0x9b, script: 0xdb, flags: 0x0},
- 1103: {region: 0xe9, script: 0x5a, flags: 0x0},
- 1104: {region: 0x99, script: 0xe3, flags: 0x0},
- 1105: {region: 0xdb, script: 0x22, flags: 0x0},
- 1106: {region: 0x165, script: 0x5a, flags: 0x0},
- 1107: {region: 0x165, script: 0x5a, flags: 0x0},
- 1108: {region: 0x165, script: 0x5a, flags: 0x0},
- 1109: {region: 0x165, script: 0x5a, flags: 0x0},
- 1110: {region: 0x165, script: 0x5a, flags: 0x0},
- 1111: {region: 0x165, script: 0x5a, flags: 0x0},
- 1112: {region: 0x165, script: 0x5a, flags: 0x0},
- 1113: {region: 0x165, script: 0x5a, flags: 0x0},
- 1114: {region: 0xe7, script: 0x5a, flags: 0x0},
- 1115: {region: 0x165, script: 0x5a, flags: 0x0},
- 1116: {region: 0x165, script: 0x5a, flags: 0x0},
- 1117: {region: 0x99, script: 0x52, flags: 0x0},
- 1118: {region: 0x53, script: 0xe1, flags: 0x0},
- 1119: {region: 0xdb, script: 0x22, flags: 0x0},
- 1120: {region: 0xdb, script: 0x22, flags: 0x0},
- 1121: {region: 0x99, script: 0xe6, flags: 0x0},
- 1122: {region: 0x165, script: 0x5a, flags: 0x0},
- 1123: {region: 0x112, script: 0x5a, flags: 0x0},
- 1124: {region: 0x131, script: 0x5a, flags: 0x0},
- 1125: {region: 0x126, script: 0x5a, flags: 0x0},
- 1126: {region: 0x165, script: 0x5a, flags: 0x0},
- 1127: {region: 0x3c, script: 0x3, flags: 0x1},
- 1128: {region: 0x165, script: 0x5a, flags: 0x0},
- 1129: {region: 0x165, script: 0x5a, flags: 0x0},
- 1130: {region: 0x165, script: 0x5a, flags: 0x0},
- 1131: {region: 0x123, script: 0xeb, flags: 0x0},
- 1132: {region: 0xdb, script: 0x22, flags: 0x0},
- 1133: {region: 0xdb, script: 0x22, flags: 0x0},
- 1134: {region: 0xdb, script: 0x22, flags: 0x0},
- 1135: {region: 0x6f, script: 0x2c, flags: 0x0},
- 1136: {region: 0x165, script: 0x5a, flags: 0x0},
- 1137: {region: 0x6d, script: 0x2c, flags: 0x0},
- 1138: {region: 0x165, script: 0x5a, flags: 0x0},
- 1139: {region: 0x165, script: 0x5a, flags: 0x0},
- 1140: {region: 0x165, script: 0x5a, flags: 0x0},
- 1141: {region: 0xd6, script: 0x5a, flags: 0x0},
- 1142: {region: 0x127, script: 0x5a, flags: 0x0},
- 1143: {region: 0x125, script: 0x5a, flags: 0x0},
- 1144: {region: 0x32, script: 0x5a, flags: 0x0},
- 1145: {region: 0xdb, script: 0x22, flags: 0x0},
- 1146: {region: 0xe7, script: 0x5a, flags: 0x0},
- 1147: {region: 0x165, script: 0x5a, flags: 0x0},
- 1148: {region: 0x165, script: 0x5a, flags: 0x0},
- 1149: {region: 0x32, script: 0x5a, flags: 0x0},
- 1150: {region: 0xd4, script: 0x5a, flags: 0x0},
- 1151: {region: 0x165, script: 0x5a, flags: 0x0},
- 1152: {region: 0x161, script: 0x5a, flags: 0x0},
- 1153: {region: 0x165, script: 0x5a, flags: 0x0},
- 1154: {region: 0x129, script: 0x5a, flags: 0x0},
- 1155: {region: 0x165, script: 0x5a, flags: 0x0},
- 1156: {region: 0xce, script: 0x5a, flags: 0x0},
- 1157: {region: 0x165, script: 0x5a, flags: 0x0},
- 1158: {region: 0xe6, script: 0x5a, flags: 0x0},
- 1159: {region: 0x165, script: 0x5a, flags: 0x0},
- 1160: {region: 0x165, script: 0x5a, flags: 0x0},
- 1161: {region: 0x165, script: 0x5a, flags: 0x0},
- 1162: {region: 0x12b, script: 0x5a, flags: 0x0},
- 1163: {region: 0x12b, script: 0x5a, flags: 0x0},
- 1164: {region: 0x12e, script: 0x5a, flags: 0x0},
- 1165: {region: 0x165, script: 0x5, flags: 0x0},
- 1166: {region: 0x161, script: 0x5a, flags: 0x0},
- 1167: {region: 0x87, script: 0x34, flags: 0x0},
- 1168: {region: 0xdb, script: 0x22, flags: 0x0},
- 1169: {region: 0xe7, script: 0x5a, flags: 0x0},
- 1170: {region: 0x43, script: 0xec, flags: 0x0},
- 1171: {region: 0x165, script: 0x5a, flags: 0x0},
- 1172: {region: 0x106, script: 0x20, flags: 0x0},
- 1173: {region: 0x165, script: 0x5a, flags: 0x0},
- 1174: {region: 0x165, script: 0x5a, flags: 0x0},
- 1175: {region: 0x131, script: 0x5a, flags: 0x0},
- 1176: {region: 0x165, script: 0x5a, flags: 0x0},
- 1177: {region: 0x123, script: 0xeb, flags: 0x0},
- 1178: {region: 0x32, script: 0x5a, flags: 0x0},
- 1179: {region: 0x165, script: 0x5a, flags: 0x0},
- 1180: {region: 0x165, script: 0x5a, flags: 0x0},
- 1181: {region: 0xce, script: 0x5a, flags: 0x0},
- 1182: {region: 0x165, script: 0x5a, flags: 0x0},
- 1183: {region: 0x165, script: 0x5a, flags: 0x0},
- 1184: {region: 0x12d, script: 0x5a, flags: 0x0},
- 1185: {region: 0x165, script: 0x5a, flags: 0x0},
- 1187: {region: 0x165, script: 0x5a, flags: 0x0},
- 1188: {region: 0xd4, script: 0x5a, flags: 0x0},
- 1189: {region: 0x53, script: 0xe4, flags: 0x0},
- 1190: {region: 0xe5, script: 0x5a, flags: 0x0},
- 1191: {region: 0x165, script: 0x5a, flags: 0x0},
- 1192: {region: 0x106, script: 0x20, flags: 0x0},
- 1193: {region: 0xba, script: 0x5a, flags: 0x0},
- 1194: {region: 0x165, script: 0x5a, flags: 0x0},
- 1195: {region: 0x106, script: 0x20, flags: 0x0},
- 1196: {region: 0x3f, script: 0x4, flags: 0x1},
- 1197: {region: 0x11c, script: 0xf0, flags: 0x0},
- 1198: {region: 0x130, script: 0x20, flags: 0x0},
- 1199: {region: 0x75, script: 0x5a, flags: 0x0},
- 1200: {region: 0x2a, script: 0x5a, flags: 0x0},
- 1202: {region: 0x43, script: 0x3, flags: 0x1},
- 1203: {region: 0x99, script: 0xe, flags: 0x0},
- 1204: {region: 0xe8, script: 0x5, flags: 0x0},
- 1205: {region: 0x165, script: 0x5a, flags: 0x0},
- 1206: {region: 0x165, script: 0x5a, flags: 0x0},
- 1207: {region: 0x165, script: 0x5a, flags: 0x0},
- 1208: {region: 0x165, script: 0x5a, flags: 0x0},
- 1209: {region: 0x165, script: 0x5a, flags: 0x0},
- 1210: {region: 0x165, script: 0x5a, flags: 0x0},
- 1211: {region: 0x165, script: 0x5a, flags: 0x0},
- 1212: {region: 0x46, script: 0x4, flags: 0x1},
- 1213: {region: 0x165, script: 0x5a, flags: 0x0},
- 1214: {region: 0xb4, script: 0xf1, flags: 0x0},
- 1215: {region: 0x165, script: 0x5a, flags: 0x0},
- 1216: {region: 0x161, script: 0x5a, flags: 0x0},
- 1217: {region: 0x9e, script: 0x5a, flags: 0x0},
- 1218: {region: 0x106, script: 0x5a, flags: 0x0},
- 1219: {region: 0x13e, script: 0x5a, flags: 0x0},
- 1220: {region: 0x11b, script: 0x5a, flags: 0x0},
- 1221: {region: 0x165, script: 0x5a, flags: 0x0},
- 1222: {region: 0x36, script: 0x5a, flags: 0x0},
- 1223: {region: 0x60, script: 0x5a, flags: 0x0},
- 1224: {region: 0xd1, script: 0x5a, flags: 0x0},
- 1225: {region: 0x1, script: 0x5a, flags: 0x0},
- 1226: {region: 0x106, script: 0x5a, flags: 0x0},
- 1227: {region: 0x6a, script: 0x5a, flags: 0x0},
- 1228: {region: 0x12f, script: 0x5a, flags: 0x0},
- 1229: {region: 0x165, script: 0x5a, flags: 0x0},
- 1230: {region: 0x36, script: 0x5a, flags: 0x0},
- 1231: {region: 0x4e, script: 0x5a, flags: 0x0},
- 1232: {region: 0x165, script: 0x5a, flags: 0x0},
- 1233: {region: 0x6f, script: 0x2c, flags: 0x0},
- 1234: {region: 0x165, script: 0x5a, flags: 0x0},
- 1235: {region: 0xe7, script: 0x5a, flags: 0x0},
- 1236: {region: 0x2f, script: 0x5a, flags: 0x0},
- 1237: {region: 0x99, script: 0xe6, flags: 0x0},
- 1238: {region: 0x99, script: 0x22, flags: 0x0},
- 1239: {region: 0x165, script: 0x5a, flags: 0x0},
- 1240: {region: 0x165, script: 0x5a, flags: 0x0},
- 1241: {region: 0x165, script: 0x5a, flags: 0x0},
- 1242: {region: 0x165, script: 0x5a, flags: 0x0},
- 1243: {region: 0x165, script: 0x5a, flags: 0x0},
- 1244: {region: 0x165, script: 0x5a, flags: 0x0},
- 1245: {region: 0x165, script: 0x5a, flags: 0x0},
- 1246: {region: 0x165, script: 0x5a, flags: 0x0},
- 1247: {region: 0x165, script: 0x5a, flags: 0x0},
- 1248: {region: 0x140, script: 0x5a, flags: 0x0},
- 1249: {region: 0x165, script: 0x5a, flags: 0x0},
- 1250: {region: 0x165, script: 0x5a, flags: 0x0},
- 1251: {region: 0xa8, script: 0x5, flags: 0x0},
- 1252: {region: 0x165, script: 0x5a, flags: 0x0},
- 1253: {region: 0x114, script: 0x5a, flags: 0x0},
- 1254: {region: 0x165, script: 0x5a, flags: 0x0},
- 1255: {region: 0x165, script: 0x5a, flags: 0x0},
- 1256: {region: 0x165, script: 0x5a, flags: 0x0},
- 1257: {region: 0x165, script: 0x5a, flags: 0x0},
- 1258: {region: 0x99, script: 0x22, flags: 0x0},
- 1259: {region: 0x53, script: 0x3b, flags: 0x0},
- 1260: {region: 0x165, script: 0x5a, flags: 0x0},
- 1261: {region: 0x165, script: 0x5a, flags: 0x0},
- 1262: {region: 0x41, script: 0x5a, flags: 0x0},
- 1263: {region: 0x165, script: 0x5a, flags: 0x0},
- 1264: {region: 0x12b, script: 0x18, flags: 0x0},
- 1265: {region: 0x165, script: 0x5a, flags: 0x0},
- 1266: {region: 0x161, script: 0x5a, flags: 0x0},
- 1267: {region: 0x165, script: 0x5a, flags: 0x0},
- 1268: {region: 0x12b, script: 0x62, flags: 0x0},
- 1269: {region: 0x12b, script: 0x63, flags: 0x0},
- 1270: {region: 0x7d, script: 0x2e, flags: 0x0},
- 1271: {region: 0x53, script: 0x67, flags: 0x0},
- 1272: {region: 0x10b, script: 0x6c, flags: 0x0},
- 1273: {region: 0x108, script: 0x77, flags: 0x0},
- 1274: {region: 0x99, script: 0x22, flags: 0x0},
- 1275: {region: 0x131, script: 0x5a, flags: 0x0},
- 1276: {region: 0x165, script: 0x5a, flags: 0x0},
- 1277: {region: 0x9c, script: 0x91, flags: 0x0},
- 1278: {region: 0x165, script: 0x5a, flags: 0x0},
- 1279: {region: 0x15e, script: 0xcc, flags: 0x0},
- 1280: {region: 0x165, script: 0x5a, flags: 0x0},
- 1281: {region: 0x165, script: 0x5a, flags: 0x0},
- 1282: {region: 0xdb, script: 0x22, flags: 0x0},
- 1283: {region: 0x165, script: 0x5a, flags: 0x0},
- 1284: {region: 0x165, script: 0x5a, flags: 0x0},
- 1285: {region: 0xd1, script: 0x5a, flags: 0x0},
- 1286: {region: 0x75, script: 0x5a, flags: 0x0},
- 1287: {region: 0x165, script: 0x5a, flags: 0x0},
- 1288: {region: 0x165, script: 0x5a, flags: 0x0},
- 1289: {region: 0x52, script: 0x5a, flags: 0x0},
- 1290: {region: 0x165, script: 0x5a, flags: 0x0},
- 1291: {region: 0x165, script: 0x5a, flags: 0x0},
- 1292: {region: 0x165, script: 0x5a, flags: 0x0},
- 1293: {region: 0x52, script: 0x5a, flags: 0x0},
- 1294: {region: 0x165, script: 0x5a, flags: 0x0},
- 1295: {region: 0x165, script: 0x5a, flags: 0x0},
- 1296: {region: 0x165, script: 0x5a, flags: 0x0},
- 1297: {region: 0x165, script: 0x5a, flags: 0x0},
- 1298: {region: 0x1, script: 0x3e, flags: 0x0},
- 1299: {region: 0x165, script: 0x5a, flags: 0x0},
- 1300: {region: 0x165, script: 0x5a, flags: 0x0},
- 1301: {region: 0x165, script: 0x5a, flags: 0x0},
- 1302: {region: 0x165, script: 0x5a, flags: 0x0},
- 1303: {region: 0x165, script: 0x5a, flags: 0x0},
- 1304: {region: 0xd6, script: 0x5a, flags: 0x0},
- 1305: {region: 0x165, script: 0x5a, flags: 0x0},
- 1306: {region: 0x165, script: 0x5a, flags: 0x0},
- 1307: {region: 0x165, script: 0x5a, flags: 0x0},
- 1308: {region: 0x41, script: 0x5a, flags: 0x0},
- 1309: {region: 0x165, script: 0x5a, flags: 0x0},
- 1310: {region: 0xcf, script: 0x5a, flags: 0x0},
- 1311: {region: 0x4a, script: 0x3, flags: 0x1},
- 1312: {region: 0x165, script: 0x5a, flags: 0x0},
- 1313: {region: 0x165, script: 0x5a, flags: 0x0},
- 1314: {region: 0x165, script: 0x5a, flags: 0x0},
- 1315: {region: 0x53, script: 0x5a, flags: 0x0},
- 1316: {region: 0x10b, script: 0x5a, flags: 0x0},
- 1318: {region: 0xa8, script: 0x5, flags: 0x0},
- 1319: {region: 0xd9, script: 0x5a, flags: 0x0},
- 1320: {region: 0xba, script: 0xe8, flags: 0x0},
- 1321: {region: 0x4d, script: 0x14, flags: 0x1},
- 1322: {region: 0x53, script: 0x7d, flags: 0x0},
- 1323: {region: 0x165, script: 0x5a, flags: 0x0},
- 1324: {region: 0x122, script: 0x5a, flags: 0x0},
- 1325: {region: 0xd0, script: 0x5a, flags: 0x0},
- 1326: {region: 0x165, script: 0x5a, flags: 0x0},
- 1327: {region: 0x161, script: 0x5a, flags: 0x0},
- 1329: {region: 0x12b, script: 0x5a, flags: 0x0},
-}
-
-// likelyLangList holds lists info associated with likelyLang.
-// Size: 582 bytes, 97 elements
-var likelyLangList = [97]likelyScriptRegion{
- 0: {region: 0x9c, script: 0x7, flags: 0x0},
- 1: {region: 0xa1, script: 0x78, flags: 0x2},
- 2: {region: 0x11c, script: 0x85, flags: 0x2},
- 3: {region: 0x32, script: 0x5a, flags: 0x0},
- 4: {region: 0x9b, script: 0x5, flags: 0x4},
- 5: {region: 0x9c, script: 0x5, flags: 0x4},
- 6: {region: 0x106, script: 0x20, flags: 0x4},
- 7: {region: 0x9c, script: 0x5, flags: 0x2},
- 8: {region: 0x106, script: 0x20, flags: 0x0},
- 9: {region: 0x38, script: 0x2f, flags: 0x2},
- 10: {region: 0x135, script: 0x5a, flags: 0x0},
- 11: {region: 0x7b, script: 0xcf, flags: 0x2},
- 12: {region: 0x114, script: 0x5a, flags: 0x0},
- 13: {region: 0x84, script: 0x1, flags: 0x2},
- 14: {region: 0x5d, script: 0x1f, flags: 0x0},
- 15: {region: 0x87, script: 0x5f, flags: 0x2},
- 16: {region: 0xd6, script: 0x5a, flags: 0x0},
- 17: {region: 0x52, script: 0x5, flags: 0x4},
- 18: {region: 0x10b, script: 0x5, flags: 0x4},
- 19: {region: 0xae, script: 0x20, flags: 0x0},
- 20: {region: 0x24, script: 0x5, flags: 0x4},
- 21: {region: 0x53, script: 0x5, flags: 0x4},
- 22: {region: 0x9c, script: 0x5, flags: 0x4},
- 23: {region: 0xc5, script: 0x5, flags: 0x4},
- 24: {region: 0x53, script: 0x5, flags: 0x2},
- 25: {region: 0x12b, script: 0x5a, flags: 0x0},
- 26: {region: 0xb0, script: 0x5, flags: 0x4},
- 27: {region: 0x9b, script: 0x5, flags: 0x2},
- 28: {region: 0xa5, script: 0x20, flags: 0x0},
- 29: {region: 0x53, script: 0x5, flags: 0x4},
- 30: {region: 0x12b, script: 0x5a, flags: 0x4},
- 31: {region: 0x53, script: 0x5, flags: 0x2},
- 32: {region: 0x12b, script: 0x5a, flags: 0x2},
- 33: {region: 0xdb, script: 0x22, flags: 0x0},
- 34: {region: 0x99, script: 0x5d, flags: 0x2},
- 35: {region: 0x83, script: 0x5a, flags: 0x0},
- 36: {region: 0x84, script: 0x7c, flags: 0x4},
- 37: {region: 0x84, script: 0x7c, flags: 0x2},
- 38: {region: 0xc5, script: 0x20, flags: 0x0},
- 39: {region: 0x53, script: 0x70, flags: 0x4},
- 40: {region: 0x53, script: 0x70, flags: 0x2},
- 41: {region: 0xd0, script: 0x5a, flags: 0x0},
- 42: {region: 0x4a, script: 0x5, flags: 0x4},
- 43: {region: 0x95, script: 0x5, flags: 0x4},
- 44: {region: 0x99, script: 0x36, flags: 0x0},
- 45: {region: 0xe8, script: 0x5, flags: 0x4},
- 46: {region: 0xe8, script: 0x5, flags: 0x2},
- 47: {region: 0x9c, script: 0x8b, flags: 0x0},
- 48: {region: 0x53, script: 0x8c, flags: 0x2},
- 49: {region: 0xba, script: 0xe8, flags: 0x0},
- 50: {region: 0xd9, script: 0x5a, flags: 0x4},
- 51: {region: 0xe8, script: 0x5, flags: 0x0},
- 52: {region: 0x99, script: 0x22, flags: 0x2},
- 53: {region: 0x99, script: 0x4f, flags: 0x2},
- 54: {region: 0x99, script: 0xd3, flags: 0x2},
- 55: {region: 0x105, script: 0x20, flags: 0x0},
- 56: {region: 0xbd, script: 0x5a, flags: 0x4},
- 57: {region: 0x104, script: 0x5a, flags: 0x4},
- 58: {region: 0x106, script: 0x5a, flags: 0x4},
- 59: {region: 0x12b, script: 0x5a, flags: 0x4},
- 60: {region: 0x124, script: 0x20, flags: 0x0},
- 61: {region: 0xe8, script: 0x5, flags: 0x4},
- 62: {region: 0xe8, script: 0x5, flags: 0x2},
- 63: {region: 0x53, script: 0x5, flags: 0x0},
- 64: {region: 0xae, script: 0x20, flags: 0x4},
- 65: {region: 0xc5, script: 0x20, flags: 0x4},
- 66: {region: 0xae, script: 0x20, flags: 0x2},
- 67: {region: 0x99, script: 0xe, flags: 0x0},
- 68: {region: 0xdb, script: 0x22, flags: 0x4},
- 69: {region: 0xdb, script: 0x22, flags: 0x2},
- 70: {region: 0x137, script: 0x5a, flags: 0x0},
- 71: {region: 0x24, script: 0x5, flags: 0x4},
- 72: {region: 0x53, script: 0x20, flags: 0x4},
- 73: {region: 0x24, script: 0x5, flags: 0x2},
- 74: {region: 0x8d, script: 0x3c, flags: 0x0},
- 75: {region: 0x53, script: 0x3b, flags: 0x4},
- 76: {region: 0x53, script: 0x3b, flags: 0x2},
- 77: {region: 0x53, script: 0x3b, flags: 0x0},
- 78: {region: 0x2f, script: 0x3c, flags: 0x4},
- 79: {region: 0x3e, script: 0x3c, flags: 0x4},
- 80: {region: 0x7b, script: 0x3c, flags: 0x4},
- 81: {region: 0x7e, script: 0x3c, flags: 0x4},
- 82: {region: 0x8d, script: 0x3c, flags: 0x4},
- 83: {region: 0x95, script: 0x3c, flags: 0x4},
- 84: {region: 0xc6, script: 0x3c, flags: 0x4},
- 85: {region: 0xd0, script: 0x3c, flags: 0x4},
- 86: {region: 0xe2, script: 0x3c, flags: 0x4},
- 87: {region: 0xe5, script: 0x3c, flags: 0x4},
- 88: {region: 0xe7, script: 0x3c, flags: 0x4},
- 89: {region: 0x116, script: 0x3c, flags: 0x4},
- 90: {region: 0x123, script: 0x3c, flags: 0x4},
- 91: {region: 0x12e, script: 0x3c, flags: 0x4},
- 92: {region: 0x135, script: 0x3c, flags: 0x4},
- 93: {region: 0x13e, script: 0x3c, flags: 0x4},
- 94: {region: 0x12e, script: 0x11, flags: 0x2},
- 95: {region: 0x12e, script: 0x37, flags: 0x2},
- 96: {region: 0x12e, script: 0x3c, flags: 0x2},
-}
-
-type likelyLangScript struct {
- lang uint16
- script uint16
- flags uint8
-}
-
-// likelyRegion is a lookup table, indexed by regionID, for the most likely
-// languages and scripts given incomplete information. If more entries exist
-// for a given regionID, lang and script are the index and size respectively
-// of the list in likelyRegionList.
-// TODO: exclude containers and user-definable regions from the list.
-// Size: 2148 bytes, 358 elements
-var likelyRegion = [358]likelyLangScript{
- 34: {lang: 0xd7, script: 0x5a, flags: 0x0},
- 35: {lang: 0x3a, script: 0x5, flags: 0x0},
- 36: {lang: 0x0, script: 0x2, flags: 0x1},
- 39: {lang: 0x2, script: 0x2, flags: 0x1},
- 40: {lang: 0x4, script: 0x2, flags: 0x1},
- 42: {lang: 0x3c0, script: 0x5a, flags: 0x0},
- 43: {lang: 0x0, script: 0x5a, flags: 0x0},
- 44: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 45: {lang: 0x41b, script: 0x5a, flags: 0x0},
- 46: {lang: 0x10d, script: 0x5a, flags: 0x0},
- 48: {lang: 0x367, script: 0x5a, flags: 0x0},
- 49: {lang: 0x444, script: 0x5a, flags: 0x0},
- 50: {lang: 0x58, script: 0x5a, flags: 0x0},
- 51: {lang: 0x6, script: 0x2, flags: 0x1},
- 53: {lang: 0xa5, script: 0xe, flags: 0x0},
- 54: {lang: 0x367, script: 0x5a, flags: 0x0},
- 55: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 56: {lang: 0x7e, script: 0x20, flags: 0x0},
- 57: {lang: 0x3a, script: 0x5, flags: 0x0},
- 58: {lang: 0x3d9, script: 0x5a, flags: 0x0},
- 59: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 60: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 62: {lang: 0x31f, script: 0x5a, flags: 0x0},
- 63: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 64: {lang: 0x3a1, script: 0x5a, flags: 0x0},
- 65: {lang: 0x3c0, script: 0x5a, flags: 0x0},
- 67: {lang: 0x8, script: 0x2, flags: 0x1},
- 69: {lang: 0x0, script: 0x5a, flags: 0x0},
- 71: {lang: 0x71, script: 0x20, flags: 0x0},
- 73: {lang: 0x512, script: 0x3e, flags: 0x2},
- 74: {lang: 0x31f, script: 0x5, flags: 0x2},
- 75: {lang: 0x445, script: 0x5a, flags: 0x0},
- 76: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 77: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 78: {lang: 0x10d, script: 0x5a, flags: 0x0},
- 79: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 81: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 82: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 83: {lang: 0xa, script: 0x4, flags: 0x1},
- 84: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 85: {lang: 0x0, script: 0x5a, flags: 0x0},
- 86: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 89: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 90: {lang: 0x3c0, script: 0x5a, flags: 0x0},
- 91: {lang: 0x3a1, script: 0x5a, flags: 0x0},
- 93: {lang: 0xe, script: 0x2, flags: 0x1},
- 94: {lang: 0xfa, script: 0x5a, flags: 0x0},
- 96: {lang: 0x10d, script: 0x5a, flags: 0x0},
- 98: {lang: 0x1, script: 0x5a, flags: 0x0},
- 99: {lang: 0x101, script: 0x5a, flags: 0x0},
- 101: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 103: {lang: 0x10, script: 0x2, flags: 0x1},
- 104: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 105: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 106: {lang: 0x140, script: 0x5a, flags: 0x0},
- 107: {lang: 0x3a, script: 0x5, flags: 0x0},
- 108: {lang: 0x3a, script: 0x5, flags: 0x0},
- 109: {lang: 0x46f, script: 0x2c, flags: 0x0},
- 110: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 111: {lang: 0x12, script: 0x2, flags: 0x1},
- 113: {lang: 0x10d, script: 0x5a, flags: 0x0},
- 114: {lang: 0x151, script: 0x5a, flags: 0x0},
- 115: {lang: 0x1c0, script: 0x22, flags: 0x2},
- 118: {lang: 0x158, script: 0x5a, flags: 0x0},
- 120: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 122: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 123: {lang: 0x14, script: 0x2, flags: 0x1},
- 125: {lang: 0x16, script: 0x3, flags: 0x1},
- 126: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 128: {lang: 0x21, script: 0x5a, flags: 0x0},
- 130: {lang: 0x245, script: 0x5a, flags: 0x0},
- 132: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 133: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 134: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 135: {lang: 0x19, script: 0x2, flags: 0x1},
- 136: {lang: 0x0, script: 0x5a, flags: 0x0},
- 137: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 139: {lang: 0x3c0, script: 0x5a, flags: 0x0},
- 141: {lang: 0x529, script: 0x3c, flags: 0x0},
- 142: {lang: 0x0, script: 0x5a, flags: 0x0},
- 143: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 144: {lang: 0x1d1, script: 0x5a, flags: 0x0},
- 145: {lang: 0x1d4, script: 0x5a, flags: 0x0},
- 146: {lang: 0x1d5, script: 0x5a, flags: 0x0},
- 148: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 149: {lang: 0x1b, script: 0x2, flags: 0x1},
- 151: {lang: 0x1bc, script: 0x3e, flags: 0x0},
- 153: {lang: 0x1d, script: 0x3, flags: 0x1},
- 155: {lang: 0x3a, script: 0x5, flags: 0x0},
- 156: {lang: 0x20, script: 0x2, flags: 0x1},
- 157: {lang: 0x1f8, script: 0x5a, flags: 0x0},
- 158: {lang: 0x1f9, script: 0x5a, flags: 0x0},
- 161: {lang: 0x3a, script: 0x5, flags: 0x0},
- 162: {lang: 0x200, script: 0x49, flags: 0x0},
- 164: {lang: 0x445, script: 0x5a, flags: 0x0},
- 165: {lang: 0x28a, script: 0x20, flags: 0x0},
- 166: {lang: 0x22, script: 0x3, flags: 0x1},
- 168: {lang: 0x25, script: 0x2, flags: 0x1},
- 170: {lang: 0x254, script: 0x53, flags: 0x0},
- 171: {lang: 0x254, script: 0x53, flags: 0x0},
- 172: {lang: 0x3a, script: 0x5, flags: 0x0},
- 174: {lang: 0x3e2, script: 0x20, flags: 0x0},
- 175: {lang: 0x27, script: 0x2, flags: 0x1},
- 176: {lang: 0x3a, script: 0x5, flags: 0x0},
- 178: {lang: 0x10d, script: 0x5a, flags: 0x0},
- 179: {lang: 0x40c, script: 0xd4, flags: 0x0},
- 181: {lang: 0x43b, script: 0x5a, flags: 0x0},
- 182: {lang: 0x2c0, script: 0x5a, flags: 0x0},
- 183: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 184: {lang: 0x2c7, script: 0x5a, flags: 0x0},
- 185: {lang: 0x3a, script: 0x5, flags: 0x0},
- 186: {lang: 0x29, script: 0x2, flags: 0x1},
- 187: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 188: {lang: 0x2b, script: 0x2, flags: 0x1},
- 189: {lang: 0x432, script: 0x5a, flags: 0x0},
- 190: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 191: {lang: 0x2f1, script: 0x5a, flags: 0x0},
- 194: {lang: 0x2d, script: 0x2, flags: 0x1},
- 195: {lang: 0xa0, script: 0x5a, flags: 0x0},
- 196: {lang: 0x2f, script: 0x2, flags: 0x1},
- 197: {lang: 0x31, script: 0x2, flags: 0x1},
- 198: {lang: 0x33, script: 0x2, flags: 0x1},
- 200: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 201: {lang: 0x35, script: 0x2, flags: 0x1},
- 203: {lang: 0x320, script: 0x5a, flags: 0x0},
- 204: {lang: 0x37, script: 0x3, flags: 0x1},
- 205: {lang: 0x128, script: 0xea, flags: 0x0},
- 207: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 208: {lang: 0x31f, script: 0x5a, flags: 0x0},
- 209: {lang: 0x3c0, script: 0x5a, flags: 0x0},
- 210: {lang: 0x16, script: 0x5a, flags: 0x0},
- 211: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 212: {lang: 0x1b4, script: 0x5a, flags: 0x0},
- 214: {lang: 0x1b4, script: 0x5, flags: 0x2},
- 216: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 217: {lang: 0x367, script: 0x5a, flags: 0x0},
- 218: {lang: 0x347, script: 0x5a, flags: 0x0},
- 219: {lang: 0x351, script: 0x22, flags: 0x0},
- 225: {lang: 0x3a, script: 0x5, flags: 0x0},
- 226: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 228: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 229: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 230: {lang: 0x486, script: 0x5a, flags: 0x0},
- 231: {lang: 0x153, script: 0x5a, flags: 0x0},
- 232: {lang: 0x3a, script: 0x3, flags: 0x1},
- 233: {lang: 0x3b3, script: 0x5a, flags: 0x0},
- 234: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 236: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 237: {lang: 0x3a, script: 0x5, flags: 0x0},
- 238: {lang: 0x3c0, script: 0x5a, flags: 0x0},
- 240: {lang: 0x3a2, script: 0x5a, flags: 0x0},
- 241: {lang: 0x194, script: 0x5a, flags: 0x0},
- 243: {lang: 0x3a, script: 0x5, flags: 0x0},
- 258: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 260: {lang: 0x3d, script: 0x2, flags: 0x1},
- 261: {lang: 0x432, script: 0x20, flags: 0x0},
- 262: {lang: 0x3f, script: 0x2, flags: 0x1},
- 263: {lang: 0x3e5, script: 0x5a, flags: 0x0},
- 264: {lang: 0x3a, script: 0x5, flags: 0x0},
- 266: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 267: {lang: 0x3a, script: 0x5, flags: 0x0},
- 268: {lang: 0x41, script: 0x2, flags: 0x1},
- 271: {lang: 0x416, script: 0x5a, flags: 0x0},
- 272: {lang: 0x347, script: 0x5a, flags: 0x0},
- 273: {lang: 0x43, script: 0x2, flags: 0x1},
- 275: {lang: 0x1f9, script: 0x5a, flags: 0x0},
- 276: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 277: {lang: 0x429, script: 0x5a, flags: 0x0},
- 278: {lang: 0x367, script: 0x5a, flags: 0x0},
- 280: {lang: 0x3c0, script: 0x5a, flags: 0x0},
- 282: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 284: {lang: 0x45, script: 0x2, flags: 0x1},
- 288: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 289: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 290: {lang: 0x47, script: 0x2, flags: 0x1},
- 291: {lang: 0x49, script: 0x3, flags: 0x1},
- 292: {lang: 0x4c, script: 0x2, flags: 0x1},
- 293: {lang: 0x477, script: 0x5a, flags: 0x0},
- 294: {lang: 0x3c0, script: 0x5a, flags: 0x0},
- 295: {lang: 0x476, script: 0x5a, flags: 0x0},
- 296: {lang: 0x4e, script: 0x2, flags: 0x1},
- 297: {lang: 0x482, script: 0x5a, flags: 0x0},
- 299: {lang: 0x50, script: 0x4, flags: 0x1},
- 301: {lang: 0x4a0, script: 0x5a, flags: 0x0},
- 302: {lang: 0x54, script: 0x2, flags: 0x1},
- 303: {lang: 0x445, script: 0x5a, flags: 0x0},
- 304: {lang: 0x56, script: 0x3, flags: 0x1},
- 305: {lang: 0x445, script: 0x5a, flags: 0x0},
- 309: {lang: 0x512, script: 0x3e, flags: 0x2},
- 310: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 311: {lang: 0x4bc, script: 0x5a, flags: 0x0},
- 312: {lang: 0x1f9, script: 0x5a, flags: 0x0},
- 315: {lang: 0x13e, script: 0x5a, flags: 0x0},
- 318: {lang: 0x4c3, script: 0x5a, flags: 0x0},
- 319: {lang: 0x8a, script: 0x5a, flags: 0x0},
- 320: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 322: {lang: 0x41b, script: 0x5a, flags: 0x0},
- 333: {lang: 0x59, script: 0x2, flags: 0x1},
- 350: {lang: 0x3a, script: 0x5, flags: 0x0},
- 351: {lang: 0x5b, script: 0x2, flags: 0x1},
- 356: {lang: 0x423, script: 0x5a, flags: 0x0},
-}
-
-// likelyRegionList holds lists info associated with likelyRegion.
-// Size: 558 bytes, 93 elements
-var likelyRegionList = [93]likelyLangScript{
- 0: {lang: 0x148, script: 0x5, flags: 0x0},
- 1: {lang: 0x476, script: 0x5a, flags: 0x0},
- 2: {lang: 0x431, script: 0x5a, flags: 0x0},
- 3: {lang: 0x2ff, script: 0x20, flags: 0x0},
- 4: {lang: 0x1d7, script: 0x8, flags: 0x0},
- 5: {lang: 0x274, script: 0x5a, flags: 0x0},
- 6: {lang: 0xb7, script: 0x5a, flags: 0x0},
- 7: {lang: 0x432, script: 0x20, flags: 0x0},
- 8: {lang: 0x12d, script: 0xec, flags: 0x0},
- 9: {lang: 0x351, script: 0x22, flags: 0x0},
- 10: {lang: 0x529, script: 0x3b, flags: 0x0},
- 11: {lang: 0x4ac, script: 0x5, flags: 0x0},
- 12: {lang: 0x523, script: 0x5a, flags: 0x0},
- 13: {lang: 0x29a, script: 0xeb, flags: 0x0},
- 14: {lang: 0x136, script: 0x34, flags: 0x0},
- 15: {lang: 0x48a, script: 0x5a, flags: 0x0},
- 16: {lang: 0x3a, script: 0x5, flags: 0x0},
- 17: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 18: {lang: 0x27, script: 0x2c, flags: 0x0},
- 19: {lang: 0x139, script: 0x5a, flags: 0x0},
- 20: {lang: 0x26a, script: 0x5, flags: 0x2},
- 21: {lang: 0x512, script: 0x3e, flags: 0x2},
- 22: {lang: 0x210, script: 0x2e, flags: 0x0},
- 23: {lang: 0x5, script: 0x20, flags: 0x0},
- 24: {lang: 0x274, script: 0x5a, flags: 0x0},
- 25: {lang: 0x136, script: 0x34, flags: 0x0},
- 26: {lang: 0x2ff, script: 0x20, flags: 0x0},
- 27: {lang: 0x1e1, script: 0x5a, flags: 0x0},
- 28: {lang: 0x31f, script: 0x5, flags: 0x0},
- 29: {lang: 0x1be, script: 0x22, flags: 0x0},
- 30: {lang: 0x4b4, script: 0x5, flags: 0x0},
- 31: {lang: 0x236, script: 0x75, flags: 0x0},
- 32: {lang: 0x148, script: 0x5, flags: 0x0},
- 33: {lang: 0x476, script: 0x5a, flags: 0x0},
- 34: {lang: 0x24a, script: 0x4e, flags: 0x0},
- 35: {lang: 0xe6, script: 0x5, flags: 0x0},
- 36: {lang: 0x226, script: 0xeb, flags: 0x0},
- 37: {lang: 0x3a, script: 0x5, flags: 0x0},
- 38: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 39: {lang: 0x2b8, script: 0x57, flags: 0x0},
- 40: {lang: 0x226, script: 0xeb, flags: 0x0},
- 41: {lang: 0x3a, script: 0x5, flags: 0x0},
- 42: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 43: {lang: 0x3dc, script: 0x5a, flags: 0x0},
- 44: {lang: 0x4ae, script: 0x20, flags: 0x0},
- 45: {lang: 0x2ff, script: 0x20, flags: 0x0},
- 46: {lang: 0x431, script: 0x5a, flags: 0x0},
- 47: {lang: 0x331, script: 0x75, flags: 0x0},
- 48: {lang: 0x213, script: 0x5a, flags: 0x0},
- 49: {lang: 0x30b, script: 0x20, flags: 0x0},
- 50: {lang: 0x242, script: 0x5, flags: 0x0},
- 51: {lang: 0x529, script: 0x3c, flags: 0x0},
- 52: {lang: 0x3c0, script: 0x5a, flags: 0x0},
- 53: {lang: 0x3a, script: 0x5, flags: 0x0},
- 54: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 55: {lang: 0x2ed, script: 0x5a, flags: 0x0},
- 56: {lang: 0x4b4, script: 0x5, flags: 0x0},
- 57: {lang: 0x88, script: 0x22, flags: 0x0},
- 58: {lang: 0x4b4, script: 0x5, flags: 0x0},
- 59: {lang: 0x4b4, script: 0x5, flags: 0x0},
- 60: {lang: 0xbe, script: 0x22, flags: 0x0},
- 61: {lang: 0x3dc, script: 0x5a, flags: 0x0},
- 62: {lang: 0x7e, script: 0x20, flags: 0x0},
- 63: {lang: 0x3e2, script: 0x20, flags: 0x0},
- 64: {lang: 0x267, script: 0x5a, flags: 0x0},
- 65: {lang: 0x444, script: 0x5a, flags: 0x0},
- 66: {lang: 0x512, script: 0x3e, flags: 0x0},
- 67: {lang: 0x412, script: 0x5a, flags: 0x0},
- 68: {lang: 0x4ae, script: 0x20, flags: 0x0},
- 69: {lang: 0x3a, script: 0x5, flags: 0x0},
- 70: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 71: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 72: {lang: 0x35, script: 0x5, flags: 0x0},
- 73: {lang: 0x46b, script: 0xeb, flags: 0x0},
- 74: {lang: 0x2ec, script: 0x5, flags: 0x0},
- 75: {lang: 0x30f, script: 0x75, flags: 0x0},
- 76: {lang: 0x467, script: 0x20, flags: 0x0},
- 77: {lang: 0x148, script: 0x5, flags: 0x0},
- 78: {lang: 0x3a, script: 0x5, flags: 0x0},
- 79: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 80: {lang: 0x48a, script: 0x5a, flags: 0x0},
- 81: {lang: 0x58, script: 0x5, flags: 0x0},
- 82: {lang: 0x219, script: 0x20, flags: 0x0},
- 83: {lang: 0x81, script: 0x34, flags: 0x0},
- 84: {lang: 0x529, script: 0x3c, flags: 0x0},
- 85: {lang: 0x48c, script: 0x5a, flags: 0x0},
- 86: {lang: 0x4ae, script: 0x20, flags: 0x0},
- 87: {lang: 0x512, script: 0x3e, flags: 0x0},
- 88: {lang: 0x3b3, script: 0x5a, flags: 0x0},
- 89: {lang: 0x431, script: 0x5a, flags: 0x0},
- 90: {lang: 0x432, script: 0x20, flags: 0x0},
- 91: {lang: 0x15e, script: 0x5a, flags: 0x0},
- 92: {lang: 0x446, script: 0x5, flags: 0x0},
-}
-
-type likelyTag struct {
- lang uint16
- region uint16
- script uint16
-}
-
-// Size: 198 bytes, 33 elements
-var likelyRegionGroup = [33]likelyTag{
- 1: {lang: 0x139, region: 0xd6, script: 0x5a},
- 2: {lang: 0x139, region: 0x135, script: 0x5a},
- 3: {lang: 0x3c0, region: 0x41, script: 0x5a},
- 4: {lang: 0x139, region: 0x2f, script: 0x5a},
- 5: {lang: 0x139, region: 0xd6, script: 0x5a},
- 6: {lang: 0x13e, region: 0xcf, script: 0x5a},
- 7: {lang: 0x445, region: 0x12f, script: 0x5a},
- 8: {lang: 0x3a, region: 0x6b, script: 0x5},
- 9: {lang: 0x445, region: 0x4b, script: 0x5a},
- 10: {lang: 0x139, region: 0x161, script: 0x5a},
- 11: {lang: 0x139, region: 0x135, script: 0x5a},
- 12: {lang: 0x139, region: 0x135, script: 0x5a},
- 13: {lang: 0x13e, region: 0x59, script: 0x5a},
- 14: {lang: 0x529, region: 0x53, script: 0x3b},
- 15: {lang: 0x1be, region: 0x99, script: 0x22},
- 16: {lang: 0x1e1, region: 0x95, script: 0x5a},
- 17: {lang: 0x1f9, region: 0x9e, script: 0x5a},
- 18: {lang: 0x139, region: 0x2f, script: 0x5a},
- 19: {lang: 0x139, region: 0xe6, script: 0x5a},
- 20: {lang: 0x139, region: 0x8a, script: 0x5a},
- 21: {lang: 0x41b, region: 0x142, script: 0x5a},
- 22: {lang: 0x529, region: 0x53, script: 0x3b},
- 23: {lang: 0x4bc, region: 0x137, script: 0x5a},
- 24: {lang: 0x3a, region: 0x108, script: 0x5},
- 25: {lang: 0x3e2, region: 0x106, script: 0x20},
- 26: {lang: 0x3e2, region: 0x106, script: 0x20},
- 27: {lang: 0x139, region: 0x7b, script: 0x5a},
- 28: {lang: 0x10d, region: 0x60, script: 0x5a},
- 29: {lang: 0x139, region: 0xd6, script: 0x5a},
- 30: {lang: 0x13e, region: 0x1f, script: 0x5a},
- 31: {lang: 0x139, region: 0x9a, script: 0x5a},
- 32: {lang: 0x139, region: 0x7b, script: 0x5a},
-}
-
-// Size: 264 bytes, 33 elements
-var regionContainment = [33]uint64{
- // Entry 0 - 1F
- 0x00000001ffffffff, 0x00000000200007a2, 0x0000000000003044, 0x0000000000000008,
- 0x00000000803c0010, 0x0000000000000020, 0x0000000000000040, 0x0000000000000080,
- 0x0000000000000100, 0x0000000000000200, 0x0000000000000400, 0x000000004000384c,
- 0x0000000000001000, 0x0000000000002000, 0x0000000000004000, 0x0000000000008000,
- 0x0000000000010000, 0x0000000000020000, 0x0000000000040000, 0x0000000000080000,
- 0x0000000000100000, 0x0000000000200000, 0x0000000001c1c000, 0x0000000000800000,
- 0x0000000001000000, 0x000000001e020000, 0x0000000004000000, 0x0000000008000000,
- 0x0000000010000000, 0x00000000200006a0, 0x0000000040002048, 0x0000000080000000,
- // Entry 20 - 3F
- 0x0000000100000000,
-}
-
-// regionInclusion maps region identifiers to sets of regions in regionInclusionBits,
-// where each set holds all groupings that are directly connected in a region
-// containment graph.
-// Size: 358 bytes, 358 elements
-var regionInclusion = [358]uint8{
- // Entry 0 - 3F
- 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
- 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
- 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16,
- 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e,
- 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x26, 0x23,
- 0x24, 0x26, 0x27, 0x22, 0x28, 0x29, 0x2a, 0x2b,
- 0x26, 0x2c, 0x24, 0x23, 0x26, 0x25, 0x2a, 0x2d,
- 0x2e, 0x24, 0x2f, 0x2d, 0x26, 0x30, 0x31, 0x28,
- // Entry 40 - 7F
- 0x26, 0x28, 0x26, 0x25, 0x31, 0x22, 0x32, 0x33,
- 0x34, 0x30, 0x22, 0x27, 0x27, 0x27, 0x35, 0x2d,
- 0x29, 0x28, 0x27, 0x36, 0x28, 0x22, 0x34, 0x23,
- 0x21, 0x26, 0x2d, 0x26, 0x22, 0x37, 0x2e, 0x35,
- 0x2a, 0x22, 0x2f, 0x38, 0x26, 0x26, 0x21, 0x39,
- 0x39, 0x28, 0x38, 0x39, 0x39, 0x2f, 0x3a, 0x2f,
- 0x20, 0x21, 0x38, 0x3b, 0x28, 0x3c, 0x2c, 0x21,
- 0x2a, 0x35, 0x27, 0x38, 0x26, 0x24, 0x28, 0x2c,
- // Entry 80 - BF
- 0x2d, 0x23, 0x30, 0x2d, 0x2d, 0x26, 0x27, 0x3a,
- 0x22, 0x34, 0x3c, 0x2d, 0x28, 0x36, 0x22, 0x34,
- 0x3a, 0x26, 0x2e, 0x21, 0x39, 0x31, 0x38, 0x24,
- 0x2c, 0x25, 0x22, 0x24, 0x25, 0x2c, 0x3a, 0x2c,
- 0x26, 0x24, 0x36, 0x21, 0x2f, 0x3d, 0x31, 0x3c,
- 0x2f, 0x26, 0x36, 0x36, 0x24, 0x26, 0x3d, 0x31,
- 0x24, 0x26, 0x35, 0x25, 0x2d, 0x32, 0x38, 0x2a,
- 0x38, 0x39, 0x39, 0x35, 0x33, 0x23, 0x26, 0x2f,
- // Entry C0 - FF
- 0x3c, 0x21, 0x23, 0x2d, 0x31, 0x36, 0x36, 0x3c,
- 0x26, 0x2d, 0x26, 0x3a, 0x2f, 0x25, 0x2f, 0x34,
- 0x31, 0x2f, 0x32, 0x3b, 0x2d, 0x2b, 0x2d, 0x21,
- 0x34, 0x2a, 0x2c, 0x25, 0x21, 0x3c, 0x24, 0x29,
- 0x2b, 0x24, 0x34, 0x21, 0x28, 0x29, 0x3b, 0x31,
- 0x25, 0x2e, 0x30, 0x29, 0x26, 0x24, 0x3a, 0x21,
- 0x3c, 0x28, 0x21, 0x24, 0x21, 0x21, 0x1f, 0x21,
- 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
- // Entry 100 - 13F
- 0x21, 0x21, 0x2f, 0x21, 0x2e, 0x23, 0x33, 0x2f,
- 0x24, 0x3b, 0x2f, 0x39, 0x38, 0x31, 0x2d, 0x3a,
- 0x2c, 0x2e, 0x2d, 0x23, 0x2d, 0x2f, 0x28, 0x2f,
- 0x27, 0x33, 0x34, 0x26, 0x24, 0x32, 0x22, 0x26,
- 0x27, 0x22, 0x2d, 0x31, 0x3d, 0x29, 0x31, 0x3d,
- 0x39, 0x29, 0x31, 0x24, 0x26, 0x29, 0x36, 0x2f,
- 0x33, 0x2f, 0x21, 0x22, 0x21, 0x30, 0x28, 0x3d,
- 0x23, 0x26, 0x21, 0x28, 0x26, 0x26, 0x31, 0x3b,
- // Entry 140 - 17F
- 0x29, 0x21, 0x29, 0x21, 0x21, 0x21, 0x21, 0x21,
- 0x21, 0x21, 0x21, 0x21, 0x21, 0x23, 0x21, 0x21,
- 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
- 0x21, 0x21, 0x21, 0x21, 0x21, 0x24, 0x24, 0x2f,
- 0x23, 0x32, 0x2f, 0x27, 0x2f, 0x21,
-}
-
-// regionInclusionBits is an array of bit vectors where every vector represents
-// a set of region groupings. These sets are used to compute the distance
-// between two regions for the purpose of language matching.
-// Size: 584 bytes, 73 elements
-var regionInclusionBits = [73]uint64{
- // Entry 0 - 1F
- 0x0000000102400813, 0x00000000200007a3, 0x0000000000003844, 0x0000000040000808,
- 0x00000000803c0011, 0x0000000020000022, 0x0000000040000844, 0x0000000020000082,
- 0x0000000000000102, 0x0000000020000202, 0x0000000020000402, 0x000000004000384d,
- 0x0000000000001804, 0x0000000040002804, 0x0000000000404000, 0x0000000000408000,
- 0x0000000000410000, 0x0000000002020000, 0x0000000000040010, 0x0000000000080010,
- 0x0000000000100010, 0x0000000000200010, 0x0000000001c1c001, 0x0000000000c00000,
- 0x0000000001400000, 0x000000001e020001, 0x0000000006000000, 0x000000000a000000,
- 0x0000000012000000, 0x00000000200006a2, 0x0000000040002848, 0x0000000080000010,
- // Entry 20 - 3F
- 0x0000000100000001, 0x0000000000000001, 0x0000000080000000, 0x0000000000020000,
- 0x0000000001000000, 0x0000000000008000, 0x0000000000002000, 0x0000000000000200,
- 0x0000000000000008, 0x0000000000200000, 0x0000000110000000, 0x0000000000040000,
- 0x0000000008000000, 0x0000000000000020, 0x0000000104000000, 0x0000000000000080,
- 0x0000000000001000, 0x0000000000010000, 0x0000000000000400, 0x0000000004000000,
- 0x0000000000000040, 0x0000000010000000, 0x0000000000004000, 0x0000000101000000,
- 0x0000000108000000, 0x0000000000000100, 0x0000000100020000, 0x0000000000080000,
- 0x0000000000100000, 0x0000000000800000, 0x00000001ffffffff, 0x0000000122400fb3,
- // Entry 40 - 5F
- 0x00000001827c0813, 0x000000014240385f, 0x0000000103c1c813, 0x000000011e420813,
- 0x0000000112000001, 0x0000000106000001, 0x0000000101400001, 0x000000010a000001,
- 0x0000000102020001,
-}
-
-// regionInclusionNext marks, for each entry in regionInclusionBits, the set of
-// all groups that are reachable from the groups set in the respective entry.
-// Size: 73 bytes, 73 elements
-var regionInclusionNext = [73]uint8{
- // Entry 0 - 3F
- 0x3e, 0x3f, 0x0b, 0x0b, 0x40, 0x01, 0x0b, 0x01,
- 0x01, 0x01, 0x01, 0x41, 0x0b, 0x0b, 0x16, 0x16,
- 0x16, 0x19, 0x04, 0x04, 0x04, 0x04, 0x42, 0x16,
- 0x16, 0x43, 0x19, 0x19, 0x19, 0x01, 0x0b, 0x04,
- 0x00, 0x00, 0x1f, 0x11, 0x18, 0x0f, 0x0d, 0x09,
- 0x03, 0x15, 0x44, 0x12, 0x1b, 0x05, 0x45, 0x07,
- 0x0c, 0x10, 0x0a, 0x1a, 0x06, 0x1c, 0x0e, 0x46,
- 0x47, 0x08, 0x48, 0x13, 0x14, 0x17, 0x3e, 0x3e,
- // Entry 40 - 7F
- 0x3e, 0x3e, 0x3e, 0x3e, 0x43, 0x43, 0x42, 0x43,
- 0x43,
-}
-
-type parentRel struct {
- lang uint16
- script uint16
- maxScript uint16
- toRegion uint16
- fromRegion []uint16
-}
-
-// Size: 414 bytes, 5 elements
-var parents = [5]parentRel{
- 0: {lang: 0x139, script: 0x0, maxScript: 0x5a, toRegion: 0x1, fromRegion: []uint16{0x1a, 0x25, 0x26, 0x2f, 0x34, 0x36, 0x3d, 0x42, 0x46, 0x48, 0x49, 0x4a, 0x50, 0x52, 0x5c, 0x5d, 0x61, 0x64, 0x6d, 0x73, 0x74, 0x75, 0x7b, 0x7c, 0x7f, 0x80, 0x81, 0x83, 0x8c, 0x8d, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9f, 0xa0, 0xa4, 0xa7, 0xa9, 0xad, 0xb1, 0xb4, 0xb5, 0xbf, 0xc6, 0xca, 0xcb, 0xcc, 0xce, 0xd0, 0xd2, 0xd5, 0xd6, 0xdd, 0xdf, 0xe0, 0xe6, 0xe7, 0xe8, 0xeb, 0xf0, 0x107, 0x109, 0x10a, 0x10b, 0x10d, 0x10e, 0x112, 0x117, 0x11b, 0x11d, 0x11f, 0x125, 0x129, 0x12c, 0x12d, 0x12f, 0x131, 0x139, 0x13c, 0x13f, 0x142, 0x161, 0x162, 0x164}},
- 1: {lang: 0x139, script: 0x0, maxScript: 0x5a, toRegion: 0x1a, fromRegion: []uint16{0x2e, 0x4e, 0x60, 0x63, 0x72, 0xd9, 0x10c, 0x10f}},
- 2: {lang: 0x13e, script: 0x0, maxScript: 0x5a, toRegion: 0x1f, fromRegion: []uint16{0x2c, 0x3f, 0x41, 0x48, 0x51, 0x54, 0x56, 0x59, 0x65, 0x69, 0x89, 0x8f, 0xcf, 0xd8, 0xe2, 0xe4, 0xec, 0xf1, 0x11a, 0x135, 0x136, 0x13b}},
- 3: {lang: 0x3c0, script: 0x0, maxScript: 0x5a, toRegion: 0xee, fromRegion: []uint16{0x2a, 0x4e, 0x5a, 0x86, 0x8b, 0xb7, 0xc6, 0xd1, 0x118, 0x126}},
- 4: {lang: 0x529, script: 0x3c, maxScript: 0x3c, toRegion: 0x8d, fromRegion: []uint16{0xc6}},
-}
-
-// Total table size 30244 bytes (29KiB); checksum: B6B15F30
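
The likelyRegion comment in the deleted tables above describes a two-level lookup: an entry either names a language/script pair directly or, for regions with several candidates, stores the index and size of a run in likelyRegionList. Below is a minimal Go sketch of that index-plus-size pattern, for illustration only; the data is made up, and treating the 0x1 flag bit as the "list" marker is an assumption inferred from the table data, not something the comment states.

package main

import "fmt"

// likelyLangScript mirrors the struct in the deleted tables.go.
type likelyLangScript struct {
	lang   uint16
	script uint16
	flags  uint8
}

const isList = 0x1 // assumption: flag bit marking an index/size entry

// Hypothetical stand-ins for the generated likelyRegion / likelyRegionList tables.
var regionTable = []likelyLangScript{
	0: {lang: 0x13e, script: 0x5a, flags: 0x0}, // direct: one likely lang/script
	1: {lang: 0x0, script: 0x2, flags: isList}, // indirect: index 0, size 2 in the list below
}

var regionList = []likelyLangScript{
	{lang: 0x148, script: 0x5, flags: 0x0},
	{lang: 0x476, script: 0x5a, flags: 0x0},
}

// candidatesFor returns all candidate lang/script pairs for a region index.
func candidatesFor(region int) []likelyLangScript {
	e := regionTable[region]
	if e.flags&isList == 0 {
		return []likelyLangScript{e}
	}
	start, size := int(e.lang), int(e.script) // per the table comment: index and size
	return regionList[start : start+size]
}

func main() {
	fmt.Println(candidatesFor(0)) // single direct entry
	fmt.Println(candidatesFor(1)) // two candidates pulled from the companion list
}
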
diff --git a/vendor/golang.org/x/text/internal/language/tags.go b/vendor/golang.org/x/text/internal/language/tags.go
deleted file mode 100644
index e7afd3188e..0000000000
--- a/vendor/golang.org/x/text/internal/language/tags.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package language
-
-// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
-// It simplifies safe initialization of Tag values.
-func MustParse(s string) Tag {
- t, err := Parse(s)
- if err != nil {
- panic(err)
- }
- return t
-}
-
-// MustParseBase is like ParseBase, but panics if the given base cannot be parsed.
-// It simplifies safe initialization of Base values.
-func MustParseBase(s string) Language {
- b, err := ParseBase(s)
- if err != nil {
- panic(err)
- }
- return b
-}
-
-// MustParseScript is like ParseScript, but panics if the given script cannot be
-// parsed. It simplifies safe initialization of Script values.
-func MustParseScript(s string) Script {
- scr, err := ParseScript(s)
- if err != nil {
- panic(err)
- }
- return scr
-}
-
-// MustParseRegion is like ParseRegion, but panics if the given region cannot be
-// parsed. It simplifies safe initialization of Region values.
-func MustParseRegion(s string) Region {
- r, err := ParseRegion(s)
- if err != nil {
- panic(err)
- }
- return r
-}
-
-// Und is the root language.
-var Und Tag
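
The Must* helpers documented in this deleted internal file mirror the ones exported by the public golang.org/x/text/language package. A short, illustrative usage sketch of that public API (not part of the patch) could look like this:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

// MustParse panics on malformed input, so it suits package-level
// initialization from trusted literals, as the deleted doc comments say.
var australianEnglish = language.MustParse("en-AU")

func main() {
	fmt.Println(australianEnglish) // en-AU

	// For untrusted input, use Parse and handle the error instead.
	if _, err := language.Parse("not-a!tag"); err != nil {
		fmt.Println("parse failed:", err)
	}
}
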
diff --git a/vendor/golang.org/x/text/internal/tag/tag.go b/vendor/golang.org/x/text/internal/tag/tag.go
deleted file mode 100644
index b5d348891d..0000000000
--- a/vendor/golang.org/x/text/internal/tag/tag.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package tag contains functionality handling tags and related data.
-package tag // import "golang.org/x/text/internal/tag"
-
-import "sort"
-
-// An Index converts tags to a compact numeric value.
-//
-// All elements are of size 4. Tags may be up to 4 bytes long. Excess bytes can
-// be used to store additional information about the tag.
-type Index string
-
-// Elem returns the element data at the given index.
-func (s Index) Elem(x int) string {
- return string(s[x*4 : x*4+4])
-}
-
-// Index reports the index of the given key or -1 if it could not be found.
-// Only the first len(key) bytes from the start of the 4-byte entries will be
-// considered for the search and the first match in Index will be returned.
-func (s Index) Index(key []byte) int {
- n := len(key)
- // search the index of the first entry with an equal or higher value than
- // key in s.
- index := sort.Search(len(s)/4, func(i int) bool {
- return cmp(s[i*4:i*4+n], key) != -1
- })
- i := index * 4
- if cmp(s[i:i+len(key)], key) != 0 {
- return -1
- }
- return index
-}
-
-// Next finds the next occurrence of key after index x, which must have been
-// obtained from a call to Index using the same key. It returns x+1 or -1.
-func (s Index) Next(key []byte, x int) int {
- if x++; x*4 < len(s) && cmp(s[x*4:x*4+len(key)], key) == 0 {
- return x
- }
- return -1
-}
-
-// cmp returns an integer comparing a and b lexicographically.
-func cmp(a Index, b []byte) int {
- n := len(a)
- if len(b) < n {
- n = len(b)
- }
- for i, c := range b[:n] {
- switch {
- case a[i] > c:
- return 1
- case a[i] < c:
- return -1
- }
- }
- switch {
- case len(a) < len(b):
- return -1
- case len(a) > len(b):
- return 1
- }
- return 0
-}
-
-// Compare returns an integer comparing a and b lexicographically.
-func Compare(a string, b []byte) int {
- return cmp(Index(a), b)
-}
-
-// FixCase reformats b to the same pattern of cases as form.
-// If returns false if string b is malformed.
-func FixCase(form string, b []byte) bool {
- if len(form) != len(b) {
- return false
- }
- for i, c := range b {
- if form[i] <= 'Z' {
- if c >= 'a' {
- c -= 'z' - 'Z'
- }
- if c < 'A' || 'Z' < c {
- return false
- }
- } else {
- if c <= 'Z' {
- c += 'z' - 'Z'
- }
- if c < 'a' || 'z' < c {
- return false
- }
- }
- b[i] = c
- }
- return true
-}
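
The deleted internal/tag package above implements a fixed-width string index: 4-byte entries kept in sorted order and located with a binary search. Since that package is internal to x/text and cannot be imported from outside, the following is a conceptual, self-contained re-sketch of the same idea rather than the real API:

package main

import (
	"fmt"
	"sort"
	"strings"
)

type index string // concatenation of sorted 4-byte entries

func (s index) elem(i int) string { return string(s[i*4 : i*4+4]) }

// find returns the position of the first entry whose leading len(key) bytes
// equal key, or -1 if there is none (keys are assumed to be at most 4 bytes).
func (s index) find(key string) int {
	n := len(s) / 4
	i := sort.Search(n, func(i int) bool {
		return s[i*4:i*4+len(key)] >= index(key)
	})
	if i < n && strings.HasPrefix(s.elem(i), key) {
		return i
	}
	return -1
}

func main() {
	// Padding with NUL bytes keeps every entry exactly 4 bytes wide.
	idx := index("da\x00\x00en\x00\x00enAUzhCN")
	fmt.Println(idx.find("en"))   // 1
	fmt.Println(idx.find("enAU")) // 2
	fmt.Println(idx.find("fr"))   // -1
}
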
diff --git a/vendor/golang.org/x/text/language/coverage.go b/vendor/golang.org/x/text/language/coverage.go
deleted file mode 100644
index a24fd1a4d6..0000000000
--- a/vendor/golang.org/x/text/language/coverage.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package language
-
-import (
- "fmt"
- "sort"
-
- "golang.org/x/text/internal/language"
-)
-
-// The Coverage interface is used to define the level of coverage of an
-// internationalization service. Note that not all types are supported by all
-// services. As lists may be generated on the fly, it is recommended that users
-// of a Coverage cache the results.
-type Coverage interface {
- // Tags returns the list of supported tags.
- Tags() []Tag
-
- // BaseLanguages returns the list of supported base languages.
- BaseLanguages() []Base
-
- // Scripts returns the list of supported scripts.
- Scripts() []Script
-
- // Regions returns the list of supported regions.
- Regions() []Region
-}
-
-var (
- // Supported defines a Coverage that lists all supported subtags. Tags
- // always returns nil.
- Supported Coverage = allSubtags{}
-)
-
-// TODO:
-// - Support Variants, numbering systems.
-// - CLDR coverage levels.
-// - Set of common tags defined in this package.
-
-type allSubtags struct{}
-
-// Regions returns the list of supported regions. As all regions are in a
-// consecutive range, it simply returns a slice of numbers in increasing order.
-// The "undefined" region is not returned.
-func (s allSubtags) Regions() []Region {
- reg := make([]Region, language.NumRegions)
- for i := range reg {
- reg[i] = Region{language.Region(i + 1)}
- }
- return reg
-}
-
-// Scripts returns the list of supported scripts. As all scripts are in a
-// consecutive range, it simply returns a slice of numbers in increasing order.
-// The "undefined" script is not returned.
-func (s allSubtags) Scripts() []Script {
- scr := make([]Script, language.NumScripts)
- for i := range scr {
- scr[i] = Script{language.Script(i + 1)}
- }
- return scr
-}
-
-// BaseLanguages returns the list of all supported base languages. It generates
-// the list by traversing the internal structures.
-func (s allSubtags) BaseLanguages() []Base {
- bs := language.BaseLanguages()
- base := make([]Base, len(bs))
- for i, b := range bs {
- base[i] = Base{b}
- }
- return base
-}
-
-// Tags always returns nil.
-func (s allSubtags) Tags() []Tag {
- return nil
-}
-
-// coverage is used by NewCoverage which is used as a convenient way for
-// creating Coverage implementations for partially defined data. Very often a
-// package will only need to define a subset of slices. coverage provides a
-// convenient way to do this. Moreover, packages using NewCoverage, instead of
-// their own implementation, will not break if later new slice types are added.
-type coverage struct {
- tags func() []Tag
- bases func() []Base
- scripts func() []Script
- regions func() []Region
-}
-
-func (s *coverage) Tags() []Tag {
- if s.tags == nil {
- return nil
- }
- return s.tags()
-}
-
-// bases implements sort.Interface and is used to sort base languages.
-type bases []Base
-
-func (b bases) Len() int {
- return len(b)
-}
-
-func (b bases) Swap(i, j int) {
- b[i], b[j] = b[j], b[i]
-}
-
-func (b bases) Less(i, j int) bool {
- return b[i].langID < b[j].langID
-}
-
-// BaseLanguages returns the result from calling s.bases if it is specified or
-// otherwise derives the set of supported base languages from tags.
-func (s *coverage) BaseLanguages() []Base {
- if s.bases == nil {
- tags := s.Tags()
- if len(tags) == 0 {
- return nil
- }
- a := make([]Base, len(tags))
- for i, t := range tags {
- a[i] = Base{language.Language(t.lang())}
- }
- sort.Sort(bases(a))
- k := 0
- for i := 1; i < len(a); i++ {
- if a[k] != a[i] {
- k++
- a[k] = a[i]
- }
- }
- return a[:k+1]
- }
- return s.bases()
-}
-
-func (s *coverage) Scripts() []Script {
- if s.scripts == nil {
- return nil
- }
- return s.scripts()
-}
-
-func (s *coverage) Regions() []Region {
- if s.regions == nil {
- return nil
- }
- return s.regions()
-}
-
-// NewCoverage returns a Coverage for the given lists. It is typically used by
-// packages providing internationalization services to define their level of
-// coverage. A list may be of type []T or func() []T, where T is either Tag,
-// Base, Script or Region. The returned Coverage derives the value for Bases
-// from Tags if no func or slice for []Base is specified. For other unspecified
-// types the returned Coverage will return nil for the respective methods.
-func NewCoverage(list ...interface{}) Coverage {
- s := &coverage{}
- for _, x := range list {
- switch v := x.(type) {
- case func() []Base:
- s.bases = v
- case func() []Script:
- s.scripts = v
- case func() []Region:
- s.regions = v
- case func() []Tag:
- s.tags = v
- case []Base:
- s.bases = func() []Base { return v }
- case []Script:
- s.scripts = func() []Script { return v }
- case []Region:
- s.regions = func() []Region { return v }
- case []Tag:
- s.tags = func() []Tag { return v }
- default:
- panic(fmt.Sprintf("language: unsupported set type %T", v))
- }
- }
- return s
-}
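
The deleted coverage.go documents that NewCoverage accepts slices or func() slices of Tag, Base, Script, or Region, and derives BaseLanguages from Tags when no base list is given. A small, hedged usage sketch of the public golang.org/x/text/language.NewCoverage (output order depends on internal IDs and is not asserted here):

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

// A package shipping translations for these tags could advertise its coverage
// like this; per the deleted comments, base languages are derived from the tags.
var supported = language.NewCoverage([]language.Tag{
	language.English,
	language.MustParse("en-AU"),
	language.Danish,
	language.Chinese,
})

func main() {
	fmt.Println(supported.Tags())          // the four tags above
	fmt.Println(supported.BaseLanguages()) // deduplicated bases, e.g. en, da, zh
	fmt.Println(supported.Scripts())       // empty: no script list was supplied
}
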
diff --git a/vendor/golang.org/x/text/language/doc.go b/vendor/golang.org/x/text/language/doc.go
deleted file mode 100644
index 212b77c906..0000000000
--- a/vendor/golang.org/x/text/language/doc.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package language implements BCP 47 language tags and related functionality.
-//
-// The most important function of package language is to match a list of
-// user-preferred languages to a list of supported languages.
-// It alleviates the developer of dealing with the complexity of this process
-// and provides the user with the best experience
-// (see https://blog.golang.org/matchlang).
-//
-// # Matching preferred against supported languages
-//
-// A Matcher for an application that supports English, Australian English,
-// Danish, and standard Mandarin can be created as follows:
-//
-// var matcher = language.NewMatcher([]language.Tag{
-// language.English, // The first language is used as fallback.
-// language.MustParse("en-AU"),
-// language.Danish,
-// language.Chinese,
-// })
-//
-// This list of supported languages is typically implied by the languages for
-// which there exists translations of the user interface.
-//
-// User-preferred languages usually come as a comma-separated list of BCP 47
-// language tags.
-// The MatchString finds best matches for such strings:
-//
-// handler(w http.ResponseWriter, r *http.Request) {
-// lang, _ := r.Cookie("lang")
-// accept := r.Header.Get("Accept-Language")
-// tag, _ := language.MatchStrings(matcher, lang.String(), accept)
-//
-// // tag should now be used for the initialization of any
-// // locale-specific service.
-// }
-//
-// The Matcher's Match method can be used to match Tags directly.
-//
-// Matchers are aware of the intricacies of equivalence between languages, such
-// as deprecated subtags, legacy tags, macro languages, mutual
-// intelligibility between scripts and languages, and transparently passing
-// BCP 47 user configuration.
-// For instance, it will know that a reader of Bokmål Danish can read Norwegian
-// and will know that Cantonese ("yue") is a good match for "zh-HK".
-//
-// # Using match results
-//
-// To guarantee a consistent user experience to the user it is important to
-// use the same language tag for the selection of any locale-specific services.
-// For example, it is utterly confusing to substitute spelled-out numbers
-// or dates in one language in text of another language.
-// More subtly confusing is using the wrong sorting order or casing
-// algorithm for a certain language.
-//
-// All the packages in x/text that provide locale-specific services
-// (e.g. collate, cases) should be initialized with the tag that was
-// obtained at the start of an interaction with the user.
-//
-// Note that Tag that is returned by Match and MatchString may differ from any
-// of the supported languages, as it may contain carried over settings from
-// the user tags.
-// This may be inconvenient when your application has some additional
-// locale-specific data for your supported languages.
-// Match and MatchString both return the index of the matched supported tag
-// to simplify associating such data with the matched tag.
-//
-// # Canonicalization
-//
-// If one uses the Matcher to compare languages one does not need to
-// worry about canonicalization.
-//
-// The meaning of a Tag varies per application. The language package
-// therefore delays canonicalization and preserves information as much
-// as possible. The Matcher, however, will always take into account that
-// two different tags may represent the same language.
-//
-// By default, only legacy and deprecated tags are converted into their
-// canonical equivalent. All other information is preserved. This approach makes
-// the confidence scores more accurate and allows matchers to distinguish
-// between variants that are otherwise lost.
-//
-// As a consequence, two tags that should be treated as identical according to
-// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The
-// Matcher handles such distinctions, though, and is aware of the
-// equivalence relations. The CanonType type can be used to alter the
-// canonicalization form.
-//
-// # References
-//
-// BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47
-package language // import "golang.org/x/text/language"
-
-// TODO: explanation on how to match languages for your own locale-specific
-// service.
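
The handler snippet in the deleted package documentation is deliberately partial. A self-contained sketch of the same matching flow, using only the calls the documentation itself names (NewMatcher, MustParse, MatchStrings) plus standard net/http plumbing, might look like:

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/text/language"
)

var matcher = language.NewMatcher([]language.Tag{
	language.English, // the first tag is the fallback
	language.MustParse("en-AU"),
	language.Danish,
	language.Chinese,
})

func handler(w http.ResponseWriter, r *http.Request) {
	// As in the deleted doc.go example, preferences come from a "lang"
	// cookie and the Accept-Language header; empty strings are fine.
	cookie := ""
	if c, err := r.Cookie("lang"); err == nil {
		cookie = c.Value
	}
	accept := r.Header.Get("Accept-Language")
	tag, index := language.MatchStrings(matcher, cookie, accept)
	fmt.Fprintf(w, "serving %v (supported tag #%d)\n", tag, index)
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe(":8080", nil)
}
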
diff --git a/vendor/golang.org/x/text/language/language.go b/vendor/golang.org/x/text/language/language.go
deleted file mode 100644
index 4d9c661212..0000000000
--- a/vendor/golang.org/x/text/language/language.go
+++ /dev/null
@@ -1,605 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:generate go run gen.go -output tables.go
-
-package language
-
-// TODO: Remove above NOTE after:
-// - verifying that tables are dropped correctly (most notably matcher tables).
-
-import (
- "strings"
-
- "golang.org/x/text/internal/language"
- "golang.org/x/text/internal/language/compact"
-)
-
-// Tag represents a BCP 47 language tag. It is used to specify an instance of a
-// specific language or locale. All language tag values are guaranteed to be
-// well-formed.
-type Tag compact.Tag
-
-func makeTag(t language.Tag) (tag Tag) {
- return Tag(compact.Make(t))
-}
-
-func (t *Tag) tag() language.Tag {
- return (*compact.Tag)(t).Tag()
-}
-
-func (t *Tag) isCompact() bool {
- return (*compact.Tag)(t).IsCompact()
-}
-
-// TODO: improve performance.
-func (t *Tag) lang() language.Language { return t.tag().LangID }
-func (t *Tag) region() language.Region { return t.tag().RegionID }
-func (t *Tag) script() language.Script { return t.tag().ScriptID }
-
-// Make is a convenience wrapper for Parse that omits the error.
-// In case of an error, a sensible default is returned.
-func Make(s string) Tag {
- return Default.Make(s)
-}
-
-// Make is a convenience wrapper for c.Parse that omits the error.
-// In case of an error, a sensible default is returned.
-func (c CanonType) Make(s string) Tag {
- t, _ := c.Parse(s)
- return t
-}
-
-// Raw returns the raw base language, script and region, without making an
-// attempt to infer their values.
-func (t Tag) Raw() (b Base, s Script, r Region) {
- tt := t.tag()
- return Base{tt.LangID}, Script{tt.ScriptID}, Region{tt.RegionID}
-}
-
-// IsRoot returns true if t is equal to language "und".
-func (t Tag) IsRoot() bool {
- return compact.Tag(t).IsRoot()
-}
-
-// CanonType can be used to enable or disable various types of canonicalization.
-type CanonType int
-
-const (
- // Replace deprecated base languages with their preferred replacements.
- DeprecatedBase CanonType = 1 << iota
- // Replace deprecated scripts with their preferred replacements.
- DeprecatedScript
- // Replace deprecated regions with their preferred replacements.
- DeprecatedRegion
- // Remove redundant scripts.
- SuppressScript
- // Normalize legacy encodings. This includes legacy languages defined in
- // CLDR as well as bibliographic codes defined in ISO-639.
- Legacy
- // Map the dominant language of a macro language group to the macro language
- // subtag. For example cmn -> zh.
- Macro
- // The CLDR flag should be used if full compatibility with CLDR is required.
- // There are a few cases where language.Tag may differ from CLDR. To follow all
- // of CLDR's suggestions, use All|CLDR.
- CLDR
-
- // Raw can be used to Compose or Parse without Canonicalization.
- Raw CanonType = 0
-
- // Replace all deprecated tags with their preferred replacements.
- Deprecated = DeprecatedBase | DeprecatedScript | DeprecatedRegion
-
- // All canonicalizations recommended by BCP 47.
- BCP47 = Deprecated | SuppressScript
-
- // All canonicalizations.
- All = BCP47 | Legacy | Macro
-
- // Default is the canonicalization used by Parse, Make and Compose. To
- // preserve as much information as possible, canonicalizations that remove
- // potentially valuable information are not included. The Matcher is
- // designed to recognize similar tags that would be the same if
- // they were canonicalized using All.
- Default = Deprecated | Legacy
-
- canonLang = DeprecatedBase | Legacy | Macro
-
- // TODO: LikelyScript, LikelyRegion: suppress similar to ICU.
-)
-
-// canonicalize returns the canonicalized equivalent of the tag and
-// whether there was any change.
-func canonicalize(c CanonType, t language.Tag) (language.Tag, bool) {
- if c == Raw {
- return t, false
- }
- changed := false
- if c&SuppressScript != 0 {
- if t.LangID.SuppressScript() == t.ScriptID {
- t.ScriptID = 0
- changed = true
- }
- }
- if c&canonLang != 0 {
- for {
- if l, aliasType := t.LangID.Canonicalize(); l != t.LangID {
- switch aliasType {
- case language.Legacy:
- if c&Legacy != 0 {
- if t.LangID == _sh && t.ScriptID == 0 {
- t.ScriptID = _Latn
- }
- t.LangID = l
- changed = true
- }
- case language.Macro:
- if c&Macro != 0 {
- // We deviate here from CLDR. The mapping "nb" -> "no"
- // qualifies as a typical Macro language mapping. However,
- // for legacy reasons, CLDR maps "no", the macro language
- // code for Norwegian, to the dominant variant "nb". This
- // change is currently under consideration for CLDR as well.
- // See https://unicode.org/cldr/trac/ticket/2698 and also
- // https://unicode.org/cldr/trac/ticket/1790 for some of the
- // practical implications. TODO: this check could be removed
- // if CLDR adopts this change.
- if c&CLDR == 0 || t.LangID != _nb {
- changed = true
- t.LangID = l
- }
- }
- case language.Deprecated:
- if c&DeprecatedBase != 0 {
- if t.LangID == _mo && t.RegionID == 0 {
- t.RegionID = _MD
- }
- t.LangID = l
- changed = true
- // Other canonicalization types may still apply.
- continue
- }
- }
- } else if c&Legacy != 0 && t.LangID == _no && c&CLDR != 0 {
- t.LangID = _nb
- changed = true
- }
- break
- }
- }
- if c&DeprecatedScript != 0 {
- if t.ScriptID == _Qaai {
- changed = true
- t.ScriptID = _Zinh
- }
- }
- if c&DeprecatedRegion != 0 {
- if r := t.RegionID.Canonicalize(); r != t.RegionID {
- changed = true
- t.RegionID = r
- }
- }
- return t, changed
-}
-
-// Canonicalize returns the canonicalized equivalent of the tag.
-func (c CanonType) Canonicalize(t Tag) (Tag, error) {
- // First try fast path.
- if t.isCompact() {
- if _, changed := canonicalize(c, compact.Tag(t).Tag()); !changed {
- return t, nil
- }
- }
- // It is unlikely that one will canonicalize a tag after matching. So do
- // a slow but simple approach here.
- if tag, changed := canonicalize(c, t.tag()); changed {
- tag.RemakeString()
- return makeTag(tag), nil
- }
- return t, nil
-
-}
-
-// Confidence indicates the level of certainty for a given return value.
-// For example, Serbian may be written in Cyrillic or Latin script.
-// The confidence level indicates whether a value was explicitly specified,
-// whether it is typically the only possible value, or whether there is
-// an ambiguity.
-type Confidence int
-
-const (
- No Confidence = iota // full confidence that there was no match
- Low // most likely value picked out of a set of alternatives
- High // value is generally assumed to be the correct match
- Exact // exact match or explicitly specified value
-)
-
-var confName = []string{"No", "Low", "High", "Exact"}
-
-func (c Confidence) String() string {
- return confName[c]
-}
-
-// String returns the canonical string representation of the language tag.
-func (t Tag) String() string {
- return t.tag().String()
-}
-
-// MarshalText implements encoding.TextMarshaler.
-func (t Tag) MarshalText() (text []byte, err error) {
- return t.tag().MarshalText()
-}
-
-// UnmarshalText implements encoding.TextUnmarshaler.
-func (t *Tag) UnmarshalText(text []byte) error {
- var tag language.Tag
- err := tag.UnmarshalText(text)
- *t = makeTag(tag)
- return err
-}
-
-// Base returns the base language of the language tag. If the base language is
-// unspecified, an attempt will be made to infer it from the context.
-// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
-func (t Tag) Base() (Base, Confidence) {
- if b := t.lang(); b != 0 {
- return Base{b}, Exact
- }
- tt := t.tag()
- c := High
- if tt.ScriptID == 0 && !tt.RegionID.IsCountry() {
- c = Low
- }
- if tag, err := tt.Maximize(); err == nil && tag.LangID != 0 {
- return Base{tag.LangID}, c
- }
- return Base{0}, No
-}
-
-// Script infers the script for the language tag. If it was not explicitly given, it will infer
-// a most likely candidate.
-// If more than one script is commonly used for a language, the most likely one
-// is returned with a low confidence indication. For example, it returns (Cyrl, Low)
-// for Serbian.
-// If a script cannot be inferred, (Zzzz, No) is returned. We do not use Zyyy (undetermined)
-// as one would suspect from the IANA registry for BCP 47. In a Unicode context Zyyy marks
-// common characters (like 1, 2, 3, '.', etc.) and is therefore more like multiple scripts.
-// See https://www.unicode.org/reports/tr24/#Values for more details. Zzzz is also used for
-// unknown value in CLDR. (Zzzz, Exact) is returned if Zzzz was explicitly specified.
-// Note that an inferred script is never guaranteed to be the correct one. Latin is
-// almost exclusively used for Afrikaans, but Arabic has been used for some texts
-// in the past. Also, the script that is commonly used may change over time.
-// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
-func (t Tag) Script() (Script, Confidence) {
- if scr := t.script(); scr != 0 {
- return Script{scr}, Exact
- }
- tt := t.tag()
- sc, c := language.Script(_Zzzz), No
- if scr := tt.LangID.SuppressScript(); scr != 0 {
- // Note: it is not always the case that a language with a suppress
- // script value is only written in one script (e.g. kk, ms, pa).
- if tt.RegionID == 0 {
- return Script{scr}, High
- }
- sc, c = scr, High
- }
- if tag, err := tt.Maximize(); err == nil {
- if tag.ScriptID != sc {
- sc, c = tag.ScriptID, Low
- }
- } else {
- tt, _ = canonicalize(Deprecated|Macro, tt)
- if tag, err := tt.Maximize(); err == nil && tag.ScriptID != sc {
- sc, c = tag.ScriptID, Low
- }
- }
- return Script{sc}, c
-}
-
-// Region returns the region for the language tag. If it was not explicitly given, it will
-// infer a most likely candidate from the context.
-// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
-func (t Tag) Region() (Region, Confidence) {
- if r := t.region(); r != 0 {
- return Region{r}, Exact
- }
- tt := t.tag()
- if tt, err := tt.Maximize(); err == nil {
- return Region{tt.RegionID}, Low // TODO: differentiate between high and low.
- }
- tt, _ = canonicalize(Deprecated|Macro, tt)
- if tag, err := tt.Maximize(); err == nil {
- return Region{tag.RegionID}, Low
- }
- return Region{_ZZ}, No // TODO: return world instead of undetermined?
-}
-
-// Variants returns the variants specified explicitly for this language tag,
-// or nil if no variant was specified.
-func (t Tag) Variants() []Variant {
- if !compact.Tag(t).MayHaveVariants() {
- return nil
- }
- v := []Variant{}
- x, str := "", t.tag().Variants()
- for str != "" {
- x, str = nextToken(str)
- v = append(v, Variant{x})
- }
- return v
-}
-
-// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
-// specific language are substituted with fields from the parent language.
-// The parent for a language may change for newer versions of CLDR.
-//
-// Parent returns a tag for a less specific language that is mutually
-// intelligible or Und if there is no such language. This may not be the same as
-// simply stripping the last BCP 47 subtag. For instance, the parent of "zh-TW"
-// is "zh-Hant", and the parent of "zh-Hant" is "und".
-func (t Tag) Parent() Tag {
- return Tag(compact.Tag(t).Parent())
-}
-
-// nextToken returns token t and the rest of the string.
-func nextToken(s string) (t, tail string) {
- p := strings.Index(s[1:], "-")
- if p == -1 {
- return s[1:], ""
- }
- p++
- return s[1:p], s[p:]
-}
-
-// Extension is a single BCP 47 extension.
-type Extension struct {
- s string
-}
-
-// String returns the string representation of the extension, including the
-// type tag.
-func (e Extension) String() string {
- return e.s
-}
-
-// ParseExtension parses s as an extension and returns it on success.
-func ParseExtension(s string) (e Extension, err error) {
- ext, err := language.ParseExtension(s)
- return Extension{ext}, err
-}
-
-// Type returns the one-byte extension type of e. It returns 0 for the zero
-// exception.
-func (e Extension) Type() byte {
- if e.s == "" {
- return 0
- }
- return e.s[0]
-}
-
-// Tokens returns the list of tokens of e.
-func (e Extension) Tokens() []string {
- return strings.Split(e.s, "-")
-}
-
-// Extension returns the extension of type x for tag t. It will return
-// false for ok if t does not have the requested extension. The returned
-// extension will be invalid in this case.
-func (t Tag) Extension(x byte) (ext Extension, ok bool) {
- if !compact.Tag(t).MayHaveExtensions() {
- return Extension{}, false
- }
- e, ok := t.tag().Extension(x)
- return Extension{e}, ok
-}
-
-// Extensions returns all extensions of t.
-func (t Tag) Extensions() []Extension {
- if !compact.Tag(t).MayHaveExtensions() {
- return nil
- }
- e := []Extension{}
- for _, ext := range t.tag().Extensions() {
- e = append(e, Extension{ext})
- }
- return e
-}
-
-// TypeForKey returns the type associated with the given key, where key and type
-// are of the allowed values defined for the Unicode locale extension ('u') in
-// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
-// TypeForKey will traverse the inheritance chain to get the correct value.
-//
-// If there are multiple types associated with a key, only the first will be
-// returned. If there is no type associated with a key, it returns the empty
-// string.
-func (t Tag) TypeForKey(key string) string {
- if !compact.Tag(t).MayHaveExtensions() {
- if key != "rg" && key != "va" {
- return ""
- }
- }
- return t.tag().TypeForKey(key)
-}
-
-// SetTypeForKey returns a new Tag with the key set to type, where key and type
-// are of the allowed values defined for the Unicode locale extension ('u') in
-// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
-// An empty value removes an existing pair with the same key.
-func (t Tag) SetTypeForKey(key, value string) (Tag, error) {
- tt, err := t.tag().SetTypeForKey(key, value)
- return makeTag(tt), err
-}
-
-// NumCompactTags is the number of compact tags. The maximum tag is
-// NumCompactTags-1.
-const NumCompactTags = compact.NumCompactTags
-
-// CompactIndex returns an index, where 0 <= index < NumCompactTags, for tags
-// for which data exists in the text repository. The index will change over time
-// and should not be stored in persistent storage. If t does not match a compact
-// index, exact will be false and the compact index will be returned for the
-// first match after repeatedly taking the Parent of t.
-func CompactIndex(t Tag) (index int, exact bool) {
- id, exact := compact.LanguageID(compact.Tag(t))
- return int(id), exact
-}
-
-var root = language.Tag{}
-
-// Base is an ISO 639 language code, used for encoding the base language
-// of a language tag.
-type Base struct {
- langID language.Language
-}
-
-// ParseBase parses a 2- or 3-letter ISO 639 code.
-// It returns a ValueError if s is a well-formed but unknown language identifier
-// or another error if another error occurred.
-func ParseBase(s string) (Base, error) {
- l, err := language.ParseBase(s)
- return Base{l}, err
-}
-
-// String returns the BCP 47 representation of the base language.
-func (b Base) String() string {
- return b.langID.String()
-}
-
-// ISO3 returns the ISO 639-3 language code.
-func (b Base) ISO3() string {
- return b.langID.ISO3()
-}
-
-// IsPrivateUse reports whether this language code is reserved for private use.
-func (b Base) IsPrivateUse() bool {
- return b.langID.IsPrivateUse()
-}
-
-// Script is a 4-letter ISO 15924 code for representing scripts.
-// It is idiomatically represented in title case.
-type Script struct {
- scriptID language.Script
-}
-
-// ParseScript parses a 4-letter ISO 15924 code.
-// It returns a ValueError if s is a well-formed but unknown script identifier
-// or another error if another error occurred.
-func ParseScript(s string) (Script, error) {
- sc, err := language.ParseScript(s)
- return Script{sc}, err
-}
-
-// String returns the script code in title case.
-// It returns "Zzzz" for an unspecified script.
-func (s Script) String() string {
- return s.scriptID.String()
-}
-
-// IsPrivateUse reports whether this script code is reserved for private use.
-func (s Script) IsPrivateUse() bool {
- return s.scriptID.IsPrivateUse()
-}
-
-// Region is an ISO 3166-1 or UN M.49 code for representing countries and regions.
-type Region struct {
- regionID language.Region
-}
-
-// EncodeM49 returns the Region for the given UN M.49 code.
-// It returns an error if r is not a valid code.
-func EncodeM49(r int) (Region, error) {
- rid, err := language.EncodeM49(r)
- return Region{rid}, err
-}
-
-// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code.
-// It returns a ValueError if s is a well-formed but unknown region identifier
-// or another error if another error occurred.
-func ParseRegion(s string) (Region, error) {
- r, err := language.ParseRegion(s)
- return Region{r}, err
-}
-
-// String returns the BCP 47 representation for the region.
-// It returns "ZZ" for an unspecified region.
-func (r Region) String() string {
- return r.regionID.String()
-}
-
-// ISO3 returns the 3-letter ISO code of r.
-// Note that not all regions have a 3-letter ISO code.
-// In such cases this method returns "ZZZ".
-func (r Region) ISO3() string {
- return r.regionID.ISO3()
-}
-
-// M49 returns the UN M.49 encoding of r, or 0 if this encoding
-// is not defined for r.
-func (r Region) M49() int {
- return r.regionID.M49()
-}
-
-// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This
-// may include private-use tags that are assigned by CLDR and used in this
-// implementation. So IsPrivateUse and IsCountry can be simultaneously true.
-func (r Region) IsPrivateUse() bool {
- return r.regionID.IsPrivateUse()
-}
-
-// IsCountry returns whether this region is a country or autonomous area. This
-// includes non-standard definitions from CLDR.
-func (r Region) IsCountry() bool {
- return r.regionID.IsCountry()
-}
-
-// IsGroup returns whether this region defines a collection of regions. This
-// includes non-standard definitions from CLDR.
-func (r Region) IsGroup() bool {
- return r.regionID.IsGroup()
-}
-
-// Contains returns whether Region c is contained by Region r. It returns true
-// if c == r.
-func (r Region) Contains(c Region) bool {
- return r.regionID.Contains(c.regionID)
-}
-
-// TLD returns the country code top-level domain (ccTLD). UK is returned for GB.
-// In all other cases it returns either the region itself or an error.
-//
-// This method may return an error for a region for which there exists a
-// canonical form with a ccTLD. To get that ccTLD, canonicalize r first. The
-// region will already be canonicalized if it was obtained from a Tag that was
-// obtained using any of the default methods.
-func (r Region) TLD() (Region, error) {
- tld, err := r.regionID.TLD()
- return Region{tld}, err
-}
-
-// Canonicalize returns the region or a possible replacement if the region is
-// deprecated. It will not return a replacement for deprecated regions that
-// are split into multiple regions.
-func (r Region) Canonicalize() Region {
- return Region{r.regionID.Canonicalize()}
-}
-
-// Variant represents a registered variant of a language as defined by BCP 47.
-type Variant struct {
- variant string
-}
-
-// ParseVariant parses and returns a Variant. An error is returned if s is not
-// a valid variant.
-func ParseVariant(s string) (Variant, error) {
- v, err := language.ParseVariant(s)
- return Variant{v.String()}, err
-}
-
-// String returns the string representation of the variant.
-func (v Variant) String() string {
- return v.variant
-}
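
As a quick reference for reviewers, the following sketch shows how the Tag accessors and CanonType canonicalization documented in the deleted language.go are typically used. It is illustrative only (not part of the diff); the tags "sr" and "iw" simply mirror the examples used in the doc comments above, and the expected values in the comments are indicative rather than verified output.

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Make parses with the Default canonicalization and drops the error.
	t := language.Make("sr") // Serbian: base given, script and region left out

	base, bc := t.Base()     // sr, Exact: specified explicitly
	script, sc := t.Script() // Cyrl, Low: inferred; Serbian uses two scripts
	region, rc := t.Region() // RS, Low: inferred via likely subtags
	fmt.Println(base, bc, script, sc, region, rc)

	// CanonType controls which canonicalizations Parse, Make and Compose apply.
	fmt.Println(language.Make("iw"))     // "he": Default replaces deprecated/legacy codes
	fmt.Println(language.Raw.Make("iw")) // "iw": Raw disables canonicalization
}
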
diff --git a/vendor/golang.org/x/text/language/match.go b/vendor/golang.org/x/text/language/match.go
deleted file mode 100644
index ee45f49474..0000000000
--- a/vendor/golang.org/x/text/language/match.go
+++ /dev/null
@@ -1,735 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package language
-
-import (
- "errors"
- "strings"
-
- "golang.org/x/text/internal/language"
-)
-
-// A MatchOption configures a Matcher.
-type MatchOption func(*matcher)
-
-// PreferSameScript will, in the absence of a match, cause the first preferred
-// tag that has the same script as a supported tag to match that supported tag.
-// The default is currently true, but this may change in the future.
-func PreferSameScript(preferSame bool) MatchOption {
- return func(m *matcher) { m.preferSameScript = preferSame }
-}
-
-// TODO(v1.0.0): consider making Matcher a concrete type, instead of interface.
-// There doesn't seem to be too much need for multiple types.
-// Making it a concrete type allows MatchStrings to be a method, which will
-// improve its discoverability.
-
-// MatchStrings parses and matches the given strings until one of them matches
-// the language in the Matcher. A string may be an Accept-Language header as
-// handled by ParseAcceptLanguage. The default language is returned if no
-// other language matched.
-func MatchStrings(m Matcher, lang ...string) (tag Tag, index int) {
- for _, accept := range lang {
- desired, _, err := ParseAcceptLanguage(accept)
- if err != nil {
- continue
- }
- if tag, index, conf := m.Match(desired...); conf != No {
- return tag, index
- }
- }
- tag, index, _ = m.Match()
- return
-}
-
-// Matcher is the interface that wraps the Match method.
-//
-// Match returns the best match for any of the given tags, along with
-// a unique index associated with the returned tag and a confidence
-// score.
-type Matcher interface {
- Match(t ...Tag) (tag Tag, index int, c Confidence)
-}
-
-// Comprehends reports the confidence score for a speaker of a given language
-// to being able to comprehend the written form of an alternative language.
-func Comprehends(speaker, alternative Tag) Confidence {
- _, _, c := NewMatcher([]Tag{alternative}).Match(speaker)
- return c
-}
-
-// NewMatcher returns a Matcher that matches an ordered list of preferred tags
-// against a list of supported tags based on written intelligibility, closeness
-// of dialect, equivalence of subtags and various other rules. It is initialized
-// with the list of supported tags. The first element is used as the default
-// value in case no match is found.
-//
-// Its Match method matches the first of the given Tags to reach a certain
-// confidence threshold. The tags passed to Match should therefore be specified
-// in order of preference. Extensions are ignored for matching.
-//
-// The index returned by the Match method corresponds to the index of the
-// matched tag in t, but is augmented with the Unicode extension ('u') of the
-// corresponding preferred tag. This allows user locale options to be passed
-// transparently.
-func NewMatcher(t []Tag, options ...MatchOption) Matcher {
- return newMatcher(t, options)
-}
-
-func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) {
- var tt language.Tag
- match, w, c := m.getBest(want...)
- if match != nil {
- tt, index = match.tag, match.index
- } else {
- // TODO: this should be an option
- tt = m.default_.tag
- if m.preferSameScript {
- outer:
- for _, w := range want {
- script, _ := w.Script()
- if script.scriptID == 0 {
- // Don't do anything if there is no script, such as with
- // private subtags.
- continue
- }
- for i, h := range m.supported {
- if script.scriptID == h.maxScript {
- tt, index = h.tag, i
- break outer
- }
- }
- }
- }
- // TODO: select first language tag based on script.
- }
- if w.RegionID != tt.RegionID && w.RegionID != 0 {
- if w.RegionID != 0 && tt.RegionID != 0 && tt.RegionID.Contains(w.RegionID) {
- tt.RegionID = w.RegionID
- tt.RemakeString()
- } else if r := w.RegionID.String(); len(r) == 2 {
- // TODO: also filter macro and deprecated.
- tt, _ = tt.SetTypeForKey("rg", strings.ToLower(r)+"zzzz")
- }
- }
- // Copy options from the user-provided tag into the result tag. This is hard
- // to do after the fact, so we do it here.
- // TODO: add in alternative variants to -u-va-.
- // TODO: add preferred region to -u-rg-.
- if e := w.Extensions(); len(e) > 0 {
- b := language.Builder{}
- b.SetTag(tt)
- for _, e := range e {
- b.AddExt(e)
- }
- tt = b.Make()
- }
- return makeTag(tt), index, c
-}
-
-// ErrMissingLikelyTagsData indicates no information was available
-// to compute likely values of missing tags.
-var ErrMissingLikelyTagsData = errors.New("missing likely tags data")
-
-// func (t *Tag) setTagsFrom(id Tag) {
-// t.LangID = id.LangID
-// t.ScriptID = id.ScriptID
-// t.RegionID = id.RegionID
-// }
-
-// Tag Matching
-// CLDR defines an algorithm for finding the best match between two sets of language
-// tags. The basic algorithm defines how to score a possible match and then find
-// the match with the best score
-// (see https://www.unicode.org/reports/tr35/#LanguageMatching).
-// Using scoring has several disadvantages. The scoring obfuscates the importance of
-// the various factors considered, making the algorithm harder to understand. Using
-// scoring also requires the full score to be computed for each pair of tags.
-//
-// We will use a different algorithm which aims to have the following properties:
-// - clarity on the precedence of the various selection factors, and
-// - improved performance by allowing early termination of a comparison.
-//
-// Matching algorithm (overview)
-// Input:
-// - supported: a set of supported tags
-// - default: the default tag to return in case there is no match
-// - desired: list of desired tags, ordered by preference, starting with
-// the most-preferred.
-//
-// Algorithm:
-// 1) Set the best match to the lowest confidence level
-// 2) For each tag in "desired":
-// a) For each tag in "supported":
-// 1) compute the match between the two tags.
-// 2) if the match is better than the previous best match, replace it
-// with the new match. (see next section)
-// b) if the current best match is Exact and pin is true the result will be
-//          frozen to the language found thus far, although better matches may
-// still be found for the same language.
-// 3) If the best match so far is below a certain threshold, return "default".
-//
-// Ranking:
-// We use two phases to determine whether one pair of tags are a better match
-// than another pair of tags. First, we determine a rough confidence level. If the
-// levels are different, the one with the highest confidence wins.
-// Second, if the rough confidence levels are identical, we use a set of tie-breaker
-// rules.
-//
-// The confidence level of matching a pair of tags is determined by finding the
-// lowest confidence level of any matches of the corresponding subtags (the
-// result is deemed as good as its weakest link).
-// We define the following levels:
-// Exact - An exact match of a subtag, before adding likely subtags.
-// MaxExact - An exact match of a subtag, after adding likely subtags.
-// [See Note 2].
-// High - High level of mutual intelligibility between different subtag
-// variants.
-// Low - Low level of mutual intelligibility between different subtag
-// variants.
-// No - No mutual intelligibility.
-//
-// The following levels can occur for each type of subtag:
-// Base: Exact, MaxExact, High, Low, No
-// Script: Exact, MaxExact [see Note 3], Low, No
-// Region: Exact, MaxExact, High
-// Variant: Exact, High
-// Private: Exact, No
-//
-// Any result with a confidence level of Low or higher is deemed a possible match.
-// Once a desired tag matches any of the supported tags with a level of MaxExact
-// or higher, the next desired tag is not considered (see Step 2.b).
-// Note that CLDR provides languageMatching data that defines close equivalence
-// classes for base languages, scripts and regions.
-//
-// Tie-breaking
-// If we get the same confidence level for two matches, we apply a sequence of
-// tie-breaking rules. The first that succeeds defines the result. The rules are
-// applied in the following order.
-// 1) Original language was defined and was identical.
-// 2) Original region was defined and was identical.
-// 3) Distance between two maximized regions was the smallest.
-// 4) Original script was defined and was identical.
-// 5) Distance from want tag to have tag using the parent relation [see Note 5.]
-// If there is still no winner after these rules are applied, the first match
-// found wins.
-//
-// Notes:
-// [2] In practice, as matching of Exact is done in a separate phase from
-// matching the other levels, we reuse the Exact level to mean MaxExact in
-// the second phase. As a consequence, we only need the levels defined by
-// the Confidence type. The MaxExact confidence level is mapped to High in
-// the public API.
-// [3] We do not differentiate between maximized script values that were derived
-// from suppressScript versus most likely tag data. We determined that in
-// ranking the two, one ranks just after the other. Moreover, the two cannot
-// occur concurrently. As a consequence, they are identical for practical
-// purposes.
-// [4] In case of deprecated, macro-equivalents and legacy mappings, we assign
-// the MaxExact level to allow iw vs he to still be a closer match than
-// en-AU vs en-US, for example.
-// [5] In CLDR a locale inherits fields that are unspecified for this locale
-// from its parent. Therefore, if a locale is a parent of another locale,
-// it is a strong measure for closeness, especially when no other tie
-// breaker rule applies. One could also argue it is inconsistent, for
-// example, when pt-AO matches pt (which CLDR equates with pt-BR), even
-// though its parent is pt-PT according to the inheritance rules.
-//
-// Implementation Details:
-// There are several performance considerations worth pointing out. Most notably,
-// we preprocess as much as possible (within reason) at the time of creation of a
-// matcher. This includes:
-// - creating a per-language map, which includes data for the raw base language
-// and its canonicalized variant (if applicable),
-// - expanding entries for the equivalence classes defined in CLDR's
-// languageMatch data.
-// The per-language map ensures that typically only a very small number of tags
-// need to be considered. The pre-expansion of canonicalized subtags and
-// equivalence classes reduces the amount of map lookups that need to be done at
-// runtime.
-
-// matcher keeps a set of supported language tags, indexed by language.
-type matcher struct {
- default_ *haveTag
- supported []*haveTag
- index map[language.Language]*matchHeader
- passSettings bool
- preferSameScript bool
-}
-
-// matchHeader has the lists of tags for exact matches and matches based on
-// maximized and canonicalized tags for a given language.
-type matchHeader struct {
- haveTags []*haveTag
- original bool
-}
-
-// haveTag holds a supported Tag and its maximized script and region. The maximized
-// or canonicalized language is not stored as it is not needed during matching.
-type haveTag struct {
- tag language.Tag
-
- // index of this tag in the original list of supported tags.
- index int
-
- // conf is the maximum confidence that can result from matching this haveTag.
- // When conf < Exact this means it was inserted after applying a CLDR equivalence rule.
- conf Confidence
-
- // Maximized region and script.
- maxRegion language.Region
- maxScript language.Script
-
- // altScript may be checked as an alternative match to maxScript. If altScript
- // matches, the confidence level for this match is Low. Theoretically there
- // could be multiple alternative scripts. This does not occur in practice.
- altScript language.Script
-
- // nextMax is the index of the next haveTag with the same maximized tags.
- nextMax uint16
-}
-
-func makeHaveTag(tag language.Tag, index int) (haveTag, language.Language) {
- max := tag
- if tag.LangID != 0 || tag.RegionID != 0 || tag.ScriptID != 0 {
- max, _ = canonicalize(All, max)
- max, _ = max.Maximize()
- max.RemakeString()
- }
- return haveTag{tag, index, Exact, max.RegionID, max.ScriptID, altScript(max.LangID, max.ScriptID), 0}, max.LangID
-}
-
-// altScript returns an alternative script that may match the given script with
-// a low confidence. At the moment, the langMatch data allows for at most one
-// script to map to another and we rely on this to keep the code simple.
-func altScript(l language.Language, s language.Script) language.Script {
- for _, alt := range matchScript {
- // TODO: also match cases where language is not the same.
- if (language.Language(alt.wantLang) == l || language.Language(alt.haveLang) == l) &&
- language.Script(alt.haveScript) == s {
- return language.Script(alt.wantScript)
- }
- }
- return 0
-}
-
-// addIfNew adds a haveTag to the list of tags only if it is a unique tag.
-// Tags that have the same maximized values are linked by index.
-func (h *matchHeader) addIfNew(n haveTag, exact bool) {
- h.original = h.original || exact
- // Don't add new exact matches.
- for _, v := range h.haveTags {
- if equalsRest(v.tag, n.tag) {
- return
- }
- }
- // Allow duplicate maximized tags, but create a linked list to allow quickly
-	// comparing the equivalents and bailing out.
- for i, v := range h.haveTags {
- if v.maxScript == n.maxScript &&
- v.maxRegion == n.maxRegion &&
- v.tag.VariantOrPrivateUseTags() == n.tag.VariantOrPrivateUseTags() {
- for h.haveTags[i].nextMax != 0 {
- i = int(h.haveTags[i].nextMax)
- }
- h.haveTags[i].nextMax = uint16(len(h.haveTags))
- break
- }
- }
- h.haveTags = append(h.haveTags, &n)
-}
-
-// header returns the matchHeader for the given language. It creates one if
-// it doesn't already exist.
-func (m *matcher) header(l language.Language) *matchHeader {
- if h := m.index[l]; h != nil {
- return h
- }
- h := &matchHeader{}
- m.index[l] = h
- return h
-}
-
-func toConf(d uint8) Confidence {
- if d <= 10 {
- return High
- }
- if d < 30 {
- return Low
- }
- return No
-}
-
-// newMatcher builds an index for the given supported tags and returns it as
-// a matcher. It also expands the index by considering various equivalence classes
-// for a given tag.
-func newMatcher(supported []Tag, options []MatchOption) *matcher {
- m := &matcher{
- index: make(map[language.Language]*matchHeader),
- preferSameScript: true,
- }
- for _, o := range options {
- o(m)
- }
- if len(supported) == 0 {
- m.default_ = &haveTag{}
- return m
- }
- // Add supported languages to the index. Add exact matches first to give
- // them precedence.
- for i, tag := range supported {
- tt := tag.tag()
- pair, _ := makeHaveTag(tt, i)
- m.header(tt.LangID).addIfNew(pair, true)
- m.supported = append(m.supported, &pair)
- }
- m.default_ = m.header(supported[0].lang()).haveTags[0]
- // Keep these in two different loops to support the case that two equivalent
- // languages are distinguished, such as iw and he.
- for i, tag := range supported {
- tt := tag.tag()
- pair, max := makeHaveTag(tt, i)
- if max != tt.LangID {
- m.header(max).addIfNew(pair, true)
- }
- }
-
- // update is used to add indexes in the map for equivalent languages.
- // update will only add entries to original indexes, thus not computing any
- // transitive relations.
- update := func(want, have uint16, conf Confidence) {
- if hh := m.index[language.Language(have)]; hh != nil {
- if !hh.original {
- return
- }
- hw := m.header(language.Language(want))
- for _, ht := range hh.haveTags {
- v := *ht
- if conf < v.conf {
- v.conf = conf
- }
- v.nextMax = 0 // this value needs to be recomputed
- if v.altScript != 0 {
- v.altScript = altScript(language.Language(want), v.maxScript)
- }
- hw.addIfNew(v, conf == Exact && hh.original)
- }
- }
- }
-
- // Add entries for languages with mutual intelligibility as defined by CLDR's
- // languageMatch data.
- for _, ml := range matchLang {
- update(ml.want, ml.have, toConf(ml.distance))
- if !ml.oneway {
- update(ml.have, ml.want, toConf(ml.distance))
- }
- }
-
- // Add entries for possible canonicalizations. This is an optimization to
- // ensure that only one map lookup needs to be done at runtime per desired tag.
- // First we match deprecated equivalents. If they are perfect equivalents
- // (their canonicalization simply substitutes a different language code, but
- // nothing else), the match confidence is Exact, otherwise it is High.
- for i, lm := range language.AliasMap {
- // If deprecated codes match and there is no fiddling with the script or
-		// region, we consider it an exact match.
- conf := Exact
- if language.AliasTypes[i] != language.Macro {
- if !isExactEquivalent(language.Language(lm.From)) {
- conf = High
- }
- update(lm.To, lm.From, conf)
- }
- update(lm.From, lm.To, conf)
- }
- return m
-}
-
-// getBest gets the best matching tag in m for any of the given tags, taking into
-// account the order of preference of the given tags.
-func (m *matcher) getBest(want ...Tag) (got *haveTag, orig language.Tag, c Confidence) {
- best := bestMatch{}
- for i, ww := range want {
- w := ww.tag()
- var max language.Tag
- // Check for exact match first.
- h := m.index[w.LangID]
- if w.LangID != 0 {
- if h == nil {
- continue
- }
- // Base language is defined.
- max, _ = canonicalize(Legacy|Deprecated|Macro, w)
- // A region that is added through canonicalization is stronger than
- // a maximized region: set it in the original (e.g. mo -> ro-MD).
- if w.RegionID != max.RegionID {
- w.RegionID = max.RegionID
- }
- // TODO: should we do the same for scripts?
- // See test case: en, sr, nl ; sh ; sr
- max, _ = max.Maximize()
- } else {
- // Base language is not defined.
- if h != nil {
- for i := range h.haveTags {
- have := h.haveTags[i]
- if equalsRest(have.tag, w) {
- return have, w, Exact
- }
- }
- }
- if w.ScriptID == 0 && w.RegionID == 0 {
- // We skip all tags matching und for approximate matching, including
- // private tags.
- continue
- }
- max, _ = w.Maximize()
- if h = m.index[max.LangID]; h == nil {
- continue
- }
- }
- pin := true
- for _, t := range want[i+1:] {
- if w.LangID == t.lang() {
- pin = false
- break
- }
- }
- // Check for match based on maximized tag.
- for i := range h.haveTags {
- have := h.haveTags[i]
- best.update(have, w, max.ScriptID, max.RegionID, pin)
- if best.conf == Exact {
- for have.nextMax != 0 {
- have = h.haveTags[have.nextMax]
- best.update(have, w, max.ScriptID, max.RegionID, pin)
- }
- return best.have, best.want, best.conf
- }
- }
- }
- if best.conf <= No {
- if len(want) != 0 {
- return nil, want[0].tag(), No
- }
- return nil, language.Tag{}, No
- }
- return best.have, best.want, best.conf
-}
-
-// bestMatch accumulates the best match so far.
-type bestMatch struct {
- have *haveTag
- want language.Tag
- conf Confidence
- pinnedRegion language.Region
- pinLanguage bool
- sameRegionGroup bool
- // Cached results from applying tie-breaking rules.
- origLang bool
- origReg bool
- paradigmReg bool
- regGroupDist uint8
- origScript bool
-}
-
-// update updates the existing best match if the new pair is considered to be a
-// better match. To determine if the given pair is a better match, it first
-// computes the rough confidence level. If this surpasses the current match, it
-// will replace it and update the tie-breaker rule cache. If there is a tie, it
-// proceeds with applying a series of tie-breaker rules. If there is no
-// conclusive winner after applying the tie-breaker rules, it leaves the current
-// match as the preferred match.
-//
-// If pin is true and have and tag are a strong match, it will henceforth only
-// consider matches for this language. This corresponds to the idea that most
-// users have a strong preference for the first defined language. A user can
-// still prefer a second language over a dialect of the preferred language by
-// explicitly specifying dialects, e.g. "en, nl, en-GB". In this case pin should
-// be false.
-func (m *bestMatch) update(have *haveTag, tag language.Tag, maxScript language.Script, maxRegion language.Region, pin bool) {
- // Bail if the maximum attainable confidence is below that of the current best match.
- c := have.conf
- if c < m.conf {
- return
- }
- // Don't change the language once we already have found an exact match.
- if m.pinLanguage && tag.LangID != m.want.LangID {
- return
- }
- // Pin the region group if we are comparing tags for the same language.
- if tag.LangID == m.want.LangID && m.sameRegionGroup {
- _, sameGroup := regionGroupDist(m.pinnedRegion, have.maxRegion, have.maxScript, m.want.LangID)
- if !sameGroup {
- return
- }
- }
- if c == Exact && have.maxScript == maxScript {
- // If there is another language and then another entry of this language,
- // don't pin anything, otherwise pin the language.
- m.pinLanguage = pin
- }
- if equalsRest(have.tag, tag) {
- } else if have.maxScript != maxScript {
- // There is usually very little comprehension between different scripts.
- // In a few cases there may still be Low comprehension. This possibility
- // is pre-computed and stored in have.altScript.
- if Low < m.conf || have.altScript != maxScript {
- return
- }
- c = Low
- } else if have.maxRegion != maxRegion {
- if High < c {
- // There is usually a small difference between languages across regions.
- c = High
- }
- }
-
- // We store the results of the computations of the tie-breaker rules along
- // with the best match. There is no need to do the checks once we determine
- // we have a winner, but we do still need to do the tie-breaker computations.
-	// We use "beaten" to keep track of whether we still need to do the checks.
- beaten := false // true if the new pair defeats the current one.
- if c != m.conf {
- if c < m.conf {
- return
- }
- beaten = true
- }
-
- // Tie-breaker rules:
- // We prefer if the pre-maximized language was specified and identical.
- origLang := have.tag.LangID == tag.LangID && tag.LangID != 0
- if !beaten && m.origLang != origLang {
- if m.origLang {
- return
- }
- beaten = true
- }
-
- // We prefer if the pre-maximized region was specified and identical.
- origReg := have.tag.RegionID == tag.RegionID && tag.RegionID != 0
- if !beaten && m.origReg != origReg {
- if m.origReg {
- return
- }
- beaten = true
- }
-
- regGroupDist, sameGroup := regionGroupDist(have.maxRegion, maxRegion, maxScript, tag.LangID)
- if !beaten && m.regGroupDist != regGroupDist {
- if regGroupDist > m.regGroupDist {
- return
- }
- beaten = true
- }
-
- paradigmReg := isParadigmLocale(tag.LangID, have.maxRegion)
- if !beaten && m.paradigmReg != paradigmReg {
- if !paradigmReg {
- return
- }
- beaten = true
- }
-
- // Next we prefer if the pre-maximized script was specified and identical.
- origScript := have.tag.ScriptID == tag.ScriptID && tag.ScriptID != 0
- if !beaten && m.origScript != origScript {
- if m.origScript {
- return
- }
- beaten = true
- }
-
- // Update m to the newly found best match.
- if beaten {
- m.have = have
- m.want = tag
- m.conf = c
- m.pinnedRegion = maxRegion
- m.sameRegionGroup = sameGroup
- m.origLang = origLang
- m.origReg = origReg
- m.paradigmReg = paradigmReg
- m.origScript = origScript
- m.regGroupDist = regGroupDist
- }
-}
-
-func isParadigmLocale(lang language.Language, r language.Region) bool {
- for _, e := range paradigmLocales {
- if language.Language(e[0]) == lang && (r == language.Region(e[1]) || r == language.Region(e[2])) {
- return true
- }
- }
- return false
-}
-
-// regionGroupDist computes the distance between two regions based on their
-// CLDR grouping.
-func regionGroupDist(a, b language.Region, script language.Script, lang language.Language) (dist uint8, same bool) {
- const defaultDistance = 4
-
- aGroup := uint(regionToGroups[a]) << 1
- bGroup := uint(regionToGroups[b]) << 1
- for _, ri := range matchRegion {
- if language.Language(ri.lang) == lang && (ri.script == 0 || language.Script(ri.script) == script) {
- group := uint(1 << (ri.group &^ 0x80))
- if 0x80&ri.group == 0 {
- if aGroup&bGroup&group != 0 { // Both regions are in the group.
- return ri.distance, ri.distance == defaultDistance
- }
- } else {
- if (aGroup|bGroup)&group == 0 { // Both regions are not in the group.
- return ri.distance, ri.distance == defaultDistance
- }
- }
- }
- }
- return defaultDistance, true
-}
-
-// equalsRest compares everything except the language.
-func equalsRest(a, b language.Tag) bool {
- // TODO: don't include extensions in this comparison. To do this efficiently,
- // though, we should handle private tags separately.
- return a.ScriptID == b.ScriptID && a.RegionID == b.RegionID && a.VariantOrPrivateUseTags() == b.VariantOrPrivateUseTags()
-}
-
-// isExactEquivalent returns true if canonicalizing the language will not alter
-// the script or region of a tag.
-func isExactEquivalent(l language.Language) bool {
- for _, o := range notEquivalent {
- if o == l {
- return false
- }
- }
- return true
-}
-
-var notEquivalent []language.Language
-
-func init() {
- // Create a list of all languages for which canonicalization may alter the
- // script or region.
- for _, lm := range language.AliasMap {
- tag := language.Tag{LangID: language.Language(lm.From)}
- if tag, _ = canonicalize(All, tag); tag.ScriptID != 0 || tag.RegionID != 0 {
- notEquivalent = append(notEquivalent, language.Language(lm.From))
- }
- }
- // Maximize undefined regions of paradigm locales.
- for i, v := range paradigmLocales {
- t := language.Tag{LangID: language.Language(v[0])}
- max, _ := t.Maximize()
- if v[1] == 0 {
- paradigmLocales[i][1] = uint16(max.RegionID)
- }
- if v[2] == 0 {
- paradigmLocales[i][2] = uint16(max.RegionID)
- }
- }
-}
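
The matcher removed above is the core of golang.org/x/text/language. For reviewers who want a reminder of the API it implemented, here is a minimal usage sketch (not part of the diff); the supported set and the Accept-Language value are invented for illustration, and the output comment is indicative only.

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// The first supported tag doubles as the default when nothing matches.
	supported := []language.Tag{
		language.AmericanEnglish, // en-US
		language.Dutch,           // nl
		language.Portuguese,      // pt
	}
	matcher := language.NewMatcher(supported)

	// MatchStrings accepts raw Accept-Language values and returns the best
	// supported tag plus its index into the supported slice.
	tag, index := language.MatchStrings(matcher, "pt-BR, nl;q=0.7, en;q=0.3")
	fmt.Println(tag, index) // e.g. "pt-u-rg-brzzzz 2": the desired region is carried over via -u-rg-
}
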
diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go
deleted file mode 100644
index 4d57222e77..0000000000
--- a/vendor/golang.org/x/text/language/parse.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package language
-
-import (
- "errors"
- "sort"
- "strconv"
- "strings"
-
- "golang.org/x/text/internal/language"
-)
-
-// ValueError is returned by any of the parsing functions when the
-// input is well-formed but the respective subtag is not recognized
-// as a valid value.
-type ValueError interface {
- error
-
- // Subtag returns the subtag for which the error occurred.
- Subtag() string
-}
-
-// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
-// failed it returns an error and any part of the tag that could be parsed.
-// If parsing succeeded but an unknown value was found, it returns
-// ValueError. The Tag returned in this case is just stripped of the unknown
-// value. All other values are preserved. It accepts tags in the BCP 47 format
-// and extensions to this standard defined in
-// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
-// The resulting tag is canonicalized using the default canonicalization type.
-func Parse(s string) (t Tag, err error) {
- return Default.Parse(s)
-}
-
-// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
-// failed it returns an error and any part of the tag that could be parsed.
-// If parsing succeeded but an unknown value was found, it returns
-// ValueError. The Tag returned in this case is just stripped of the unknown
-// value. All other values are preserved. It accepts tags in the BCP 47 format
-// and extensions to this standard defined in
-// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
-// The resulting tag is canonicalized using the canonicalization type c.
-func (c CanonType) Parse(s string) (t Tag, err error) {
- defer func() {
- if recover() != nil {
- t = Tag{}
- err = language.ErrSyntax
- }
- }()
-
- tt, err := language.Parse(s)
- if err != nil {
- return makeTag(tt), err
- }
- tt, changed := canonicalize(c, tt)
- if changed {
- tt.RemakeString()
- }
- return makeTag(tt), err
-}
-
-// Compose creates a Tag from individual parts, which may be of type Tag, Base,
-// Script, Region, Variant, []Variant, Extension, []Extension or error. If a
-// Base, Script or Region or slice of type Variant or Extension is passed more
-// than once, the latter will overwrite the former. Variants and Extensions are
-// accumulated, but if two extensions of the same type are passed, the latter
-// will replace the former. For -u extensions, though, the key-type pairs are
-// added, where later values overwrite older ones. A Tag overwrites all former
-// values and typically only makes sense as the first argument. The resulting
-// tag is returned after canonicalizing using the Default CanonType. If one or
-// more errors are encountered, one of the errors is returned.
-func Compose(part ...interface{}) (t Tag, err error) {
- return Default.Compose(part...)
-}
-
-// Compose creates a Tag from individual parts, which may be of type Tag, Base,
-// Script, Region, Variant, []Variant, Extension, []Extension or error. If a
-// Base, Script or Region or slice of type Variant or Extension is passed more
-// than once, the latter will overwrite the former. Variants and Extensions are
-// accumulated, but if two extensions of the same type are passed, the latter
-// will replace the former. For -u extensions, though, the key-type pairs are
-// added, where later values overwrite older ones. A Tag overwrites all former
-// values and typically only makes sense as the first argument. The resulting
-// tag is returned after canonicalizing using CanonType c. If one or more errors
-// are encountered, one of the errors is returned.
-func (c CanonType) Compose(part ...interface{}) (t Tag, err error) {
- defer func() {
- if recover() != nil {
- t = Tag{}
- err = language.ErrSyntax
- }
- }()
-
- var b language.Builder
- if err = update(&b, part...); err != nil {
- return und, err
- }
- b.Tag, _ = canonicalize(c, b.Tag)
- return makeTag(b.Make()), err
-}
-
-var errInvalidArgument = errors.New("invalid Extension or Variant")
-
-func update(b *language.Builder, part ...interface{}) (err error) {
- for _, x := range part {
- switch v := x.(type) {
- case Tag:
- b.SetTag(v.tag())
- case Base:
- b.Tag.LangID = v.langID
- case Script:
- b.Tag.ScriptID = v.scriptID
- case Region:
- b.Tag.RegionID = v.regionID
- case Variant:
- if v.variant == "" {
- err = errInvalidArgument
- break
- }
- b.AddVariant(v.variant)
- case Extension:
- if v.s == "" {
- err = errInvalidArgument
- break
- }
- b.SetExt(v.s)
- case []Variant:
- b.ClearVariants()
- for _, v := range v {
- b.AddVariant(v.variant)
- }
- case []Extension:
- b.ClearExtensions()
- for _, e := range v {
- b.SetExt(e.s)
- }
- // TODO: support parsing of raw strings based on morphology or just extensions?
- case error:
- if v != nil {
- err = v
- }
- }
- }
- return
-}
-
-var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight")
-var errTagListTooLarge = errors.New("tag list exceeds max length")
-
-// ParseAcceptLanguage parses the contents of an Accept-Language header as
-// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and
-// a list of corresponding quality weights. It is more permissive than RFC 2616
-// and may return non-nil slices even if the input is not valid.
-// The Tags will be sorted by highest weight first and then by first occurrence.
-// Tags with a weight of zero will be dropped. An error will be returned if the
-// input could not be parsed.
-func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) {
- defer func() {
- if recover() != nil {
- tag = nil
- q = nil
- err = language.ErrSyntax
- }
- }()
-
- if strings.Count(s, "-") > 1000 {
- return nil, nil, errTagListTooLarge
- }
-
- var entry string
- for s != "" {
- if entry, s = split(s, ','); entry == "" {
- continue
- }
-
- entry, weight := split(entry, ';')
-
- // Scan the language.
- t, err := Parse(entry)
- if err != nil {
- id, ok := acceptFallback[entry]
- if !ok {
- return nil, nil, err
- }
- t = makeTag(language.Tag{LangID: id})
- }
-
- // Scan the optional weight.
- w := 1.0
- if weight != "" {
- weight = consume(weight, 'q')
- weight = consume(weight, '=')
- // consume returns the empty string when a token could not be
- // consumed, resulting in an error for ParseFloat.
- if w, err = strconv.ParseFloat(weight, 32); err != nil {
- return nil, nil, errInvalidWeight
- }
- // Drop tags with a quality weight of 0.
- if w <= 0 {
- continue
- }
- }
-
- tag = append(tag, t)
- q = append(q, float32(w))
- }
- sort.Stable(&tagSort{tag, q})
- return tag, q, nil
-}
-
-// consume removes a leading token c from s and returns the result or the empty
-// string if there is no such token.
-func consume(s string, c byte) string {
- if s == "" || s[0] != c {
- return ""
- }
- return strings.TrimSpace(s[1:])
-}
-
-func split(s string, c byte) (head, tail string) {
- if i := strings.IndexByte(s, c); i >= 0 {
- return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+1:])
- }
- return strings.TrimSpace(s), ""
-}
-
-// Add hack mapping to deal with a small number of cases that occur
-// in Accept-Language (with reasonable frequency).
-var acceptFallback = map[string]language.Language{
- "english": _en,
- "deutsch": _de,
- "italian": _it,
- "french": _fr,
- "*": _mul, // defined in the spec to match all languages.
-}
-
-type tagSort struct {
- tag []Tag
- q []float32
-}
-
-func (s *tagSort) Len() int {
- return len(s.q)
-}
-
-func (s *tagSort) Less(i, j int) bool {
- return s.q[i] > s.q[j]
-}
-
-func (s *tagSort) Swap(i, j int) {
- s.tag[i], s.tag[j] = s.tag[j], s.tag[i]
- s.q[i], s.q[j] = s.q[j], s.q[i]
-}
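
For reference, a short, illustrative sketch of the parsing and composing entry points from the deleted parse.go (not part of the diff); the inputs are made up for the example and the commented results are indicative.

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Parse applies the Default canonicalization and normalizes casing.
	t, err := language.Parse("en-latn-us")
	fmt.Println(t, err) // en-Latn-US <nil>

	// ParseAcceptLanguage returns tags with their quality weights,
	// sorted by weight and then by order of appearance.
	tags, q, err := language.ParseAcceptLanguage("da, en-GB;q=0.8, en;q=0.7")
	fmt.Println(tags, q, err) // e.g. [da en-GB en] [1 0.8 0.7] <nil>

	// Compose assembles a Tag from typed parts.
	base, _ := language.ParseBase("de")
	region, _ := language.ParseRegion("CH")
	deCH, _ := language.Compose(base, region)
	fmt.Println(deCH) // de-CH
}
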
diff --git a/vendor/golang.org/x/text/language/tables.go b/vendor/golang.org/x/text/language/tables.go
deleted file mode 100644
index 34a732b699..0000000000
--- a/vendor/golang.org/x/text/language/tables.go
+++ /dev/null
@@ -1,298 +0,0 @@
-// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
-
-package language
-
-// CLDRVersion is the CLDR version from which the tables in this package are derived.
-const CLDRVersion = "32"
-
-const (
- _de = 269
- _en = 313
- _fr = 350
- _it = 505
- _mo = 784
- _no = 879
- _nb = 839
- _pt = 960
- _sh = 1031
- _mul = 806
- _und = 0
-)
-const (
- _001 = 1
- _419 = 31
- _BR = 65
- _CA = 73
- _ES = 110
- _GB = 123
- _MD = 188
- _PT = 238
- _UK = 306
- _US = 309
- _ZZ = 357
- _XA = 323
- _XC = 325
- _XK = 333
-)
-const (
- _Latn = 90
- _Hani = 57
- _Hans = 59
- _Hant = 60
- _Qaaa = 147
- _Qaai = 155
- _Qabx = 196
- _Zinh = 252
- _Zyyy = 257
- _Zzzz = 258
-)
-
-var regionToGroups = []uint8{ // 358 elements
- // Entry 0 - 3F
- 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x04,
- 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00,
- 0x00, 0x04, 0x00, 0x00, 0x04, 0x01, 0x00, 0x00,
- 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x04,
- // Entry 40 - 7F
- 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x04, 0x00, 0x00, 0x04, 0x00, 0x04, 0x00,
- 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x08,
- 0x00, 0x04, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x04, 0x00,
- // Entry 80 - BF
- 0x00, 0x00, 0x04, 0x00, 0x00, 0x04, 0x00, 0x00,
- 0x00, 0x04, 0x01, 0x00, 0x04, 0x02, 0x00, 0x04,
- 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00,
- 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x08, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00,
- // Entry C0 - FF
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x01,
- 0x04, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x04, 0x00, 0x05, 0x00, 0x00, 0x00,
- 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- // Entry 100 - 13F
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
- 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x00, 0x04,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 0x04, 0x00,
- 0x00, 0x04, 0x00, 0x04, 0x04, 0x05, 0x00, 0x00,
- // Entry 140 - 17F
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-} // Size: 382 bytes
-
-var paradigmLocales = [][3]uint16{ // 3 elements
- 0: [3]uint16{0x139, 0x0, 0x7b},
- 1: [3]uint16{0x13e, 0x0, 0x1f},
- 2: [3]uint16{0x3c0, 0x41, 0xee},
-} // Size: 42 bytes
-
-type mutualIntelligibility struct {
- want uint16
- have uint16
- distance uint8
- oneway bool
-}
-type scriptIntelligibility struct {
- wantLang uint16
- haveLang uint16
- wantScript uint8
- haveScript uint8
- distance uint8
-}
-type regionIntelligibility struct {
- lang uint16
- script uint8
- group uint8
- distance uint8
-}
-
-// matchLang holds pairs of langIDs of base languages that are typically
-// mutually intelligible. Each pair is associated with a confidence and
-// whether the intelligibility goes one or both ways.
-var matchLang = []mutualIntelligibility{ // 113 elements
- 0: {want: 0x1d1, have: 0xb7, distance: 0x4, oneway: false},
- 1: {want: 0x407, have: 0xb7, distance: 0x4, oneway: false},
- 2: {want: 0x407, have: 0x1d1, distance: 0x4, oneway: false},
- 3: {want: 0x407, have: 0x432, distance: 0x4, oneway: false},
- 4: {want: 0x43a, have: 0x1, distance: 0x4, oneway: false},
- 5: {want: 0x1a3, have: 0x10d, distance: 0x4, oneway: true},
- 6: {want: 0x295, have: 0x10d, distance: 0x4, oneway: true},
- 7: {want: 0x101, have: 0x36f, distance: 0x8, oneway: false},
- 8: {want: 0x101, have: 0x347, distance: 0x8, oneway: false},
- 9: {want: 0x5, have: 0x3e2, distance: 0xa, oneway: true},
- 10: {want: 0xd, have: 0x139, distance: 0xa, oneway: true},
- 11: {want: 0x16, have: 0x367, distance: 0xa, oneway: true},
- 12: {want: 0x21, have: 0x139, distance: 0xa, oneway: true},
- 13: {want: 0x56, have: 0x13e, distance: 0xa, oneway: true},
- 14: {want: 0x58, have: 0x3e2, distance: 0xa, oneway: true},
- 15: {want: 0x71, have: 0x3e2, distance: 0xa, oneway: true},
- 16: {want: 0x75, have: 0x139, distance: 0xa, oneway: true},
- 17: {want: 0x82, have: 0x1be, distance: 0xa, oneway: true},
- 18: {want: 0xa5, have: 0x139, distance: 0xa, oneway: true},
- 19: {want: 0xb2, have: 0x15e, distance: 0xa, oneway: true},
- 20: {want: 0xdd, have: 0x153, distance: 0xa, oneway: true},
- 21: {want: 0xe5, have: 0x139, distance: 0xa, oneway: true},
- 22: {want: 0xe9, have: 0x3a, distance: 0xa, oneway: true},
- 23: {want: 0xf0, have: 0x15e, distance: 0xa, oneway: true},
- 24: {want: 0xf9, have: 0x15e, distance: 0xa, oneway: true},
- 25: {want: 0x100, have: 0x139, distance: 0xa, oneway: true},
- 26: {want: 0x130, have: 0x139, distance: 0xa, oneway: true},
- 27: {want: 0x13c, have: 0x139, distance: 0xa, oneway: true},
- 28: {want: 0x140, have: 0x151, distance: 0xa, oneway: true},
- 29: {want: 0x145, have: 0x13e, distance: 0xa, oneway: true},
- 30: {want: 0x158, have: 0x101, distance: 0xa, oneway: true},
- 31: {want: 0x16d, have: 0x367, distance: 0xa, oneway: true},
- 32: {want: 0x16e, have: 0x139, distance: 0xa, oneway: true},
- 33: {want: 0x16f, have: 0x139, distance: 0xa, oneway: true},
- 34: {want: 0x17e, have: 0x139, distance: 0xa, oneway: true},
- 35: {want: 0x190, have: 0x13e, distance: 0xa, oneway: true},
- 36: {want: 0x194, have: 0x13e, distance: 0xa, oneway: true},
- 37: {want: 0x1a4, have: 0x1be, distance: 0xa, oneway: true},
- 38: {want: 0x1b4, have: 0x139, distance: 0xa, oneway: true},
- 39: {want: 0x1b8, have: 0x139, distance: 0xa, oneway: true},
- 40: {want: 0x1d4, have: 0x15e, distance: 0xa, oneway: true},
- 41: {want: 0x1d7, have: 0x3e2, distance: 0xa, oneway: true},
- 42: {want: 0x1d9, have: 0x139, distance: 0xa, oneway: true},
- 43: {want: 0x1e7, have: 0x139, distance: 0xa, oneway: true},
- 44: {want: 0x1f8, have: 0x139, distance: 0xa, oneway: true},
- 45: {want: 0x20e, have: 0x1e1, distance: 0xa, oneway: true},
- 46: {want: 0x210, have: 0x139, distance: 0xa, oneway: true},
- 47: {want: 0x22d, have: 0x15e, distance: 0xa, oneway: true},
- 48: {want: 0x242, have: 0x3e2, distance: 0xa, oneway: true},
- 49: {want: 0x24a, have: 0x139, distance: 0xa, oneway: true},
- 50: {want: 0x251, have: 0x139, distance: 0xa, oneway: true},
- 51: {want: 0x265, have: 0x139, distance: 0xa, oneway: true},
- 52: {want: 0x274, have: 0x48a, distance: 0xa, oneway: true},
- 53: {want: 0x28a, have: 0x3e2, distance: 0xa, oneway: true},
- 54: {want: 0x28e, have: 0x1f9, distance: 0xa, oneway: true},
- 55: {want: 0x2a3, have: 0x139, distance: 0xa, oneway: true},
- 56: {want: 0x2b5, have: 0x15e, distance: 0xa, oneway: true},
- 57: {want: 0x2b8, have: 0x139, distance: 0xa, oneway: true},
- 58: {want: 0x2be, have: 0x139, distance: 0xa, oneway: true},
- 59: {want: 0x2c3, have: 0x15e, distance: 0xa, oneway: true},
- 60: {want: 0x2ed, have: 0x139, distance: 0xa, oneway: true},
- 61: {want: 0x2f1, have: 0x15e, distance: 0xa, oneway: true},
- 62: {want: 0x2fa, have: 0x139, distance: 0xa, oneway: true},
- 63: {want: 0x2ff, have: 0x7e, distance: 0xa, oneway: true},
- 64: {want: 0x304, have: 0x139, distance: 0xa, oneway: true},
- 65: {want: 0x30b, have: 0x3e2, distance: 0xa, oneway: true},
- 66: {want: 0x31b, have: 0x1be, distance: 0xa, oneway: true},
- 67: {want: 0x31f, have: 0x1e1, distance: 0xa, oneway: true},
- 68: {want: 0x320, have: 0x139, distance: 0xa, oneway: true},
- 69: {want: 0x331, have: 0x139, distance: 0xa, oneway: true},
- 70: {want: 0x351, have: 0x139, distance: 0xa, oneway: true},
- 71: {want: 0x36a, have: 0x347, distance: 0xa, oneway: false},
- 72: {want: 0x36a, have: 0x36f, distance: 0xa, oneway: true},
- 73: {want: 0x37a, have: 0x139, distance: 0xa, oneway: true},
- 74: {want: 0x387, have: 0x139, distance: 0xa, oneway: true},
- 75: {want: 0x389, have: 0x139, distance: 0xa, oneway: true},
- 76: {want: 0x38b, have: 0x15e, distance: 0xa, oneway: true},
- 77: {want: 0x390, have: 0x139, distance: 0xa, oneway: true},
- 78: {want: 0x395, have: 0x139, distance: 0xa, oneway: true},
- 79: {want: 0x39d, have: 0x139, distance: 0xa, oneway: true},
- 80: {want: 0x3a5, have: 0x139, distance: 0xa, oneway: true},
- 81: {want: 0x3be, have: 0x139, distance: 0xa, oneway: true},
- 82: {want: 0x3c4, have: 0x13e, distance: 0xa, oneway: true},
- 83: {want: 0x3d4, have: 0x10d, distance: 0xa, oneway: true},
- 84: {want: 0x3d9, have: 0x139, distance: 0xa, oneway: true},
- 85: {want: 0x3e5, have: 0x15e, distance: 0xa, oneway: true},
- 86: {want: 0x3e9, have: 0x1be, distance: 0xa, oneway: true},
- 87: {want: 0x3fa, have: 0x139, distance: 0xa, oneway: true},
- 88: {want: 0x40c, have: 0x139, distance: 0xa, oneway: true},
- 89: {want: 0x423, have: 0x139, distance: 0xa, oneway: true},
- 90: {want: 0x429, have: 0x139, distance: 0xa, oneway: true},
- 91: {want: 0x431, have: 0x139, distance: 0xa, oneway: true},
- 92: {want: 0x43b, have: 0x139, distance: 0xa, oneway: true},
- 93: {want: 0x43e, have: 0x1e1, distance: 0xa, oneway: true},
- 94: {want: 0x445, have: 0x139, distance: 0xa, oneway: true},
- 95: {want: 0x450, have: 0x139, distance: 0xa, oneway: true},
- 96: {want: 0x461, have: 0x139, distance: 0xa, oneway: true},
- 97: {want: 0x467, have: 0x3e2, distance: 0xa, oneway: true},
- 98: {want: 0x46f, have: 0x139, distance: 0xa, oneway: true},
- 99: {want: 0x476, have: 0x3e2, distance: 0xa, oneway: true},
- 100: {want: 0x3883, have: 0x139, distance: 0xa, oneway: true},
- 101: {want: 0x480, have: 0x139, distance: 0xa, oneway: true},
- 102: {want: 0x482, have: 0x139, distance: 0xa, oneway: true},
- 103: {want: 0x494, have: 0x3e2, distance: 0xa, oneway: true},
- 104: {want: 0x49d, have: 0x139, distance: 0xa, oneway: true},
- 105: {want: 0x4ac, have: 0x529, distance: 0xa, oneway: true},
- 106: {want: 0x4b4, have: 0x139, distance: 0xa, oneway: true},
- 107: {want: 0x4bc, have: 0x3e2, distance: 0xa, oneway: true},
- 108: {want: 0x4e5, have: 0x15e, distance: 0xa, oneway: true},
- 109: {want: 0x4f2, have: 0x139, distance: 0xa, oneway: true},
- 110: {want: 0x512, have: 0x139, distance: 0xa, oneway: true},
- 111: {want: 0x518, have: 0x139, distance: 0xa, oneway: true},
- 112: {want: 0x52f, have: 0x139, distance: 0xa, oneway: true},
-} // Size: 702 bytes
-
-// matchScript holds pairs of scriptIDs where readers of one script
-// can typically also read the other. Each is associated with a confidence.
-var matchScript = []scriptIntelligibility{ // 26 elements
- 0: {wantLang: 0x432, haveLang: 0x432, wantScript: 0x5a, haveScript: 0x20, distance: 0x5},
- 1: {wantLang: 0x432, haveLang: 0x432, wantScript: 0x20, haveScript: 0x5a, distance: 0x5},
- 2: {wantLang: 0x58, haveLang: 0x3e2, wantScript: 0x5a, haveScript: 0x20, distance: 0xa},
- 3: {wantLang: 0xa5, haveLang: 0x139, wantScript: 0xe, haveScript: 0x5a, distance: 0xa},
- 4: {wantLang: 0x1d7, haveLang: 0x3e2, wantScript: 0x8, haveScript: 0x20, distance: 0xa},
- 5: {wantLang: 0x210, haveLang: 0x139, wantScript: 0x2e, haveScript: 0x5a, distance: 0xa},
- 6: {wantLang: 0x24a, haveLang: 0x139, wantScript: 0x4e, haveScript: 0x5a, distance: 0xa},
- 7: {wantLang: 0x251, haveLang: 0x139, wantScript: 0x52, haveScript: 0x5a, distance: 0xa},
- 8: {wantLang: 0x2b8, haveLang: 0x139, wantScript: 0x57, haveScript: 0x5a, distance: 0xa},
- 9: {wantLang: 0x304, haveLang: 0x139, wantScript: 0x6e, haveScript: 0x5a, distance: 0xa},
- 10: {wantLang: 0x331, haveLang: 0x139, wantScript: 0x75, haveScript: 0x5a, distance: 0xa},
- 11: {wantLang: 0x351, haveLang: 0x139, wantScript: 0x22, haveScript: 0x5a, distance: 0xa},
- 12: {wantLang: 0x395, haveLang: 0x139, wantScript: 0x81, haveScript: 0x5a, distance: 0xa},
- 13: {wantLang: 0x39d, haveLang: 0x139, wantScript: 0x36, haveScript: 0x5a, distance: 0xa},
- 14: {wantLang: 0x3be, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa},
- 15: {wantLang: 0x3fa, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa},
- 16: {wantLang: 0x40c, haveLang: 0x139, wantScript: 0xd4, haveScript: 0x5a, distance: 0xa},
- 17: {wantLang: 0x450, haveLang: 0x139, wantScript: 0xe3, haveScript: 0x5a, distance: 0xa},
- 18: {wantLang: 0x461, haveLang: 0x139, wantScript: 0xe6, haveScript: 0x5a, distance: 0xa},
- 19: {wantLang: 0x46f, haveLang: 0x139, wantScript: 0x2c, haveScript: 0x5a, distance: 0xa},
- 20: {wantLang: 0x476, haveLang: 0x3e2, wantScript: 0x5a, haveScript: 0x20, distance: 0xa},
- 21: {wantLang: 0x4b4, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5a, distance: 0xa},
- 22: {wantLang: 0x4bc, haveLang: 0x3e2, wantScript: 0x5a, haveScript: 0x20, distance: 0xa},
- 23: {wantLang: 0x512, haveLang: 0x139, wantScript: 0x3e, haveScript: 0x5a, distance: 0xa},
- 24: {wantLang: 0x529, haveLang: 0x529, wantScript: 0x3b, haveScript: 0x3c, distance: 0xf},
- 25: {wantLang: 0x529, haveLang: 0x529, wantScript: 0x3c, haveScript: 0x3b, distance: 0x13},
-} // Size: 232 bytes
-
-var matchRegion = []regionIntelligibility{ // 15 elements
- 0: {lang: 0x3a, script: 0x0, group: 0x4, distance: 0x4},
- 1: {lang: 0x3a, script: 0x0, group: 0x84, distance: 0x4},
- 2: {lang: 0x139, script: 0x0, group: 0x1, distance: 0x4},
- 3: {lang: 0x139, script: 0x0, group: 0x81, distance: 0x4},
- 4: {lang: 0x13e, script: 0x0, group: 0x3, distance: 0x4},
- 5: {lang: 0x13e, script: 0x0, group: 0x83, distance: 0x4},
- 6: {lang: 0x3c0, script: 0x0, group: 0x3, distance: 0x4},
- 7: {lang: 0x3c0, script: 0x0, group: 0x83, distance: 0x4},
- 8: {lang: 0x529, script: 0x3c, group: 0x2, distance: 0x4},
- 9: {lang: 0x529, script: 0x3c, group: 0x82, distance: 0x4},
- 10: {lang: 0x3a, script: 0x0, group: 0x80, distance: 0x5},
- 11: {lang: 0x139, script: 0x0, group: 0x80, distance: 0x5},
- 12: {lang: 0x13e, script: 0x0, group: 0x80, distance: 0x5},
- 13: {lang: 0x3c0, script: 0x0, group: 0x80, distance: 0x5},
- 14: {lang: 0x529, script: 0x3c, group: 0x80, distance: 0x5},
-} // Size: 114 bytes
-
-// Total table size 1472 bytes (1KiB); checksum: F86C669
diff --git a/vendor/golang.org/x/text/language/tags.go b/vendor/golang.org/x/text/language/tags.go
deleted file mode 100644
index 42ea792666..0000000000
--- a/vendor/golang.org/x/text/language/tags.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package language
-
-import "golang.org/x/text/internal/language/compact"
-
-// TODO: Various sets of commonly use tags and regions.
-
-// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
-// It simplifies safe initialization of Tag values.
-func MustParse(s string) Tag {
- t, err := Parse(s)
- if err != nil {
- panic(err)
- }
- return t
-}
-
-// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
-// It simplifies safe initialization of Tag values.
-func (c CanonType) MustParse(s string) Tag {
- t, err := c.Parse(s)
- if err != nil {
- panic(err)
- }
- return t
-}
-
-// MustParseBase is like ParseBase, but panics if the given base cannot be parsed.
-// It simplifies safe initialization of Base values.
-func MustParseBase(s string) Base {
- b, err := ParseBase(s)
- if err != nil {
- panic(err)
- }
- return b
-}
-
-// MustParseScript is like ParseScript, but panics if the given script cannot be
-// parsed. It simplifies safe initialization of Script values.
-func MustParseScript(s string) Script {
- scr, err := ParseScript(s)
- if err != nil {
- panic(err)
- }
- return scr
-}
-
-// MustParseRegion is like ParseRegion, but panics if the given region cannot be
-// parsed. It simplifies safe initialization of Region values.
-func MustParseRegion(s string) Region {
- r, err := ParseRegion(s)
- if err != nil {
- panic(err)
- }
- return r
-}
-
-var (
- und = Tag{}
-
- Und Tag = Tag{}
-
- Afrikaans Tag = Tag(compact.Afrikaans)
- Amharic Tag = Tag(compact.Amharic)
- Arabic Tag = Tag(compact.Arabic)
- ModernStandardArabic Tag = Tag(compact.ModernStandardArabic)
- Azerbaijani Tag = Tag(compact.Azerbaijani)
- Bulgarian Tag = Tag(compact.Bulgarian)
- Bengali Tag = Tag(compact.Bengali)
- Catalan Tag = Tag(compact.Catalan)
- Czech Tag = Tag(compact.Czech)
- Danish Tag = Tag(compact.Danish)
- German Tag = Tag(compact.German)
- Greek Tag = Tag(compact.Greek)
- English Tag = Tag(compact.English)
- AmericanEnglish Tag = Tag(compact.AmericanEnglish)
- BritishEnglish Tag = Tag(compact.BritishEnglish)
- Spanish Tag = Tag(compact.Spanish)
- EuropeanSpanish Tag = Tag(compact.EuropeanSpanish)
- LatinAmericanSpanish Tag = Tag(compact.LatinAmericanSpanish)
- Estonian Tag = Tag(compact.Estonian)
- Persian Tag = Tag(compact.Persian)
- Finnish Tag = Tag(compact.Finnish)
- Filipino Tag = Tag(compact.Filipino)
- French Tag = Tag(compact.French)
- CanadianFrench Tag = Tag(compact.CanadianFrench)
- Gujarati Tag = Tag(compact.Gujarati)
- Hebrew Tag = Tag(compact.Hebrew)
- Hindi Tag = Tag(compact.Hindi)
- Croatian Tag = Tag(compact.Croatian)
- Hungarian Tag = Tag(compact.Hungarian)
- Armenian Tag = Tag(compact.Armenian)
- Indonesian Tag = Tag(compact.Indonesian)
- Icelandic Tag = Tag(compact.Icelandic)
- Italian Tag = Tag(compact.Italian)
- Japanese Tag = Tag(compact.Japanese)
- Georgian Tag = Tag(compact.Georgian)
- Kazakh Tag = Tag(compact.Kazakh)
- Khmer Tag = Tag(compact.Khmer)
- Kannada Tag = Tag(compact.Kannada)
- Korean Tag = Tag(compact.Korean)
- Kirghiz Tag = Tag(compact.Kirghiz)
- Lao Tag = Tag(compact.Lao)
- Lithuanian Tag = Tag(compact.Lithuanian)
- Latvian Tag = Tag(compact.Latvian)
- Macedonian Tag = Tag(compact.Macedonian)
- Malayalam Tag = Tag(compact.Malayalam)
- Mongolian Tag = Tag(compact.Mongolian)
- Marathi Tag = Tag(compact.Marathi)
- Malay Tag = Tag(compact.Malay)
- Burmese Tag = Tag(compact.Burmese)
- Nepali Tag = Tag(compact.Nepali)
- Dutch Tag = Tag(compact.Dutch)
- Norwegian Tag = Tag(compact.Norwegian)
- Punjabi Tag = Tag(compact.Punjabi)
- Polish Tag = Tag(compact.Polish)
- Portuguese Tag = Tag(compact.Portuguese)
- BrazilianPortuguese Tag = Tag(compact.BrazilianPortuguese)
- EuropeanPortuguese Tag = Tag(compact.EuropeanPortuguese)
- Romanian Tag = Tag(compact.Romanian)
- Russian Tag = Tag(compact.Russian)
- Sinhala Tag = Tag(compact.Sinhala)
- Slovak Tag = Tag(compact.Slovak)
- Slovenian Tag = Tag(compact.Slovenian)
- Albanian Tag = Tag(compact.Albanian)
- Serbian Tag = Tag(compact.Serbian)
- SerbianLatin Tag = Tag(compact.SerbianLatin)
- Swedish Tag = Tag(compact.Swedish)
- Swahili Tag = Tag(compact.Swahili)
- Tamil Tag = Tag(compact.Tamil)
- Telugu Tag = Tag(compact.Telugu)
- Thai Tag = Tag(compact.Thai)
- Turkish Tag = Tag(compact.Turkish)
- Ukrainian Tag = Tag(compact.Ukrainian)
- Urdu Tag = Tag(compact.Urdu)
- Uzbek Tag = Tag(compact.Uzbek)
- Vietnamese Tag = Tag(compact.Vietnamese)
- Chinese Tag = Tag(compact.Chinese)
- SimplifiedChinese Tag = Tag(compact.SimplifiedChinese)
- TraditionalChinese Tag = Tag(compact.TraditionalChinese)
- Zulu Tag = Tag(compact.Zulu)
-)
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index ba53cdcdd1..a0dc0b5e27 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -44,12 +44,12 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) {
return out.Bytes(), err
}
-// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow
-// in the same executable. This function cannot import data from
+// IImportShallow decodes "shallow" types.Package data encoded by
+// IExportShallow in the same executable. This function cannot import data from
// cmd/compile or gcexportdata.Write.
-func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) {
+func IImportShallow(fset *token.FileSet, getPackage GetPackageFunc, data []byte, path string, insert InsertType) (*types.Package, error) {
const bundle = false
- pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert)
+ pkgs, err := iimportCommon(fset, getPackage, data, bundle, path, insert)
if err != nil {
return nil, err
}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index 448f903e86..be6dace153 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -85,7 +85,7 @@ const (
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
- pkgs, err := iimportCommon(fset, imports, data, false, path, nil)
+ pkgs, err := iimportCommon(fset, GetPackageFromMap(imports), data, false, path, nil)
if err != nil {
return 0, nil, err
}
@@ -94,10 +94,33 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []
// IImportBundle imports a set of packages from the serialized package bundle.
func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
- return iimportCommon(fset, imports, data, true, "", nil)
+ return iimportCommon(fset, GetPackageFromMap(imports), data, true, "", nil)
}
-func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) {
+// A GetPackageFunc is a function that gets the package with the given path
+// from the importer state, creating it (with the specified name) if necessary.
+// It is an abstraction of the map historically used to memoize package creation.
+//
+// Two calls with the same path must return the same package.
+//
+// If the given getPackage func returns nil, the import will fail.
+type GetPackageFunc = func(path, name string) *types.Package
+
+// GetPackageFromMap returns a GetPackageFunc that retrieves packages from the
+// given map of package path -> package.
+//
+// The resulting func may mutate m: if a requested package is not found, a new
+// package will be inserted into m.
+func GetPackageFromMap(m map[string]*types.Package) GetPackageFunc {
+ return func(path, name string) *types.Package {
+ if _, ok := m[path]; !ok {
+ m[path] = types.NewPackage(path, name)
+ }
+ return m[path]
+ }
+}
+
+func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) {
const currentVersion = iexportVersionCurrent
version := int64(-1)
if !debug {
@@ -195,10 +218,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
if pkgPath == "" {
pkgPath = path
}
- pkg := imports[pkgPath]
+ pkg := getPackage(pkgPath, pkgName)
if pkg == nil {
- pkg = types.NewPackage(pkgPath, pkgName)
- imports[pkgPath] = pkg
+ errorf("internal error: getPackage returned nil package for %s", pkgPath)
} else if pkg.Name() != pkgName {
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
}
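
The hunks above replace the memoizing `imports` map with a `GetPackageFunc` callback. Since `gcimporter` is an internal package of golang.org/x/tools, the sketch below re-creates `GetPackageFromMap` against the standard `go/types` API purely to illustrate the contract stated in the new doc comment: two calls with the same path must return the same `*types.Package`, and a nil result fails the import. The names here are illustrative, not importable API.

```go
package main

import (
	"fmt"
	"go/types"
)

// GetPackageFunc mirrors the alias added above: it resolves (path, name) to a
// package, creating it on first use.
type GetPackageFunc = func(path, name string) *types.Package

// getPackageFromMap is a stand-in for gcimporter.GetPackageFromMap, built on
// the public go/types API: missing packages are inserted into m on demand.
func getPackageFromMap(m map[string]*types.Package) GetPackageFunc {
	return func(path, name string) *types.Package {
		if _, ok := m[path]; !ok {
			m[path] = types.NewPackage(path, name)
		}
		return m[path]
	}
}

func main() {
	imports := map[string]*types.Package{}
	get := getPackageFromMap(imports)
	p1 := get("example.com/foo", "foo")
	p2 := get("example.com/foo", "foo")
	fmt.Println(p1 == p2, len(imports)) // true 1: same package memoized once
}
```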
diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
index a3fb2d4f29..7e638ec24f 100644
--- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
+++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
@@ -7,7 +7,9 @@
package tokeninternal
import (
+ "fmt"
"go/token"
+ "sort"
"sync"
"unsafe"
)
@@ -57,3 +59,93 @@ func GetLines(file *token.File) []int {
panic("unexpected token.File size")
}
}
+
+// AddExistingFiles adds the specified files to the FileSet if they
+// are not already present. It panics if any pair of files in the
+// resulting FileSet would overlap.
+func AddExistingFiles(fset *token.FileSet, files []*token.File) {
+ // Punch through the FileSet encapsulation.
+ type tokenFileSet struct {
+ // This type remained essentially consistent from go1.16 to go1.21.
+ mutex sync.RWMutex
+ base int
+ files []*token.File
+ _ *token.File // changed to atomic.Pointer[token.File] in go1.19
+ }
+
+ // If the size of token.FileSet changes, this will fail to compile.
+ const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
+ var _ [-delta * delta]int
+
+ type uP = unsafe.Pointer
+ var ptr *tokenFileSet
+ *(*uP)(uP(&ptr)) = uP(fset)
+ ptr.mutex.Lock()
+ defer ptr.mutex.Unlock()
+
+ // Merge and sort.
+ newFiles := append(ptr.files, files...)
+ sort.Slice(newFiles, func(i, j int) bool {
+ return newFiles[i].Base() < newFiles[j].Base()
+ })
+
+ // Reject overlapping files.
+ // Discard adjacent identical files.
+ out := newFiles[:0]
+ for i, file := range newFiles {
+ if i > 0 {
+ prev := newFiles[i-1]
+ if file == prev {
+ continue
+ }
+ if prev.Base()+prev.Size()+1 > file.Base() {
+ panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
+ prev.Name(), prev.Base(), prev.Base()+prev.Size(),
+ file.Name(), file.Base(), file.Base()+file.Size()))
+ }
+ }
+ out = append(out, file)
+ }
+ newFiles = out
+
+ ptr.files = newFiles
+
+ // Advance FileSet.Base().
+ if len(newFiles) > 0 {
+ last := newFiles[len(newFiles)-1]
+ newBase := last.Base() + last.Size() + 1
+ if ptr.base < newBase {
+ ptr.base = newBase
+ }
+ }
+}
+
+// FileSetFor returns a new FileSet containing a sequence of new Files with
+// the same base, size, and line as the input files, for use in APIs that
+// require a FileSet.
+//
+// Precondition: the input files must be non-overlapping, and sorted in order
+// of their Base.
+func FileSetFor(files ...*token.File) *token.FileSet {
+ fset := token.NewFileSet()
+ for _, f := range files {
+ f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
+ lines := GetLines(f)
+ f2.SetLines(lines)
+ }
+ return fset
+}
+
+// CloneFileSet creates a new FileSet holding all files in fset. It does not
+// create copies of the token.Files in fset: they are added to the resulting
+// FileSet unmodified.
+func CloneFileSet(fset *token.FileSet) *token.FileSet {
+ var files []*token.File
+ fset.Iterate(func(f *token.File) bool {
+ files = append(files, f)
+ return true
+ })
+ newFileSet := token.NewFileSet()
+ AddExistingFiles(newFileSet, files)
+ return newFileSet
+}
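
`AddExistingFiles` above protects its `unsafe` punch-through with a compile-time layout check: the array length `-delta * delta` is non-negative only when `delta == 0`, so the package stops compiling the moment the mirrored struct and `token.FileSet` diverge in size. A self-contained sketch of that idiom, with hypothetical struct names:

```go
package main

import "unsafe"

// original stands in for a type we do not control (token.FileSet in the diff);
// mirror is our local copy of its layout.
type original struct {
	base int64
	name string
}
type mirror struct {
	base int64
	name string
}

// delta is a constant because unsafe.Sizeof of a composite literal is a
// compile-time constant expression.
const delta = int64(unsafe.Sizeof(mirror{})) - int64(unsafe.Sizeof(original{}))

// An array length must be a non-negative constant; -delta*delta is zero only
// when the sizes match, so any layout drift becomes a build error.
var _ [-delta * delta]int

func main() {}
```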
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 8e001134da..608aa6e1ac 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -20,10 +20,6 @@ How to get your contributions merged smoothly and quickly.
both author's & review's time is wasted. Create more PRs to address different
concerns and everyone will be happy.
-- For speculative changes, consider opening an issue and discussing it first. If
- you are suggesting a behavioral or API change, consider starting with a [gRFC
- proposal](https://github.com/grpc/proposal).
-
- If you are searching for features to work on, issues labeled [Status: Help
Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
is a great place to start. These issues are well-documented and usually can be
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index 8cd89dab90..ec2c2fa14d 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.30.0
// protoc v4.22.0
// source: grpc/binlog/v1/binarylog.proto
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index b9cc055075..3a76142424 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -244,19 +244,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
}()
- scSet := false
- if cc.dopts.scChan != nil {
- // Try to get an initial service config.
- select {
- case sc, ok := <-cc.dopts.scChan:
- if ok {
- cc.sc = &sc
- cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
- scSet = true
- }
- default:
- }
- }
if cc.dopts.bs == nil {
cc.dopts.bs = backoff.DefaultExponential
}
@@ -272,7 +259,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
- if cc.dopts.scChan != nil && !scSet {
+ if cc.dopts.scChan != nil {
// Blocking wait for the initial service config.
select {
case sc, ok := <-cc.dopts.scChan:
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index e9d6852fd2..cdc8263bda 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -295,6 +295,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption {
// WithBlock returns a DialOption which makes callers of Dial block until the
// underlying connection is up. Without this, Dial returns immediately and
// connecting the server happens in background.
+//
+// Use of this feature is not recommended. For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
func WithBlock() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.block = true
@@ -306,6 +309,9 @@ func WithBlock() DialOption {
// the context.DeadlineExceeded error.
// Implies WithBlock()
//
+// Use of this feature is not recommended. For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
@@ -448,6 +454,9 @@ func withBinaryLogger(bl binarylog.Logger) DialOption {
// FailOnNonTempDialError only affects the initial dial, and does not do
// anything useful unless you are also using WithBlock().
//
+// Use of this feature is not recommended. For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
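
Three dial options now carry a pointer to the anti-patterns document. As a minimal sketch of the shape those docs recommend instead (hypothetical address, insecure credentials only for brevity): skip `WithBlock` entirely, let `Dial` return immediately, and bound waiting with per-RPC contexts.

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial returns immediately; the connection is established in the background
	// and failures surface on the first RPC rather than here.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err) // only option/credential errors arrive here
	}
	defer conn.Close()

	// Each RPC should carry its own context.WithTimeout; that deadline, not
	// WithBlock/WithReturnConnectionError, bounds how long a call may wait for
	// the connection to become ready.
}
```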
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index 04136882c7..3b17705ba0 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -79,7 +79,7 @@ var (
// XDSFederation indicates whether federation support is enabled, which can
// be enabled by setting the environment variable
// "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
- XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false)
+ XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true)
// XDSRLS indicates whether processing of Cluster Specifier plugins and
// support for the RLS CLuster Specifier is enabled, which can be enabled by
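
With the default flipped to `true`, xDS federation is now on unless the environment variable explicitly opts out. `boolFromEnv` itself is not part of this hunk and is internal to grpc-go; the snippet below is only a generic illustration of what a `true` default implies, not grpc-go's actual helper.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// boolFromEnv here is a generic stand-in, not grpc-go's implementation.
func boolFromEnv(name string, def bool) bool {
	v, ok := os.LookupEnv(name)
	if !ok {
		return def // unset: with def=true, federation stays enabled
	}
	if b, err := strconv.ParseBool(v); err == nil {
		return b // e.g. GRPC_EXPERIMENTAL_XDS_FEDERATION=false opts out
	}
	return def
}

func main() {
	fmt.Println(boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true))
}
```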
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
new file mode 100644
index 0000000000..79993d3437
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+ "context"
+
+ "google.golang.org/grpc/internal/buffer"
+)
+
+// CallbackSerializer provides a mechanism to schedule callbacks in a
+// synchronized manner. It provides a FIFO guarantee on the order of execution
+// of scheduled callbacks. New callbacks can be scheduled by invoking the
+// Schedule() method.
+//
+// This type is safe for concurrent access.
+type CallbackSerializer struct {
+ callbacks *buffer.Unbounded
+}
+
+// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
+// context will be passed to the scheduled callbacks. Users should cancel the
+// provided context to shutdown the CallbackSerializer. It is guaranteed that no
+// callbacks will be executed once this context is canceled.
+func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
+ t := &CallbackSerializer{callbacks: buffer.NewUnbounded()}
+ go t.run(ctx)
+ return t
+}
+
+// Schedule adds a callback to be scheduled after existing callbacks are run.
+//
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) {
+ t.callbacks.Put(f)
+}
+
+func (t *CallbackSerializer) run(ctx context.Context) {
+ for ctx.Err() == nil {
+ select {
+ case <-ctx.Done():
+ return
+ case callback := <-t.callbacks.Get():
+ t.callbacks.Load()
+ callback.(func(ctx context.Context))(ctx)
+ }
+ }
+}
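
A usage sketch for the new type. `google.golang.org/grpc/internal/grpcsync` is internal, so this compiles only inside the grpc-go module itself, and the function name is hypothetical; it just illustrates the FIFO and cancellation guarantees documented above.

```go
// Hypothetical helper; grpcsync is importable only from within grpc-go.
func scheduleInOrder(ctx context.Context) {
	ctx, cancel := context.WithCancel(ctx)
	cs := grpcsync.NewCallbackSerializer(ctx)

	cs.Schedule(func(ctx context.Context) { /* runs first */ })
	cs.Schedule(func(ctx context.Context) { /* runs only after the first returns */ })

	// Canceling the context shuts the serializer down; no callback executes
	// once ctx is canceled.
	cancel()
}
```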
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index c343c23a53..be5a9c81eb 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -30,6 +30,7 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/status"
)
@@ -488,12 +489,13 @@ type loopyWriter struct {
bdpEst *bdpEstimator
draining bool
conn net.Conn
+ logger *grpclog.PrefixLogger
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
}
-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn) *loopyWriter {
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter {
var buf bytes.Buffer
l := &loopyWriter{
side: s,
@@ -507,6 +509,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato
hEnc: hpack.NewEncoder(&buf),
bdpEst: bdpEst,
conn: conn,
+ logger: logger,
}
return l
}
@@ -536,8 +539,8 @@ const minBatchSize = 1000
// left open to allow the I/O error to be encountered by the reader instead.
func (l *loopyWriter) run() (err error) {
defer func() {
- if logger.V(logLevel) {
- logger.Infof("transport: loopyWriter exiting with error: %v", err)
+ if l.logger.V(logLevel) {
+ l.logger.Infof("loopyWriter exiting with error: %v", err)
}
if !isIOError(err) {
l.framer.writer.Flush()
@@ -636,8 +639,8 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
if l.side == serverSide {
str, ok := l.estdStreams[h.streamID]
if !ok {
- if logger.V(logLevel) {
- logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
+ if l.logger.V(logLevel) {
+ l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID)
}
return nil
}
@@ -692,8 +695,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
l.hBuf.Reset()
for _, f := range hf {
if err := l.hEnc.WriteField(f); err != nil {
- if logger.V(logLevel) {
- logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
+ if l.logger.V(logLevel) {
+ l.logger.Warningf("Encountered error while encoding headers: %v", err)
}
}
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index e6626bf96e..fbee581b86 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -39,6 +39,7 @@ import (
"golang.org/x/net/http2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
@@ -83,6 +84,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
contentSubtype: contentSubtype,
stats: stats,
}
+ st.logger = prefixLoggerForServerHandlerTransport(st)
if v := r.Header.Get("grpc-timeout"); v != "" {
to, err := decodeTimeout(v)
@@ -150,13 +152,14 @@ type serverHandlerTransport struct {
// TODO make sure this is consistent across handler_server and http2_server
contentSubtype string
- stats []stats.Handler
+ stats []stats.Handler
+ logger *grpclog.PrefixLogger
}
func (ht *serverHandlerTransport) Close(err error) {
ht.closeOnce.Do(func() {
- if logger.V(logLevel) {
- logger.Infof("Closing serverHandlerTransport: %v", err)
+ if ht.logger.V(logLevel) {
+ ht.logger.Infof("Closing: %v", err)
}
close(ht.closedCh)
})
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 9826feb8c6..5216998a88 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -38,6 +38,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
icredentials "google.golang.org/grpc/internal/credentials"
+ "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
@@ -145,6 +146,7 @@ type http2Client struct {
bufferPool *bufferPool
connectionID uint64
+ logger *grpclog.PrefixLogger
}
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
@@ -244,7 +246,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
if err := connectCtx.Err(); err != nil {
// connectCtx expired before exiting the function. Hard close the connection.
if logger.V(logLevel) {
- logger.Infof("newClientTransport: aborting due to connectCtx: %v", err)
+ logger.Infof("Aborting due to connect deadline expiring: %v", err)
}
conn.Close()
}
@@ -346,6 +348,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
bufferPool: newBufferPool(),
onClose: onClose,
}
+ t.logger = prefixLoggerForClientTransport(t)
// Add peer information to the http2client context.
t.ctx = peer.NewContext(t.ctx, t.getPeer())
@@ -444,7 +447,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
return nil, err
}
go func() {
- t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn)
+ t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
t.loopy.run()
close(t.writerDone)
}()
@@ -782,7 +785,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
s.id = h.streamID
s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
t.mu.Lock()
- if t.activeStreams == nil { // Can be niled from Close().
+ if t.state == draining || t.activeStreams == nil { // Can be niled from Close().
t.mu.Unlock()
return false // Don't create a stream if the transport is already closed.
}
@@ -859,8 +862,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
}
}
if transportDrainRequired {
- if logger.V(logLevel) {
- logger.Infof("transport: t.nextID > MaxStreamID. Draining")
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Draining transport: t.nextID > MaxStreamID")
}
t.GracefulClose()
}
@@ -952,8 +955,8 @@ func (t *http2Client) Close(err error) {
t.mu.Unlock()
return
}
- if logger.V(logLevel) {
- logger.Infof("transport: closing: %v", err)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Closing: %v", err)
}
// Call t.onClose ASAP to prevent the client from attempting to create new
// streams.
@@ -1009,8 +1012,8 @@ func (t *http2Client) GracefulClose() {
t.mu.Unlock()
return
}
- if logger.V(logLevel) {
- logger.Infof("transport: GracefulClose called")
+ if t.logger.V(logLevel) {
+ t.logger.Infof("GracefulClose called")
}
t.onClose(GoAwayInvalid)
t.state = draining
@@ -1174,8 +1177,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
}
statusCode, ok := http2ErrConvTab[f.ErrCode]
if !ok {
- if logger.V(logLevel) {
- logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode)
}
statusCode = codes.Unknown
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 99ae1a7374..4b406b8cb0 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -35,7 +35,9 @@ import (
"github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/codes"
@@ -129,6 +131,8 @@ type http2Server struct {
// This lock may not be taken if mu is already held.
maxStreamMu sync.Mutex
maxStreamID uint32 // max stream ID ever seen
+
+ logger *grpclog.PrefixLogger
}
// NewServerTransport creates a http2 transport with conn and configuration
@@ -267,6 +271,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
czData: new(channelzData),
bufferPool: newBufferPool(),
}
+ t.logger = prefixLoggerForServerTransport(t)
// Add peer information to the http2server context.
t.ctx = peer.NewContext(t.ctx, t.getPeer())
@@ -331,7 +336,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
t.handleSettings(sf)
go func() {
- t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn)
+ t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
t.loopy.run()
close(t.writerDone)
@@ -425,8 +430,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
// "Transports must consider requests containing the Connection header
// as malformed." - A41
case "connection":
- if logger.V(logLevel) {
- logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec")
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec")
}
protocolError = true
default:
@@ -436,7 +441,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
v, err := decodeMetadataHeader(hf.Name, hf.Value)
if err != nil {
headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
- logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+ t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
break
}
mdata[hf.Name] = append(mdata[hf.Name], v)
@@ -450,8 +455,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
// error, this takes precedence over a client not speaking gRPC.
if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
- if logger.V(logLevel) {
- logger.Errorf("transport: %v", errMsg)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Aborting the stream early: %v", errMsg)
}
t.controlBuf.put(&earlyAbortStream{
httpStatus: http.StatusBadRequest,
@@ -545,9 +550,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
if httpMethod != http.MethodPost {
t.mu.Unlock()
- errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
- if logger.V(logLevel) {
- logger.Infof("transport: %v", errMsg)
+ errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Aborting the stream early: %v", errMsg)
}
t.controlBuf.put(&earlyAbortStream{
httpStatus: 405,
@@ -563,8 +568,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
var err error
if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
t.mu.Unlock()
- if logger.V(logLevel) {
- logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
}
stat, ok := status.FromError(err)
if !ok {
@@ -638,8 +643,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
if err != nil {
if se, ok := err.(http2.StreamError); ok {
- if logger.V(logLevel) {
- logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
+ if t.logger.V(logLevel) {
+ t.logger.Warningf("Encountered http2.StreamError: %v", se)
}
t.mu.Lock()
s := t.activeStreams[se.StreamID]
@@ -682,8 +687,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
case *http2.GoAwayFrame:
// TODO: Handle GoAway from the client appropriately.
default:
- if logger.V(logLevel) {
- logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Received unsupported frame type %T", frame)
}
}
}
@@ -942,8 +947,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
var sz int64
for _, f := range hdrFrame.hf {
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
- if logger.V(logLevel) {
- logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
}
return false
}
@@ -1056,7 +1061,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
- logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
+ t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
} else {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
}
@@ -1171,8 +1176,8 @@ func (t *http2Server) keepalive() {
select {
case <-ageTimer.C:
// Close the connection after grace period.
- if logger.V(logLevel) {
- logger.Infof("transport: closing server transport due to maximum connection age.")
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Closing server transport due to maximum connection age")
}
t.controlBuf.put(closeConnection{})
case <-t.done:
@@ -1223,8 +1228,8 @@ func (t *http2Server) Close(err error) {
t.mu.Unlock()
return
}
- if logger.V(logLevel) {
- logger.Infof("transport: closing: %v", err)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Closing: %v", err)
}
t.state = closing
streams := t.activeStreams
@@ -1232,8 +1237,8 @@ func (t *http2Server) Close(err error) {
t.mu.Unlock()
t.controlBuf.finish()
close(t.done)
- if err := t.conn.Close(); err != nil && logger.V(logLevel) {
- logger.Infof("transport: error closing conn during Close: %v", err)
+ if err := t.conn.Close(); err != nil && t.logger.V(logLevel) {
+ t.logger.Infof("Error closing underlying net.Conn during Close: %v", err)
}
channelz.RemoveEntry(t.channelzID)
// Cancel all active streams.
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 8fcae4f4d0..19cbb18f5a 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -38,7 +38,6 @@ import (
"golang.org/x/net/http2/hpack"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
@@ -86,7 +85,6 @@ var (
// 504 Gateway timeout - UNAVAILABLE.
http.StatusGatewayTimeout: codes.Unavailable,
}
- logger = grpclog.Component("transport")
)
// isReservedHeader checks whether hdr belongs to HTTP2 headers
diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go
new file mode 100644
index 0000000000..42ed2b07af
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/logging.go
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc/grpclog"
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
+)
+
+var logger = grpclog.Component("transport")
+
+func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger {
+ return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p))
+}
+
+func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger {
+ return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p))
+}
+
+func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger {
+ return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p))
+}
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 087b9ad7c1..76d152a69c 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -43,7 +43,6 @@ import (
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/transport"
@@ -146,7 +145,7 @@ type Server struct {
channelzID *channelz.Identifier
czData *channelzData
- serverWorkerChannels []chan *serverWorkerData
+ serverWorkerChannel chan *serverWorkerData
}
type serverOptions struct {
@@ -561,40 +560,38 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
const serverWorkerResetThreshold = 1 << 16
// serverWorkers blocks on a *transport.Stream channel forever and waits for
-// data to be fed by serveStreams. This allows different requests to be
+// data to be fed by serveStreams. This allows multiple requests to be
// processed by the same goroutine, removing the need for expensive stack
// re-allocations (see the runtime.morestack problem [1]).
//
// [1] https://github.com/golang/go/issues/18138
-func (s *Server) serverWorker(ch chan *serverWorkerData) {
- // To make sure all server workers don't reset at the same time, choose a
- // random number of iterations before resetting.
- threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
- for completed := 0; completed < threshold; completed++ {
- data, ok := <-ch
+func (s *Server) serverWorker() {
+ for completed := 0; completed < serverWorkerResetThreshold; completed++ {
+ data, ok := <-s.serverWorkerChannel
if !ok {
return
}
- s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
- data.wg.Done()
+ s.handleSingleStream(data)
}
- go s.serverWorker(ch)
+ go s.serverWorker()
}
-// initServerWorkers creates worker goroutines and channels to process incoming
+func (s *Server) handleSingleStream(data *serverWorkerData) {
+ defer data.wg.Done()
+ s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
+}
+
+// initServerWorkers creates worker goroutines and a channel to process incoming
// connections to reduce the time spent overall on runtime.morestack.
func (s *Server) initServerWorkers() {
- s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
+ s.serverWorkerChannel = make(chan *serverWorkerData)
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
- s.serverWorkerChannels[i] = make(chan *serverWorkerData)
- go s.serverWorker(s.serverWorkerChannels[i])
+ go s.serverWorker()
}
}
func (s *Server) stopServerWorkers() {
- for i := uint32(0); i < s.opts.numServerWorkers; i++ {
- close(s.serverWorkerChannels[i])
- }
+ close(s.serverWorkerChannel)
}
// NewServer creates a gRPC server which has no service registered and has not
@@ -946,26 +943,21 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
defer st.Close(errors.New("finished serving streams for the server transport"))
var wg sync.WaitGroup
- var roundRobinCounter uint32
st.HandleStreams(func(stream *transport.Stream) {
wg.Add(1)
if s.opts.numServerWorkers > 0 {
data := &serverWorkerData{st: st, wg: &wg, stream: stream}
select {
- case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
+ case s.serverWorkerChannel <- data:
+ return
default:
// If all stream workers are busy, fallback to the default code path.
- go func() {
- s.handleStream(st, stream, s.traceInfo(st, stream))
- wg.Done()
- }()
}
- } else {
- go func() {
- defer wg.Done()
- s.handleStream(st, stream, s.traceInfo(st, stream))
- }()
}
+ go func() {
+ defer wg.Done()
+ s.handleStream(st, stream, s.traceInfo(st, stream))
+ }()
}, func(ctx context.Context, method string) context.Context {
if !EnableTracing {
return ctx
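
The server-side change above collapses the per-worker channels and round-robin counter into a single shared, unbuffered channel: an idle worker picks the stream up via a non-blocking send, and if every worker is busy the stream falls back to its own goroutine. A self-contained sketch of that dispatch pattern (names are illustrative, not grpc-go's):

```go
package main

import (
	"fmt"
	"sync"
)

type task func()

// worker drains the shared channel until it is closed, reusing one goroutine
// stack for many tasks (the runtime.morestack motivation cited above).
func worker(ch <-chan task) {
	for t := range ch {
		t()
	}
}

func main() {
	const numWorkers = 4
	ch := make(chan task) // unbuffered, like the new serverWorkerChannel

	for i := 0; i < numWorkers; i++ {
		go worker(ch)
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		i := i
		wg.Add(1)
		t := func() { defer wg.Done(); fmt.Println("handled", i) }
		select {
		case ch <- t: // an idle worker accepted the task
		default:
			go t() // all workers busy: fall back to a dedicated goroutine
		}
	}
	wg.Wait()
	close(ch)
}
```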
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
index 623be39f26..53910fb7c9 100644
--- a/vendor/google.golang.org/grpc/status/status.go
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -77,7 +77,9 @@ func FromProto(s *spb.Status) *Status {
// FromError returns a Status representation of err.
//
// - If err was produced by this package or implements the method `GRPCStatus()
-// *Status`, the appropriate Status is returned.
+// *Status`, or if err wraps a type satisfying this, the appropriate Status is
+// returned. For wrapped errors, the message returned contains the entire
+// err.Error() text and not just the wrapped status.
//
// - If err is nil, a Status is returned with codes.OK and no message.
//
@@ -88,10 +90,15 @@ func FromError(err error) (s *Status, ok bool) {
if err == nil {
return nil, true
}
- if se, ok := err.(interface {
- GRPCStatus() *Status
- }); ok {
- return se.GRPCStatus(), true
+ type grpcstatus interface{ GRPCStatus() *Status }
+ if gs, ok := err.(grpcstatus); ok {
+ return gs.GRPCStatus(), true
+ }
+ var gs grpcstatus
+ if errors.As(err, &gs) {
+ p := gs.GRPCStatus().Proto()
+ p.Message = err.Error()
+ return status.FromProto(p), true
}
return New(codes.Unknown, err.Error()), false
}
@@ -103,19 +110,16 @@ func Convert(err error) *Status {
return s
}
-// Code returns the Code of the error if it is a Status error, codes.OK if err
-// is nil, or codes.Unknown otherwise.
+// Code returns the Code of the error if it is a Status error or if it wraps a
+// Status error. If that is not the case, it returns codes.OK if err is nil, or
+// codes.Unknown otherwise.
func Code(err error) codes.Code {
// Don't use FromError to avoid allocation of OK status.
if err == nil {
return codes.OK
}
- if se, ok := err.(interface {
- GRPCStatus() *Status
- }); ok {
- return se.GRPCStatus().Code()
- }
- return codes.Unknown
+
+ return Convert(err).Code()
}
// FromContextError converts a context error or wrapped context error into a
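
The wrapped-error handling added above is visible through the public `status` package. A small, runnable sketch of the new behavior described in the doc comments (the code is taken from the wrapped status, the message from the full `err.Error()`):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	base := status.Error(codes.NotFound, "user not found")
	wrapped := fmt.Errorf("lookup failed: %w", base)

	s, ok := status.FromError(wrapped)
	fmt.Println(ok, s.Code(), s.Message()) // true NotFound lookup failed: user not found

	// Code now delegates to Convert, so it also unwraps.
	fmt.Println(status.Code(wrapped)) // NotFound
}
```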
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 3c6e3c9118..853ce0e30f 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.54.0"
+const Version = "1.55.0"
diff --git a/vendor/k8s.io/klog/v2/.gitignore b/vendor/k8s.io/klog/v2/.gitignore
deleted file mode 100644
index 0aa2002392..0000000000
--- a/vendor/k8s.io/klog/v2/.gitignore
+++ /dev/null
@@ -1,17 +0,0 @@
-# OSX leaves these everywhere on SMB shares
-._*
-
-# OSX trash
-.DS_Store
-
-# Eclipse files
-.classpath
-.project
-.settings/**
-
-# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA
-.idea/
-*.iml
-
-# Vscode files
-.vscode
diff --git a/vendor/k8s.io/klog/v2/CONTRIBUTING.md b/vendor/k8s.io/klog/v2/CONTRIBUTING.md
deleted file mode 100644
index 2641b1f41b..0000000000
--- a/vendor/k8s.io/klog/v2/CONTRIBUTING.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Contributing Guidelines
-
-Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
-
-_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
-
-## Getting Started
-
-We have full documentation on how to get started contributing here:
-
-- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
-- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
-- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers
-
-## Mentorship
-
-- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
-
-## Contact Information
-
-- [Slack](https://kubernetes.slack.com/messages/sig-architecture)
-- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture)
diff --git a/vendor/k8s.io/klog/v2/LICENSE b/vendor/k8s.io/klog/v2/LICENSE
deleted file mode 100644
index 37ec93a14f..0000000000
--- a/vendor/k8s.io/klog/v2/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS
deleted file mode 100644
index a2fe8f351b..0000000000
--- a/vendor/k8s.io/klog/v2/OWNERS
+++ /dev/null
@@ -1,14 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-reviewers:
- - harshanarayana
- - pohly
-approvers:
- - dims
- - thockin
- - serathius
-emeritus_approvers:
- - brancz
- - justinsb
- - lavalamp
- - piosz
- - tallclair
diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md
deleted file mode 100644
index d45cbe1720..0000000000
--- a/vendor/k8s.io/klog/v2/README.md
+++ /dev/null
@@ -1,118 +0,0 @@
-klog
-====
-
-klog is a permanent fork of https://github.com/golang/glog.
-
-## Why was klog created?
-
-The decision to create klog was one that wasn't made lightly, but it was necessary due to some
-drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README:
-
-> The code in this repo [...] is not itself under development
-
-This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below:
-
- * `glog` [presents a lot of "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented.
- * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it
- * A long term goal is to implement a logging interface that allows us to add context, change output format, etc.
-
-Historical context is available here:
-
- * https://github.com/kubernetes/kubernetes/issues/61006
- * https://github.com/kubernetes/kubernetes/issues/70264
- * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ
- * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ
-
-## Release versioning
-
-Semantic versioning is used in this repository. It contains several Go modules
-with different levels of stability:
-- `k8s.io/klog/v2` - stable API, `vX.Y.Z` tags
-- `examples` - no stable API, no tags, no intention to ever stabilize
-
-Exempt from the API stability guarantee are items (packages, functions, etc.)
-which are marked explicitly as `EXPERIMENTAL` in their docs comment. Those
-may still change in incompatible ways or get removed entirely. This can only
-be used for code that is used in tests to avoid situations where non-test
-code from two different Kubernetes dependencies depends on incompatible
-releases of klog because an experimental API was changed.
-
-----
-
-How to use klog
-===============
-- Replace imports for `"github.com/golang/glog"` with `"k8s.io/klog/v2"`
-- Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags
-- You can now use `log_file` instead of `log_dir` for logging to a single file (See `examples/log_file/usage_log_file.go`)
-- If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`)
-- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md))
-- See our documentation on [pkg.go.dev/k8s.io](https://pkg.go.dev/k8s.io/klog).
-
-**NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater.
-
-### Coexisting with klog/v2
-
-See [this example](examples/coexist_klog_v1_and_v2/) to see how to coexist with both klog/v1 and klog/v2.
-
-### Coexisting with glog
-This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and synchronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`.
-
-## Community, discussion, contribution, and support
-
-Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
-
-You can reach the maintainers of this project at:
-
-- [Slack](https://kubernetes.slack.com/messages/klog)
-- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture)
-
-### Code of conduct
-
-Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
-
-----
-
-glog
-====
-
-Leveled execution logs for Go.
-
-This is an efficient pure Go implementation of leveled logs in the
-manner of the open source C++ package
- https://github.com/google/glog
-
-By binding methods to booleans it is possible to use the log package
-without paying the expense of evaluating the arguments to the log.
-Through the -vmodule flag, the package also provides fine-grained
-control over logging at the file level.
-
-The comment from glog.go introduces the ideas:
-
- Package glog implements logging analogous to the Google-internal
- C++ INFO/ERROR/V setup. It provides functions Info, Warning,
- Error, Fatal, plus formatting variants such as Infof. It
- also provides V-style logging controlled by the -v and
- -vmodule=file=2 flags.
-
- Basic examples:
-
- glog.Info("Prepare to repel boarders")
-
- glog.Fatalf("Initialization failed: %s", err)
-
- See the documentation of the V function for an explanation
- of these examples:
-
- if glog.V(2) {
- glog.Info("Starting transaction...")
- }
-
- glog.V(2).Infoln("Processed", nItems, "elements")
-
-
-The repository contains an open source version of the log package
-used inside Google. The master copy of the source lives inside
-Google, not here. The code in this repo is for export only and is not itself
-under development. Feature requests will be ignored.
-
-Send bug reports to golang-nuts@googlegroups.com.
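
The README removed above boils klog usage down to registering flags explicitly and optionally redirecting output. The following is a minimal sketch of that initialization flow, assuming a standalone `main` package using only the public `k8s.io/klog/v2` API (no logr/zap backend shown):

```go
// Minimal sketch of the setup steps the klog README describes.
package main

import (
	"flag"
	"os"

	"k8s.io/klog/v2"
)

func main() {
	// klog no longer registers its flags in init(), so register them explicitly.
	klog.InitFlags(nil)
	flag.Parse()

	// Optionally divert all klog output to a single io.Writer (stderr, syslog, ...).
	klog.SetOutput(os.Stderr)

	klog.Info("starting up")
	if klog.V(2).Enabled() {
		klog.Info("verbose details enabled")
	}
	klog.Flush()
}
```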
diff --git a/vendor/k8s.io/klog/v2/RELEASE.md b/vendor/k8s.io/klog/v2/RELEASE.md
deleted file mode 100644
index b53eb960ce..0000000000
--- a/vendor/k8s.io/klog/v2/RELEASE.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Release Process
-
-`klog` is released on an as-needed basis. The process is as follows:
-
-1. An issue is proposing a new release with a changelog since the last release
-1. All [OWNERS](OWNERS) must LGTM this release
-1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
-1. The release issue is closed
-1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released`
diff --git a/vendor/k8s.io/klog/v2/SECURITY.md b/vendor/k8s.io/klog/v2/SECURITY.md
deleted file mode 100644
index 2083d44cdf..0000000000
--- a/vendor/k8s.io/klog/v2/SECURITY.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Security Policy
-
-## Security Announcements
-
-Join the [kubernetes-security-announce] group for security and vulnerability announcements.
-
-You can also subscribe to an RSS feed of the above using [this link][kubernetes-security-announce-rss].
-
-## Reporting a Vulnerability
-
-Instructions for reporting a vulnerability can be found on the
-[Kubernetes Security and Disclosure Information] page.
-
-## Supported Versions
-
-Information about supported Kubernetes versions can be found on the
-[Kubernetes version and version skew support policy] page on the Kubernetes website.
-
-[kubernetes-security-announce]: https://groups.google.com/forum/#!forum/kubernetes-security-announce
-[kubernetes-security-announce-rss]: https://groups.google.com/forum/feed/kubernetes-security-announce/msgs/rss_v2_0.xml?num=50
-[Kubernetes version and version skew support policy]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions
-[Kubernetes Security and Disclosure Information]: https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability
diff --git a/vendor/k8s.io/klog/v2/SECURITY_CONTACTS b/vendor/k8s.io/klog/v2/SECURITY_CONTACTS
deleted file mode 100644
index 6128a58699..0000000000
--- a/vendor/k8s.io/klog/v2/SECURITY_CONTACTS
+++ /dev/null
@@ -1,20 +0,0 @@
-# Defined below are the security contacts for this repo.
-#
-# They are the contact point for the Product Security Committee to reach out
-# to for triaging and handling of incoming issues.
-#
-# The below names agree to abide by the
-# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
-# and will be removed and replaced if they violate that agreement.
-#
-# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
-# INSTRUCTIONS AT https://kubernetes.io/security/
-
-dims
-thockin
-justinsb
-tallclair
-piosz
-brancz
-DirectXMan12
-lavalamp
diff --git a/vendor/k8s.io/klog/v2/code-of-conduct.md b/vendor/k8s.io/klog/v2/code-of-conduct.md
deleted file mode 100644
index 0d15c00cf3..0000000000
--- a/vendor/k8s.io/klog/v2/code-of-conduct.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Kubernetes Community Code of Conduct
-
-Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go
deleted file mode 100644
index 005513f2a7..0000000000
--- a/vendor/k8s.io/klog/v2/contextual.go
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package klog
-
-import (
- "context"
-
- "github.com/go-logr/logr"
-)
-
-// This file provides the implementation of
-// https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1602-structured-logging
-//
-// SetLogger and ClearLogger were originally added to klog.go and got moved
-// here. Contextual logging adds a way to retrieve a Logger for direct logging
-// without the logging calls in klog.go.
-//
-// The global variables are expected to be modified only during sequential
-// parts of a program (init, serial tests) and therefore are not protected by
-// mutex locking.
-
-var (
- // klogLogger is used as fallback for logging through the normal klog code
- // when no Logger is set.
- klogLogger logr.Logger = logr.New(&klogger{})
-)
-
-// SetLogger sets a Logger implementation that will be used as backing
-// implementation of the traditional klog log calls. klog will do its own
-// verbosity checks before calling logger.V().Info. logger.Error is always
-// called, regardless of the klog verbosity settings.
-//
-// If set, all log lines will be suppressed from the regular output, and
-// redirected to the logr implementation.
-// Use as:
-//
-// ...
-// klog.SetLogger(zapr.NewLogger(zapLog))
-//
-// To remove a backing logr implementation, use ClearLogger. Setting an
-// empty logger with SetLogger(logr.Logger{}) does not work.
-//
-// Modifying the logger is not thread-safe and should be done while no other
-// goroutines invoke log calls, usually during program initialization.
-func SetLogger(logger logr.Logger) {
- SetLoggerWithOptions(logger)
-}
-
-// SetLoggerWithOptions is a more flexible version of SetLogger. Without
-// additional options, it behaves exactly like SetLogger. By passing
-// ContextualLogger(true) as option, it can be used to set a logger that then
-// will also get called directly by applications which retrieve it via
-// FromContext, Background, or TODO.
-//
-// Supporting direct calls is recommended because it avoids the overhead of
-// routing log entries through klogr into klog and then into the actual Logger
-// backend.
-func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) {
- logging.loggerOptions = loggerOptions{}
- for _, opt := range opts {
- opt(&logging.loggerOptions)
- }
- logging.logger = &logWriter{
- Logger: logger,
- writeKlogBuffer: logging.loggerOptions.writeKlogBuffer,
- }
-}
-
-// ContextualLogger determines whether the logger passed to
-// SetLoggerWithOptions may also get called directly. Such a logger cannot rely
-// on verbosity checking in klog.
-func ContextualLogger(enabled bool) LoggerOption {
- return func(o *loggerOptions) {
- o.contextualLogger = enabled
- }
-}
-
-// FlushLogger provides a callback for flushing data buffered by the logger.
-func FlushLogger(flush func()) LoggerOption {
- return func(o *loggerOptions) {
- o.flush = flush
- }
-}
-
-// WriteKlogBuffer sets a callback that will be invoked by klog to write output
-// produced by non-structured log calls like Infof.
-//
-// The buffer will contain exactly the same data that klog normally would write
-// into its own output stream(s). In particular this includes the header, if
-// klog is configured to write one. The callback then can divert that data into
-// its own output streams. The buffer may or may not end in a line break.
-//
-// Without such a callback, klog will call the logger's Info or Error method
-// with just the message string (i.e. no header).
-func WriteKlogBuffer(write func([]byte)) LoggerOption {
- return func(o *loggerOptions) {
- o.writeKlogBuffer = write
- }
-}
-
-// LoggerOption implements the functional parameter paradigm for
-// SetLoggerWithOptions.
-type LoggerOption func(o *loggerOptions)
-
-type loggerOptions struct {
- contextualLogger bool
- flush func()
- writeKlogBuffer func([]byte)
-}
-
-// logWriter combines a logger (always set) with a write callback (optional).
-type logWriter struct {
- Logger
- writeKlogBuffer func([]byte)
-}
-
-// ClearLogger removes a backing Logger implementation if one was set earlier
-// with SetLogger.
-//
-// Modifying the logger is not thread-safe and should be done while no other
-// goroutines invoke log calls, usually during program initialization.
-func ClearLogger() {
- logging.logger = nil
- logging.loggerOptions = loggerOptions{}
-}
-
-// EnableContextualLogging controls whether contextual logging is enabled.
-// By default it is enabled. When disabled, FromContext avoids looking up
-// the logger in the context and always returns the global logger.
-// LoggerWithValues, LoggerWithName, and NewContext become no-ops
-// and return their input logger respectively context. This may be useful
-// to avoid the additional overhead for contextual logging.
-//
-// This must be called during initialization before goroutines are started.
-func EnableContextualLogging(enabled bool) {
- logging.contextualLoggingEnabled = enabled
-}
-
-// FromContext retrieves a logger set by the caller or, if not set,
-// falls back to the program's global logger (a Logger instance or klog
-// itself).
-func FromContext(ctx context.Context) Logger {
- if logging.contextualLoggingEnabled {
- if logger, err := logr.FromContext(ctx); err == nil {
- return logger
- }
- }
-
- return Background()
-}
-
-// TODO can be used as a last resort by code that has no means of
-// receiving a logger from its caller. FromContext or an explicit logger
-// parameter should be used instead.
-func TODO() Logger {
- return Background()
-}
-
-// Background retrieves the fallback logger. It should not be called before
-// that logger was initialized by the program and not by code that should
-// better receive a logger via its parameters. TODO can be used as a temporary
-// solution for such code.
-func Background() Logger {
- if logging.loggerOptions.contextualLogger {
- // Is non-nil because logging.loggerOptions.contextualLogger is
- // only true if a logger was set.
- return logging.logger.Logger
- }
-
- return klogLogger
-}
-
-// LoggerWithValues returns logger.WithValues(...kv) when
-// contextual logging is enabled, otherwise the logger.
-func LoggerWithValues(logger Logger, kv ...interface{}) Logger {
- if logging.contextualLoggingEnabled {
- return logger.WithValues(kv...)
- }
- return logger
-}
-
-// LoggerWithName returns logger.WithName(name) when contextual logging is
-// enabled, otherwise the logger.
-func LoggerWithName(logger Logger, name string) Logger {
- if logging.contextualLoggingEnabled {
- return logger.WithName(name)
- }
- return logger
-}
-
-// NewContext returns logr.NewContext(ctx, logger) when
-// contextual logging is enabled, otherwise ctx.
-func NewContext(ctx context.Context, logger Logger) context.Context {
- if logging.contextualLoggingEnabled {
- return logr.NewContext(ctx, logger)
- }
- return ctx
-}
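
The contextual.go file removed above defines the backend-registration and context-propagation helpers. A hedged sketch of how they fit together, using a `funcr`-based logr backend purely as a stand-in for a real one:

```go
// Sketch: install a logr backend once, then retrieve the logger via the context.
package main

import (
	"context"
	"fmt"

	"github.com/go-logr/logr/funcr"
	"k8s.io/klog/v2"
)

func main() {
	backend := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{})

	// Route klog output through the backend and allow direct use via FromContext.
	klog.SetLoggerWithOptions(backend, klog.ContextualLogger(true))
	defer klog.ClearLogger()

	ctx := klog.NewContext(context.Background(),
		klog.LoggerWithName(klog.Background(), "worker"))
	doWork(ctx)
}

func doWork(ctx context.Context) {
	// Falls back to the global logger if the caller did not attach one.
	logger := klog.FromContext(ctx)
	logger.Info("processing item", "item", 42)
}
```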
diff --git a/vendor/k8s.io/klog/v2/exit.go b/vendor/k8s.io/klog/v2/exit.go
deleted file mode 100644
index 320a147728..0000000000
--- a/vendor/k8s.io/klog/v2/exit.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
-//
-// Copyright 2013 Google Inc. All Rights Reserved.
-// Copyright 2022 The Kubernetes Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package klog
-
-import (
- "fmt"
- "os"
- "time"
-)
-
-var (
-
- // ExitFlushTimeout is the timeout that klog has traditionally used during
- // calls like Fatal or Exit when flushing log data right before exiting.
- // Applications that replace those calls and do not have some specific
- // requirements like "exit immediately" can use this value as parameter
- // for FlushAndExit.
- //
- // Can be set for testing purpose or to change the application's
- // default.
- ExitFlushTimeout = 10 * time.Second
-
- // OsExit is the function called by FlushAndExit to terminate the program.
- //
- // Can be set for testing purpose or to change the application's
- // default behavior. Note that the function should not simply return
- // because callers of functions like Fatal will not expect that.
- OsExit = os.Exit
-)
-
-// FlushAndExit flushes log data for a certain amount of time and then calls
-// os.Exit. Combined with some logging call it provides a replacement for
-// traditional calls like Fatal or Exit.
-func FlushAndExit(flushTimeout time.Duration, exitCode int) {
- timeoutFlush(flushTimeout)
- OsExit(exitCode)
-}
-
-// timeoutFlush calls Flush and returns when it completes or after timeout
-// elapses, whichever happens first. This is needed because the hooks invoked
-// by Flush may deadlock when klog.Fatal is called from a hook that holds
-// a lock. Flushing also might take too long.
-func timeoutFlush(timeout time.Duration) {
- done := make(chan bool, 1)
- go func() {
- Flush() // calls logging.lockAndFlushAll()
- done <- true
- }()
- select {
- case <-done:
- case <-time.After(timeout):
- fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout)
- }
-}
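
exit.go above provides the replacement for traditional Fatal-style calls. A small sketch of the intended call pattern, assuming the caller is happy with the default flush timeout:

```go
// Sketch: log the error, then flush and exit instead of calling klog.Fatal.
package main

import "k8s.io/klog/v2"

func main() {
	if err := run(); err != nil {
		klog.ErrorS(err, "run failed")
		// Flush for up to ExitFlushTimeout, then call OsExit (os.Exit by default).
		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
	}
}

func run() error { return nil }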
diff --git a/vendor/k8s.io/klog/v2/imports.go b/vendor/k8s.io/klog/v2/imports.go
deleted file mode 100644
index 602c3ed9e6..0000000000
--- a/vendor/k8s.io/klog/v2/imports.go
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package klog
-
-import (
- "github.com/go-logr/logr"
-)
-
-// The reason for providing these aliases is to allow code to work with logr
-// without directly importing it.
-
-// Logger in this package is exactly the same as logr.Logger.
-type Logger = logr.Logger
-
-// LogSink in this package is exactly the same as logr.LogSink.
-type LogSink = logr.LogSink
-
-// RuntimeInfo in this package is exactly the same as logr.RuntimeInfo.
-type RuntimeInfo = logr.RuntimeInfo
-
-var (
- // New is an alias for logr.New.
- New = logr.New
-)
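
The aliases in imports.go above exist so that downstream code can speak in terms of `klog.Logger` without importing logr itself. A tiny sketch (the package name is hypothetical):

```go
// Sketch: accept a klog.Logger, which is just an alias for logr.Logger,
// so any logr implementation can be passed in.
package mylib // hypothetical package name

import "k8s.io/klog/v2"

func Process(logger klog.Logger, items []string) {
	for _, item := range items {
		logger.V(1).Info("processing", "item", item)
	}
}
```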
diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go
deleted file mode 100644
index f325ded5e9..0000000000
--- a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2013 Google Inc. All Rights Reserved.
-// Copyright 2022 The Kubernetes Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package buffer provides a cache for byte.Buffer instances that can be reused
-// to avoid frequent allocation and deallocation. It also has utility code
-// for log header formatting that use these buffers.
-package buffer
-
-import (
- "bytes"
- "os"
- "sync"
- "time"
-
- "k8s.io/klog/v2/internal/severity"
-)
-
-var (
- // Pid is inserted into log headers. Can be overridden for tests.
- Pid = os.Getpid()
-)
-
-// Buffer holds a single byte.Buffer for reuse. The zero value is ready for
-// use. It also provides some helper methods for output formatting.
-type Buffer struct {
- bytes.Buffer
- Tmp [64]byte // temporary byte array for creating headers.
- next *Buffer
-}
-
-var buffers = sync.Pool{
- New: func() interface{} {
- return new(Buffer)
- },
-}
-
-// GetBuffer returns a new, ready-to-use buffer.
-func GetBuffer() *Buffer {
- b := buffers.Get().(*Buffer)
- b.Reset()
- return b
-}
-
-// PutBuffer returns a buffer to the free list.
-func PutBuffer(b *Buffer) {
- if b.Len() >= 256 {
- // Let big buffers die a natural death, without relying on
- // sync.Pool behavior. The documentation implies that items may
- // get deallocated while stored there ("If the Pool holds the
- // only reference when this [= be removed automatically]
- // happens, the item might be deallocated."), but
- // https://github.com/golang/go/issues/23199 leans more towards
- // having such a size limit.
- return
- }
-
- buffers.Put(b)
-}
-
-// Some custom tiny helper functions to print the log header efficiently.
-
-const digits = "0123456789"
-
-// twoDigits formats a zero-prefixed two-digit integer at buf.Tmp[i].
-func (buf *Buffer) twoDigits(i, d int) {
- buf.Tmp[i+1] = digits[d%10]
- d /= 10
- buf.Tmp[i] = digits[d%10]
-}
-
-// nDigits formats an n-digit integer at buf.Tmp[i],
-// padding with pad on the left.
-// It assumes d >= 0.
-func (buf *Buffer) nDigits(n, i, d int, pad byte) {
- j := n - 1
- for ; j >= 0 && d > 0; j-- {
- buf.Tmp[i+j] = digits[d%10]
- d /= 10
- }
- for ; j >= 0; j-- {
- buf.Tmp[i+j] = pad
- }
-}
-
-// someDigits formats a zero-prefixed variable-width integer at buf.Tmp[i].
-func (buf *Buffer) someDigits(i, d int) int {
- // Print into the top, then copy down. We know there's space for at least
- // a 10-digit number.
- j := len(buf.Tmp)
- for {
- j--
- buf.Tmp[j] = digits[d%10]
- d /= 10
- if d == 0 {
- break
- }
- }
- return copy(buf.Tmp[i:], buf.Tmp[j:])
-}
-
-// FormatHeader formats a log header using the provided file name and line number
-// and writes it into the buffer.
-func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) {
- if line < 0 {
- line = 0 // not a real line number, but acceptable to someDigits
- }
- if s > severity.FatalLog {
- s = severity.InfoLog // for safety.
- }
-
- // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
- // It's worth about 3X. Fprintf is hard.
- _, month, day := now.Date()
- hour, minute, second := now.Clock()
- // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
- buf.Tmp[0] = severity.Char[s]
- buf.twoDigits(1, int(month))
- buf.twoDigits(3, day)
- buf.Tmp[5] = ' '
- buf.twoDigits(6, hour)
- buf.Tmp[8] = ':'
- buf.twoDigits(9, minute)
- buf.Tmp[11] = ':'
- buf.twoDigits(12, second)
- buf.Tmp[14] = '.'
- buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
- buf.Tmp[21] = ' '
- buf.nDigits(7, 22, Pid, ' ') // TODO: should be TID
- buf.Tmp[29] = ' '
- buf.Write(buf.Tmp[:30])
- buf.WriteString(file)
- buf.Tmp[0] = ':'
- n := buf.someDigits(1, line)
- buf.Tmp[n+1] = ']'
- buf.Tmp[n+2] = ' '
- buf.Write(buf.Tmp[:n+3])
-}
-
-// SprintHeader formats a log header and returns a string. This is a simpler
-// version of FormatHeader for use in ktesting.
-func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string {
- if s > severity.FatalLog {
- s = severity.InfoLog // for safety.
- }
-
- // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
- // It's worth about 3X. Fprintf is hard.
- _, month, day := now.Date()
- hour, minute, second := now.Clock()
- // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
- buf.Tmp[0] = severity.Char[s]
- buf.twoDigits(1, int(month))
- buf.twoDigits(3, day)
- buf.Tmp[5] = ' '
- buf.twoDigits(6, hour)
- buf.Tmp[8] = ':'
- buf.twoDigits(9, minute)
- buf.Tmp[11] = ':'
- buf.twoDigits(12, second)
- buf.Tmp[14] = '.'
- buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
- buf.Tmp[21] = ']'
- return string(buf.Tmp[:22])
-}
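
The buffer package above implements a bounded sync.Pool: small buffers are reused, oversized ones are dropped so their memory is not pinned by the pool. A generic sketch of the same pattern, outside klog (package and names are hypothetical):

```go
// Sketch of a size-bounded buffer pool in the style of the code above.
package bufpool // hypothetical package name

import (
	"bytes"
	"sync"
)

const maxPooledSize = 256

var pool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

// Get returns a reset buffer ready for use.
func Get() *bytes.Buffer {
	b := pool.Get().(*bytes.Buffer)
	b.Reset()
	return b
}

// Put returns the buffer to the pool unless it has grown too large.
func Put(b *bytes.Buffer) {
	if b.Len() >= maxPooledSize {
		return // let big buffers die a natural death
	}
	pool.Put(b)
}
```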
diff --git a/vendor/k8s.io/klog/v2/internal/clock/README.md b/vendor/k8s.io/klog/v2/internal/clock/README.md
deleted file mode 100644
index 03d692c8f8..0000000000
--- a/vendor/k8s.io/klog/v2/internal/clock/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Clock
-
-This package provides an interface for time-based operations. It allows
-mocking time for testing.
-
-This is a copy of k8s.io/utils/clock. We have to copy it to avoid a circular
-dependency (k8s.io/klog -> k8s.io/utils -> k8s.io/klog).
diff --git a/vendor/k8s.io/klog/v2/internal/clock/clock.go b/vendor/k8s.io/klog/v2/internal/clock/clock.go
deleted file mode 100644
index b8b6af5c81..0000000000
--- a/vendor/k8s.io/klog/v2/internal/clock/clock.go
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package clock
-
-import "time"
-
-// PassiveClock allows for injecting fake or real clocks into code
-// that needs to read the current time but does not support scheduling
-// activity in the future.
-type PassiveClock interface {
- Now() time.Time
- Since(time.Time) time.Duration
-}
-
-// Clock allows for injecting fake or real clocks into code that
-// needs to do arbitrary things based on time.
-type Clock interface {
- PassiveClock
- // After returns the channel of a new Timer.
- // This method does not allow to free/GC the backing timer before it fires. Use
- // NewTimer instead.
- After(d time.Duration) <-chan time.Time
- // NewTimer returns a new Timer.
- NewTimer(d time.Duration) Timer
- // Sleep sleeps for the provided duration d.
- // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel.
- Sleep(d time.Duration)
- // Tick returns the channel of a new Ticker.
- // This method does not allow to free/GC the backing ticker. Use
- // NewTicker from WithTicker instead.
- Tick(d time.Duration) <-chan time.Time
-}
-
-// WithTicker allows for injecting fake or real clocks into code that
-// needs to do arbitrary things based on time.
-type WithTicker interface {
- Clock
- // NewTicker returns a new Ticker.
- NewTicker(time.Duration) Ticker
-}
-
-// WithDelayedExecution allows for injecting fake or real clocks into
-// code that needs to make use of AfterFunc functionality.
-type WithDelayedExecution interface {
- Clock
- // AfterFunc executes f in its own goroutine after waiting
- // for d duration and returns a Timer whose channel can be
- // closed by calling Stop() on the Timer.
- AfterFunc(d time.Duration, f func()) Timer
-}
-
-// WithTickerAndDelayedExecution allows for injecting fake or real clocks
-// into code that needs Ticker and AfterFunc functionality
-type WithTickerAndDelayedExecution interface {
- WithTicker
- // AfterFunc executes f in its own goroutine after waiting
- // for d duration and returns a Timer whose channel can be
- // closed by calling Stop() on the Timer.
- AfterFunc(d time.Duration, f func()) Timer
-}
-
-// Ticker defines the Ticker interface.
-type Ticker interface {
- C() <-chan time.Time
- Stop()
-}
-
-var _ = WithTicker(RealClock{})
-
-// RealClock really calls time.Now()
-type RealClock struct{}
-
-// Now returns the current time.
-func (RealClock) Now() time.Time {
- return time.Now()
-}
-
-// Since returns time since the specified timestamp.
-func (RealClock) Since(ts time.Time) time.Duration {
- return time.Since(ts)
-}
-
-// After is the same as time.After(d).
-// This method does not allow to free/GC the backing timer before it fires. Use
-// NewTimer instead.
-func (RealClock) After(d time.Duration) <-chan time.Time {
- return time.After(d)
-}
-
-// NewTimer is the same as time.NewTimer(d)
-func (RealClock) NewTimer(d time.Duration) Timer {
- return &realTimer{
- timer: time.NewTimer(d),
- }
-}
-
-// AfterFunc is the same as time.AfterFunc(d, f).
-func (RealClock) AfterFunc(d time.Duration, f func()) Timer {
- return &realTimer{
- timer: time.AfterFunc(d, f),
- }
-}
-
-// Tick is the same as time.Tick(d)
-// This method does not allow to free/GC the backing ticker. Use
-// NewTicker instead.
-func (RealClock) Tick(d time.Duration) <-chan time.Time {
- return time.Tick(d)
-}
-
-// NewTicker returns a new Ticker.
-func (RealClock) NewTicker(d time.Duration) Ticker {
- return &realTicker{
- ticker: time.NewTicker(d),
- }
-}
-
-// Sleep is the same as time.Sleep(d)
-// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel.
-func (RealClock) Sleep(d time.Duration) {
- time.Sleep(d)
-}
-
-// Timer allows for injecting fake or real timers into code that
-// needs to do arbitrary things based on time.
-type Timer interface {
- C() <-chan time.Time
- Stop() bool
- Reset(d time.Duration) bool
-}
-
-var _ = Timer(&realTimer{})
-
-// realTimer is backed by an actual time.Timer.
-type realTimer struct {
- timer *time.Timer
-}
-
-// C returns the underlying timer's channel.
-func (r *realTimer) C() <-chan time.Time {
- return r.timer.C
-}
-
-// Stop calls Stop() on the underlying timer.
-func (r *realTimer) Stop() bool {
- return r.timer.Stop()
-}
-
-// Reset calls Reset() on the underlying timer.
-func (r *realTimer) Reset(d time.Duration) bool {
- return r.timer.Reset(d)
-}
-
-type realTicker struct {
- ticker *time.Ticker
-}
-
-func (r *realTicker) C() <-chan time.Time {
- return r.ticker.C
-}
-
-func (r *realTicker) Stop() {
- r.ticker.Stop()
-}
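
The clock package above exists so that time can be injected rather than read globally. A hedged sketch of the testing angle, with a hypothetical `fakeClock` that is not part of the vendored package:

```go
// Sketch: inject a fake PassiveClock in tests instead of reading real time.
package clocksketch // hypothetical package name

import "time"

// PassiveClock mirrors the minimal interface defined above.
type PassiveClock interface {
	Now() time.Time
	Since(time.Time) time.Duration
}

type fakeClock struct{ now time.Time }

func (f *fakeClock) Now() time.Time                  { return f.now }
func (f *fakeClock) Since(t time.Time) time.Duration { return f.now.Sub(t) }

// age reports how old ts is according to whichever clock is injected.
func age(c PassiveClock, ts time.Time) time.Duration {
	return c.Since(ts)
}
```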
diff --git a/vendor/k8s.io/klog/v2/internal/dbg/dbg.go b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go
deleted file mode 100644
index f27bd14472..0000000000
--- a/vendor/k8s.io/klog/v2/internal/dbg/dbg.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
-//
-// Copyright 2013 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package dbg provides some helper code for call traces.
-package dbg
-
-import (
- "runtime"
-)
-
-// Stacks is a wrapper for runtime.Stack that attempts to recover the data for
-// all goroutines or the calling one.
-func Stacks(all bool) []byte {
- // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
- n := 10000
- if all {
- n = 100000
- }
- var trace []byte
- for i := 0; i < 5; i++ {
- trace = make([]byte, n)
- nbytes := runtime.Stack(trace, all)
- if nbytes < len(trace) {
- return trace[:nbytes]
- }
- n *= 2
- }
- return trace
-}
diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
deleted file mode 100644
index 1dc81a15fa..0000000000
--- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package serialize
-
-import (
- "bytes"
- "fmt"
- "strconv"
-
- "github.com/go-logr/logr"
-)
-
-type textWriter interface {
- WriteText(*bytes.Buffer)
-}
-
-// WithValues implements LogSink.WithValues. The old key/value pairs are
-// assumed to be well-formed, the new ones are checked and padded if
-// necessary. It returns a new slice.
-func WithValues(oldKV, newKV []interface{}) []interface{} {
- if len(newKV) == 0 {
- return oldKV
- }
- newLen := len(oldKV) + len(newKV)
- hasMissingValue := newLen%2 != 0
- if hasMissingValue {
- newLen++
- }
- // The new LogSink must have its own slice.
- kv := make([]interface{}, 0, newLen)
- kv = append(kv, oldKV...)
- kv = append(kv, newKV...)
- if hasMissingValue {
- kv = append(kv, missingValue)
- }
- return kv
-}
-
-// MergeKVs deduplicates elements provided in two key/value slices.
-//
-// Keys in each slice are expected to be unique, so duplicates can only occur
-// when the first and second slice contain the same key. When that happens, the
-// key/value pair from the second slice is used. The first slice must be well-formed
-// (= even key/value pairs). The second one may have a missing value, in which
-// case the special "missing value" is added to the result.
-func MergeKVs(first, second []interface{}) []interface{} {
- maxLength := len(first) + (len(second)+1)/2*2
- if maxLength == 0 {
- // Nothing to do at all.
- return nil
- }
-
- if len(first) == 0 && len(second)%2 == 0 {
- // Nothing to be overridden, second slice is well-formed
- // and can be used directly.
- return second
- }
-
- // Determine which keys are in the second slice so that we can skip
- // them when iterating over the first one. The code intentionally
- // favors performance over completeness: we assume that keys are string
- // constants and thus compare equal when the string values are equal. A
- // string constant being overridden by, for example, a fmt.Stringer is
- // not handled.
- overrides := map[interface{}]bool{}
- for i := 0; i < len(second); i += 2 {
- overrides[second[i]] = true
- }
- merged := make([]interface{}, 0, maxLength)
- for i := 0; i+1 < len(first); i += 2 {
- key := first[i]
- if overrides[key] {
- continue
- }
- merged = append(merged, key, first[i+1])
- }
- merged = append(merged, second...)
- if len(merged)%2 != 0 {
- merged = append(merged, missingValue)
- }
- return merged
-}
-
-type Formatter struct {
- AnyToStringHook AnyToStringFunc
-}
-
-type AnyToStringFunc func(v interface{}) string
-
-// MergeKVsInto is a variant of MergeKVs which directly formats the key/value
-// pairs into a buffer.
-func (f Formatter) MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) {
- if len(first) == 0 && len(second) == 0 {
- // Nothing to do at all.
- return
- }
-
- if len(first) == 0 && len(second)%2 == 0 {
- // Nothing to be overridden, second slice is well-formed
- // and can be used directly.
- for i := 0; i < len(second); i += 2 {
- f.KVFormat(b, second[i], second[i+1])
- }
- return
- }
-
- // Determine which keys are in the second slice so that we can skip
- // them when iterating over the first one. The code intentionally
- // favors performance over completeness: we assume that keys are string
- // constants and thus compare equal when the string values are equal. A
- // string constant being overridden by, for example, a fmt.Stringer is
- // not handled.
- overrides := map[interface{}]bool{}
- for i := 0; i < len(second); i += 2 {
- overrides[second[i]] = true
- }
- for i := 0; i < len(first); i += 2 {
- key := first[i]
- if overrides[key] {
- continue
- }
- f.KVFormat(b, key, first[i+1])
- }
- // Round down.
- l := len(second)
- l = l / 2 * 2
- for i := 1; i < l; i += 2 {
- f.KVFormat(b, second[i-1], second[i])
- }
- if len(second)%2 == 1 {
- f.KVFormat(b, second[len(second)-1], missingValue)
- }
-}
-
-func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) {
- Formatter{}.MergeAndFormatKVs(b, first, second)
-}
-
-const missingValue = "(MISSING)"
-
-// KVListFormat serializes all key/value pairs into the provided buffer.
-// A space gets inserted before the first pair and between each pair.
-func (f Formatter) KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
- for i := 0; i < len(keysAndValues); i += 2 {
- var v interface{}
- k := keysAndValues[i]
- if i+1 < len(keysAndValues) {
- v = keysAndValues[i+1]
- } else {
- v = missingValue
- }
- f.KVFormat(b, k, v)
- }
-}
-
-func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
- Formatter{}.KVListFormat(b, keysAndValues...)
-}
-
-// KVFormat serializes one key/value pair into the provided buffer.
-// A space gets inserted before the pair.
-func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) {
- b.WriteByte(' ')
- // Keys are assumed to be well-formed according to
- // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
- // for the sake of performance. Keys with spaces,
- // special characters, etc. will break parsing.
- if sK, ok := k.(string); ok {
- // Avoid one allocation when the key is a string, which
- // normally it should be.
- b.WriteString(sK)
- } else {
- b.WriteString(fmt.Sprintf("%s", k))
- }
-
- // The type checks are sorted so that more frequently used ones
- // come first because that is then faster in the common
- // cases. In Kubernetes, ObjectRef (a Stringer) is more common
- // than plain strings
- // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
- switch v := v.(type) {
- case textWriter:
- writeTextWriterValue(b, v)
- case fmt.Stringer:
- writeStringValue(b, true, StringerToString(v))
- case string:
- writeStringValue(b, true, v)
- case error:
- writeStringValue(b, true, ErrorToString(v))
- case logr.Marshaler:
- value := MarshalerToValue(v)
- // A marshaler that returns a string is useful for
- // delayed formatting of complex values. We treat this
- // case like a normal string. This is useful for
- // multi-line support.
- //
- // We could do this by recursively formatting a value,
- // but that comes with the risk of infinite recursion
- // if a marshaler returns itself. Instead we call it
- // only once and rely on it returning the intended
- // value directly.
- switch value := value.(type) {
- case string:
- writeStringValue(b, true, value)
- default:
- writeStringValue(b, false, f.AnyToString(value))
- }
- case []byte:
- // In https://github.com/kubernetes/klog/pull/237 it was decided
- // to format byte slices with "%+q". The advantages of that are:
- // - readable output if the bytes happen to be printable
- // - non-printable bytes get represented as unicode escape
- // sequences (\uxxxx)
- //
- // The downsides are that we cannot use the faster
- // strconv.Quote here and that multi-line output is not
- // supported. If developers know that a byte array is
- // printable and they want multi-line output, they can
- // convert the value to string before logging it.
- b.WriteByte('=')
- b.WriteString(fmt.Sprintf("%+q", v))
- default:
- writeStringValue(b, false, f.AnyToString(v))
- }
-}
-
-func KVFormat(b *bytes.Buffer, k, v interface{}) {
- Formatter{}.KVFormat(b, k, v)
-}
-
-// AnyToString is the historic fallback formatter.
-func (f Formatter) AnyToString(v interface{}) string {
- if f.AnyToStringHook != nil {
- return f.AnyToStringHook(v)
- }
- return fmt.Sprintf("%+v", v)
-}
-
-// StringerToString converts a Stringer to a string,
-// handling panics if they occur.
-func StringerToString(s fmt.Stringer) (ret string) {
- defer func() {
- if err := recover(); err != nil {
- ret = fmt.Sprintf("<panic: %s>", err)
- }
- }()
- ret = s.String()
- return
-}
-
-// MarshalerToValue invokes a marshaler and catches
-// panics.
-func MarshalerToValue(m logr.Marshaler) (ret interface{}) {
- defer func() {
- if err := recover(); err != nil {
- ret = fmt.Sprintf("<panic: %s>", err)
- }
- }()
- ret = m.MarshalLog()
- return
-}
-
-// ErrorToString converts an error to a string,
-// handling panics if they occur.
-func ErrorToString(err error) (ret string) {
- defer func() {
- if err := recover(); err != nil {
- ret = fmt.Sprintf("<panic: %s>", err)
- }
- }()
- ret = err.Error()
- return
-}
-
-func writeTextWriterValue(b *bytes.Buffer, v textWriter) {
- b.WriteRune('=')
- defer func() {
- if err := recover(); err != nil {
- fmt.Fprintf(b, `"<panic: %s>"`, err)
- }
- }()
- v.WriteText(b)
-}
-
-func writeStringValue(b *bytes.Buffer, quote bool, v string) {
- data := []byte(v)
- index := bytes.IndexByte(data, '\n')
- if index == -1 {
- b.WriteByte('=')
- if quote {
- // Simple string, quote quotation marks and non-printable characters.
- b.WriteString(strconv.Quote(v))
- return
- }
- // Non-string with no line breaks.
- b.WriteString(v)
- return
- }
-
- // Complex multi-line string, show as-is with indention like this:
- // I... "hello world" key=<
- // line 1
- // line 2
- // >
- //
- // Tabs indent the lines of the value while the end of string delimiter
- // is indented with a space. That has two purposes:
- // - visual difference between the two for a human reader because indention
- // will be different
- // - no ambiguity when some value line starts with the end delimiter
- //
- // One downside is that the output cannot distinguish between strings that
- // end with a line break and those that don't because the end delimiter
- // will always be on the next line.
- b.WriteString("=<\n")
- for index != -1 {
- b.WriteByte('\t')
- b.Write(data[0 : index+1])
- data = data[index+1:]
- index = bytes.IndexByte(data, '\n')
- }
- if len(data) == 0 {
- // String ended with line break, don't add another.
- b.WriteString(" >")
- } else {
- // No line break at end of last line, write rest of string and
- // add one.
- b.WriteByte('\t')
- b.Write(data)
- b.WriteString("\n >")
- }
-}
diff --git a/vendor/k8s.io/klog/v2/internal/severity/severity.go b/vendor/k8s.io/klog/v2/internal/severity/severity.go
deleted file mode 100644
index 30fa1834f0..0000000000
--- a/vendor/k8s.io/klog/v2/internal/severity/severity.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2013 Google Inc. All Rights Reserved.
-// Copyright 2022 The Kubernetes Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package severity provides definitions for klog severity (info, warning, ...)
-package severity
-
-import (
- "strings"
-)
-
-// severity identifies the sort of log: info, warning etc. The binding to flag.Value
-// is handled in klog.go
-type Severity int32 // sync/atomic int32
-
-// These constants identify the log levels in order of increasing severity.
-// A message written to a high-severity log file is also written to each
-// lower-severity log file.
-const (
- InfoLog Severity = iota
- WarningLog
- ErrorLog
- FatalLog
- NumSeverity = 4
-)
-
-// Char contains one shortcut letter per severity level.
-const Char = "IWEF"
-
-// Name contains one name per severity level.
-var Name = []string{
- InfoLog: "INFO",
- WarningLog: "WARNING",
- ErrorLog: "ERROR",
- FatalLog: "FATAL",
-}
-
-// ByName looks up a severity level by name.
-func ByName(s string) (Severity, bool) {
- s = strings.ToUpper(s)
- for i, name := range Name {
- if name == s {
- return Severity(i), true
- }
- }
- return 0, false
-}
diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go
deleted file mode 100644
index ecd3f8b690..0000000000
--- a/vendor/k8s.io/klog/v2/k8s_references.go
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package klog
-
-import (
- "bytes"
- "fmt"
- "reflect"
- "strings"
-
- "github.com/go-logr/logr"
-)
-
-// ObjectRef references a kubernetes object
-type ObjectRef struct {
- Name string `json:"name"`
- Namespace string `json:"namespace,omitempty"`
-}
-
-func (ref ObjectRef) String() string {
- if ref.Namespace != "" {
- var builder strings.Builder
- builder.Grow(len(ref.Namespace) + len(ref.Name) + 1)
- builder.WriteString(ref.Namespace)
- builder.WriteRune('/')
- builder.WriteString(ref.Name)
- return builder.String()
- }
- return ref.Name
-}
-
-func (ref ObjectRef) WriteText(out *bytes.Buffer) {
- out.WriteRune('"')
- ref.writeUnquoted(out)
- out.WriteRune('"')
-}
-
-func (ref ObjectRef) writeUnquoted(out *bytes.Buffer) {
- if ref.Namespace != "" {
- out.WriteString(ref.Namespace)
- out.WriteRune('/')
- }
- out.WriteString(ref.Name)
-}
-
-// MarshalLog ensures that loggers with support for structured output will log
-// as a struct by removing the String method via a custom type.
-func (ref ObjectRef) MarshalLog() interface{} {
- type or ObjectRef
- return or(ref)
-}
-
-var _ logr.Marshaler = ObjectRef{}
-
-// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface
-// this interface may expand in the future, but will always be a subset of the
-// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface
-type KMetadata interface {
- GetName() string
- GetNamespace() string
-}
-
-// KObj returns ObjectRef from ObjectMeta
-func KObj(obj KMetadata) ObjectRef {
- if obj == nil {
- return ObjectRef{}
- }
- if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() {
- return ObjectRef{}
- }
-
- return ObjectRef{
- Name: obj.GetName(),
- Namespace: obj.GetNamespace(),
- }
-}
-
-// KRef returns ObjectRef from name and namespace
-func KRef(namespace, name string) ObjectRef {
- return ObjectRef{
- Name: name,
- Namespace: namespace,
- }
-}
-
-// KObjs returns a slice of ObjectRef from a slice of ObjectMeta
-//
-// DEPRECATED: Use KObjSlice instead, it has better performance.
-func KObjs(arg interface{}) []ObjectRef {
- s := reflect.ValueOf(arg)
- if s.Kind() != reflect.Slice {
- return nil
- }
- objectRefs := make([]ObjectRef, 0, s.Len())
- for i := 0; i < s.Len(); i++ {
- if v, ok := s.Index(i).Interface().(KMetadata); ok {
- objectRefs = append(objectRefs, KObj(v))
- } else {
- return nil
- }
- }
- return objectRefs
-}
-
-// KObjSlice takes a slice of objects that implement the KMetadata interface
-// and returns an object that gets logged as a slice of ObjectRef values or a
-// string containing those values, depending on whether the logger prefers text
-// output or structured output.
-//
-// An error string is logged when KObjSlice is not passed a suitable slice.
-//
-// Processing of the argument is delayed until the value actually gets logged,
-// in contrast to KObjs where that overhead is incurred regardless of whether
-// the result is needed.
-func KObjSlice(arg interface{}) interface{} {
- return kobjSlice{arg: arg}
-}
-
-type kobjSlice struct {
- arg interface{}
-}
-
-var _ fmt.Stringer = kobjSlice{}
-var _ logr.Marshaler = kobjSlice{}
-
-func (ks kobjSlice) String() string {
- objectRefs, errStr := ks.process()
- if errStr != "" {
- return errStr
- }
- return fmt.Sprintf("%v", objectRefs)
-}
-
-func (ks kobjSlice) MarshalLog() interface{} {
- objectRefs, errStr := ks.process()
- if errStr != "" {
- return errStr
- }
- return objectRefs
-}
-
-func (ks kobjSlice) process() (objs []interface{}, err string) {
- s := reflect.ValueOf(ks.arg)
- switch s.Kind() {
- case reflect.Invalid:
- // nil parameter, print as nil.
- return nil, ""
- case reflect.Slice:
- // Okay, handle below.
- default:
- return nil, fmt.Sprintf("<KObjSlice needs a slice, got type %T>", ks.arg)
- }
- objectRefs := make([]interface{}, 0, s.Len())
- for i := 0; i < s.Len(); i++ {
- item := s.Index(i).Interface()
- if item == nil {
- objectRefs = append(objectRefs, nil)
- } else if v, ok := item.(KMetadata); ok {
- objectRefs = append(objectRefs, KObj(v))
- } else {
- return nil, fmt.Sprintf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
- }
- }
- return objectRefs, ""
-}
-
-var nilToken = []byte("<nil>")
-
-func (ks kobjSlice) WriteText(out *bytes.Buffer) {
- s := reflect.ValueOf(ks.arg)
- switch s.Kind() {
- case reflect.Invalid:
- // nil parameter, print as empty slice.
- out.WriteString("[]")
- return
- case reflect.Slice:
- // Okay, handle below.
- default:
- fmt.Fprintf(out, `"<KObjSlice needs a slice, got type %T>"`, ks.arg)
- return
- }
- out.Write([]byte{'['})
- defer out.Write([]byte{']'})
- for i := 0; i < s.Len(); i++ {
- if i > 0 {
- out.Write([]byte{' '})
- }
- item := s.Index(i).Interface()
- if item == nil {
- out.Write(nilToken)
- } else if v, ok := item.(KMetadata); ok {
- KObj(v).writeUnquoted(out)
- } else {
- fmt.Fprintf(out, "<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
- return
- }
- }
-}
diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go
deleted file mode 100644
index 466eeaf265..0000000000
--- a/vendor/k8s.io/klog/v2/klog.go
+++ /dev/null
@@ -1,1702 +0,0 @@
-// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
-//
-// Copyright 2013 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
-// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
-// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
-//
-// Basic examples:
-//
-// klog.Info("Prepare to repel boarders")
-//
-// klog.Fatalf("Initialization failed: %s", err)
-//
-// See the documentation for the V function for an explanation of these examples:
-//
-// if klog.V(2) {
-// klog.Info("Starting transaction...")
-// }
-//
-// klog.V(2).Infoln("Processed", nItems, "elements")
-//
-// Log output is buffered and written periodically using Flush. Programs
-// should call Flush before exiting to guarantee all log output is written.
-//
-// By default, all log statements write to standard error.
-// This package provides several flags that modify this behavior.
-// As a result, flag.Parse must be called before any logging is done.
-//
-// -logtostderr=true
-// Logs are written to standard error instead of to files.
-// This shortcuts most of the usual output routing:
-// -alsologtostderr, -stderrthreshold and -log_dir have no
-// effect and output redirection at runtime with SetOutput is
-// ignored.
-// -alsologtostderr=false
-// Logs are written to standard error as well as to files.
-// -stderrthreshold=ERROR
-// Log events at or above this severity are logged to standard
-// error as well as to files.
-// -log_dir=""
-// Log files will be written to this directory instead of the
-// default temporary directory.
-//
-// Other flags provide aids to debugging.
-//
-// -log_backtrace_at=""
-// When set to a file and line number holding a logging statement,
-// such as
-// -log_backtrace_at=gopherflakes.go:234
-// a stack trace will be written to the Info log whenever execution
-// hits that statement. (Unlike with -vmodule, the ".go" must be
-// present.)
-// -v=0
-// Enable V-leveled logging at the specified level.
-// -vmodule=""
-// The syntax of the argument is a comma-separated list of pattern=N,
-// where pattern is a literal file name (minus the ".go" suffix) or
-// "glob" pattern and N is a V level. For instance,
-// -vmodule=gopher*=3
-// sets the V level to 3 in all Go files whose names begin "gopher".
-package klog
-
-import (
- "bufio"
- "bytes"
- "errors"
- "flag"
- "fmt"
- "io"
- stdLog "log"
- "math"
- "os"
- "path/filepath"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "k8s.io/klog/v2/internal/buffer"
- "k8s.io/klog/v2/internal/clock"
- "k8s.io/klog/v2/internal/dbg"
- "k8s.io/klog/v2/internal/serialize"
- "k8s.io/klog/v2/internal/severity"
-)
-
-// severityValue identifies the sort of log: info, warning etc. It also implements
-// the flag.Value interface. The -stderrthreshold flag is of type severity and
-// should be modified only through the flag.Value interface. The values match
-// the corresponding constants in C++.
-type severityValue struct {
- severity.Severity
-}
-
-// get returns the value of the severity.
-func (s *severityValue) get() severity.Severity {
- return severity.Severity(atomic.LoadInt32((*int32)(&s.Severity)))
-}
-
-// set sets the value of the severity.
-func (s *severityValue) set(val severity.Severity) {
- atomic.StoreInt32((*int32)(&s.Severity), int32(val))
-}
-
-// String is part of the flag.Value interface.
-func (s *severityValue) String() string {
- return strconv.FormatInt(int64(s.Severity), 10)
-}
-
-// Get is part of the flag.Getter interface.
-func (s *severityValue) Get() interface{} {
- return s.Severity
-}
-
-// Set is part of the flag.Value interface.
-func (s *severityValue) Set(value string) error {
- var threshold severity.Severity
- // Is it a known name?
- if v, ok := severity.ByName(value); ok {
- threshold = v
- } else {
- v, err := strconv.ParseInt(value, 10, 32)
- if err != nil {
- return err
- }
- threshold = severity.Severity(v)
- }
- logging.stderrThreshold.set(threshold)
- return nil
-}
-
-// OutputStats tracks the number of output lines and bytes written.
-type OutputStats struct {
- lines int64
- bytes int64
-}
-
-// Lines returns the number of lines written.
-func (s *OutputStats) Lines() int64 {
- return atomic.LoadInt64(&s.lines)
-}
-
-// Bytes returns the number of bytes written.
-func (s *OutputStats) Bytes() int64 {
- return atomic.LoadInt64(&s.bytes)
-}
-
-// Stats tracks the number of lines of output and number of bytes
-// per severity level. Values must be read with atomic.LoadInt64.
-var Stats struct {
- Info, Warning, Error OutputStats
-}
-
-var severityStats = [severity.NumSeverity]*OutputStats{
- severity.InfoLog: &Stats.Info,
- severity.WarningLog: &Stats.Warning,
- severity.ErrorLog: &Stats.Error,
-}
-
-// Level is exported because it appears in the arguments to V and is
-// the type of the v flag, which can be set programmatically.
-// It's a distinct type because we want to discriminate it from logType.
-// Variables of type level are only changed under logging.mu.
-// The -v flag is read only with atomic ops, so the state of the logging
-// module is consistent.
-
-// Level is treated as a sync/atomic int32.
-
-// Level specifies a level of verbosity for V logs. *Level implements
-// flag.Value; the -v flag is of type Level and should be modified
-// only through the flag.Value interface.
-type Level int32
-
-// get returns the value of the Level.
-func (l *Level) get() Level {
- return Level(atomic.LoadInt32((*int32)(l)))
-}
-
-// set sets the value of the Level.
-func (l *Level) set(val Level) {
- atomic.StoreInt32((*int32)(l), int32(val))
-}
-
-// String is part of the flag.Value interface.
-func (l *Level) String() string {
- return strconv.FormatInt(int64(*l), 10)
-}
-
-// Get is part of the flag.Getter interface.
-func (l *Level) Get() interface{} {
- return *l
-}
-
-// Set is part of the flag.Value interface.
-func (l *Level) Set(value string) error {
- v, err := strconv.ParseInt(value, 10, 32)
- if err != nil {
- return err
- }
- logging.mu.Lock()
- defer logging.mu.Unlock()
- logging.setVState(Level(v), logging.vmodule.filter, false)
- return nil
-}
-
-// moduleSpec represents the setting of the -vmodule flag.
-type moduleSpec struct {
- filter []modulePat
-}
-
-// modulePat contains a filter for the -vmodule flag.
-// It holds a verbosity level and a file pattern to match.
-type modulePat struct {
- pattern string
- literal bool // The pattern is a literal string
- level Level
-}
-
-// match reports whether the file matches the pattern. It uses a string
-// comparison if the pattern contains no metacharacters.
-func (m *modulePat) match(file string) bool {
- if m.literal {
- return file == m.pattern
- }
- match, _ := filepath.Match(m.pattern, file)
- return match
-}
-
-func (m *moduleSpec) String() string {
- // Lock because the type is not atomic. TODO: clean this up.
- logging.mu.Lock()
- defer logging.mu.Unlock()
- return m.serialize()
-}
-
-func (m *moduleSpec) serialize() string {
- var b bytes.Buffer
- for i, f := range m.filter {
- if i > 0 {
- b.WriteRune(',')
- }
- fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
- }
- return b.String()
-}
-
-// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
-// struct is not exported.
-func (m *moduleSpec) Get() interface{} {
- return nil
-}
-
-var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
-
-// Set sets the vmodule value.
-// Syntax: -vmodule=recordio=2,file=1,gfs*=3
-func (m *moduleSpec) Set(value string) error {
- filter, err := parseModuleSpec(value)
- if err != nil {
- return err
- }
- logging.mu.Lock()
- defer logging.mu.Unlock()
- logging.setVState(logging.verbosity, filter, true)
- return nil
-}
-
-func parseModuleSpec(value string) ([]modulePat, error) {
- var filter []modulePat
- for _, pat := range strings.Split(value, ",") {
- if len(pat) == 0 {
- // Empty strings such as from a trailing comma can be ignored.
- continue
- }
- patLev := strings.Split(pat, "=")
- if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
- return nil, errVmoduleSyntax
- }
- pattern := patLev[0]
- v, err := strconv.ParseInt(patLev[1], 10, 32)
- if err != nil {
- return nil, errors.New("syntax error: expect comma-separated list of filename=N")
- }
- if v < 0 {
- return nil, errors.New("negative value for vmodule level")
- }
- if v == 0 {
- continue // Ignore. It's harmless but no point in paying the overhead.
- }
- // TODO: check syntax of filter?
- filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
- }
- return filter, nil
-}
-
-// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
-// that require filepath.Match to be called to match the pattern.
-func isLiteral(pattern string) bool {
- return !strings.ContainsAny(pattern, `\*?[]`)
-}
-
-// traceLocation represents the setting of the -log_backtrace_at flag.
-type traceLocation struct {
- file string
- line int
-}
-
-// isSet reports whether the trace location has been specified.
-// logging.mu is held.
-func (t *traceLocation) isSet() bool {
- return t.line > 0
-}
-
-// match reports whether the specified file and line matches the trace location.
-// The argument file name is the full path, not the basename specified in the flag.
-// logging.mu is held.
-func (t *traceLocation) match(file string, line int) bool {
- if t.line != line {
- return false
- }
- if i := strings.LastIndex(file, "/"); i >= 0 {
- file = file[i+1:]
- }
- return t.file == file
-}
-
-func (t *traceLocation) String() string {
- // Lock because the type is not atomic. TODO: clean this up.
- logging.mu.Lock()
- defer logging.mu.Unlock()
- return fmt.Sprintf("%s:%d", t.file, t.line)
-}
-
-// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
-// struct is not exported
-func (t *traceLocation) Get() interface{} {
- return nil
-}
-
-var errTraceSyntax = errors.New("syntax error: expect file.go:234")
-
-// Set sets the backtrace value.
-// Syntax: -log_backtrace_at=gopherflakes.go:234
-// Note that unlike vmodule the file extension is included here.
-func (t *traceLocation) Set(value string) error {
- if value == "" {
- // Unset.
- logging.mu.Lock()
- defer logging.mu.Unlock()
- t.line = 0
- t.file = ""
- return nil
- }
- fields := strings.Split(value, ":")
- if len(fields) != 2 {
- return errTraceSyntax
- }
- file, line := fields[0], fields[1]
- if !strings.Contains(file, ".") {
- return errTraceSyntax
- }
- v, err := strconv.Atoi(line)
- if err != nil {
- return errTraceSyntax
- }
- if v <= 0 {
- return errors.New("negative or zero value for level")
- }
- logging.mu.Lock()
- defer logging.mu.Unlock()
- t.line = v
- t.file = file
- return nil
-}
-
-// flushSyncWriter is the interface satisfied by logging destinations.
-type flushSyncWriter interface {
- Flush() error
- Sync() error
- io.Writer
-}
-
-var logging loggingT
-var commandLine flag.FlagSet
-
-// init sets up the defaults and creates command line flags.
-func init() {
- commandLine.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory (no effect when -logtostderr=true)")
- commandLine.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file (no effect when -logtostderr=true)")
- commandLine.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", 1800,
- "Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. "+
- "If the value is 0, the maximum file size is unlimited.")
- commandLine.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files")
- commandLine.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files (no effect when -logtostderr=true)")
- logging.setVState(0, nil, false)
- commandLine.Var(&logging.verbosity, "v", "number for the log level verbosity")
- commandLine.BoolVar(&logging.addDirHeader, "add_dir_header", false, "If true, adds the file directory to the header of the log messages")
- commandLine.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages")
- commandLine.BoolVar(&logging.oneOutput, "one_output", false, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)")
- commandLine.BoolVar(&logging.skipLogHeaders, "skip_log_headers", false, "If true, avoid headers when opening log files (no effect when -logtostderr=true)")
- logging.stderrThreshold = severityValue{
- Severity: severity.ErrorLog, // Default stderrThreshold is ERROR.
- }
- commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)")
- commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
- commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
-
- logging.settings.contextualLoggingEnabled = true
- logging.flushD = newFlushDaemon(logging.lockAndFlushAll, nil)
-}
-
-// InitFlags is for explicitly initializing the flags.
-// It may get called repeatedly for different flagsets, but not
-// twice for the same one. May get called concurrently
-// to other goroutines using klog. However, only some flags
-// may get set concurrently (see implementation).
-func InitFlags(flagset *flag.FlagSet) {
- if flagset == nil {
- flagset = flag.CommandLine
- }
-
- commandLine.VisitAll(func(f *flag.Flag) {
- flagset.Var(f.Value, f.Name, f.Usage)
- })
-}
-
-// Flush flushes all pending log I/O.
-func Flush() {
- logging.lockAndFlushAll()
-}
-
-// settings collects global settings.
-type settings struct {
- // contextualLoggingEnabled controls whether contextual logging is
- // active. Disabling it may have some small performance benefit.
- contextualLoggingEnabled bool
-
- // logger is the global Logger chosen by users of klog, nil if
- // none is available.
- logger *logWriter
-
- // loggerOptions contains the options that were supplied for
- // globalLogger.
- loggerOptions loggerOptions
-
- // Boolean flags. Not handled atomically because the flag.Value interface
- // does not let us avoid the =true, and that shorthand is necessary for
- // compatibility. TODO: does this matter enough to fix? Seems unlikely.
- toStderr bool // The -logtostderr flag.
- alsoToStderr bool // The -alsologtostderr flag.
-
- // Level flag. Handled atomically.
- stderrThreshold severityValue // The -stderrthreshold flag.
-
- // Access to all of the following fields must be protected via a mutex.
-
- // file holds writer for each of the log types.
- file [severity.NumSeverity]flushSyncWriter
- // flushInterval is the interval for periodic flushing. If zero,
- // the global default will be used.
- flushInterval time.Duration
-
- // filterLength stores the length of the vmodule filter chain. If greater
- // than zero, it means vmodule is enabled. It may be read safely
- // using sync.LoadInt32, but is only modified under mu.
- filterLength int32
- // traceLocation is the state of the -log_backtrace_at flag.
- traceLocation traceLocation
- // These flags are modified only under lock, although verbosity may be fetched
- // safely using atomic.LoadInt32.
- vmodule moduleSpec // The state of the -vmodule flag.
- verbosity Level // V logging level, the value of the -v flag
-
- // If non-empty, overrides the choice of directory in which to write logs.
- // See createLogDirs for the full list of possible destinations.
- logDir string
-
- // If non-empty, specifies the path of the file to write logs. Mutually exclusive
- // with the log_dir option.
- logFile string
-
- // When logFile is specified, this limiter makes sure the logFile won't exceed a certain size. When it does, the
- // logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile.
- logFileMaxSizeMB uint64
-
- // If true, do not add the prefix headers, useful when used with SetOutput
- skipHeaders bool
-
- // If true, do not add the headers to log files
- skipLogHeaders bool
-
- // If true, add the file directory to the header
- addDirHeader bool
-
- // If true, messages will not be propagated to lower severity log levels
- oneOutput bool
-
- // If set, all output will be filtered through the filter.
- filter LogFilter
-}
-
-// deepCopy creates a copy that doesn't share anything with the original
-// instance.
-func (s settings) deepCopy() settings {
- // vmodule is a slice and would be shared, so we have copy it.
- filter := make([]modulePat, len(s.vmodule.filter))
- for i := range s.vmodule.filter {
- filter[i] = s.vmodule.filter[i]
- }
- s.vmodule.filter = filter
-
- if s.logger != nil {
- logger := *s.logger
- s.logger = &logger
- }
-
- return s
-}
-
-// loggingT collects all the global state of the logging setup.
-type loggingT struct {
- settings
-
- // flushD holds a flushDaemon that frequently flushes log file buffers.
- // Uses its own mutex.
- flushD *flushDaemon
-
- // mu protects the remaining elements of this structure and the fields
- // in settingsT which need a mutex lock.
- mu sync.Mutex
-
- // pcs is used in V to avoid an allocation when computing the caller's PC.
- pcs [1]uintptr
- // vmap is a cache of the V Level for each V() call site, identified by PC.
- // It is wiped whenever the vmodule flag changes state.
- vmap map[uintptr]Level
-}
-
-// setVState sets a consistent state for V logging.
-// l.mu is held.
-func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
- // Turn verbosity off so V will not fire while we are in transition.
- l.verbosity.set(0)
- // Ditto for filter length.
- atomic.StoreInt32(&l.filterLength, 0)
-
- // Set the new filters and wipe the pc->Level map if the filter has changed.
- if setFilter {
- l.vmodule.filter = filter
- l.vmap = make(map[uintptr]Level)
- }
-
- // Things are consistent now, so enable filtering and verbosity.
- // They are enabled in order opposite to that in V.
- atomic.StoreInt32(&l.filterLength, int32(len(filter)))
- l.verbosity.set(verbosity)
-}
-
-var timeNow = time.Now // Stubbed out for testing.
-
-// CaptureState gathers information about all current klog settings.
-// The result can be used to restore those settings.
-func CaptureState() State {
- logging.mu.Lock()
- defer logging.mu.Unlock()
- return &state{
- settings: logging.settings.deepCopy(),
- flushDRunning: logging.flushD.isRunning(),
- maxSize: MaxSize,
- }
-}
-
-// State stores a snapshot of klog settings. It gets created with CaptureState
-// and can be used to restore the entire state. Modifying individual settings
-// is supported via the command line flags.
-type State interface {
- // Restore restores the entire state. It may get called more than once.
- Restore()
-}
-
-type state struct {
- settings
-
- flushDRunning bool
- maxSize uint64
-}
-
-func (s *state) Restore() {
- // This needs to be done before mutex locking.
- if s.flushDRunning && !logging.flushD.isRunning() {
- // This is not quite accurate: StartFlushDaemon might
- // have been called with some different interval.
- interval := s.flushInterval
- if interval == 0 {
- interval = flushInterval
- }
- logging.flushD.run(interval)
- } else if !s.flushDRunning && logging.flushD.isRunning() {
- logging.flushD.stop()
- }
-
- logging.mu.Lock()
- defer logging.mu.Unlock()
-
- logging.settings = s.settings
- logging.setVState(s.verbosity, s.vmodule.filter, true)
- MaxSize = s.maxSize
-}
-
-/*
-header formats a log header as defined by the C++ implementation.
-It returns a buffer containing the formatted header and the user's file and line number.
-The depth specifies how many stack frames above lives the source line to be identified in the log message.
-
-Log lines have this form:
-
- Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
-
-where the fields are defined as follows:
-
- L A single character, representing the log level (eg 'I' for INFO)
- mm The month (zero padded; ie May is '05')
- dd The day (zero padded)
- hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
- threadid The space-padded thread ID as returned by GetTID()
- file The file name
- line The line number
- msg The user-supplied message
-*/
-func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, string, int) {
- _, file, line, ok := runtime.Caller(3 + depth)
- if !ok {
- file = "???"
- line = 1
- } else {
- if slash := strings.LastIndex(file, "/"); slash >= 0 {
- path := file
- file = path[slash+1:]
- if l.addDirHeader {
- if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 {
- file = path[dirsep+1:]
- }
- }
- }
- }
- return l.formatHeader(s, file, line), file, line
-}
-
-// formatHeader formats a log header using the provided file name and line number.
-func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer {
- buf := buffer.GetBuffer()
- if l.skipHeaders {
- return buf
- }
- now := timeNow()
- buf.FormatHeader(s, file, line, now)
- return buf
-}
-
-func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) {
- l.printlnDepth(s, logger, filter, 1, args...)
-}
-
-func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
- buf, file, line := l.header(s, depth)
- // If a logger is set and doesn't support writing a formatted buffer,
- // we clear the generated header as we rely on the backing
- // logger implementation to print headers.
- if logger != nil && logger.writeKlogBuffer == nil {
- buffer.PutBuffer(buf)
- buf = buffer.GetBuffer()
- }
- if filter != nil {
- args = filter.Filter(args)
- }
- fmt.Fprintln(buf, args...)
- l.output(s, logger, buf, depth, file, line, false)
-}
-
-func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) {
- l.printDepth(s, logger, filter, 1, args...)
-}
-
-func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
- buf, file, line := l.header(s, depth)
- // If a logger is set and doesn't support writing a formatted buffer,
- // we clear the generated header as we rely on the backing
- // logger implementation to print headers.
- if logger != nil && logger.writeKlogBuffer == nil {
- buffer.PutBuffer(buf)
- buf = buffer.GetBuffer()
- }
- if filter != nil {
- args = filter.Filter(args)
- }
- fmt.Fprint(buf, args...)
- if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' {
- buf.WriteByte('\n')
- }
- l.output(s, logger, buf, depth, file, line, false)
-}
-
-func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilter, format string, args ...interface{}) {
- l.printfDepth(s, logger, filter, 1, format, args...)
-}
-
-func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) {
- buf, file, line := l.header(s, depth)
- // If a logger is set and doesn't support writing a formatted buffer,
- // we clear the generated header as we rely on the backing
- // logger implementation to print headers.
- if logger != nil && logger.writeKlogBuffer == nil {
- buffer.PutBuffer(buf)
- buf = buffer.GetBuffer()
- }
- if filter != nil {
- format, args = filter.FilterF(format, args)
- }
- fmt.Fprintf(buf, format, args...)
- if buf.Bytes()[buf.Len()-1] != '\n' {
- buf.WriteByte('\n')
- }
- l.output(s, logger, buf, depth, file, line, false)
-}
-
-// printWithFileLine behaves like print but uses the provided file and line number. If
-// alsoLogToStderr is true, the log message always appears on standard error; it
-// will also appear in the log file unless --logtostderr is set.
-func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) {
- buf := l.formatHeader(s, file, line)
- // If a logger is set and doesn't support writing a formatted buffer,
- // we clear the generated header as we rely on the backing
- // logger implementation to print headers.
- if logger != nil && logger.writeKlogBuffer == nil {
- buffer.PutBuffer(buf)
- buf = buffer.GetBuffer()
- }
- if filter != nil {
- args = filter.Filter(args)
- }
- fmt.Fprint(buf, args...)
- if buf.Bytes()[buf.Len()-1] != '\n' {
- buf.WriteByte('\n')
- }
- l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr)
-}
-
-// If a logger is specified, errorS calls logger.Error; otherwise it outputs via the logging module.
-func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
- if filter != nil {
- msg, keysAndValues = filter.FilterS(msg, keysAndValues)
- }
- if logger != nil {
- logger.WithCallDepth(depth+2).Error(err, msg, keysAndValues...)
- return
- }
- l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...)
-}
-
-// If a logger is specified, infoS calls logger.Info; otherwise it outputs via the logging module.
-func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
- if filter != nil {
- msg, keysAndValues = filter.FilterS(msg, keysAndValues)
- }
- if logger != nil {
- logger.WithCallDepth(depth+2).Info(msg, keysAndValues...)
- return
- }
- l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...)
-}
-
-// printS is called from infoS and errorS if no logger is specified.
-// The log severity is set by s.
-func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) {
- // Only create a new buffer if we don't have one cached.
- b := buffer.GetBuffer()
- // The message is always quoted, even if it contains line breaks.
- // If developers want multi-line output, they should use a small, fixed
- // message and put the multi-line output into a value.
- b.WriteString(strconv.Quote(msg))
- if err != nil {
- serialize.KVListFormat(&b.Buffer, "err", err)
- }
- serialize.KVListFormat(&b.Buffer, keysAndValues...)
- l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer)
- // Make the buffer available for reuse.
- buffer.PutBuffer(b)
-}
-
-// redirectBuffer is used to set an alternate destination for the logs
-type redirectBuffer struct {
- w io.Writer
-}
-
-func (rb *redirectBuffer) Sync() error {
- return nil
-}
-
-func (rb *redirectBuffer) Flush() error {
- return nil
-}
-
-func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
- return rb.w.Write(bytes)
-}
-
-// SetOutput sets the output destination for all severities
-func SetOutput(w io.Writer) {
- logging.mu.Lock()
- defer logging.mu.Unlock()
- for s := severity.FatalLog; s >= severity.InfoLog; s-- {
- rb := &redirectBuffer{
- w: w,
- }
- logging.file[s] = rb
- }
-}
-
-// SetOutputBySeverity sets the output destination for specific severity
-func SetOutputBySeverity(name string, w io.Writer) {
- logging.mu.Lock()
- defer logging.mu.Unlock()
- sev, ok := severity.ByName(name)
- if !ok {
- panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
- }
- rb := &redirectBuffer{
- w: w,
- }
- logging.file[sev] = rb
-}
-
-// LogToStderr sets whether to log exclusively to stderr, bypassing outputs
-func LogToStderr(stderr bool) {
- logging.mu.Lock()
- defer logging.mu.Unlock()
-
- logging.toStderr = stderr
-}
-
-// output writes the data to the log files and releases the buffer.
-func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) {
- var isLocked = true
- l.mu.Lock()
- defer func() {
- if isLocked {
- // Unlock before returning in case that it wasn't done already.
- l.mu.Unlock()
- }
- }()
-
- if l.traceLocation.isSet() {
- if l.traceLocation.match(file, line) {
- buf.Write(dbg.Stacks(false))
- }
- }
- data := buf.Bytes()
- if logger != nil {
- if logger.writeKlogBuffer != nil {
- logger.writeKlogBuffer(data)
- } else {
- // TODO: set 'severity' and caller information as structured log info
- // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line}
- if s == severity.ErrorLog {
- logger.WithCallDepth(depth+3).Error(nil, string(data))
- } else {
- logger.WithCallDepth(depth + 3).Info(string(data))
- }
- }
- } else if l.toStderr {
- os.Stderr.Write(data)
- } else {
- if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
- os.Stderr.Write(data)
- }
-
- if logging.logFile != "" {
- // Since we are using a single log file, all of the items in l.file array
- // will point to the same file, so just use one of them to write data.
- if l.file[severity.InfoLog] == nil {
- if err := l.createFiles(severity.InfoLog); err != nil {
- os.Stderr.Write(data) // Make sure the message appears somewhere.
- l.exit(err)
- }
- }
- l.file[severity.InfoLog].Write(data)
- } else {
- if l.file[s] == nil {
- if err := l.createFiles(s); err != nil {
- os.Stderr.Write(data) // Make sure the message appears somewhere.
- l.exit(err)
- }
- }
-
- if l.oneOutput {
- l.file[s].Write(data)
- } else {
- switch s {
- case severity.FatalLog:
- l.file[severity.FatalLog].Write(data)
- fallthrough
- case severity.ErrorLog:
- l.file[severity.ErrorLog].Write(data)
- fallthrough
- case severity.WarningLog:
- l.file[severity.WarningLog].Write(data)
- fallthrough
- case severity.InfoLog:
- l.file[severity.InfoLog].Write(data)
- }
- }
- }
- }
- if s == severity.FatalLog {
- // If we got here via Exit rather than Fatal, print no stacks.
- if atomic.LoadUint32(&fatalNoStacks) > 0 {
- l.mu.Unlock()
- isLocked = false
- timeoutFlush(ExitFlushTimeout)
- OsExit(1)
- }
- // Dump all goroutine stacks before exiting.
- // First, make sure we see the trace for the current goroutine on standard error.
- // If -logtostderr has been specified, the loop below will do that anyway
- // as the first stack in the full dump.
- if !l.toStderr {
- os.Stderr.Write(dbg.Stacks(false))
- }
-
- // Write the stack trace for all goroutines to the files.
- trace := dbg.Stacks(true)
- logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
- for log := severity.FatalLog; log >= severity.InfoLog; log-- {
- if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
- f.Write(trace)
- }
- }
- l.mu.Unlock()
- isLocked = false
- timeoutFlush(ExitFlushTimeout)
- OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
- }
- buffer.PutBuffer(buf)
-
- if stats := severityStats[s]; stats != nil {
- atomic.AddInt64(&stats.lines, 1)
- atomic.AddInt64(&stats.bytes, int64(len(data)))
- }
-}
-
-// logExitFunc provides a simple mechanism to override the default behavior
-// of exiting on error. Used in testing and to guarantee we reach a required exit
-// for fatal logs. Instead, exit could be a function rather than a method but that
-// would make its use clumsier.
-var logExitFunc func(error)
-
-// exit is called if there is trouble creating or writing log files.
-// It flushes the logs and exits the program; there's no point in hanging around.
-// l.mu is held.
-func (l *loggingT) exit(err error) {
- fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
- // If logExitFunc is set, we do that instead of exiting.
- if logExitFunc != nil {
- logExitFunc(err)
- return
- }
- l.flushAll()
- OsExit(2)
-}
-
-// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
-// file's Sync method and providing a wrapper for the Write method that provides log
-// file rotation. There are conflicting methods, so the file cannot be embedded.
-// l.mu is held for all its methods.
-type syncBuffer struct {
- logger *loggingT
- *bufio.Writer
- file *os.File
- sev severity.Severity
- nbytes uint64 // The number of bytes written to this file
- maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
-}
-
-func (sb *syncBuffer) Sync() error {
- return sb.file.Sync()
-}
-
-// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
-func CalculateMaxSize() uint64 {
- if logging.logFile != "" {
- if logging.logFileMaxSizeMB == 0 {
- // If logFileMaxSizeMB is zero, we don't have limitations on the log size.
- return math.MaxUint64
- }
- // Flag logFileMaxSizeMB is in MB for user convenience.
- return logging.logFileMaxSizeMB * 1024 * 1024
- }
- // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size.
- return MaxSize
-}
-
-func (sb *syncBuffer) Write(p []byte) (n int, err error) {
- if sb.nbytes+uint64(len(p)) >= sb.maxbytes {
- if err := sb.rotateFile(time.Now(), false); err != nil {
- sb.logger.exit(err)
- }
- }
- n, err = sb.Writer.Write(p)
- sb.nbytes += uint64(n)
- if err != nil {
- sb.logger.exit(err)
- }
- return
-}
-
-// rotateFile closes the syncBuffer's file and starts a new one.
-// The startup argument indicates whether this is the initial startup of klog.
-// If startup is true, existing files are opened for appending instead of truncated.
-func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error {
- if sb.file != nil {
- sb.Flush()
- sb.file.Close()
- }
- var err error
- sb.file, _, err = create(severity.Name[sb.sev], now, startup)
- if err != nil {
- return err
- }
- if startup {
- fileInfo, err := sb.file.Stat()
- if err != nil {
- return fmt.Errorf("file stat could not get fileinfo: %v", err)
- }
- // init file size
- sb.nbytes = uint64(fileInfo.Size())
- } else {
- sb.nbytes = 0
- }
- sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
-
- if sb.logger.skipLogHeaders {
- return nil
- }
-
- // Write header.
- var buf bytes.Buffer
- fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
- fmt.Fprintf(&buf, "Running on machine: %s\n", host)
- fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
- fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
- n, err := sb.file.Write(buf.Bytes())
- sb.nbytes += uint64(n)
- return err
-}
-
-// bufferSize sizes the buffer associated with each log file. It's large
-// so that log records can accumulate without the logging thread blocking
-// on disk I/O. The flushDaemon will block instead.
-const bufferSize = 256 * 1024
-
-// createFiles creates all the log files for severity from sev down to infoLog.
-// l.mu is held.
-func (l *loggingT) createFiles(sev severity.Severity) error {
- interval := l.flushInterval
- if interval == 0 {
- interval = flushInterval
- }
- l.flushD.run(interval)
- now := time.Now()
- // Files are created in decreasing severity order, so as soon as we find one
- // has already been created, we can stop.
- for s := sev; s >= severity.InfoLog && l.file[s] == nil; s-- {
- sb := &syncBuffer{
- logger: l,
- sev: s,
- maxbytes: CalculateMaxSize(),
- }
- if err := sb.rotateFile(now, true); err != nil {
- return err
- }
- l.file[s] = sb
- }
- return nil
-}
-
-const flushInterval = 5 * time.Second
-
-// flushDaemon periodically flushes the log file buffers.
-type flushDaemon struct {
- mu sync.Mutex
- clock clock.WithTicker
- flush func()
- stopC chan struct{}
- stopDone chan struct{}
-}
-
-// newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a
-// clock.RealClock is used.
-func newFlushDaemon(flush func(), tickClock clock.WithTicker) *flushDaemon {
- if tickClock == nil {
- tickClock = clock.RealClock{}
- }
- return &flushDaemon{
- flush: flush,
- clock: tickClock,
- }
-}
-
-// run starts a goroutine that periodically calls the daemon's flush function.
-// Calling run on an already running daemon will have no effect.
-func (f *flushDaemon) run(interval time.Duration) {
- f.mu.Lock()
- defer f.mu.Unlock()
-
- if f.stopC != nil { // daemon already running
- return
- }
-
- f.stopC = make(chan struct{}, 1)
- f.stopDone = make(chan struct{}, 1)
-
- ticker := f.clock.NewTicker(interval)
- go func() {
- defer ticker.Stop()
- defer func() { f.stopDone <- struct{}{} }()
- for {
- select {
- case <-ticker.C():
- f.flush()
- case <-f.stopC:
- f.flush()
- return
- }
- }
- }()
-}
-
-// stop stops the running flushDaemon and waits until the daemon has shut down.
-// Calling stop on a daemon that isn't running will have no effect.
-func (f *flushDaemon) stop() {
- f.mu.Lock()
- defer f.mu.Unlock()
-
- if f.stopC == nil { // daemon not running
- return
- }
-
- f.stopC <- struct{}{}
- <-f.stopDone
-
- f.stopC = nil
- f.stopDone = nil
-}
-
-// isRunning returns true if the flush daemon is running.
-func (f *flushDaemon) isRunning() bool {
- f.mu.Lock()
- defer f.mu.Unlock()
- return f.stopC != nil
-}
-
-// StopFlushDaemon stops the flush daemon, if running, and flushes once.
-// This prevents klog from leaking goroutines on shutdown. After stopping
-// the daemon, you can still manually flush buffers again by calling Flush().
-func StopFlushDaemon() {
- logging.flushD.stop()
-}
-
-// StartFlushDaemon ensures that the flush daemon runs with the given delay
-// between flush calls. If it is already running, it gets restarted.
-func StartFlushDaemon(interval time.Duration) {
- StopFlushDaemon()
- logging.flushD.run(interval)
-}
-
-// lockAndFlushAll is like flushAll but locks l.mu first.
-func (l *loggingT) lockAndFlushAll() {
- l.mu.Lock()
- l.flushAll()
- l.mu.Unlock()
-}
-
-// flushAll flushes all the logs and attempts to "sync" their data to disk.
-// l.mu is held.
-func (l *loggingT) flushAll() {
- // Flush from fatal down, in case there's trouble flushing.
- for s := severity.FatalLog; s >= severity.InfoLog; s-- {
- file := l.file[s]
- if file != nil {
- file.Flush() // ignore error
- file.Sync() // ignore error
- }
- }
- if logging.loggerOptions.flush != nil {
- logging.loggerOptions.flush()
- }
-}
-
-// CopyStandardLogTo arranges for messages written to the Go "log" package's
-// default logs to also appear in the Google logs for the named and lower
-// severities. Subsequent changes to the standard log's default output location
-// or format may break this behavior.
-//
-// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
-// recognized, CopyStandardLogTo panics.
-func CopyStandardLogTo(name string) {
- sev, ok := severity.ByName(name)
- if !ok {
- panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
- }
- // Set a log format that captures the user's file and line:
- // d.go:23: message
- stdLog.SetFlags(stdLog.Lshortfile)
- stdLog.SetOutput(logBridge(sev))
-}
-
-// logBridge provides the Write method that enables CopyStandardLogTo to connect
-// Go's standard logs to the logs provided by this package.
-type logBridge severity.Severity
-
-// Write parses the standard logging line and passes its components to the
-// logger for severity(lb).
-func (lb logBridge) Write(b []byte) (n int, err error) {
- var (
- file = "???"
- line = 1
- text string
- )
- // Split "d.go:23: message" into "d.go", "23", and "message".
- if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
- text = fmt.Sprintf("bad log format: %s", b)
- } else {
- file = string(parts[0])
- text = string(parts[2][1:]) // skip leading space
- line, err = strconv.Atoi(string(parts[1]))
- if err != nil {
- text = fmt.Sprintf("bad line number: %s", b)
- line = 1
- }
- }
- // printWithFileLine with alsoToStderr=true, so standard log messages
- // always appear on standard error.
- logging.printWithFileLine(severity.Severity(lb), logging.logger, logging.filter, file, line, true, text)
- return len(b), nil
-}
-
-// setV computes and remembers the V level for a given PC
-// when vmodule is enabled.
-// File pattern matching takes the basename of the file, stripped
-// of its .go suffix, and uses filepath.Match, which is a little more
-// general than the *? matching used in C++.
-// l.mu is held.
-func (l *loggingT) setV(pc uintptr) Level {
- fn := runtime.FuncForPC(pc)
- file, _ := fn.FileLine(pc)
- // The file is something like /a/b/c/d.go. We want just the d.
- if strings.HasSuffix(file, ".go") {
- file = file[:len(file)-3]
- }
- if slash := strings.LastIndex(file, "/"); slash >= 0 {
- file = file[slash+1:]
- }
- for _, filter := range l.vmodule.filter {
- if filter.match(file) {
- l.vmap[pc] = filter.level
- return filter.level
- }
- }
- l.vmap[pc] = 0
- return 0
-}
-
-// Verbose is a boolean type that implements Infof (like Printf) etc.
-// See the documentation of V for more information.
-type Verbose struct {
- enabled bool
- logger *logWriter
-}
-
-func newVerbose(level Level, b bool) Verbose {
- if logging.logger == nil {
- return Verbose{b, nil}
- }
- v := logging.logger.V(int(level))
- return Verbose{b, &logWriter{Logger: v, writeKlogBuffer: logging.loggerOptions.writeKlogBuffer}}
-}
-
-// V reports whether verbosity at the call site is at least the requested level.
-// The returned value is a struct of type Verbose, which implements Info, Infoln
-// and Infof. These methods will write to the Info log if called.
-// Thus, one may write either
-//
-// if klog.V(2).Enabled() { klog.Info("log this") }
-//
-// or
-//
-// klog.V(2).Info("log this")
-//
-// The second form is shorter but the first is cheaper if logging is off because it does
-// not evaluate its arguments.
-//
-// Whether an individual call to V generates a log record depends on the setting of
-// the -v and -vmodule flags; both are off by default. The V call will log if its level
-// is less than or equal to the value of the -v flag, or alternatively if its level is
-// less than or equal to the value of the -vmodule pattern matching the source file
-// containing the call.
-func V(level Level) Verbose {
- return VDepth(1, level)
-}
-
-// VDepth is a variant of V that accepts a number of stack frames that will be
-// skipped when checking the -vmodule patterns. VDepth(0) is equivalent to
-// V().
-func VDepth(depth int, level Level) Verbose {
- // This function tries hard to be cheap unless there's work to do.
- // The fast path is two atomic loads and compares.
-
- // Here is a cheap but safe test to see if V logging is enabled globally.
- if logging.verbosity.get() >= level {
- return newVerbose(level, true)
- }
-
- // It's off globally but vmodule may still be set.
- // Here is another cheap but safe test to see if vmodule is enabled.
- if atomic.LoadInt32(&logging.filterLength) > 0 {
- // Now we need a proper lock to use the logging structure. The pcs field
- // is shared so we must lock before accessing it. This is fairly expensive,
- // but if V logging is enabled we're slow anyway.
- logging.mu.Lock()
- defer logging.mu.Unlock()
- if runtime.Callers(2+depth, logging.pcs[:]) == 0 {
- return newVerbose(level, false)
- }
- // runtime.Callers returns "return PCs", but we want
- // to look up the symbolic information for the call,
- // so subtract 1 from the PC. runtime.CallersFrames
- // would be cleaner, but allocates.
- pc := logging.pcs[0] - 1
- v, ok := logging.vmap[pc]
- if !ok {
- v = logging.setV(pc)
- }
- return newVerbose(level, v >= level)
- }
- return newVerbose(level, false)
-}
-
-// Enabled will return true if this log level is enabled, guarded by the value
-// of v.
-// See the documentation of V for usage.
-func (v Verbose) Enabled() bool {
- return v.enabled
-}
-
-// Info is equivalent to the global Info function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Info(args ...interface{}) {
- if v.enabled {
- logging.print(severity.InfoLog, v.logger, logging.filter, args...)
- }
-}
-
-// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) InfoDepth(depth int, args ...interface{}) {
- if v.enabled {
- logging.printDepth(severity.InfoLog, v.logger, logging.filter, depth, args...)
- }
-}
-
-// Infoln is equivalent to the global Infoln function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Infoln(args ...interface{}) {
- if v.enabled {
- logging.println(severity.InfoLog, v.logger, logging.filter, args...)
- }
-}
-
-// InfolnDepth is equivalent to the global InfolnDepth function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) InfolnDepth(depth int, args ...interface{}) {
- if v.enabled {
- logging.printlnDepth(severity.InfoLog, v.logger, logging.filter, depth, args...)
- }
-}
-
-// Infof is equivalent to the global Infof function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Infof(format string, args ...interface{}) {
- if v.enabled {
- logging.printf(severity.InfoLog, v.logger, logging.filter, format, args...)
- }
-}
-
-// InfofDepth is equivalent to the global InfofDepth function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) {
- if v.enabled {
- logging.printfDepth(severity.InfoLog, v.logger, logging.filter, depth, format, args...)
- }
-}
-
-// InfoS is equivalent to the global InfoS function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) {
- if v.enabled {
- logging.infoS(v.logger, logging.filter, 0, msg, keysAndValues...)
- }
-}
-
-// InfoSDepth acts as InfoS but uses depth to determine which call frame to log.
-// InfoSDepth(0, "msg") is the same as InfoS("msg").
-func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) {
- logging.infoS(logging.logger, logging.filter, depth, msg, keysAndValues...)
-}
-
-// InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) {
- if v.enabled {
- logging.infoS(v.logger, logging.filter, depth, msg, keysAndValues...)
- }
-}
-
-// Deprecated: Use ErrorS instead.
-func (v Verbose) Error(err error, msg string, args ...interface{}) {
- if v.enabled {
- logging.errorS(err, v.logger, logging.filter, 0, msg, args...)
- }
-}
-
-// ErrorS is equivalent to the global Error function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) {
- if v.enabled {
- logging.errorS(err, v.logger, logging.filter, 0, msg, keysAndValues...)
- }
-}
-
-// Info logs to the INFO log.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Info(args ...interface{}) {
- logging.print(severity.InfoLog, logging.logger, logging.filter, args...)
-}
-
-// InfoDepth acts as Info but uses depth to determine which call frame to log.
-// InfoDepth(0, "msg") is the same as Info("msg").
-func InfoDepth(depth int, args ...interface{}) {
- logging.printDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...)
-}
-
-// Infoln logs to the INFO log.
-// Arguments are handled in the manner of fmt.Println; a newline is always appended.
-func Infoln(args ...interface{}) {
- logging.println(severity.InfoLog, logging.logger, logging.filter, args...)
-}
-
-// InfolnDepth acts as Infoln but uses depth to determine which call frame to log.
-// InfolnDepth(0, "msg") is the same as Infoln("msg").
-func InfolnDepth(depth int, args ...interface{}) {
- logging.printlnDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...)
-}
-
-// Infof logs to the INFO log.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Infof(format string, args ...interface{}) {
- logging.printf(severity.InfoLog, logging.logger, logging.filter, format, args...)
-}
-
-// InfofDepth acts as Infof but uses depth to determine which call frame to log.
-// InfofDepth(0, "msg", args...) is the same as Infof("msg", args...).
-func InfofDepth(depth int, format string, args ...interface{}) {
- logging.printfDepth(severity.InfoLog, logging.logger, logging.filter, depth, format, args...)
-}
-
-// InfoS writes structured logs to the INFO log.
-// The msg argument is used to add a constant description to the log line.
-// The key/value pairs are joined by "="; a newline is always appended.
-//
-// Basic examples:
-// >> klog.InfoS("Pod status updated", "pod", "kubedns", "status", "ready")
-// output:
-// >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready"
-func InfoS(msg string, keysAndValues ...interface{}) {
- logging.infoS(logging.logger, logging.filter, 0, msg, keysAndValues...)
-}
-
-// Warning logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Warning(args ...interface{}) {
- logging.print(severity.WarningLog, logging.logger, logging.filter, args...)
-}
-
-// WarningDepth acts as Warning but uses depth to determine which call frame to log.
-// WarningDepth(0, "msg") is the same as Warning("msg").
-func WarningDepth(depth int, args ...interface{}) {
- logging.printDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...)
-}
-
-// Warningln logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Println; a newline is always appended.
-func Warningln(args ...interface{}) {
- logging.println(severity.WarningLog, logging.logger, logging.filter, args...)
-}
-
-// WarninglnDepth acts as Warningln but uses depth to determine which call frame to log.
-// WarninglnDepth(0, "msg") is the same as Warningln("msg").
-func WarninglnDepth(depth int, args ...interface{}) {
- logging.printlnDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...)
-}
-
-// Warningf logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Warningf(format string, args ...interface{}) {
- logging.printf(severity.WarningLog, logging.logger, logging.filter, format, args...)
-}
-
-// WarningfDepth acts as Warningf but uses depth to determine which call frame to log.
-// WarningfDepth(0, "msg", args...) is the same as Warningf("msg", args...).
-func WarningfDepth(depth int, format string, args ...interface{}) {
- logging.printfDepth(severity.WarningLog, logging.logger, logging.filter, depth, format, args...)
-}
-
-// Error logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Error(args ...interface{}) {
- logging.print(severity.ErrorLog, logging.logger, logging.filter, args...)
-}
-
-// ErrorDepth acts as Error but uses depth to determine which call frame to log.
-// ErrorDepth(0, "msg") is the same as Error("msg").
-func ErrorDepth(depth int, args ...interface{}) {
- logging.printDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...)
-}
-
-// Errorln logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Println; a newline is always appended.
-func Errorln(args ...interface{}) {
- logging.println(severity.ErrorLog, logging.logger, logging.filter, args...)
-}
-
-// ErrorlnDepth acts as Errorln but uses depth to determine which call frame to log.
-// ErrorlnDepth(0, "msg") is the same as Errorln("msg").
-func ErrorlnDepth(depth int, args ...interface{}) {
- logging.printlnDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...)
-}
-
-// Errorf logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Errorf(format string, args ...interface{}) {
- logging.printf(severity.ErrorLog, logging.logger, logging.filter, format, args...)
-}
-
-// ErrorfDepth acts as Errorf but uses depth to determine which call frame to log.
-// ErrorfDepth(0, "msg", args...) is the same as Errorf("msg", args...).
-func ErrorfDepth(depth int, format string, args ...interface{}) {
- logging.printfDepth(severity.ErrorLog, logging.logger, logging.filter, depth, format, args...)
-}
-
-// ErrorS writes structured logs to the ERROR, WARNING, and INFO logs.
-// The err argument is used as the "err" field of the log line.
-// The msg argument is used to add a constant description to the log line.
-// The key/value pairs are joined by "="; a newline is always appended.
-//
-// Basic examples:
-// >> klog.ErrorS(err, "Failed to update pod status")
-// output:
-// >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout"
-func ErrorS(err error, msg string, keysAndValues ...interface{}) {
- logging.errorS(err, logging.logger, logging.filter, 0, msg, keysAndValues...)
-}
-
-// ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log.
-// ErrorSDepth(0, "msg") is the same as ErrorS("msg").
-func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) {
- logging.errorS(err, logging.logger, logging.filter, depth, msg, keysAndValues...)
-}
-
-// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
-// prints stack trace(s), then calls OsExit(255).
-//
-// Stderr only receives a dump of the current goroutine's stack trace. Log files,
-// if there are any, receive a dump of the stack traces in all goroutines.
-//
-// Callers who want more control over handling of fatal events may instead use a
-// combination of different functions:
-// - some info or error logging function, optionally with a stack trace
-// value generated by github.com/go-logr/lib/dbg.Backtrace
-// - Flush to flush pending log data
-// - panic, os.Exit or returning to the caller with an error
-//
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Fatal(args ...interface{}) {
- logging.print(severity.FatalLog, logging.logger, logging.filter, args...)
-}
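
The "more control" combination described in the Fatal doc comment boils down to something like this sketch (the exit code and error message are illustrative; klog does not mandate them):

    package main

    import (
    	"errors"
    	"os"

    	"k8s.io/klog/v2"
    )

    func run() error { return errors.New("example failure") }

    func main() {
    	if err := run(); err != nil {
    		// Log, flush pending log data, then exit without the goroutine
    		// stack dumps that Fatal would emit.
    		klog.ErrorS(err, "unrecoverable error, shutting down")
    		klog.Flush()
    		os.Exit(1)
    	}
    }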
-
-// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
-// FatalDepth(0, "msg") is the same as Fatal("msg").
-func FatalDepth(depth int, args ...interface{}) {
- logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
-}
-
-// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
-// including a stack trace of all running goroutines, then calls OsExit(255).
-// Arguments are handled in the manner of fmt.Println; a newline is always appended.
-func Fatalln(args ...interface{}) {
- logging.println(severity.FatalLog, logging.logger, logging.filter, args...)
-}
-
-// FatallnDepth acts as Fatalln but uses depth to determine which call frame to log.
-// FatallnDepth(0, "msg") is the same as Fatalln("msg").
-func FatallnDepth(depth int, args ...interface{}) {
- logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
-}
-
-// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
-// including a stack trace of all running goroutines, then calls OsExit(255).
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Fatalf(format string, args ...interface{}) {
- logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...)
-}
-
-// FatalfDepth acts as Fatalf but uses depth to determine which call frame to log.
-// FatalfDepth(0, "msg", args...) is the same as Fatalf("msg", args...).
-func FatalfDepth(depth int, format string, args ...interface{}) {
- logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...)
-}
-
-// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
-// It allows Exit and relatives to use the Fatal logs.
-var fatalNoStacks uint32
-
-// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1).
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Exit(args ...interface{}) {
- atomic.StoreUint32(&fatalNoStacks, 1)
- logging.print(severity.FatalLog, logging.logger, logging.filter, args...)
-}
-
-// ExitDepth acts as Exit but uses depth to determine which call frame to log.
-// ExitDepth(0, "msg") is the same as Exit("msg").
-func ExitDepth(depth int, args ...interface{}) {
- atomic.StoreUint32(&fatalNoStacks, 1)
- logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
-}
-
-// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1).
-func Exitln(args ...interface{}) {
- atomic.StoreUint32(&fatalNoStacks, 1)
- logging.println(severity.FatalLog, logging.logger, logging.filter, args...)
-}
-
-// ExitlnDepth acts as Exitln but uses depth to determine which call frame to log.
-// ExitlnDepth(0, "msg") is the same as Exitln("msg").
-func ExitlnDepth(depth int, args ...interface{}) {
- atomic.StoreUint32(&fatalNoStacks, 1)
- logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
-}
-
-// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1).
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Exitf(format string, args ...interface{}) {
- atomic.StoreUint32(&fatalNoStacks, 1)
- logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...)
-}
-
-// ExitfDepth acts as Exitf but uses depth to determine which call frame to log.
-// ExitfDepth(0, "msg", args...) is the same as Exitf("msg", args...).
-func ExitfDepth(depth int, format string, args ...interface{}) {
- atomic.StoreUint32(&fatalNoStacks, 1)
- logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...)
-}
-
-// LogFilter is a collection of functions that can filter all logging calls,
-// e.g. for sanitization of arguments and prevent accidental leaking of secrets.
-type LogFilter interface {
- Filter(args []interface{}) []interface{}
- FilterF(format string, args []interface{}) (string, []interface{})
- FilterS(msg string, keysAndValues []interface{}) (string, []interface{})
-}
-
-// SetLogFilter installs a filter that is used for all log calls.
-//
-// Modifying the filter is not thread-safe and should be done while no other
-// goroutines invoke log calls, usually during program initialization.
-func SetLogFilter(filter LogFilter) {
- logging.filter = filter
-}
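
To make the LogFilter/SetLogFilter pair above concrete, a sketch of a filter that redacts any argument containing "password" (the redaction rule is purely illustrative):

    package main

    import (
    	"strings"

    	"k8s.io/klog/v2"
    )

    // redactFilter implements klog.LogFilter as declared above.
    type redactFilter struct{}

    func (redactFilter) Filter(args []interface{}) []interface{} {
    	for i, a := range args {
    		if s, ok := a.(string); ok && strings.Contains(strings.ToLower(s), "password") {
    			args[i] = "[REDACTED]"
    		}
    	}
    	return args
    }

    func (f redactFilter) FilterF(format string, args []interface{}) (string, []interface{}) {
    	return format, f.Filter(args)
    }

    func (f redactFilter) FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) {
    	return msg, f.Filter(keysAndValues)
    }

    func main() {
    	// Per the SetLogFilter doc comment, install the filter during program
    	// initialization, before other goroutines start logging.
    	klog.SetLogFilter(redactFilter{})
    	klog.Info("user login, password=hunter2") // logged as "[REDACTED]"
    	klog.Flush()
    }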
diff --git a/vendor/k8s.io/klog/v2/klog_file.go b/vendor/k8s.io/klog/v2/klog_file.go
deleted file mode 100644
index 1025d644f3..0000000000
--- a/vendor/k8s.io/klog/v2/klog_file.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
-//
-// Copyright 2013 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// File I/O for logs.
-
-package klog
-
-import (
- "errors"
- "fmt"
- "os"
- "path/filepath"
- "strings"
- "sync"
- "time"
-)
-
-// MaxSize is the maximum size of a log file in bytes.
-var MaxSize uint64 = 1024 * 1024 * 1800
-
-// logDirs lists the candidate directories for new log files.
-var logDirs []string
-
-func createLogDirs() {
- if logging.logDir != "" {
- logDirs = append(logDirs, logging.logDir)
- }
- logDirs = append(logDirs, os.TempDir())
-}
-
-var (
- pid = os.Getpid()
- program = filepath.Base(os.Args[0])
- host = "unknownhost"
- userName = "unknownuser"
- userNameOnce sync.Once
-)
-
-func init() {
- if h, err := os.Hostname(); err == nil {
- host = shortHostname(h)
- }
-}
-
-// shortHostname returns its argument, truncating at the first period.
-// For instance, given "www.google.com" it returns "www".
-func shortHostname(hostname string) string {
- if i := strings.Index(hostname, "."); i >= 0 {
- return hostname[:i]
- }
- return hostname
-}
-
-// logName returns a new log file name containing tag, with start time t, and
-// the name for the symlink for tag.
-func logName(tag string, t time.Time) (name, link string) {
- name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
- program,
- host,
- getUserName(),
- tag,
- t.Year(),
- t.Month(),
- t.Day(),
- t.Hour(),
- t.Minute(),
- t.Second(),
- pid)
- return name, program + "." + tag
-}
-
-var onceLogDirs sync.Once
-
-// create creates a new log file and returns the file and its filename, which
-// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
-// successfully, create also attempts to update the symlink for that tag, ignoring
-// errors.
-// The startup argument indicates whether this is the initial startup of klog.
-// If startup is true, existing files are opened for appending instead of truncated.
-func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) {
- if logging.logFile != "" {
- f, err := openOrCreate(logging.logFile, startup)
- if err == nil {
- return f, logging.logFile, nil
- }
- return nil, "", fmt.Errorf("log: unable to create log: %v", err)
- }
- onceLogDirs.Do(createLogDirs)
- if len(logDirs) == 0 {
- return nil, "", errors.New("log: no log dirs")
- }
- name, link := logName(tag, t)
- var lastErr error
- for _, dir := range logDirs {
- fname := filepath.Join(dir, name)
- f, err := openOrCreate(fname, startup)
- if err == nil {
- symlink := filepath.Join(dir, link)
- os.Remove(symlink) // ignore err
- os.Symlink(name, symlink) // ignore err
- return f, fname, nil
- }
- lastErr = err
- }
- return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
-}
-
-// The startup argument indicates whether this is the initial startup of klog.
-// If startup is true, existing files are opened for appending instead of truncated.
-func openOrCreate(name string, startup bool) (*os.File, error) {
- if startup {
- f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
- return f, err
- }
- f, err := os.Create(name)
- return f, err
-}
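
As a worked example of the naming scheme in logName above (the program, host, user, and pid values are made up), the same format string yields names like kubelet.node-1.root.log.INFO.20231025-001515.4321:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Mirrors the format string used by logName in the removed klog_file.go.
    	t := time.Date(2023, 10, 25, 0, 15, 15, 0, time.UTC)
    	name := fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
    		"kubelet", "node-1", "root", "INFO",
    		t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), 4321)
    	fmt.Println(name)
    	// Output: kubelet.node-1.root.log.INFO.20231025-001515.4321
    }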
diff --git a/vendor/k8s.io/klog/v2/klog_file_others.go b/vendor/k8s.io/klog/v2/klog_file_others.go
deleted file mode 100644
index aa46726851..0000000000
--- a/vendor/k8s.io/klog/v2/klog_file_others.go
+++ /dev/null
@@ -1,19 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package klog
-
-import (
- "os/user"
-)
-
-func getUserName() string {
- userNameOnce.Do(func() {
- current, err := user.Current()
- if err == nil {
- userName = current.Username
- }
- })
-
- return userName
-}
diff --git a/vendor/k8s.io/klog/v2/klog_file_windows.go b/vendor/k8s.io/klog/v2/klog_file_windows.go
deleted file mode 100644
index 2517f9c538..0000000000
--- a/vendor/k8s.io/klog/v2/klog_file_windows.go
+++ /dev/null
@@ -1,34 +0,0 @@
-//go:build windows
-// +build windows
-
-package klog
-
-import (
- "os"
- "strings"
-)
-
-func getUserName() string {
- userNameOnce.Do(func() {
- // On Windows, the Go 'user' package requires netapi32.dll.
- // This affects Windows Nano Server:
- // https://github.com/golang/go/issues/21867
- // Fallback to using environment variables.
- u := os.Getenv("USERNAME")
- if len(u) == 0 {
- return
- }
- // Sanitize the USERNAME since it may contain filepath separators.
- u = strings.Replace(u, `\`, "_", -1)
-
- // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME'
- d := os.Getenv("USERDOMAIN")
- if len(d) != 0 {
- userName = d + "_" + u
- } else {
- userName = u
- }
- })
-
- return userName
-}
diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go
deleted file mode 100644
index 15de00e21f..0000000000
--- a/vendor/k8s.io/klog/v2/klogr.go
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package klog
-
-import (
- "github.com/go-logr/logr"
-
- "k8s.io/klog/v2/internal/serialize"
-)
-
-// NewKlogr returns a logger that is functionally identical to
-// klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The
-// difference is that it uses a simpler implementation.
-func NewKlogr() Logger {
- return New(&klogger{})
-}
-
-// klogger is a subset of klogr/klogr.go. It had to be copied to break an
-// import cycle (klogr wants to use klog, and klog wants to use klogr).
-type klogger struct {
- level int
- callDepth int
- prefix string
- values []interface{}
-}
-
-func (l *klogger) Init(info logr.RuntimeInfo) {
- l.callDepth += info.CallDepth
-}
-
-func (l *klogger) Info(level int, msg string, kvList ...interface{}) {
- merged := serialize.MergeKVs(l.values, kvList)
- if l.prefix != "" {
- msg = l.prefix + ": " + msg
- }
- // Skip this function.
- VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
-}
-
-func (l *klogger) Enabled(level int) bool {
- // Skip this function and logr.Logger.Info where Enabled is called.
- return VDepth(l.callDepth+2, Level(level)).Enabled()
-}
-
-func (l *klogger) Error(err error, msg string, kvList ...interface{}) {
- merged := serialize.MergeKVs(l.values, kvList)
- if l.prefix != "" {
- msg = l.prefix + ": " + msg
- }
- ErrorSDepth(l.callDepth+1, err, msg, merged...)
-}
-
-// WithName returns a new logr.Logger with the specified name appended. klogr
-// uses '/' characters to separate name elements. Callers should not pass '/'
-// in the provided name string, but this library does not actually enforce that.
-func (l klogger) WithName(name string) logr.LogSink {
- if len(l.prefix) > 0 {
- l.prefix = l.prefix + "/"
- }
- l.prefix += name
- return &l
-}
-
-func (l klogger) WithValues(kvList ...interface{}) logr.LogSink {
- l.values = serialize.WithValues(l.values, kvList)
- return &l
-}
-
-func (l klogger) WithCallDepth(depth int) logr.LogSink {
- l.callDepth += depth
- return &l
-}
-
-var _ logr.LogSink = &klogger{}
-var _ logr.CallDepthLogSink = &klogger{}
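
For context on what the removed klogr bridge provided to callers, a hedged usage sketch (the logger name, values, and verbosity level are illustrative):

    package main

    import (
    	"errors"

    	"k8s.io/klog/v2"
    )

    func main() {
    	// NewKlogr returns a logr.Logger whose sink is the klogger defined above,
    	// so output still flows through klog's severity/verbosity machinery.
    	logger := klog.NewKlogr().WithName("reconciler").WithValues("controller", "pods")

    	logger.Info("sync started", "queueLen", 12)
    	logger.V(4).Info("debug detail, emitted only at -v=4 or higher")
    	logger.Error(errors.New("conflict"), "sync failed", "retries", 2)

    	klog.Flush()
    }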
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ac7972f45a..484dde18c3 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -254,10 +254,6 @@ github.com/docker/go-connections/tlsconfig
github.com/docker/go-units
# github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5
## explicit; go 1.9
-# github.com/go-chi/chi v4.1.2+incompatible
-## explicit
-github.com/go-chi/chi
-github.com/go-chi/chi/middleware
# github.com/go-jose/go-jose/v3 v3.0.0
## explicit; go 1.12
github.com/go-jose/go-jose/v3
@@ -315,16 +311,6 @@ github.com/go-openapi/swag
# github.com/go-openapi/validate v0.22.1
## explicit; go 1.14
github.com/go-openapi/validate
-# github.com/go-playground/locales v0.14.1
-## explicit; go 1.17
-github.com/go-playground/locales
-github.com/go-playground/locales/currency
-# github.com/go-playground/universal-translator v0.18.1
-## explicit; go 1.18
-github.com/go-playground/universal-translator
-# github.com/go-playground/validator/v10 v10.13.0
-## explicit; go 1.18
-github.com/go-playground/validator/v10
# github.com/gogo/protobuf v1.3.2
## explicit; go 1.15
github.com/gogo/protobuf/gogoproto
@@ -347,11 +333,9 @@ github.com/google/go-containerregistry/pkg/name
# github.com/google/go-intervals v0.0.2
## explicit; go 1.12
github.com/google/go-intervals/intervalset
-# github.com/google/trillian v1.5.1
-## explicit; go 1.17
+# github.com/google/trillian v1.5.2
+## explicit; go 1.19
github.com/google/trillian
-github.com/google/trillian/client
-github.com/google/trillian/client/backoff
github.com/google/trillian/types
github.com/google/trillian/types/internal/tls
# github.com/google/uuid v1.3.0
@@ -397,9 +381,6 @@ github.com/klauspost/compress/zstd/internal/xxhash
# github.com/klauspost/pgzip v1.2.6
## explicit
github.com/klauspost/pgzip
-# github.com/leodido/go-urn v1.2.3
-## explicit; go 1.16
-github.com/leodido/go-urn
# github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6
## explicit; go 1.18
github.com/letsencrypt/boulder/core
@@ -493,7 +474,7 @@ github.com/segmentio/ksuid
## explicit; go 1.20
github.com/sigstore/fulcio/pkg/api
github.com/sigstore/fulcio/pkg/certificate
-# github.com/sigstore/rekor v1.1.1
+# github.com/sigstore/rekor v1.2.2-0.20230529154427-55a5a338d149
## explicit; go 1.19
github.com/sigstore/rekor/pkg/client
github.com/sigstore/rekor/pkg/generated/client
@@ -502,7 +483,6 @@ github.com/sigstore/rekor/pkg/generated/client/index
github.com/sigstore/rekor/pkg/generated/client/pubkey
github.com/sigstore/rekor/pkg/generated/client/tlog
github.com/sigstore/rekor/pkg/generated/models
-github.com/sigstore/rekor/pkg/log
github.com/sigstore/rekor/pkg/util
# github.com/sigstore/sigstore v1.6.4
## explicit; go 1.18
@@ -547,12 +527,6 @@ github.com/theupdateframework/go-tuf/encrypted
# github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399
## explicit
github.com/titanous/rocacheck
-# github.com/transparency-dev/merkle v0.0.1
-## explicit; go 1.16
-github.com/transparency-dev/merkle
-github.com/transparency-dev/merkle/compact
-github.com/transparency-dev/merkle/proof
-github.com/transparency-dev/merkle/rfc6962
# github.com/ulikunitz/xz v0.5.11
## explicit; go 1.12
github.com/ulikunitz/xz
@@ -617,22 +591,7 @@ go.opentelemetry.io/otel/semconv/v1.12.0
# go.opentelemetry.io/otel/trace v1.15.0
## explicit; go 1.19
go.opentelemetry.io/otel/trace
-# go.uber.org/atomic v1.10.0
-## explicit; go 1.18
-go.uber.org/atomic
-# go.uber.org/multierr v1.11.0
-## explicit; go 1.19
-go.uber.org/multierr
-# go.uber.org/zap v1.24.0
-## explicit; go 1.19
-go.uber.org/zap
-go.uber.org/zap/buffer
-go.uber.org/zap/internal
-go.uber.org/zap/internal/bufferpool
-go.uber.org/zap/internal/color
-go.uber.org/zap/internal/exit
-go.uber.org/zap/zapcore
-# golang.org/x/crypto v0.8.0
+# golang.org/x/crypto v0.9.0
## explicit; go 1.17
golang.org/x/crypto/cast5
golang.org/x/crypto/ed25519
@@ -659,7 +618,7 @@ golang.org/x/exp/slices
## explicit; go 1.17
golang.org/x/mod/semver
golang.org/x/mod/sumdb/note
-# golang.org/x/net v0.9.0
+# golang.org/x/net v0.10.0
## explicit; go 1.17
golang.org/x/net/context
golang.org/x/net/http/httpguts
@@ -691,15 +650,11 @@ golang.org/x/sys/windows
golang.org/x/term
# golang.org/x/text v0.9.0
## explicit; go 1.17
-golang.org/x/text/internal/language
-golang.org/x/text/internal/language/compact
-golang.org/x/text/internal/tag
-golang.org/x/text/language
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.7.0
+# golang.org/x/tools v0.8.0
## explicit; go 1.18
golang.org/x/tools/cmd/stringer
golang.org/x/tools/go/gcexportdata
@@ -729,7 +684,7 @@ google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.54.0
+# google.golang.org/grpc v1.55.0
## explicit; go 1.17
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -830,11 +785,3 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
-# k8s.io/klog/v2 v2.90.1
-## explicit; go 1.13
-k8s.io/klog/v2
-k8s.io/klog/v2/internal/buffer
-k8s.io/klog/v2/internal/clock
-k8s.io/klog/v2/internal/dbg
-k8s.io/klog/v2/internal/serialize
-k8s.io/klog/v2/internal/severity