diff --git a/go.mod b/go.mod index 7d3bd47c65..5f2acd1485 100644 --- a/go.mod +++ b/go.mod @@ -1,39 +1,37 @@ module github.com/hashicorp/terraform-provider-azuread require ( - github.com/hashicorp/go-azure-sdk v0.20230331.1143618 + github.com/hashicorp/go-azure-sdk v0.20230511.1094507 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-uuid v1.0.3 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.26.1 github.com/manicminer/hamilton v0.61.0 - golang.org/x/text v0.7.0 + golang.org/x/text v0.9.0 ) require ( github.com/agext/levenshtein v1.2.3 // indirect - github.com/apparentlymart/go-cidr v1.1.0 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect github.com/fatih/color v1.13.0 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-azure-helpers v0.55.0 // indirect + github.com/hashicorp/go-azure-helpers v0.56.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.2.1 // indirect + github.com/hashicorp/go-hclog v1.4.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.4.6 // indirect + github.com/hashicorp/go-plugin v1.4.8 // indirect github.com/hashicorp/go-retryablehttp v0.7.2 // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hc-install v0.4.0 // indirect - github.com/hashicorp/hcl/v2 v2.15.0 // indirect + github.com/hashicorp/hc-install v0.5.0 // indirect + github.com/hashicorp/hcl/v2 v2.16.2 // indirect github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-exec v0.17.3 // indirect - github.com/hashicorp/terraform-json v0.14.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.14.1 // indirect - github.com/hashicorp/terraform-plugin-log v0.7.0 // indirect - github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c // indirect + github.com/hashicorp/terraform-exec v0.18.1 // indirect + github.com/hashicorp/terraform-json v0.16.0 // indirect + github.com/hashicorp/terraform-plugin-go v0.14.3 // indirect + github.com/hashicorp/terraform-plugin-log v0.8.0 // indirect + github.com/hashicorp/terraform-registry-address v0.1.0 // indirect github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect github.com/mattn/go-colorable v0.1.12 // indirect @@ -47,14 +45,15 @@ require ( github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect - github.com/zclconf/go-cty v1.12.1 // indirect - golang.org/x/crypto v0.5.0 // indirect - golang.org/x/net v0.7.0 // indirect + github.com/zclconf/go-cty v1.13.1 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/mod v0.8.0 // indirect + golang.org/x/net v0.10.0 // indirect golang.org/x/oauth2 v0.4.0 // indirect - golang.org/x/sys v0.5.0 // indirect + golang.org/x/sys v0.8.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac // indirect - google.golang.org/grpc v1.50.1 // indirect + google.golang.org/grpc v1.51.0 // indirect google.golang.org/protobuf 
v1.28.1 // indirect software.sslmate.com/src/go-pkcs12 v0.2.0 // indirect ) diff --git a/go.sum b/go.sum index a4f0a2a6dc..57ffcfd3c5 100644 --- a/go.sum +++ b/go.sum @@ -14,6 +14,9 @@ github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYX github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= @@ -27,16 +30,16 @@ github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7l github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= -github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -108,15 +111,18 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid 
v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-azure-helpers v0.55.0 h1:2A2KWPiaDC5kQWr6tYHTD/P1k9bO0HvflEb/Nc1yLeU= github.com/hashicorp/go-azure-helpers v0.55.0/go.mod h1:RQugkG8wEcNIjYmcBLHpuEI/u2mTJwO4r37rR/OKRpo= -github.com/hashicorp/go-azure-sdk v0.20230331.1143618 h1:SUBW7DehNG5zZhY9xvcvtNWg6xtwDas4rWz1r1mlM0U= +github.com/hashicorp/go-azure-helpers v0.56.0 h1:KxDXISHwWe4PKEz6FSSPG8vCXNrFGsCcCn/AI94ccig= +github.com/hashicorp/go-azure-helpers v0.56.0/go.mod h1:MbnCV9jPmlkbdH7VsoBK8IbCHvo3pLWRbRvq+F6u9sI= github.com/hashicorp/go-azure-sdk v0.20230331.1143618/go.mod h1:L9JXVUcnL0GjMizCnngYUlMp1lLhDBNgSTvn6Of/5O4= +github.com/hashicorp/go-azure-sdk v0.20230511.1094507 h1:uvmyrTZAHXtHzwkuHtjD889OyH3c4ul1IuPiegm/uag= +github.com/hashicorp/go-azure-sdk v0.20230511.1094507/go.mod h1:x2r7/U5MKlTHUO6/hFHRNO03qkRLBYyeQOuEUHkZmEg= github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -127,12 +133,15 @@ github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUK github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.2.1 h1:YQsLlGDJgwhXFpucSPyVbCBviQtjlHv3jLTlp8YmtEw= github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= +github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.6 h1:MDV3UrKQBM3du3G7MApDGvOsMYy3JQJ4exhSoKBAeVA= github.com/hashicorp/go-plugin v1.4.6/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= +github.com/hashicorp/go-plugin v1.4.8 h1:CHGwpxYDOttQOY7HOWgETU9dyVjOXzniXDqJcYJE1zM= +github.com/hashicorp/go-plugin v1.4.8/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -142,30 +151,41 @@ github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version 
v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH92oYYk= github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI= -github.com/hashicorp/hcl/v2 v2.15.0 h1:CPDXO6+uORPjKflkWCCwoWc9uRp+zSIPcCQ+BrxV7m8= +github.com/hashicorp/hc-install v0.5.0 h1:D9bl4KayIYKEeJ4vUDe9L5huqxZXczKaykSRcmQ0xY0= +github.com/hashicorp/hc-install v0.5.0/go.mod h1:JyzMfbzfSBSjoDCRPna1vi/24BEDxFaCPfdHtM5SCdo= github.com/hashicorp/hcl/v2 v2.15.0/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= +github.com/hashicorp/hcl/v2 v2.16.2 h1:mpkHZh/Tv+xet3sy3F9Ld4FyI2tUpWe9x3XtPx9f1a0= +github.com/hashicorp/hcl/v2 v2.16.2/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjlaclkx3eErU= github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI= -github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= +github.com/hashicorp/terraform-exec v0.18.1 h1:LAbfDvNQU1l0NOQlTuudjczVhHj061fNX5H8XZxHlH4= +github.com/hashicorp/terraform-exec v0.18.1/go.mod h1:58wg4IeuAJ6LVsLUeD2DWZZoc/bYi6dzhLHzxM41980= github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= -github.com/hashicorp/terraform-plugin-go v0.14.1 h1:cwZzPYla82XwAqpLhSzdVsOMU+6H29tczAwrB0z9Zek= +github.com/hashicorp/terraform-json v0.16.0 h1:UKkeWRWb23do5LNAFlh/K3N0ymn1qTOO8c+85Albo3s= +github.com/hashicorp/terraform-json v0.16.0/go.mod h1:v0Ufk9jJnk6tcIZvScHvetlKfiNTC+WS21mnXIlc0B0= github.com/hashicorp/terraform-plugin-go v0.14.1/go.mod h1:Bc/K6K26BQ2FHqIELPbpKtt2CzzbQou+0UQF3/0NsCQ= -github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= +github.com/hashicorp/terraform-plugin-go v0.14.3 h1:nlnJ1GXKdMwsC8g1Nh05tK2wsC3+3BL/DBBxFEki+j0= +github.com/hashicorp/terraform-plugin-go v0.14.3/go.mod h1:7ees7DMZ263q8wQ6E4RdIdR6nHHJtrdt4ogX5lPkX1A= github.com/hashicorp/terraform-plugin-log v0.7.0/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 h1:zHcMbxY0+rFO9gY99elV/XC/UnQVg7FhRCbj1i5b7vM= +github.com/hashicorp/terraform-plugin-log v0.8.0 h1:pX2VQ/TGKu+UU1rCay0OlzosNKe4Nz1pepLXj95oyy0= +github.com/hashicorp/terraform-plugin-log v0.8.0/go.mod h1:1myFrhVsBLeylQzYYEV17VVjtG8oYPRFdaZs7xdW2xs= github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1/go.mod h1:+tNlb0wkfdsDJ7JEiERLz4HzM19HyiuIoGzTsM7rPpw= -github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c h1:D8aRO6+mTqHfLsK/BC3j5OAoogv1WLRWzY1AaTo3rBg= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.26.1 h1:G9WAfb8LHeCxu7Ae8nc1agZlQOSCUWsb610iAogBhCs= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.26.1/go.mod h1:xcOSYlRVdPLmDUoqPhO9fiO/YCN/l6MGYeTzGt5jgkQ= github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c/go.mod h1:Wn3Na71knbXc1G8Lh+yu/dQWWJeFQEpDeJMtWMtlmNI= +github.com/hashicorp/terraform-registry-address v0.1.0 h1:W6JkV9wbum+m516rCl5/NjKxCyTVaaUBbzYcMzBDO3U= +github.com/hashicorp/terraform-registry-address v0.1.0/go.mod h1:EnyO2jYO6j29DTHbJcm00E5nQTFeTtyZH3H5ycydQ5A= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 
h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I= github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -189,15 +209,19 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+ github.com/manicminer/hamilton v0.61.0 h1:Hp25wOIkArXUJ9++U9J+jpVL+nXCzUtsvzVa/vE3Or4= github.com/manicminer/hamilton v0.61.0/go.mod h1:va/X2sztcgQ5+BSxc2eU3FTHYIyxLnHvB4LudlPUZdE= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -211,10 +235,10 @@ github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQ github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 
h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758= github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= @@ -223,6 +247,7 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= @@ -230,7 +255,9 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -238,6 +265,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= @@ -257,14 +285,17 @@ github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLE github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty v1.11.0/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= -github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= 
+github.com/zclconf/go-cty v1.13.1 h1:0a6bRwuiSHtAmqCqNOE+c2oHgepv0ctoxU4FUe43kwc= +github.com/zclconf/go-cty v1.13.1/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -272,14 +303,18 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -302,8 +337,9 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -343,13 +379,14 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -357,8 +394,9 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -394,8 +432,9 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc 
v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -426,6 +465,7 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/helpers/consistency.go b/internal/helpers/consistency.go index d1ef67dda2..0409d388c0 100644 --- a/internal/helpers/consistency.go +++ b/internal/helpers/consistency.go @@ -18,7 +18,7 @@ func WaitForDeletion(ctx context.Context, f ChangeFunc) error { } timeout := time.Until(deadline) - _, err := (&resource.StateChangeConf{ + _, err := (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Deleted"}, Timeout: timeout, @@ -53,7 +53,7 @@ func WaitForUpdate(ctx context.Context, f ChangeFunc) error { } func WaitForUpdateWithTimeout(ctx context.Context, timeout time.Duration, f ChangeFunc) (bool, error) { - res, err := (&resource.StateChangeConf{ + res, err := (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Done"}, Timeout: timeout, diff --git a/internal/services/administrativeunits/administrative_unit_member_resource.go b/internal/services/administrativeunits/administrative_unit_member_resource.go index e9ae4a2c35..9c2bed05e2 100644 --- a/internal/services/administrativeunits/administrative_unit_member_resource.go +++ b/internal/services/administrativeunits/administrative_unit_member_resource.go @@ -111,7 +111,7 @@ func administrativeUnitMemberResourceCreate(ctx context.Context, d *schema.Resou return tf.ErrorDiagF(errors.New("context has no deadline"), "Waiting for member %q to reflect for administrative unit %q", id.MemberId, id.AdministrativeUnitId) } timeout := time.Until(deadline) - _, err = (&resource.StateChangeConf{ + _, err = (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Done"}, Timeout: timeout, diff --git a/internal/services/applications/application_certificate_resource.go b/internal/services/applications/application_certificate_resource.go index d03c95182d..de64d816d6 100644 --- a/internal/services/applications/application_certificate_resource.go +++ b/internal/services/applications/application_certificate_resource.go @@ -175,7 +175,7 @@ func 
applicationCertificateResourceCreate(ctx context.Context, d *schema.Resourc // Wait for the credential to appear in the application manifest, this can take several minutes timeout, _ := ctx.Deadline() - polledForCredential, err := (&resource.StateChangeConf{ + polledForCredential, err := (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Done"}, Timeout: time.Until(timeout), diff --git a/internal/services/applications/application_federated_identity_credential_resource.go b/internal/services/applications/application_federated_identity_credential_resource.go index efe68551ab..36c166062b 100644 --- a/internal/services/applications/application_federated_identity_credential_resource.go +++ b/internal/services/applications/application_federated_identity_credential_resource.go @@ -138,7 +138,7 @@ func applicationFederatedIdentityCredentialResourceCreate(ctx context.Context, d // Wait for the credential to replicate timeout, _ := ctx.Deadline() - polledForCredential, err := (&resource.StateChangeConf{ + polledForCredential, err := (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Done"}, Timeout: time.Until(timeout), diff --git a/internal/services/applications/application_password_resource.go b/internal/services/applications/application_password_resource.go index b3889629f6..35b48d7801 100644 --- a/internal/services/applications/application_password_resource.go +++ b/internal/services/applications/application_password_resource.go @@ -164,7 +164,7 @@ func applicationPasswordResourceCreate(ctx context.Context, d *schema.ResourceDa // Wait for the credential to appear in the application manifest, this can take several minutes timeout, _ := ctx.Deadline() - polledForCredential, err := (&resource.StateChangeConf{ + polledForCredential, err := (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Done"}, Timeout: time.Until(timeout), diff --git a/internal/services/applications/applications.go b/internal/services/applications/applications.go index 34d8421d2d..6e094f6836 100644 --- a/internal/services/applications/applications.go +++ b/internal/services/applications/applications.go @@ -121,7 +121,7 @@ func applicationDisableAppRoles(ctx context.Context, client *msgraph.Application return fmt.Errorf("context has no deadline") } timeout := time.Until(deadline) - _, err = (&resource.StateChangeConf{ + _, err = (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Disabled"}, Timeout: timeout, @@ -238,7 +238,7 @@ func applicationDisableOauth2PermissionScopes(ctx context.Context, client *msgra return fmt.Errorf("context has no deadline") } timeout := time.Until(deadline) - _, err = (&resource.StateChangeConf{ + _, err = (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Disabled"}, Timeout: timeout, diff --git a/internal/services/conditionalaccess/conditional_access_policy_resource.go b/internal/services/conditionalaccess/conditional_access_policy_resource.go index 2fc30555a6..73100b4b8b 100644 --- a/internal/services/conditionalaccess/conditional_access_policy_resource.go +++ b/internal/services/conditionalaccess/conditional_access_policy_resource.go @@ -525,7 +525,7 @@ func conditionalAccessPolicyResourceUpdate(ctx context.Context, d *schema.Resour // in a timeout loop, instead we're hoping that this allows enough time/activity for the update to be reflected. 
log.Printf("[DEBUG] Waiting for conditional access policy %q to be updated", d.Id()) timeout, _ := ctx.Deadline() - stateConf := &resource.StateChangeConf{ + stateConf := &resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Pending"}, Target: []string{"Done"}, Timeout: time.Until(timeout), diff --git a/internal/services/conditionalaccess/named_location_resource.go b/internal/services/conditionalaccess/named_location_resource.go index a1376926b6..0ede2e2a3c 100644 --- a/internal/services/conditionalaccess/named_location_resource.go +++ b/internal/services/conditionalaccess/named_location_resource.go @@ -156,7 +156,7 @@ func namedLocationResourceUpdate(ctx context.Context, d *schema.ResourceData, me base.DisplayName = &displayName } - var updateRefreshFunc resource.StateRefreshFunc + var updateRefreshFunc resource.StateRefreshFunc //nolint:staticcheck if v, ok := d.GetOk("ip"); ok { properties := expandIPNamedLocation(v.([]interface{})) @@ -218,7 +218,7 @@ func namedLocationResourceUpdate(ctx context.Context, d *schema.ResourceData, me log.Printf("[DEBUG] Waiting for named location %q to be updated", d.Id()) timeout, _ := ctx.Deadline() - stateConf := &resource.StateChangeConf{ + stateConf := &resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Pending"}, Target: []string{"Updated"}, Timeout: time.Until(timeout), diff --git a/internal/services/directoryroles/directory_role_assignment_resource.go b/internal/services/directoryroles/directory_role_assignment_resource.go index c01918cc00..13f8cc937e 100644 --- a/internal/services/directoryroles/directory_role_assignment_resource.go +++ b/internal/services/directoryroles/directory_role_assignment_resource.go @@ -149,7 +149,7 @@ func directoryRoleAssignmentResourceCreate(ctx context.Context, d *schema.Resour return tf.ErrorDiagF(errors.New("context has no deadline"), "Waiting for directory role %q assignment to principal %q to take effect", roleId, principalId) } timeout := time.Until(deadline) - _, err = (&resource.StateChangeConf{ + _, err = (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Done"}, Timeout: timeout, diff --git a/internal/services/directoryroles/directory_role_member_resource.go b/internal/services/directoryroles/directory_role_member_resource.go index 9ac0e3ec08..f3dcc78090 100644 --- a/internal/services/directoryroles/directory_role_member_resource.go +++ b/internal/services/directoryroles/directory_role_member_resource.go @@ -113,7 +113,7 @@ func directoryRoleMemberResourceCreate(ctx context.Context, d *schema.ResourceDa return tf.ErrorDiagF(errors.New("context has no deadline"), "Waiting for role member %q to reflect for directory role %q", id.MemberId, id.DirectoryRoleId) } timeout := time.Until(deadline) - _, err = (&resource.StateChangeConf{ + _, err = (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Done"}, Timeout: timeout, diff --git a/internal/services/serviceprincipals/service_principal_certificate_resource.go b/internal/services/serviceprincipals/service_principal_certificate_resource.go index f255832872..e9d8f7f8fb 100644 --- a/internal/services/serviceprincipals/service_principal_certificate_resource.go +++ b/internal/services/serviceprincipals/service_principal_certificate_resource.go @@ -175,7 +175,7 @@ func servicePrincipalCertificateResourceCreate(ctx context.Context, d *schema.Re // Wait for the credential to appear in the service principal manifest, this can take several minutes timeout, 
_ := ctx.Deadline() - polledForCredential, err := (&resource.StateChangeConf{ + polledForCredential, err := (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Done"}, Timeout: time.Until(timeout), diff --git a/internal/services/serviceprincipals/service_principal_password_resource.go b/internal/services/serviceprincipals/service_principal_password_resource.go index a4c9e9978a..1c8736fd80 100644 --- a/internal/services/serviceprincipals/service_principal_password_resource.go +++ b/internal/services/serviceprincipals/service_principal_password_resource.go @@ -164,7 +164,7 @@ func servicePrincipalPasswordResourceCreate(ctx context.Context, d *schema.Resou // Wait for the credential to appear in the service principal manifest, this can take several minutes timeout, _ := ctx.Deadline() - polledForCredential, err := (&resource.StateChangeConf{ + polledForCredential, err := (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Done"}, Timeout: time.Until(timeout), diff --git a/internal/services/serviceprincipals/service_principal_token_signing_certificate_resource.go b/internal/services/serviceprincipals/service_principal_token_signing_certificate_resource.go index cd61d658d9..195022bf5a 100644 --- a/internal/services/serviceprincipals/service_principal_token_signing_certificate_resource.go +++ b/internal/services/serviceprincipals/service_principal_token_signing_certificate_resource.go @@ -124,7 +124,7 @@ func servicePrincipalTokenSigningCertificateResourceCreate(ctx context.Context, // Wait for the credential to appear in the service principal manifest, this can take several minutes timeout, _ := ctx.Deadline() - polledForCredential, err := (&resource.StateChangeConf{ + polledForCredential, err := (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Done"}, Timeout: time.Until(timeout), diff --git a/internal/services/serviceprincipals/synchronization_job_resource.go b/internal/services/serviceprincipals/synchronization_job_resource.go index 1c605d34b5..c560574993 100644 --- a/internal/services/serviceprincipals/synchronization_job_resource.go +++ b/internal/services/serviceprincipals/synchronization_job_resource.go @@ -128,7 +128,7 @@ func synchronizationJobResourceCreate(ctx context.Context, d *schema.ResourceDat // Wait for the job to appear, this can take several moments timeout, _ := ctx.Deadline() - _, err = (&resource.StateChangeConf{ + _, err = (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Done"}, Timeout: time.Until(timeout), diff --git a/internal/services/serviceprincipals/synchronization_secret_resource.go b/internal/services/serviceprincipals/synchronization_secret_resource.go index ad4ef7dfef..14789a5191 100644 --- a/internal/services/serviceprincipals/synchronization_secret_resource.go +++ b/internal/services/serviceprincipals/synchronization_secret_resource.go @@ -99,7 +99,7 @@ func synchronizationSecretResourceCreate(ctx context.Context, d *schema.Resource // Wait for the secret to appear timeout, _ := ctx.Deadline() - _, err = (&resource.StateChangeConf{ + _, err = (&resource.StateChangeConf{ //nolint:staticcheck Pending: []string{"Waiting"}, Target: []string{"Done"}, Timeout: time.Until(timeout), diff --git a/vendor/github.com/apparentlymart/go-cidr/LICENSE b/vendor/github.com/apparentlymart/go-cidr/LICENSE deleted file mode 100644 index 2125378860..0000000000 --- 
a/vendor/github.com/apparentlymart/go-cidr/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Martin Atkins - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go deleted file mode 100644 index 20823af041..0000000000 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ /dev/null @@ -1,236 +0,0 @@ -// Package cidr is a collection of assorted utilities for computing -// network and host addresses within network ranges. -// -// It expects a CIDR-type address structure where addresses are divided into -// some number of prefix bits representing the network and then the remaining -// suffix bits represent the host. -// -// For example, it can help to calculate addresses for sub-networks of a -// parent network, or to calculate host addresses within a particular prefix. -// -// At present this package is prioritizing simplicity of implementation and -// de-prioritizing speed and memory usage. Thus caution is advised before -// using this package in performance-critical applications or hot codepaths. -// Patches to improve the speed and memory usage may be accepted as long as -// they do not result in a significant increase in code complexity. -package cidr - -import ( - "fmt" - "math/big" - "net" -) - -// Subnet takes a parent CIDR range and creates a subnet within it -// with the given number of additional prefix bits and the given -// network number. -// -// For example, 10.3.0.0/16, extended by 8 bits, with a network number -// of 5, becomes 10.3.5.0/24 . -func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) { - return SubnetBig(base, newBits, big.NewInt(int64(num))) -} - -// SubnetBig takes a parent CIDR range and creates a subnet within it with the -// given number of additional prefix bits and the given network number. It -// differs from Subnet in that it takes a *big.Int for the num, instead of an int. -// -// For example, 10.3.0.0/16, extended by 8 bits, with a network number of 5, -// becomes 10.3.5.0/24 . 
-func SubnetBig(base *net.IPNet, newBits int, num *big.Int) (*net.IPNet, error) { - ip := base.IP - mask := base.Mask - - parentLen, addrLen := mask.Size() - newPrefixLen := parentLen + newBits - - if newPrefixLen > addrLen { - return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits) - } - - maxNetNum := uint64(1<<uint64(newBits)) - 1 - if num.Uint64() > maxNetNum { - return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num) - } - - return &net.IPNet{ - IP: insertNumIntoIP(ip, num, newPrefixLen), - Mask: net.CIDRMask(newPrefixLen, addrLen), - }, nil -} - -// Host takes a parent CIDR range and turns it into a host IP address with the -// given host number. -// -// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2. -func Host(base *net.IPNet, num int) (net.IP, error) { - return HostBig(base, big.NewInt(int64(num))) -} - -// HostBig takes a parent CIDR range and turns it into a host IP address with -// the given host number. It differs from Host in that it takes a *big.Int for -// the num, instead of an int. -// -// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2. -func HostBig(base *net.IPNet, num *big.Int) (net.IP, error) { - ip := base.IP - mask := base.Mask - - parentLen, addrLen := mask.Size() - hostLen := addrLen - parentLen - - maxHostNum := big.NewInt(int64(1)) - maxHostNum.Lsh(maxHostNum, uint(hostLen)) - maxHostNum.Sub(maxHostNum, big.NewInt(1)) - - numUint64 := big.NewInt(int64(num.Uint64())) - if num.Cmp(big.NewInt(0)) == -1 { - numUint64.Neg(num) - numUint64.Sub(numUint64, big.NewInt(int64(1))) - num.Sub(maxHostNum, numUint64) - } - - if numUint64.Cmp(maxHostNum) == 1 { - return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num) - } - var bitlength int - if ip.To4() != nil { - bitlength = 32 - } else { - bitlength = 128 - } - return insertNumIntoIP(ip, num, bitlength), nil -} - -// AddressRange returns the first and last addresses in the given CIDR range. -func AddressRange(network *net.IPNet) (net.IP, net.IP) { - // the first IP is easy - firstIP := network.IP - - // the last IP is the network address OR NOT the mask address - prefixLen, bits := network.Mask.Size() - if prefixLen == bits { - // Easy! - // But make sure that our two slices are distinct, since they - // would be in all other cases. - lastIP := make([]byte, len(firstIP)) - copy(lastIP, firstIP) - return firstIP, lastIP - } - - firstIPInt, bits := ipToInt(firstIP) - hostLen := uint(bits) - uint(prefixLen) - lastIPInt := big.NewInt(1) - lastIPInt.Lsh(lastIPInt, hostLen) - lastIPInt.Sub(lastIPInt, big.NewInt(1)) - lastIPInt.Or(lastIPInt, firstIPInt) - - return firstIP, intToIP(lastIPInt, bits) -} - -// AddressCount returns the number of distinct host addresses within the given -// CIDR range. -// -// Since the result is a uint64, this function returns meaningful information -// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65. 
-func AddressCount(network *net.IPNet) uint64 { - prefixLen, bits := network.Mask.Size() - return 1 << (uint64(bits) - uint64(prefixLen)) -} - -//VerifyNoOverlap takes a list subnets and supernet (CIDRBlock) and verifies -//none of the subnets overlap and all subnets are in the supernet -//it returns an error if any of those conditions are not satisfied -func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error { - firstLastIP := make([][]net.IP, len(subnets)) - for i, s := range subnets { - first, last := AddressRange(s) - firstLastIP[i] = []net.IP{first, last} - } - for i, s := range subnets { - if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) { - return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String()) - } - for j := 0; j < len(subnets); j++ { - if i == j { - continue - } - - first := firstLastIP[j][0] - last := firstLastIP[j][1] - if s.Contains(first) || s.Contains(last) { - return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String()) - } - } - } - return nil -} - -// PreviousSubnet returns the subnet of the desired mask in the IP space -// just lower than the start of IPNet provided. If the IP space rolls over -// then the second return value is true -func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { - startIP := checkIPv4(network.IP) - previousIP := make(net.IP, len(startIP)) - copy(previousIP, startIP) - cMask := net.CIDRMask(prefixLen, 8*len(previousIP)) - previousIP = Dec(previousIP) - previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask} - if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) { - return previous, true - } - return previous, false -} - -// NextSubnet returns the next available subnet of the desired mask size -// starting for the maximum IP of the offset subnet -// If the IP exceeds the maxium IP then the second return value is true -func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { - _, currentLast := AddressRange(network) - mask := net.CIDRMask(prefixLen, 8*len(currentLast)) - currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask} - _, last := AddressRange(currentSubnet) - last = Inc(last) - next := &net.IPNet{IP: last.Mask(mask), Mask: mask} - if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) { - return next, true - } - return next, false -} - -//Inc increases the IP by one this returns a new []byte for the IP -func Inc(IP net.IP) net.IP { - IP = checkIPv4(IP) - incIP := make([]byte, len(IP)) - copy(incIP, IP) - for j := len(incIP) - 1; j >= 0; j-- { - incIP[j]++ - if incIP[j] > 0 { - break - } - } - return incIP -} - -//Dec decreases the IP by one this returns a new []byte for the IP -func Dec(IP net.IP) net.IP { - IP = checkIPv4(IP) - decIP := make([]byte, len(IP)) - copy(decIP, IP) - decIP = checkIPv4(decIP) - for j := len(decIP) - 1; j >= 0; j-- { - decIP[j]-- - if decIP[j] < 255 { - break - } - } - return decIP -} - -func checkIPv4(ip net.IP) net.IP { - // Go for some reason allocs IPv6len for IPv4 so we have to correct it - if v4 := ip.To4(); v4 != nil { - return v4 - } - return ip -} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go deleted file mode 100644 index e5e6a2cf91..0000000000 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go +++ /dev/null @@ -1,37 +0,0 @@ -package cidr - -import ( - "fmt" - "math/big" - "net" -) - -func ipToInt(ip net.IP) (*big.Int, int) { - val := &big.Int{} - 
val.SetBytes([]byte(ip)) - if len(ip) == net.IPv4len { - return val, 32 - } else if len(ip) == net.IPv6len { - return val, 128 - } else { - panic(fmt.Errorf("Unsupported address length %d", len(ip))) - } -} - -func intToIP(ipInt *big.Int, bits int) net.IP { - ipBytes := ipInt.Bytes() - ret := make([]byte, bits/8) - // Pack our IP bytes into the end of the return array, - // since big.Int.Bytes() removes front zero padding. - for i := 1; i <= len(ipBytes); i++ { - ret[len(ret)-i] = ipBytes[len(ipBytes)-i] - } - return net.IP(ret) -} - -func insertNumIntoIP(ip net.IP, bigNum *big.Int, prefixLen int) net.IP { - ipInt, totalBits := ipToInt(ip) - bigNum.Lsh(bigNum, uint(totalBits-prefixLen)) - ipInt.Or(ipInt, bigNum) - return intToIP(ipInt, totalBits) -} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bc52e96f2b..0000000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 792994785e..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// Go versions prior to 1.4 are disabled because they use a different layout -// for interfaces which make the implementation of unsafeReflectValue more complex. -// +build !js,!appengine,!safe,!disableunsafe,go1.4 - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. 
- UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -type flag uintptr - -var ( - // flagRO indicates whether the value field of a reflect.Value - // is read-only. - flagRO flag - - // flagAddr indicates whether the address of the reflect.Value's - // value may be taken. - flagAddr flag -) - -// flagKindMask holds the bits that make up the kind -// part of the flags field. In all the supported versions, -// it is in the lower 5 bits. -const flagKindMask = flag(0x1f) - -// Different versions of Go have used different -// bit layouts for the flags type. This table -// records the known combinations. -var okFlags = []struct { - ro, addr flag -}{{ - // From Go 1.4 to 1.5 - ro: 1 << 5, - addr: 1 << 7, -}, { - // Up to Go tip. - ro: 1<<5 | 1<<6, - addr: 1 << 8, -}} - -var flagValOffset = func() uintptr { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - return field.Offset -}() - -// flagField returns a pointer to the flag field of a reflect.Value. -func flagField(v *reflect.Value) *flag { - return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) reflect.Value { - if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { - return v - } - flagFieldPtr := flagField(&v) - *flagFieldPtr &^= flagRO - *flagFieldPtr |= flagAddr - return v -} - -// Sanity checks against future reflect package changes -// to the type or semantics of the Value.flag field. -func init() { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { - panic("reflect.Value flag field has changed kind") - } - type t0 int - var t struct { - A t0 - // t0 will have flagEmbedRO set. - t0 - // a will have flagStickyRO set - a t0 - } - vA := reflect.ValueOf(t).FieldByName("A") - va := reflect.ValueOf(t).FieldByName("a") - vt0 := reflect.ValueOf(t).FieldByName("t0") - - // Infer flagRO from the difference between the flags - // for the (otherwise identical) fields in t. - flagPublic := *flagField(&vA) - flagWithRO := *flagField(&va) | *flagField(&vt0) - flagRO = flagPublic ^ flagWithRO - - // Infer flagAddr from the difference between a value - // taken from a pointer and not. - vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") - flagNoPtr := *flagField(&vA) - flagPtr := *flagField(&vPtrA) - flagAddr = flagNoPtr ^ flagPtr - - // Check that the inferred flags tally with one of the known versions. 
- for _, f := range okFlags { - if flagRO == f.ro && flagAddr == f.addr { - return - } - } - panic("reflect.Value read-only flag has changed semantics") -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 205c28d68c..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe !go1.4 - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 1be8ce9457..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. 
-var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("") - maxNewlineBytes = []byte("\n") - maxShortBytes = []byte("") - circularBytes = []byte("") - circularShortBytes = []byte("") - invalidAngleBytes = []byte("") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? - switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. 
-func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. -func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. 
- switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. -func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f312..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. - DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. 
- ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. 
It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) 
-} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6f1e..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. 
- -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. 
This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. 
- -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index f78d89fc1f..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. -func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. 
-func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound: - d.w.Write(nilAngleBytes) - - case cycleFound: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. 
- iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. 
It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index b04edb7d7a..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. 
- if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound: - f.fs.Write(nilAngleBytes) - - case cycleFound: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. - if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. 
-*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e33882..0000000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/auth.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/auth.go index 13d9a6e449..722e76171f 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/auth.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/auth.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. 
+ package auth import ( @@ -8,9 +11,6 @@ import ( "github.com/hashicorp/go-azure-sdk/sdk/environments" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. - // NewAuthorizerFromCredentials returns a suitable Authorizer depending on what is defined in the Credentials // Authorizers are selected for authentication methods in the following preferential order: // - Client certificate authentication @@ -126,8 +126,9 @@ func NewAuthorizerFromCredentials(ctx context.Context, c Credentials, api enviro if c.EnableAuthenticatingUsingAzureCLI { opts := AzureCliAuthorizerOptions{ - Api: api, - TenantId: c.TenantID, + Api: api, + TenantId: c.TenantID, + AuxTenantIds: c.AuxiliaryTenantIDs, } a, err := NewAzureCliAuthorizer(ctx, opts) if err != nil { diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/azure_cli_authorizer.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/azure_cli_authorizer.go index 1bb5ce5f18..21de3aaa45 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/azure_cli_authorizer.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/azure_cli_authorizer.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + package auth import ( @@ -7,15 +10,13 @@ import ( "net/http" "os" "strings" + "time" "github.com/hashicorp/go-azure-sdk/sdk/environments" "github.com/hashicorp/go-azure-sdk/sdk/internal/azurecli" "golang.org/x/oauth2" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. - type AzureCliAuthorizerOptions struct { // Api describes the Azure API being used Api environments.Api @@ -81,8 +82,16 @@ func (a *AzureCliAuthorizer) Token(_ context.Context, _ *http.Request) (*oauth2. return nil, err } + var expiry time.Time + if token.ExpiresOn != "" { + if expiry, err = time.ParseInLocation("2006-01-02 15:04:05.999999", token.ExpiresOn, time.Local); err != nil { + return nil, fmt.Errorf("internal-error: parsing expiresOn value for az-cli token") + } + } + return &oauth2.Token{ AccessToken: token.AccessToken, + Expiry: expiry, TokenType: token.TokenType, }, nil } diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/cached_authorizer.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/cached_authorizer.go index 0de357d0c5..6043dda77c 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/cached_authorizer.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/cached_authorizer.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + package auth import ( @@ -9,9 +12,6 @@ import ( "golang.org/x/oauth2" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
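// Illustrative sketch, not part of the vendored changes: with AuxiliaryTenantIDs
// now forwarded to the CLI authorizer and Expiry populated from the CLI's
// expiresOn value, Azure CLI authentication can cover multi-tenant scenarios.
// cliAuthorizer is a hypothetical helper; imports of context, log, net/http,
// auth and environments are assumed, and the environments.Api value is assumed
// to be supplied by the caller (for example a field of the target Environment).
func cliAuthorizer(ctx context.Context, api environments.Api) (auth.Authorizer, error) {
	creds := auth.Credentials{
		TenantID:                          "00000000-0000-0000-0000-000000000000", // placeholder tenant IDs
		AuxiliaryTenantIDs:                []string{"11111111-1111-1111-1111-111111111111"},
		EnableAuthenticatingUsingAzureCLI: true,
	}
	authorizer, err := auth.NewAuthorizerFromCredentials(ctx, creds, api)
	if err != nil {
		return nil, err
	}
	if token, err := authorizer.Token(ctx, nil); err == nil {
		log.Printf("[DEBUG] azure-cli access token expires at %s", token.Expiry)
	}
	return authorizer, nil
}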
- var _ Authorizer = &CachedAuthorizer{} // CachedAuthorizer caches a token until it expires, then acquires a new token from Source diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/client.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/client.go index e5c39906d6..44a3d82247 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/client.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/client.go @@ -17,6 +17,26 @@ import ( "github.com/hashicorp/go-retryablehttp" ) +var ( + // Client is the HTTP client used for sending authentication requests and obtaining tokens + Client HTTPClient + + // MetadataClient is the HTTP client used for obtaining tokens from the Instance Metadata Service + MetadataClient HTTPClient +) + +func init() { + Client = httpClient(defaultHttpClientParams()) + MetadataClient = httpClient(httpClientParams{ + instanceMetadataService: true, + + retryWaitMin: 2 * time.Second, + retryWaitMax: 60 * time.Second, + retryMaxCount: 5, + useProxy: false, + }) +} + type httpClientParams struct { instanceMetadataService bool diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/client_credentials.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/client_credentials.go index b3f5836afd..571a12ccd8 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/client_credentials.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/client_credentials.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + package auth import ( @@ -24,9 +27,6 @@ import ( "golang.org/x/oauth2" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. - type clientCredentialsType string const ( @@ -86,15 +86,15 @@ type clientCredentialsConfig struct { Audience string } -// TokenSource provides a source for obtaining access tokens using clientAssertionAuthorizer or clientSecretAuthorizer. +// TokenSource provides a source for obtaining access tokens using ClientAssertionAuthorizer or ClientSecretAuthorizer. 
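// Illustrative sketch, not part of the vendored changes: the token-acquisition
// HTTP client is now the exported package variable auth.Client, typed as the
// HTTPClient interface added in interface.go below, so consumers (for example
// tests) can wrap or replace it. tracingHTTPClient and enableTokenRequestTracing
// are hypothetical names; imports of log and net/http are assumed.
type tracingHTTPClient struct {
	wrapped auth.HTTPClient
}

func (c tracingHTTPClient) Do(req *http.Request) (*http.Response, error) {
	log.Printf("[DEBUG] token request: %s %s", req.Method, req.URL.String())
	return c.wrapped.Do(req)
}

func enableTokenRequestTracing() {
	auth.Client = tracingHTTPClient{wrapped: auth.Client}
}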
func (c *clientCredentialsConfig) TokenSource(_ context.Context, authType clientCredentialsType) (Authorizer, error) { switch authType { case clientCredentialsAssertionType: - return NewCachedAuthorizer(&clientAssertionAuthorizer{ + return NewCachedAuthorizer(&ClientAssertionAuthorizer{ conf: c, }) case clientCredentialsSecretType: - return NewCachedAuthorizer(&clientSecretAuthorizer{ + return NewCachedAuthorizer(&ClientSecretAuthorizer{ conf: c, }) } @@ -192,13 +192,21 @@ func (c *clientAssertionToken) encode(key crypto.PrivateKey) (*string, error) { return &ret, nil } -var _ Authorizer = &clientAssertionAuthorizer{} +var _ Authorizer = &ClientAssertionAuthorizer{} -type clientAssertionAuthorizer struct { +type ClientAssertionAuthorizer struct { conf *clientCredentialsConfig } -func (a *clientAssertionAuthorizer) assertion(tokenUrl string) (*string, error) { +func (a *ClientAssertionAuthorizer) assertion(tokenUrl string) (*string, error) { + if a.conf == nil { + return nil, fmt.Errorf("internal-error: ClientAssertionAuthorizer not configured") + } + + if a.conf.Certificate == nil { + return nil, fmt.Errorf("internal-error: ClientAssertionAuthorizer misconfigured; Certificate was nil") + } + keySig := sha1.Sum(a.conf.Certificate.Raw) keyId := base64.URLEncoding.EncodeToString(keySig[:]) @@ -218,15 +226,20 @@ func (a *clientAssertionAuthorizer) assertion(tokenUrl string) (*string, error) Subject: a.conf.ClientID, }, } + assertion, err := t.encode(a.conf.PrivateKey) if err != nil { - return nil, fmt.Errorf("clientAssertionAuthorizer: failed to encode and sign JWT assertion: %v", err) + return nil, fmt.Errorf("ClientAssertionAuthorizer: failed to encode and sign JWT assertion: %v", err) } return assertion, nil } -func (a *clientAssertionAuthorizer) token(ctx context.Context, tokenUrl string) (*oauth2.Token, error) { +func (a *ClientAssertionAuthorizer) token(ctx context.Context, tokenUrl string) (*oauth2.Token, error) { + if a.conf == nil { + return nil, fmt.Errorf("internal-error: ClientAssertionAuthorizer not configured") + } + assertion := a.conf.FederatedAssertion if assertion == "" { a, err := a.assertion(tokenUrl) @@ -234,7 +247,7 @@ func (a *clientAssertionAuthorizer) token(ctx context.Context, tokenUrl string) return nil, err } if a == nil { - return nil, fmt.Errorf("clientAssertionAuthorizer: assertion was nil") + return nil, fmt.Errorf("ClientAssertionAuthorizer: assertion was nil") } assertion = *a } @@ -253,7 +266,7 @@ func (a *clientAssertionAuthorizer) token(ctx context.Context, tokenUrl string) return clientCredentialsToken(ctx, tokenUrl, &v) } -func (a *clientAssertionAuthorizer) Token(ctx context.Context, _ *http.Request) (*oauth2.Token, error) { +func (a *ClientAssertionAuthorizer) Token(ctx context.Context, _ *http.Request) (*oauth2.Token, error) { if a.conf == nil { return nil, fmt.Errorf("could not request token: conf is nil") } @@ -270,7 +283,7 @@ func (a *clientAssertionAuthorizer) Token(ctx context.Context, _ *http.Request) } // AuxiliaryTokens returns additional tokens for auxiliary tenant IDs, for use in multi-tenant scenarios -func (a *clientAssertionAuthorizer) AuxiliaryTokens(ctx context.Context, _ *http.Request) ([]*oauth2.Token, error) { +func (a *ClientAssertionAuthorizer) AuxiliaryTokens(ctx context.Context, _ *http.Request) ([]*oauth2.Token, error) { if a.conf == nil { return nil, fmt.Errorf("could not request token: conf is nil") } @@ -310,8 +323,7 @@ func clientCredentialsToken(ctx context.Context, endpoint string, params *url.Va req.Header.Set("Accept", 
"application/json") req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - client := httpClient(defaultHttpClientParams()) - resp, err := client.Do(req) + resp, err := Client.Do(req) if err != nil { return nil, fmt.Errorf("clientCredentialsToken: cannot request token: %v", err) } diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/client_secret_authorizer.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/client_secret_authorizer.go index 6a3a46eeb2..dcbd3ffe85 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/client_secret_authorizer.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/client_secret_authorizer.go @@ -57,13 +57,13 @@ func NewClientSecretAuthorizer(ctx context.Context, options ClientSecretAuthoriz return conf.TokenSource(ctx, clientCredentialsSecretType) } -var _ Authorizer = &clientSecretAuthorizer{} +var _ Authorizer = &ClientSecretAuthorizer{} -type clientSecretAuthorizer struct { +type ClientSecretAuthorizer struct { conf *clientCredentialsConfig } -func (a *clientSecretAuthorizer) Token(ctx context.Context, _ *http.Request) (*oauth2.Token, error) { +func (a *ClientSecretAuthorizer) Token(ctx context.Context, _ *http.Request) (*oauth2.Token, error) { if a.conf == nil { return nil, fmt.Errorf("could not request token: conf is nil") } @@ -90,7 +90,7 @@ func (a *clientSecretAuthorizer) Token(ctx context.Context, _ *http.Request) (*o } // AuxiliaryTokens returns additional tokens for auxiliary tenant IDs, for use in multi-tenant scenarios -func (a *clientSecretAuthorizer) AuxiliaryTokens(ctx context.Context, _ *http.Request) ([]*oauth2.Token, error) { +func (a *ClientSecretAuthorizer) AuxiliaryTokens(ctx context.Context, _ *http.Request) ([]*oauth2.Token, error) { if a.conf == nil { return nil, fmt.Errorf("could not request token: conf is nil") } diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/config.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/config.go index 58b408cedc..7be93f4fcb 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/config.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/config.go @@ -1,12 +1,12 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + package auth import ( "github.com/hashicorp/go-azure-sdk/sdk/environments" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. - // Credentials sets up NewAuthorizer to return an Authorizer based on the provided credentails. type Credentials struct { // Specifies the national cloud environment to use diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/github_oidc_authorizer.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/github_oidc_authorizer.go index 8f2c449423..a369ebf982 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/github_oidc_authorizer.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/github_oidc_authorizer.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + package auth import ( @@ -12,9 +15,6 @@ import ( "golang.org/x/oauth2" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
- type GitHubOIDCAuthorizerOptions struct { // Api describes the Azure API being used Api environments.Api @@ -90,8 +90,7 @@ func (a *GitHubOIDCAuthorizer) githubAssertion(ctx context.Context, _ *http.Requ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", a.conf.IDTokenRequestToken)) req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - client := httpClient(defaultHttpClientParams()) - resp, err := client.Do(req) + resp, err := Client.Do(req) if err != nil { return nil, fmt.Errorf("githubAssertion: cannot request token: %v", err) } diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/interface.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/interface.go index 856d7aaf21..7620f5d76f 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/interface.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/interface.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + package auth import ( @@ -7,12 +10,14 @@ import ( "golang.org/x/oauth2" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. - // Authorizer is anything that can return an access token for authorizing API connections type Authorizer interface { Token(ctx context.Context, request *http.Request) (*oauth2.Token, error) AuxiliaryTokens(ctx context.Context, request *http.Request) ([]*oauth2.Token, error) } + +// HTTPClient is an HTTP client used for sending authentication requests and obtaining tokens +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/managed_identity_authorizer.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/managed_identity_authorizer.go index b0f019b3bf..718e6693d9 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/managed_identity_authorizer.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/auth/managed_identity_authorizer.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + package auth import ( @@ -15,9 +18,6 @@ import ( "golang.org/x/oauth2" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. - type ManagedIdentityAuthorizerOptions struct { // Api describes the Azure API being used Api environments.Api @@ -166,18 +166,9 @@ func azureMetadata(ctx context.Context, url string) (body []byte, err error) { "Metadata": []string{"true"}, } - client := httpClient(httpClientParams{ - instanceMetadataService: true, - - retryWaitMin: 2 * time.Second, - retryWaitMax: 60 * time.Second, - retryMaxCount: 5, - useProxy: false, - }) - var resp *http.Response log.Printf("[DEBUG] Performing %s Request to %q", req.Method, url) - resp, err = client.Do(req) + resp, err = MetadataClient.Do(req) if err != nil { return } diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/claims/claims.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/claims/claims.go index 85a0242a44..97683b0c65 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/claims/claims.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/claims/claims.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. 
See NOTICE.txt in the project root for license information. + package claims import ( @@ -9,9 +12,6 @@ import ( "golang.org/x/oauth2" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. - // Claims is used to unmarshall the claims from a JWT issued by the Microsoft Identity Platform. type Claims struct { Audience string `json:"aud"` diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/environments/from_endpoint.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/environments/from_endpoint.go index 8b8de00872..1d0debe5dd 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/environments/from_endpoint.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/environments/from_endpoint.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + package environments import ( @@ -7,9 +10,6 @@ import ( "github.com/hashicorp/go-azure-sdk/sdk/internal/metadata" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. - // FromEndpoint attempts to load an environment from the given Endpoint. func FromEndpoint(ctx context.Context, endpoint, name string) (*Environment, error) { env := baseEnvironmentWithName("FromEnvironment") diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/environments/from_name.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/environments/from_name.go index 1b2f11c320..d3475921c6 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/environments/from_name.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/environments/from_name.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + package environments import ( @@ -5,9 +8,6 @@ import ( "strings" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. - func FromName(name string) (*Environment, error) { switch strings.ToLower(name) { case "china": diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/internal/azurecli/azcli.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/internal/azurecli/azcli.go index f879e3f02f..09896637dc 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/internal/azurecli/azcli.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/internal/azurecli/azcli.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + package azurecli import ( @@ -11,9 +14,6 @@ import ( "github.com/hashicorp/go-version" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
- // CheckAzVersion tries to determine the version of Azure CLI in the path and checks for a compatible version func CheckAzVersion(minVersion string, nextMajorVersion *string) error { var cliVersion *struct { diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/internal/metadata/client.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/internal/metadata/client.go index 1e85e86efd..4aa74c38f4 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/internal/metadata/client.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/internal/metadata/client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + package metadata import ( @@ -10,9 +13,6 @@ import ( "net/http" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. - // NOTE: this Client cannot use the base client since it'd cause a circular reference type Client struct { diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/errors.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/errors.go index 468b3c56a6..4962bddf75 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/errors.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/errors.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + package odata import ( @@ -7,9 +10,6 @@ import ( "strings" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. - const ( ErrorAddedObjectReferencesAlreadyExist = "One or more added object references already exist" ErrorCannotDeleteOrUpdateEnabledEntitlement = "Permission (scope or role) cannot be deleted or updated unless disabled first" diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/http.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/http.go index 98b5882be8..4bd32a87b4 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/http.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/http.go @@ -1,7 +1,7 @@ -package odata - // Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + +package odata import ( "bytes" diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/odata.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/odata.go index 9875e13bfb..9b26d87a8d 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/odata.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/odata.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + package odata import ( @@ -9,9 +12,6 @@ import ( "github.com/hashicorp/go-uuid" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
- const ODataVersion = "4.0" // TODO: support 4.01 - https://docs.oasis-open.org/odata/odata-json-format/v4.01/cs01/odata-json-format-v4.01-cs01.html#_Toc499720587 type Id string diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/query.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/query.go index c9c6c58211..7eb09c5b10 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/query.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/query.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp Inc. All rights reserved. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + package odata import ( @@ -8,9 +11,6 @@ import ( "strings" ) -// Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. - type ConsistencyLevel string const ( @@ -58,6 +58,9 @@ type Query struct { // Top specifies the page size of the result set Top int + + // DeltaToken is used to query a delta endpoint + DeltaToken string } // Headers returns an http.Header map containing OData specific headers @@ -124,6 +127,9 @@ func (q Query) Values() url.Values { if q.Top > 0 { p.Add("$top", strconv.Itoa(q.Top)) } + if q.DeltaToken != "" { + p.Add("$deltatoken", q.DeltaToken) + } return p } diff --git a/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/types.go b/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/types.go index 96a3157bef..cc404c3d42 100644 --- a/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/types.go +++ b/vendor/github.com/hashicorp/go-azure-sdk/sdk/odata/types.go @@ -1,7 +1,7 @@ -package odata - // Copyright (c) HashiCorp Inc. All rights reserved. -// Licensed under the MIT License. See NOTICE.txt in the project root for license information. +// Licensed under the MPL-2.0 License. See NOTICE.txt in the project root for license information. + +package odata type ShortType = string diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go index 9635c838b4..99cc176a41 100644 --- a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go +++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go @@ -8,7 +8,7 @@ import ( ) // setColorization will mutate the values of this logger -// to approperately configure colorization options. It provides +// to appropriately configure colorization options. It provides // a wrapper to the output stream on Windows systems. func (l *intLogger) setColorization(opts *LoggerOptions) { switch opts.Color { diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go index 30859168ee..26f8cef8d1 100644 --- a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go +++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go @@ -11,7 +11,7 @@ import ( ) // setColorization will mutate the values of this logger -// to approperately configure colorization options. It provides +// to appropriately configure colorization options. It provides // a wrapper to the output stream on Windows systems. func (l *intLogger) setColorization(opts *LoggerOptions) { switch opts.Color { diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go index b9f00217ca..48ff1f3a4e 100644 --- a/vendor/github.com/hashicorp/go-hclog/global.go +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -20,13 +20,13 @@ var ( ) // Default returns a globally held logger. 
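// Illustrative sketch, not part of the vendored changes: the DeltaToken field
// added to odata.Query above is serialised as the $deltatoken query parameter
// by Values(). deltaQueryValues is a hypothetical helper; net/url is assumed.
func deltaQueryValues(token string) url.Values {
	q := odata.Query{
		DeltaToken: token, // opaque token returned by a previous delta response
		Top:        50,
	}
	return q.Values() // includes $deltatoken=<token> and $top=50
}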
This can be a good starting -// place, and then you can use .With() and .Name() to create sub-loggers +// place, and then you can use .With() and .Named() to create sub-loggers // to be used in more specific contexts. // The value of the Default logger can be set via SetDefault() or by // changing the options in DefaultOptions. // // This method is goroutine safe, returning a global from memory, but -// cause should be used if SetDefault() is called it random times +// care should be used if SetDefault() is called it random times // in the program as that may result in race conditions and an unexpected // Logger being returned. func Default() Logger { diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index 83232f7a62..89d26c9b01 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -17,6 +17,8 @@ import ( "sync" "sync/atomic" "time" + "unicode" + "unicode/utf8" "github.com/fatih/color" ) @@ -48,6 +50,12 @@ var ( Warn: color.New(color.FgHiYellow), Error: color.New(color.FgHiRed), } + + faintBoldColor = color.New(color.Faint, color.Bold) + faintColor = color.New(color.Faint) + faintMultiLinePrefix = faintColor.Sprint(" | ") + faintFieldSeparator = faintColor.Sprint("=") + faintFieldSeparatorWithNewLine = faintColor.Sprint("=\n") ) // Make sure that intLogger is a Logger @@ -70,6 +78,7 @@ type intLogger struct { level *int32 headerColor ColorOption + fieldColor ColorOption implied []interface{} @@ -115,14 +124,19 @@ func newLogger(opts *LoggerOptions) *intLogger { mutex = new(sync.Mutex) } - var primaryColor, headerColor ColorOption - - if opts.ColorHeaderOnly { - primaryColor = ColorOff + var ( + primaryColor ColorOption = ColorOff + headerColor ColorOption = ColorOff + fieldColor ColorOption = ColorOff + ) + switch { + case opts.ColorHeaderOnly: headerColor = opts.Color - } else { + case opts.ColorHeaderAndFields: + fieldColor = opts.Color + headerColor = opts.Color + default: primaryColor = opts.Color - headerColor = ColorOff } l := &intLogger{ @@ -137,6 +151,7 @@ func newLogger(opts *LoggerOptions) *intLogger { exclude: opts.Exclude, independentLevels: opts.IndependentLevels, headerColor: headerColor, + fieldColor: fieldColor, } if opts.IncludeLocation { l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset @@ -160,7 +175,7 @@ func newLogger(opts *LoggerOptions) *intLogger { } // offsetIntLogger is the stack frame offset in the call stack for the caller to -// one of the Warn,Info,Log,etc methods. +// one of the Warn, Info, Log, etc methods. const offsetIntLogger = 3 // Log a message and a set of key/value pairs if the given level is at @@ -235,7 +250,17 @@ func needsQuoting(str string) bool { return false } -// Non-JSON logging format function +// logPlain is the non-JSON logging format function which writes directly +// to the underlying writer the logger was initialized with. +// +// If the logger was initialized with a color function, it also handles +// applying the color to the log message. +// +// Color Options +// 1. No color. +// 2. Color the whole log line, based on the level. +// 3. Color only the header (level) part of the log line. +// 4. Color both the header and fields of the log line. 
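// Illustrative sketch, not part of the vendored changes: the new
// ColorHeaderAndFields option and the GetLevel accessor added in this upgrade
// can be exercised as below. newDemoLogger is a hypothetical helper; hclog.New,
// hclog.AutoColor and hclog.Debug come from the existing go-hclog API.
func newDemoLogger() hclog.Logger {
	logger := hclog.New(&hclog.LoggerOptions{
		Name:                 "provider",
		Level:                hclog.Debug,
		Color:                hclog.AutoColor,
		ColorHeaderAndFields: true, // colorize the header and the key side of key=value pairs
	})
	// Level values increase with severity, so <= Debug means debug output is enabled.
	if logger.GetLevel() <= hclog.Debug {
		// Multi-line values are written one row per line, prefixed with " | ",
		// and non-printable characters are escaped rather than emitted raw.
		logger.Debug("plugin output", "stderr", "line one\nline two")
	}
	return logger
}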
func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { if !l.disableTime { @@ -269,16 +294,19 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, if name != "" { l.writer.WriteString(name) - l.writer.WriteString(": ") + if msg != "" { + l.writer.WriteString(": ") + l.writer.WriteString(msg) + } + } else if msg != "" { + l.writer.WriteString(msg) } - l.writer.WriteString(msg) - args = append(l.implied, args...) var stacktrace CapturedStacktrace - if args != nil && len(args) > 0 { + if len(args) > 0 { if len(args)%2 != 0 { cs, ok := args[len(args)-1].(CapturedStacktrace) if ok { @@ -292,13 +320,16 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, l.writer.WriteByte(':') + // Handle the field arguments, which come in pairs (key=val). FOR: for i := 0; i < len(args); i = i + 2 { var ( + key string val string raw bool ) + // Convert the field value to a string. switch st := args[i+1].(type) { case string: val = st @@ -350,8 +381,7 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, } } - var key string - + // Convert the field key to a string. switch st := args[i].(type) { case string: key = st @@ -359,21 +389,49 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, key = fmt.Sprintf("%s", st) } + // Optionally apply the ANSI "faint" and "bold" + // SGR values to the key. + if l.fieldColor != ColorOff { + key = faintBoldColor.Sprint(key) + } + + // Values may contain multiple lines, and that format + // is preserved, with each line prefixed with a " | " + // to show it's part of a collection of lines. + // + // Values may also need quoting, if not all the runes + // in the value string are "normal", like if they + // contain ANSI escape sequences. 
if strings.Contains(val, "\n") { l.writer.WriteString("\n ") l.writer.WriteString(key) - l.writer.WriteString("=\n") - writeIndent(l.writer, val, " | ") + if l.fieldColor != ColorOff { + l.writer.WriteString(faintFieldSeparatorWithNewLine) + writeIndent(l.writer, val, faintMultiLinePrefix) + } else { + l.writer.WriteString("=\n") + writeIndent(l.writer, val, " | ") + } l.writer.WriteString(" ") } else if !raw && needsQuoting(val) { l.writer.WriteByte(' ') l.writer.WriteString(key) - l.writer.WriteByte('=') - l.writer.WriteString(strconv.Quote(val)) + if l.fieldColor != ColorOff { + l.writer.WriteString(faintFieldSeparator) + } else { + l.writer.WriteByte('=') + } + l.writer.WriteByte('"') + writeEscapedForOutput(l.writer, val, true) + l.writer.WriteByte('"') } else { l.writer.WriteByte(' ') l.writer.WriteString(key) - l.writer.WriteByte('=') + if l.fieldColor != ColorOff { + l.writer.WriteString(faintFieldSeparator) + } else { + l.writer.WriteByte('=') + } l.writer.WriteString(val) } } @@ -393,19 +451,98 @@ func writeIndent(w *writer, str string, indent string) { if nl == -1 { if str != "" { w.WriteString(indent) - w.WriteString(str) + writeEscapedForOutput(w, str, false) w.WriteString("\n") } return } w.WriteString(indent) - w.WriteString(str[:nl]) + writeEscapedForOutput(w, str[:nl], false) w.WriteString("\n") str = str[nl+1:] } } +func needsEscaping(str string) bool { + for _, b := range str { + if !unicode.IsPrint(b) || b == '"' { + return true + } + } + + return false +} + +const ( + lowerhex = "0123456789abcdef" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +func writeEscapedForOutput(w io.Writer, str string, escapeQuotes bool) { + if !needsEscaping(str) { + w.Write([]byte(str)) + return + } + + bb := bufPool.Get().(*bytes.Buffer) + bb.Reset() + + defer bufPool.Put(bb) + + for _, r := range str { + if escapeQuotes && r == '"' { + bb.WriteString(`\"`) + } else if unicode.IsPrint(r) { + bb.WriteRune(r) + } else { + switch r { + case '\a': + bb.WriteString(`\a`) + case '\b': + bb.WriteString(`\b`) + case '\f': + bb.WriteString(`\f`) + case '\n': + bb.WriteString(`\n`) + case '\r': + bb.WriteString(`\r`) + case '\t': + bb.WriteString(`\t`) + case '\v': + bb.WriteString(`\v`) + default: + switch { + case r < ' ': + bb.WriteString(`\x`) + bb.WriteByte(lowerhex[byte(r)>>4]) + bb.WriteByte(lowerhex[byte(r)&0xF]) + case !utf8.ValidRune(r): + r = 0xFFFD + fallthrough + case r < 0x10000: + bb.WriteString(`\u`) + for s := 12; s >= 0; s -= 4 { + bb.WriteByte(lowerhex[r>>uint(s)&0xF]) + } + default: + bb.WriteString(`\U`) + for s := 28; s >= 0; s -= 4 { + bb.WriteByte(lowerhex[r>>uint(s)&0xF]) + } + } + } + } + } + + w.Write(bb.Bytes()) +} + func (l *intLogger) renderSlice(v reflect.Value) string { var buf bytes.Buffer @@ -707,6 +844,11 @@ func (l *intLogger) SetLevel(level Level) { atomic.StoreInt32(l.level, int32(level)) } +// Returns the current level +func (l *intLogger) GetLevel() Level { + return Level(atomic.LoadInt32(l.level)) +} + // Create a *log.Logger that will send it's data through this Logger. This // allows packages that expect to be using the standard library log to actually // use this logger. diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index 8581430284..3cdb2837d7 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -9,7 +9,7 @@ import ( ) var ( - //DefaultOutput is used as the default log output. 
+ // DefaultOutput is used as the default log output. DefaultOutput io.Writer = os.Stderr // DefaultLevel is used as the default log level. @@ -28,7 +28,7 @@ const ( // of actions in code, such as function enters/exits, etc. Trace Level = 1 - // Debug information for programmer lowlevel analysis. + // Debug information for programmer low-level analysis. Debug Level = 2 // Info information about steady state operations. @@ -44,13 +44,13 @@ const ( Off Level = 6 ) -// Format is a simple convience type for when formatting is required. When +// Format is a simple convenience type for when formatting is required. When // processing a value of this type, the logger automatically treats the first // argument as a Printf formatting string and passes the rest as the values // to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}). type Format []interface{} -// Fmt returns a Format type. This is a convience function for creating a Format +// Fmt returns a Format type. This is a convenience function for creating a Format // type. func Fmt(str string, args ...interface{}) Format { return append(Format{str}, args...) @@ -134,7 +134,7 @@ func (l Level) String() string { } } -// Logger describes the interface that must be implemeted by all loggers. +// Logger describes the interface that must be implemented by all loggers. type Logger interface { // Args are alternating key, val pairs // keys must be strings @@ -198,6 +198,9 @@ type Logger interface { // implementation cannot update the level on the fly, it should no-op. SetLevel(level Level) + // Returns the current level + GetLevel() Level + // Return a value that conforms to the stdlib log.Logger interface StandardLogger(opts *StandardLoggerOptions) *log.Logger @@ -236,7 +239,7 @@ type LoggerOptions struct { // Name of the subsystem to prefix logs with Name string - // The threshold for the logger. Anything less severe is supressed + // The threshold for the logger. Anything less severe is suppressed Level Level // Where to write the logs to. Defaults to os.Stderr if nil @@ -267,13 +270,17 @@ type LoggerOptions struct { // because setting TimeFormat to empty assumes the default format. DisableTime bool - // Color the output. On Windows, colored logs are only avaiable for io.Writers that + // Color the output. On Windows, colored logs are only available for io.Writers that // are concretely instances of *os.File. Color ColorOption // Only color the header, not the body. This can help with readability of long messages. ColorHeaderOnly bool + // Color the header and message body fields. This can help with readability + // of long messages with multiple fields. + ColorHeaderAndFields bool + // A function which is called with the log information and if it returns true the value // should not be logged. // This is useful when interacting with a system that you wish to suppress the log @@ -282,8 +289,8 @@ type LoggerOptions struct { // IndependentLevels causes subloggers to be created with an independent // copy of this logger's level. This means that using SetLevel on this - // logger will not effect any subloggers, and SetLevel on any subloggers - // will not effect the parent or sibling loggers. + // logger will not affect any subloggers, and SetLevel on any subloggers + // will not affect the parent or sibling loggers. 
IndependentLevels bool } diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go index bc14f77080..55e89dd31c 100644 --- a/vendor/github.com/hashicorp/go-hclog/nulllogger.go +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -49,6 +49,8 @@ func (l *nullLogger) ResetNamed(name string) Logger { return l } func (l *nullLogger) SetLevel(level Level) {} +func (l *nullLogger) GetLevel() Level { return NoLevel } + func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { return log.New(l.StandardWriter(opts), "", log.LstdFlags) } diff --git a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md index 8341962886..d40ad61361 100644 --- a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md +++ b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md @@ -1,3 +1,15 @@ +## v1.4.8 + +BUG FIXES: + +* Fix windows build: [[GH-227](https://github.com/hashicorp/go-plugin/pull/227)] + +## v1.4.7 + +ENHANCEMENTS: + +* More detailed error message on plugin start failure: [[GH-223](https://github.com/hashicorp/go-plugin/pull/223)] + ## v1.4.6 BUG FIXES: diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go index 2e86f6213e..d0baf7e8d7 100644 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -26,6 +26,14 @@ import ( "google.golang.org/grpc" ) +const unrecognizedRemotePluginMessage = `Unrecognized remote plugin message: %s +This usually means + the plugin was not compiled for this architecture, + the plugin is missing dynamic-link libraries necessary to run, + the plugin is not executable by this process due to file permissions, or + the plugin failed to negotiate the initial go-plugin protocol handshake +%s` + // If this is 1, then we've called CleanupClients. This can be used // by plugin RPC implementations to change error behavior since you // can expected network connection errors at this point. This should be @@ -473,7 +481,17 @@ func (c *Client) Kill() { c.l.Unlock() } -// Starts the underlying subprocess, communicating with it to negotiate +// peTypes is a list of Portable Executable (PE) machine types from https://learn.microsoft.com/en-us/windows/win32/debug/pe-format +// mapped to GOARCH types. It is not comprehensive, and only includes machine types that Go supports. +var peTypes = map[uint16]string{ + 0x14c: "386", + 0x1c0: "arm", + 0x6264: "loong64", + 0x8664: "amd64", + 0xaa64: "arm64", +} + +// Start the underlying subprocess, communicating with it to negotiate // a port for RPC connections, and returning the address to connect via RPC. // // This method is safe to call multiple times. Subsequent calls have no effect. 
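// Taken together with the notes_unix.go and notes_windows.go files added below,
// an unparseable handshake line now yields an error built from
// unrecognizedRemotePluginMessage plus additionalNotesAboutCommand(cmd.Path):
// the plugin's path, file mode, owner and group (compared with the current
// process on Unix), and its ELF/Mach-O/PE architecture compared with
// runtime.GOARCH, which makes wrong-architecture and permission failures far
// easier to diagnose.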
@@ -697,10 +715,7 @@ func (c *Client) Start() (addr net.Addr, err error) { line = strings.TrimSpace(line) parts := strings.SplitN(line, "|", 6) if len(parts) < 4 { - err = fmt.Errorf( - "Unrecognized remote plugin message: %s\n\n"+ - "This usually means that the plugin is either invalid or simply\n"+ - "needs to be recompiled to support the latest protocol.", line) + err = fmt.Errorf(unrecognizedRemotePluginMessage, line, additionalNotesAboutCommand(cmd.Path)) return } diff --git a/vendor/github.com/hashicorp/go-plugin/notes_unix.go b/vendor/github.com/hashicorp/go-plugin/notes_unix.go new file mode 100644 index 0000000000..dae1c411de --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/notes_unix.go @@ -0,0 +1,64 @@ +//go:build !windows +// +build !windows + +package plugin + +import ( + "debug/elf" + "debug/macho" + "debug/pe" + "fmt" + "os" + "os/user" + "runtime" + "strconv" + "syscall" +) + +// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose +// why it won't run correctly. It runs as a best effort only. +func additionalNotesAboutCommand(path string) string { + notes := "" + stat, err := os.Stat(path) + if err != nil { + return notes + } + + notes += "\nAdditional notes about plugin:\n" + notes += fmt.Sprintf(" Path: %s\n", path) + notes += fmt.Sprintf(" Mode: %s\n", stat.Mode()) + statT, ok := stat.Sys().(*syscall.Stat_t) + if ok { + currentUsername := "?" + if u, err := user.LookupId(strconv.FormatUint(uint64(os.Getuid()), 10)); err == nil { + currentUsername = u.Username + } + currentGroup := "?" + if g, err := user.LookupGroupId(strconv.FormatUint(uint64(os.Getgid()), 10)); err == nil { + currentGroup = g.Name + } + username := "?" + if u, err := user.LookupId(strconv.FormatUint(uint64(statT.Uid), 10)); err == nil { + username = u.Username + } + group := "?" + if g, err := user.LookupGroupId(strconv.FormatUint(uint64(statT.Gid), 10)); err == nil { + group = g.Name + } + notes += fmt.Sprintf(" Owner: %d [%s] (current: %d [%s])\n", statT.Uid, username, os.Getuid(), currentUsername) + notes += fmt.Sprintf(" Group: %d [%s] (current: %d [%s])\n", statT.Gid, group, os.Getgid(), currentGroup) + } + + if elfFile, err := elf.Open(path); err == nil { + notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH) + } else if machoFile, err := macho.Open(path); err == nil { + notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH) + } else if peFile, err := pe.Open(path); err == nil { + machine, ok := peTypes[peFile.Machine] + if !ok { + machine = "unknown" + } + notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH) + } + return notes +} diff --git a/vendor/github.com/hashicorp/go-plugin/notes_windows.go b/vendor/github.com/hashicorp/go-plugin/notes_windows.go new file mode 100644 index 0000000000..900b93319c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/notes_windows.go @@ -0,0 +1,40 @@ +//go:build windows +// +build windows + +package plugin + +import ( + "debug/elf" + "debug/macho" + "debug/pe" + "fmt" + "os" + "runtime" +) + +// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose +// why it won't run correctly. It runs as a best effort only. 
+func additionalNotesAboutCommand(path string) string { + notes := "" + stat, err := os.Stat(path) + if err != nil { + return notes + } + + notes += "\nAdditional notes about plugin:\n" + notes += fmt.Sprintf(" Path: %s\n", path) + notes += fmt.Sprintf(" Mode: %s\n", stat.Mode()) + + if elfFile, err := elf.Open(path); err == nil { + notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH) + } else if machoFile, err := macho.Open(path); err == nil { + notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH) + } else if peFile, err := pe.Open(path); err == nil { + machine, ok := peTypes[peFile.Machine] + if !ok { + machine = "unknown" + } + notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH) + } + return notes +} diff --git a/vendor/github.com/hashicorp/hc-install/.goreleaser.yml b/vendor/github.com/hashicorp/hc-install/.goreleaser.yml deleted file mode 100644 index 5e5832867c..0000000000 --- a/vendor/github.com/hashicorp/hc-install/.goreleaser.yml +++ /dev/null @@ -1,29 +0,0 @@ -project_name: tfinstall -builds: - - env: - - CGO_ENABLED=0 - main: ./cmd/hcinstall/main.go - mod_timestamp: '{{ .CommitTimestamp }}' - id: "tfinstall" - binary: tfinstall - flags: - - -trimpath - ldflags: - - '-s -w -X main.version={{.Version}} -X main.commit={{.Commit}}' - goos: - - linux - - darwin - - windows - goarch: - - amd64 - - arm - - arm64 -archives: - - files: [] - format: zip - name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}' -checksum: - name_template: '{{ .ProjectName }}_{{ .Version }}_SHA256SUMS' - algorithm: sha256 -changelog: - skip: true diff --git a/vendor/github.com/hashicorp/hc-install/LICENSE b/vendor/github.com/hashicorp/hc-install/LICENSE index a612ad9813..c121cee6e5 100644 --- a/vendor/github.com/hashicorp/hc-install/LICENSE +++ b/vendor/github.com/hashicorp/hc-install/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2020 HashiCorp, Inc. 
+ Mozilla Public License Version 2.0 ================================== diff --git a/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go b/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go index 04fa241609..2d52b339f9 100644 --- a/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go +++ b/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go @@ -123,7 +123,10 @@ func (lv *LatestVersion) Install(ctx context.Context) (string, error) { if lv.ArmoredPublicKey != "" { d.ArmoredPublicKey = lv.ArmoredPublicKey } - err = d.DownloadAndUnpack(ctx, pv, dstDir) + zipFilePath, err := d.DownloadAndUnpack(ctx, pv, dstDir) + if zipFilePath != "" { + lv.pathsToRemove = append(lv.pathsToRemove, zipFilePath) + } if err != nil { return "", err } diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go b/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go index 2f3f832544..9581c322cc 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go +++ b/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go @@ -11,6 +11,7 @@ import ( "path/filepath" "github.com/hashicorp/go-version" + "golang.org/x/mod/modfile" ) var discardLogger = log.New(ioutil.Discard, "", 0) @@ -37,23 +38,47 @@ func (gb *GoBuild) log() *log.Logger { // Build runs "go build" within a given repo to produce binaryName in targetDir func (gb *GoBuild) Build(ctx context.Context, repoDir, targetDir, binaryName string) (string, error) { - goCmd, cleanupFunc, err := gb.ensureRequiredGoVersion(ctx, repoDir) + reqGo, err := gb.ensureRequiredGoVersion(ctx, repoDir) if err != nil { return "", err } - defer cleanupFunc(ctx) + defer reqGo.CleanupFunc(ctx) - goArgs := []string{"build", "-o", filepath.Join(targetDir, binaryName)} + if reqGo.Version == nil { + gb.logger.Println("building using default available Go") + } else { + gb.logger.Printf("building using Go %s", reqGo.Version) + } + + // `go build` would download dependencies as a side effect, but we attempt + // to do it early in a separate step, such that we can easily distinguish + // network failures from build failures. + // + // Note, that `go mod download` was introduced in Go 1.11 + // See https://github.com/golang/go/commit/9f4ea6c2 + minGoVersion := version.Must(version.NewVersion("1.11")) + if reqGo.Version.GreaterThanOrEqual(minGoVersion) { + downloadArgs := []string{"mod", "download"} + gb.log().Printf("executing %s %q in %q", reqGo.Cmd, downloadArgs, repoDir) + cmd := exec.CommandContext(ctx, reqGo.Cmd, downloadArgs...) + cmd.Dir = repoDir + out, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("unable to download dependencies: %w\n%s", err, out) + } + } + + buildArgs := []string{"build", "-o", filepath.Join(targetDir, binaryName)} if gb.DetectVendoring { vendorDir := filepath.Join(repoDir, "vendor") if fi, err := os.Stat(vendorDir); err == nil && fi.IsDir() { - goArgs = append(goArgs, "-mod", "vendor") + buildArgs = append(buildArgs, "-mod", "vendor") } } - gb.log().Printf("executing %s %q in %q", goCmd, goArgs, repoDir) - cmd := exec.CommandContext(ctx, goCmd, goArgs...) + gb.log().Printf("executing %s %q in %q", reqGo.Cmd, buildArgs, repoDir) + cmd := exec.CommandContext(ctx, reqGo.Cmd, buildArgs...) 
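// The build now happens in two steps, roughly equivalent to:
//
//	go mod download                                      (on Go 1.11+, so network failures surface separately)
//	go build -o <targetDir>/<binaryName> [-mod vendor]   (-mod vendor only when a vendor/ directory is present)
//
// both executed with the go command selected by ensureRequiredGoVersion (reqGo.Cmd).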
cmd.Dir = repoDir out, err := cmd.CombinedOutput() if err != nil { @@ -71,35 +96,59 @@ func (gb *GoBuild) Remove(ctx context.Context) error { return os.RemoveAll(gb.pathToRemove) } -func (gb *GoBuild) ensureRequiredGoVersion(ctx context.Context, repoDir string) (string, CleanupFunc, error) { +type Go struct { + Cmd string + CleanupFunc CleanupFunc + Version *version.Version +} + +func (gb *GoBuild) ensureRequiredGoVersion(ctx context.Context, repoDir string) (Go, error) { cmdName := "go" noopCleanupFunc := func(context.Context) {} + var installedVersion *version.Version + if gb.Version != nil { + gb.logger.Printf("attempting to satisfy explicit requirement for Go %s", gb.Version) goVersion, err := GetGoVersion(ctx) if err != nil { - return cmdName, noopCleanupFunc, err + return Go{ + Cmd: cmdName, + CleanupFunc: noopCleanupFunc, + }, err } if !goVersion.GreaterThanOrEqual(gb.Version) { // found incompatible version, try downloading the desired one return gb.installGoVersion(ctx, gb.Version) } + installedVersion = goVersion } if requiredVersion, ok := guessRequiredGoVersion(repoDir); ok { + gb.logger.Printf("attempting to satisfy guessed Go requirement %s", requiredVersion) goVersion, err := GetGoVersion(ctx) if err != nil { - return cmdName, noopCleanupFunc, err + return Go{ + Cmd: cmdName, + CleanupFunc: noopCleanupFunc, + }, err } if !goVersion.GreaterThanOrEqual(requiredVersion) { // found incompatible version, try downloading the desired one return gb.installGoVersion(ctx, requiredVersion) } + installedVersion = goVersion + } else { + gb.logger.Println("unable to guess Go requirement") } - return cmdName, noopCleanupFunc, nil + return Go{ + Cmd: cmdName, + CleanupFunc: noopCleanupFunc, + Version: installedVersion, + }, nil } // CleanupFunc represents a function to be called once Go is no longer needed @@ -119,5 +168,26 @@ func guessRequiredGoVersion(repoDir string) (*version.Version, bool) { } return requiredVersion, true } + + goModFile := filepath.Join(repoDir, "go.mod") + if fi, err := os.Stat(goModFile); err == nil && !fi.IsDir() { + b, err := ioutil.ReadFile(goModFile) + if err != nil { + return nil, false + } + f, err := modfile.ParseLax(fi.Name(), b, nil) + if err != nil { + return nil, false + } + if f.Go == nil { + return nil, false + } + requiredVersion, err := version.NewVersion(f.Go.Version) + if err != nil { + return nil, false + } + return requiredVersion, true + } + return nil, false } diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/install_go_version.go b/vendor/github.com/hashicorp/hc-install/internal/build/install_go_version.go index f97c859dc1..a6610e076f 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/build/install_go_version.go +++ b/vendor/github.com/hashicorp/hc-install/internal/build/install_go_version.go @@ -12,28 +12,36 @@ import ( // installGoVersion installs given version of Go using Go // according to https://golang.org/doc/manage-install -func (gb *GoBuild) installGoVersion(ctx context.Context, v *version.Version) (string, CleanupFunc, error) { - // trim 0 patch versions as that's how Go does it :shrug: - shortVersion := strings.TrimSuffix(v.String(), ".0") +func (gb *GoBuild) installGoVersion(ctx context.Context, v *version.Version) (Go, error) { + versionString := v.Core().String() + // trim 0 patch versions as that's how Go does it :shrug: + shortVersion := strings.TrimSuffix(versionString, ".0") pkgURL := fmt.Sprintf("golang.org/dl/go%s", shortVersion) gb.log().Printf("go getting %q", pkgURL) cmd := 
exec.CommandContext(ctx, "go", "get", pkgURL) out, err := cmd.CombinedOutput() if err != nil { - return "", nil, fmt.Errorf("unable to install Go %s: %w\n%s", v, err, out) + return Go{}, fmt.Errorf("unable to get Go %s: %w\n%s", v, err, out) + } + + gb.log().Printf("go installing %q", pkgURL) + cmd = exec.CommandContext(ctx, "go", "install", pkgURL) + out, err = cmd.CombinedOutput() + if err != nil { + return Go{}, fmt.Errorf("unable to install Go %s: %w\n%s", v, err, out) } cmdName := fmt.Sprintf("go%s", shortVersion) - gb.log().Printf("downloading go %q", shortVersion) + gb.log().Printf("downloading go %q", v) cmd = exec.CommandContext(ctx, cmdName, "download") out, err = cmd.CombinedOutput() if err != nil { - return "", nil, fmt.Errorf("unable to download Go %s: %w\n%s", v, err, out) + return Go{}, fmt.Errorf("unable to download Go %s: %w\n%s", v, err, out) } - gb.log().Printf("download of go %q finished", shortVersion) + gb.log().Printf("download of go %q finished", v) cleanupFunc := func(ctx context.Context) { cmd = exec.CommandContext(ctx, cmdName, "env", "GOROOT") @@ -49,5 +57,9 @@ func (gb *GoBuild) installGoVersion(ctx context.Context, v *version.Version) (st } } - return cmdName, cleanupFunc, nil + return Go{ + Cmd: cmdName, + CleanupFunc: cleanupFunc, + Version: v, + }, nil } diff --git a/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go b/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go index 159f705058..0ae6ae4af8 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go +++ b/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go @@ -5,7 +5,7 @@ import ( "net/http" "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/hc-install/internal/version" + "github.com/hashicorp/hc-install/version" ) // NewHTTPClient provides a pre-configured http.Client @@ -13,7 +13,7 @@ import ( func NewHTTPClient() *http.Client { client := cleanhttp.DefaultClient() - userAgent := fmt.Sprintf("hc-install/%s", version.ModuleVersion()) + userAgent := fmt.Sprintf("hc-install/%s", version.Version()) cli := cleanhttp.DefaultPooledClient() cli.Transport = &userAgentRoundTripper{ diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go index 2b85ea3449..c012a20aab 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go @@ -2,11 +2,13 @@ package releasesjson import ( "bytes" + "context" "crypto/sha256" "encoding/hex" "fmt" "io" "log" + "net/http" "net/url" "strings" @@ -42,7 +44,7 @@ func HashSumFromHexDigest(hexDigest string) (HashSum, error) { return HashSum(sumBytes), nil } -func (cd *ChecksumDownloader) DownloadAndVerifyChecksums() (ChecksumFileMap, error) { +func (cd *ChecksumDownloader) DownloadAndVerifyChecksums(ctx context.Context) (ChecksumFileMap, error) { sigFilename, err := cd.findSigFilename(cd.ProductVersion) if err != nil { return nil, err @@ -54,7 +56,12 @@ func (cd *ChecksumDownloader) DownloadAndVerifyChecksums() (ChecksumFileMap, err url.PathEscape(cd.ProductVersion.RawVersion), url.PathEscape(sigFilename)) cd.Logger.Printf("downloading signature from %s", sigURL) - sigResp, err := client.Get(sigURL) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, sigURL, nil) + if err != nil { + return nil, fmt.Errorf("failed to create 
request for %q: %w", sigURL, err) + } + sigResp, err := client.Do(req) if err != nil { return nil, err } @@ -70,7 +77,12 @@ func (cd *ChecksumDownloader) DownloadAndVerifyChecksums() (ChecksumFileMap, err url.PathEscape(cd.ProductVersion.RawVersion), url.PathEscape(cd.ProductVersion.SHASUMS)) cd.Logger.Printf("downloading checksums from %s", shasumsURL) - sumsResp, err := client.Get(shasumsURL) + + req, err = http.NewRequestWithContext(ctx, http.MethodGet, shasumsURL, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request for %q: %w", shasumsURL, err) + } + sumsResp, err := client.Do(req) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go index e9cd94e43f..8b2097d761 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go @@ -8,10 +8,12 @@ import ( "io" "io/ioutil" "log" + "net/http" "net/url" "os" "path/filepath" "runtime" + "strings" "github.com/hashicorp/hc-install/internal/httpclient" ) @@ -23,14 +25,14 @@ type Downloader struct { BaseURL string } -func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, dstDir string) error { +func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, dstDir string) (zipFilePath string, err error) { if len(pv.Builds) == 0 { - return fmt.Errorf("no builds found for %s %s", pv.Name, pv.Version) + return "", fmt.Errorf("no builds found for %s %s", pv.Name, pv.Version) } pb, ok := pv.Builds.FilterBuild(runtime.GOOS, runtime.GOARCH, "zip") if !ok { - return fmt.Errorf("no ZIP archive found for %s %s %s/%s", + return "", fmt.Errorf("no ZIP archive found for %s %s %s/%s", pv.Name, pv.Version, runtime.GOOS, runtime.GOARCH) } @@ -42,14 +44,14 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, Logger: d.Logger, ArmoredPublicKey: d.ArmoredPublicKey, } - verifiedChecksums, err := v.DownloadAndVerifyChecksums() + verifiedChecksums, err := v.DownloadAndVerifyChecksums(ctx) if err != nil { - return err + return "", err } var ok bool verifiedChecksum, ok = verifiedChecksums[pb.Filename] if !ok { - return fmt.Errorf("no checksum found for %q", pb.Filename) + return "", fmt.Errorf("no checksum found for %q", pb.Filename) } } @@ -61,12 +63,12 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, // are still pointing to the mock server if one is set baseURL, err := url.Parse(d.BaseURL) if err != nil { - return err + return "", err } u, err := url.Parse(archiveURL) if err != nil { - return err + return "", err } u.Scheme = baseURL.Scheme u.Host = baseURL.Host @@ -74,13 +76,18 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, } d.Logger.Printf("downloading archive from %s", archiveURL) - resp, err := client.Get(archiveURL) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, archiveURL, nil) + if err != nil { + return "", fmt.Errorf("failed to create request for %q: %w", archiveURL, err) + } + resp, err := client.Do(req) if err != nil { - return err + return "", err } if resp.StatusCode != 200 { - return fmt.Errorf("failed to download ZIP archive from %q: %s", archiveURL, resp.Status) + return "", fmt.Errorf("failed to download ZIP archive from %q: %s", archiveURL, resp.Status) } defer resp.Body.Close() @@ -90,7 +97,7 @@ func (d *Downloader) 
DownloadAndUnpack(ctx context.Context, pv *ProductVersion, contentType := resp.Header.Get("content-type") if !contentTypeIsZip(contentType) { - return fmt.Errorf("unexpected content-type: %s (expected any of %q)", + return "", fmt.Errorf("unexpected content-type: %s (expected any of %q)", contentType, zipMimeTypes) } @@ -105,15 +112,16 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, err := compareChecksum(d.Logger, r, verifiedChecksum, pb.Filename, expectedSize) if err != nil { - return err + return "", err } } pkgFile, err := ioutil.TempFile("", pb.Filename) if err != nil { - return err + return "", err } defer pkgFile.Close() + pkgFilePath, err := filepath.Abs(pkgFile.Name()) d.Logger.Printf("copying %q (%d bytes) to %s", pb.Filename, expectedSize, pkgFile.Name()) // Unless the bytes were already downloaded above for checksum verification @@ -122,43 +130,48 @@ func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, // on demand over the network. bytesCopied, err := io.Copy(pkgFile, pkgReader) if err != nil { - return err + return pkgFilePath, err } d.Logger.Printf("copied %d bytes to %s", bytesCopied, pkgFile.Name()) if expectedSize != 0 && bytesCopied != int64(expectedSize) { - return fmt.Errorf("unexpected size (downloaded: %d, expected: %d)", + return pkgFilePath, fmt.Errorf("unexpected size (downloaded: %d, expected: %d)", bytesCopied, expectedSize) } r, err := zip.OpenReader(pkgFile.Name()) if err != nil { - return err + return pkgFilePath, err } defer r.Close() for _, f := range r.File { + if strings.Contains(f.Name, "..") { + // While we generally trust the source ZIP file + // we still reject path traversal attempts as a precaution. + continue + } srcFile, err := f.Open() if err != nil { - return err + return pkgFilePath, err } d.Logger.Printf("unpacking %s to %s", f.Name, dstDir) dstPath := filepath.Join(dstDir, f.Name) dstFile, err := os.Create(dstPath) if err != nil { - return err + return pkgFilePath, err } _, err = io.Copy(dstFile, srcFile) if err != nil { - return err + return pkgFilePath, err } srcFile.Close() dstFile.Close() } - return nil + return pkgFilePath, nil } // The production release site uses consistent single mime type diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go index 849f16a526..b100a8d2b5 100644 --- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go +++ b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go @@ -6,6 +6,7 @@ import ( "fmt" "io/ioutil" "log" + "net/http" "net/url" "strings" @@ -68,7 +69,11 @@ func (r *Releases) ListProductVersions(ctx context.Context, productName string) url.PathEscape(productName)) r.logger.Printf("requesting versions from %s", productIndexURL) - resp, err := client.Get(productIndexURL) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, productIndexURL, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request for %q: %w", productIndexURL, err) + } + resp, err := client.Do(req) if err != nil { return nil, err } @@ -133,7 +138,11 @@ func (r *Releases) GetProductVersion(ctx context.Context, product string, versio url.PathEscape(version.String())) r.logger.Printf("requesting version from %s", indexURL) - resp, err := client.Get(indexURL) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, indexURL, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request for %q: 
%w", indexURL, err) + } + resp, err := client.Do(req) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/hc-install/internal/version/version.go b/vendor/github.com/hashicorp/hc-install/internal/version/version.go deleted file mode 100644 index d8bc462c50..0000000000 --- a/vendor/github.com/hashicorp/hc-install/internal/version/version.go +++ /dev/null @@ -1,9 +0,0 @@ -package version - -const version = "0.1.0" - -// ModuleVersion returns the current version of the github.com/hashicorp/hc-install Go module. -// This is a function to allow for future possible enhancement using debug.BuildInfo. -func ModuleVersion() string { - return version -} diff --git a/vendor/github.com/hashicorp/hc-install/product/consul.go b/vendor/github.com/hashicorp/hc-install/product/consul.go index aeeac94692..72e490e2f3 100644 --- a/vendor/github.com/hashicorp/hc-install/product/consul.go +++ b/vendor/github.com/hashicorp/hc-install/product/consul.go @@ -50,6 +50,6 @@ var Consul = Product{ BuildInstructions: &BuildInstructions{ GitRepoURL: "https://github.com/hashicorp/consul.git", PreCloneCheck: &build.GoIsInstalled{}, - Build: &build.GoBuild{Version: v1_18}, + Build: &build.GoBuild{}, }, } diff --git a/vendor/github.com/hashicorp/hc-install/product/vault.go b/vendor/github.com/hashicorp/hc-install/product/vault.go index d03bc4fabc..f00827bfec 100644 --- a/vendor/github.com/hashicorp/hc-install/product/vault.go +++ b/vendor/github.com/hashicorp/hc-install/product/vault.go @@ -14,7 +14,6 @@ import ( var ( vaultVersionOutputRe = regexp.MustCompile(`Vault ` + simpleVersionRe) - v1_17 = version.Must(version.NewVersion("1.17")) ) var Vault = Product{ @@ -49,6 +48,6 @@ var Vault = Product{ BuildInstructions: &BuildInstructions{ GitRepoURL: "https://github.com/hashicorp/vault.git", PreCloneCheck: &build.GoIsInstalled{}, - Build: &build.GoBuild{Version: v1_17}, + Build: &build.GoBuild{}, }, } diff --git a/vendor/github.com/hashicorp/hc-install/releases/exact_version.go b/vendor/github.com/hashicorp/hc-install/releases/exact_version.go index 7fe2cb56ed..7e4091ef7e 100644 --- a/vendor/github.com/hashicorp/hc-install/releases/exact_version.go +++ b/vendor/github.com/hashicorp/hc-install/releases/exact_version.go @@ -115,7 +115,10 @@ func (ev *ExactVersion) Install(ctx context.Context) (string, error) { d.BaseURL = ev.apiBaseURL } - err = d.DownloadAndUnpack(ctx, pv, dstDir) + zipFilePath, err := d.DownloadAndUnpack(ctx, pv, dstDir) + if zipFilePath != "" { + ev.pathsToRemove = append(ev.pathsToRemove, zipFilePath) + } if err != nil { return "", err } diff --git a/vendor/github.com/hashicorp/hc-install/releases/latest_version.go b/vendor/github.com/hashicorp/hc-install/releases/latest_version.go index c5c1807a82..36aa5b2b3c 100644 --- a/vendor/github.com/hashicorp/hc-install/releases/latest_version.go +++ b/vendor/github.com/hashicorp/hc-install/releases/latest_version.go @@ -119,7 +119,10 @@ func (lv *LatestVersion) Install(ctx context.Context) (string, error) { if lv.apiBaseURL != "" { d.BaseURL = lv.apiBaseURL } - err = d.DownloadAndUnpack(ctx, versionToInstall, dstDir) + zipFilePath, err := d.DownloadAndUnpack(ctx, versionToInstall, dstDir) + if zipFilePath != "" { + lv.pathsToRemove = append(lv.pathsToRemove, zipFilePath) + } if err != nil { return "", err } diff --git a/vendor/github.com/hashicorp/hc-install/version/VERSION b/vendor/github.com/hashicorp/hc-install/version/VERSION new file mode 100644 index 0000000000..79a2734bbf --- /dev/null +++ 
b/vendor/github.com/hashicorp/hc-install/version/VERSION @@ -0,0 +1 @@ +0.5.0 \ No newline at end of file diff --git a/vendor/github.com/hashicorp/hc-install/version/version.go b/vendor/github.com/hashicorp/hc-install/version/version.go new file mode 100644 index 0000000000..db367e560a --- /dev/null +++ b/vendor/github.com/hashicorp/hc-install/version/version.go @@ -0,0 +1,20 @@ +package version + +import ( + _ "embed" + + "github.com/hashicorp/go-version" +) + +//go:embed VERSION +var rawVersion string + +// Version returns the version of the library +// +// Note: This is only exposed as public function/package +// due to hard-coded constraints in the release tooling. +// In general downstream should not implement version-specific +// logic and rely on this function to be present in future releases. +func Version() *version.Version { + return version.Must(version.NewVersion(rawVersion)) +} diff --git a/vendor/github.com/hashicorp/hcl/v2/.copywrite.hcl b/vendor/github.com/hashicorp/hcl/v2/.copywrite.hcl new file mode 100644 index 0000000000..35eae08227 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/.copywrite.hcl @@ -0,0 +1,16 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2014 + + # (OPTIONAL) A list of globs that should not have copyright/license headers. + # Supports doublestar glob patterns for more flexibility in defining which + # files or folders should be ignored + header_ignore = [ + "hclsyntax/fuzz/testdata/**", + "hclwrite/fuzz/testdata/**", + "json/fuzz/testdata/**", + "specsuite/tests/**", + ] +} diff --git a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md index 8bcfd834a1..daf3ad90f6 100644 --- a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md +++ b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md @@ -1,5 +1,31 @@ # HCL Changelog +## v2.16.2 (March 9, 2023) + +### Bugs Fixed + +* ext/typeexpr: Verify type assumptions when applying default values, and ignore input values that do not match type assumptions. ([#594](https://github.com/hashicorp/hcl/pull/594)) + +## v2.16.1 (February 13, 2023) + +### Bugs Fixed + +* hclsyntax: Report correct `Range.End` for `FunctionCall` with incomplete argument ([#588](https://github.com/hashicorp/hcl/pull/588)) + +## v2.16.0 (January 30, 2023) + +### Enhancements + +* ext/typeexpr: Modify the `Defaults` functionality to implement additional flexibility. HCL will now upcast lists and sets into tuples, and maps into objects, when applying default values if the applied defaults cause the elements within a target collection to have differing types. Previously, this would have resulted in a panic, now HCL will return a modified overall type. ([#574](https://github.com/hashicorp/hcl/pull/574)) + + Users should return to the advice provided by v2.14.0, and apply the go-cty convert functionality *after* setting defaults on a given `cty.Value`, rather than before. +* hclfmt: Avoid rewriting unchanged files. ([#576](https://github.com/hashicorp/hcl/pull/576)) +* hclsyntax: Simplify the AST for certain string expressions. ([#584](https://github.com/hashicorp/hcl/pull/584)) + +### Bugs Fixed + +* hclwrite: Fix data race in `formatSpaces`. 
([#511](https://github.com/hashicorp/hcl/pull/511)) + ## v2.15.0 (November 10, 2022) ### Bugs Fixed diff --git a/vendor/github.com/hashicorp/hcl/v2/LICENSE b/vendor/github.com/hashicorp/hcl/v2/LICENSE index 82b4de97c7..e25da5fad9 100644 --- a/vendor/github.com/hashicorp/hcl/v2/LICENSE +++ b/vendor/github.com/hashicorp/hcl/v2/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2014 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go index bcf4eb39c0..578f81a2c2 100644 --- a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go +++ b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go index 0b4a2629b9..bdfad42bf9 100644 --- a/vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go +++ b/vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go index 6994e2336d..92be8f1a85 100644 --- a/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go +++ b/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + //go:build go1.18 // +build go1.18 diff --git a/vendor/github.com/hashicorp/hcl/v2/didyoumean.go b/vendor/github.com/hashicorp/hcl/v2/didyoumean.go index c12833440a..fd00ca6f65 100644 --- a/vendor/github.com/hashicorp/hcl/v2/didyoumean.go +++ b/vendor/github.com/hashicorp/hcl/v2/didyoumean.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/doc.go b/vendor/github.com/hashicorp/hcl/v2/doc.go index 0d43fb2c78..a0e3119f2c 100644 --- a/vendor/github.com/hashicorp/hcl/v2/doc.go +++ b/vendor/github.com/hashicorp/hcl/v2/doc.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Package hcl contains the main modelling types and general utility functions // for HCL. // diff --git a/vendor/github.com/hashicorp/hcl/v2/eval_context.go b/vendor/github.com/hashicorp/hcl/v2/eval_context.go index 915910ad8a..921cfcb429 100644 --- a/vendor/github.com/hashicorp/hcl/v2/eval_context.go +++ b/vendor/github.com/hashicorp/hcl/v2/eval_context.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_call.go b/vendor/github.com/hashicorp/hcl/v2/expr_call.go index 6963fbae36..ca59b90d23 100644 --- a/vendor/github.com/hashicorp/hcl/v2/expr_call.go +++ b/vendor/github.com/hashicorp/hcl/v2/expr_call.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl // ExprCall tests if the given expression is a function call and, diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_list.go b/vendor/github.com/hashicorp/hcl/v2/expr_list.go index d05cca0b9a..8c0cf40518 100644 --- a/vendor/github.com/hashicorp/hcl/v2/expr_list.go +++ b/vendor/github.com/hashicorp/hcl/v2/expr_list.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package hcl // ExprList tests if the given expression is a static list construct and, diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_map.go b/vendor/github.com/hashicorp/hcl/v2/expr_map.go index 96d1ce4bfa..56cf974740 100644 --- a/vendor/github.com/hashicorp/hcl/v2/expr_map.go +++ b/vendor/github.com/hashicorp/hcl/v2/expr_map.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl // ExprMap tests if the given expression is a static map construct and, diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go b/vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go index 6d5d205c49..6683fd5444 100644 --- a/vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go +++ b/vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl type unwrapExpression interface { diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go index c9d7a1efb2..e0dda0df9e 100644 --- a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go +++ b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Package customdecode contains a HCL extension that allows, in certain // contexts, expression evaluation to be overridden by custom static analysis. // diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go index af7c66c235..2477f21907 100644 --- a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go +++ b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package customdecode import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go index 8c20286b27..43689d7409 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go index ccc1c0ae2c..5b0e468102 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go index 617bc29dc2..defe3dbb78 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Package hclsyntax contains the parser, AST, etc for HCL's native language, // as opposed to the JSON variant. 
// diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go index 358fd5d510..55fecd4eaf 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go index c1db0cecc8..6585612c10 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go index c3f96943d7..0b5ac19553 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go index a82bf790eb..ce5a5cb755 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax // Generated by expression_vars_get.go. DO NOT EDIT. diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go index f55e9ce2c2..7be626ffd6 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go index 841656a6a1..383ec6b85d 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax //go:generate go run expression_vars_gen.go diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go index eef8b9626c..5124ae95c3 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go index af98ef0451..83e1d4efb5 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go index 41b35e53f8..6ead6091c6 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go index ec83d3dc23..6ed88e6320 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( @@ -1174,7 +1177,12 @@ Token: // if there was a parse error in the argument then we've // probably been left in a weird place in the token stream, // so we'll bail out with a partial argument list. - p.recover(TokenCParen) + recoveredTok := p.recover(TokenCParen) + + // record the recovered token, if one was found + if recoveredTok.Type == TokenCParen { + closeTok = recoveredTok + } break Token } diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go index ae8805856c..3ac6829881 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( @@ -38,6 +41,7 @@ func (p *parser) parseTemplateInner(end TokenType, flushHeredoc bool) ([]Express if flushHeredoc { flushHeredocTemplateParts(parts) // Trim off leading spaces on lines per the flush heredoc spec } + meldConsecutiveStringLiterals(parts) tp := templateParser{ Tokens: parts.Tokens, SrcRange: parts.SrcRange, @@ -751,6 +755,37 @@ func flushHeredocTemplateParts(parts *templateParts) { } } +// meldConsecutiveStringLiterals simplifies the AST output by combining a +// sequence of string literal tokens into a single string literal. This must be +// performed after any whitespace trimming operations. +func meldConsecutiveStringLiterals(parts *templateParts) { + if len(parts.Tokens) == 0 { + return + } + + // Loop over all tokens starting at the second element, as we want to join + // pairs of consecutive string literals. + i := 1 + for i < len(parts.Tokens) { + if prevLiteral, ok := parts.Tokens[i-1].(*templateLiteralToken); ok { + if literal, ok := parts.Tokens[i].(*templateLiteralToken); ok { + // The current and previous tokens are both literals: combine + prevLiteral.Val = prevLiteral.Val + literal.Val + prevLiteral.SrcRange.End = literal.SrcRange.End + + // Remove the current token from the slice + parts.Tokens = append(parts.Tokens[:i], parts.Tokens[i+1:]...) + + // Continue without moving forward in the slice + continue + } + } + + // Try the next pair of tokens + i++ + } +} + type templateParts struct { Tokens []templateToken SrcRange hcl.Range diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go index 7dcb0fd341..3afa6ab064 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go index f056f906e3..74fa3fb331 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go index 0b68efd600..d56f8e50be 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go index 2895ade758..5d60ff5a5e 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + //line scan_string_lit.rl:1 package hclsyntax diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go index 794123a851..1bbbb92781 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + //line scan_tokens.rl:1 package hclsyntax diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md index 3201f54a79..6d31e35255 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md @@ -82,7 +82,7 @@ Comments serve as program documentation and come in two forms: - _Inline comments_ start with the `/*` sequence and end with the `*/` sequence, and may have any characters within except the ending sequence. - An inline comments is considered equivalent to a whitespace sequence. + An inline comment is considered equivalent to a whitespace sequence. Comments and whitespace cannot begin within other comments, or within template literals except inside an interpolation sequence or template directive. @@ -268,10 +268,10 @@ collection value. ```ebnf CollectionValue = tuple | object; tuple = "[" ( - (Expression ("," Expression)* ","?)? + (Expression (("," | Newline) Expression)* ","?)? ) "]"; object = "{" ( - (objectelem ("," objectelem)* ","?)? + (objectelem (( "," | Newline) objectelem)* ","?)? ) "}"; objectelem = (Identifier | Expression) ("=" | ":") Expression; ``` @@ -635,7 +635,7 @@ binaryOp = ExprTerm binaryOperator ExprTerm; binaryOperator = compareOperator | arithmeticOperator | logicOperator; compareOperator = "==" | "!=" | "<" | ">" | "<=" | ">="; arithmeticOperator = "+" | "-" | "*" | "/" | "%"; -logicOperator = "&&" | "||" | "!"; +logicOperator = "&&" | "||"; ``` The unary operators have the highest precedence. 
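The hclsyntax hunks above include the v2.16.1 fix that records the recovered closing parenthesis so a `FunctionCall` expression reports a correct `Range.End`. The following is a minimal sketch of how a caller can observe an expression's source range with the vendored `hclsyntax` package; the file name and expression text are illustrative assumptions, not part of this changeset.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	// Parse a standalone function call expression (illustrative input only).
	src := []byte(`min(1, 2)`)
	expr, diags := hclsyntax.ParseExpression(src, "example.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	// The reported range spans the whole call, including the closing
	// parenthesis; the v2.16.1 fix concerns how that end position is
	// recovered when an argument fails to parse.
	fmt.Println(expr.Range().String())
}
```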
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go index f42ae918ee..ff272631d4 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go index 587844ac20..5085716845 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go index 5ef093f9ad..a14b39407f 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/unicode2ragel.rb b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/unicode2ragel.rb index 422e4e5ccd..235265de96 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/unicode2ragel.rb +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/unicode2ragel.rb @@ -1,4 +1,7 @@ #!/usr/bin/env ruby +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + # # This scripted has been updated to accept more command-line arguments: # diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/variables.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/variables.go index 3d68c4139d..b4be926986 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/variables.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/variables.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/walk.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/walk.go index 89bfe676e9..82b6f9b035 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/walk.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/walk.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclsyntax import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/merged.go b/vendor/github.com/hashicorp/hcl/v2/merged.go index 96e62a58d4..27fd1ed5eb 100644 --- a/vendor/github.com/hashicorp/hcl/v2/merged.go +++ b/vendor/github.com/hashicorp/hcl/v2/merged.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/ops.go b/vendor/github.com/hashicorp/hcl/v2/ops.go index 47dc02ad3b..bdf23614d6 100644 --- a/vendor/github.com/hashicorp/hcl/v2/ops.go +++ b/vendor/github.com/hashicorp/hcl/v2/ops.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/pos.go b/vendor/github.com/hashicorp/hcl/v2/pos.go index 06db8bfbd4..1bbbce87e1 100644 --- a/vendor/github.com/hashicorp/hcl/v2/pos.go +++ b/vendor/github.com/hashicorp/hcl/v2/pos.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package hcl import "fmt" diff --git a/vendor/github.com/hashicorp/hcl/v2/pos_scanner.go b/vendor/github.com/hashicorp/hcl/v2/pos_scanner.go index 49077fc52a..cff5539270 100644 --- a/vendor/github.com/hashicorp/hcl/v2/pos_scanner.go +++ b/vendor/github.com/hashicorp/hcl/v2/pos_scanner.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/schema.go b/vendor/github.com/hashicorp/hcl/v2/schema.go index 891257acb2..d4e339cb26 100644 --- a/vendor/github.com/hashicorp/hcl/v2/schema.go +++ b/vendor/github.com/hashicorp/hcl/v2/schema.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl // BlockHeaderSchema represents the shape of a block header, and is diff --git a/vendor/github.com/hashicorp/hcl/v2/static_expr.go b/vendor/github.com/hashicorp/hcl/v2/static_expr.go index 98ada87b62..e14d7f890a 100644 --- a/vendor/github.com/hashicorp/hcl/v2/static_expr.go +++ b/vendor/github.com/hashicorp/hcl/v2/static_expr.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/structure.go b/vendor/github.com/hashicorp/hcl/v2/structure.go index aab09457d7..2bdf579d1d 100644 --- a/vendor/github.com/hashicorp/hcl/v2/structure.go +++ b/vendor/github.com/hashicorp/hcl/v2/structure.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go b/vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go index 8521814e5f..62aba139f0 100644 --- a/vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go +++ b/vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl // ----------------------------------------------------------------------------- diff --git a/vendor/github.com/hashicorp/hcl/v2/traversal.go b/vendor/github.com/hashicorp/hcl/v2/traversal.go index d710197008..540dde7ec7 100644 --- a/vendor/github.com/hashicorp/hcl/v2/traversal.go +++ b/vendor/github.com/hashicorp/hcl/v2/traversal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl import ( diff --git a/vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go b/vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go index f69d5fe9b2..87eeb15997 100644 --- a/vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go +++ b/vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hcl // AbsTraversalForExpr attempts to interpret the given expression as @@ -13,7 +16,7 @@ package hcl // // In most cases the calling application is interested in the value // that results from an expression, but in rarer cases the application -// needs to see the the name of the variable and subsequent +// needs to see the name of the variable and subsequent // attributes/indexes itself, for example to allow users to give references // to the variables themselves rather than to their values. An implementer // of this function should at least support attribute and index steps. 
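The doc comment touched in `traversal_for_expr.go` above describes `hcl.AbsTraversalForExpr`, which interprets an expression as a reference to a variable (a root name plus attribute/index steps) rather than evaluating it. Below is a minimal, hedged sketch of that API as vendored here; the expression string and the printed output are illustrative assumptions, not taken from the diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	// Parse an expression that names a value rather than computing one.
	expr, diags := hclsyntax.ParseExpression([]byte("aws_instance.example.id"), "example.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	// Interpret it as an absolute traversal instead of evaluating it.
	traversal, diags := hcl.AbsTraversalForExpr(expr)
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	fmt.Println("root:", traversal.RootName())
	for _, step := range traversal[1:] {
		if attr, ok := step.(hcl.TraverseAttr); ok {
			fmt.Println("attr:", attr.Name)
		}
	}
}
```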
diff --git a/vendor/github.com/hashicorp/terraform-exec/LICENSE b/vendor/github.com/hashicorp/terraform-exec/LICENSE index a612ad9813..c121cee6e5 100644 --- a/vendor/github.com/hashicorp/terraform-exec/LICENSE +++ b/vendor/github.com/hashicorp/terraform-exec/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2020 HashiCorp, Inc. + Mozilla Public License Version 2.0 ================================== diff --git a/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go b/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go index dbc9466b35..bf1a046c74 100644 --- a/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go +++ b/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go @@ -1,6 +1,6 @@ package version -const version = "0.17.3" +const version = "0.18.1" // ModuleVersion returns the current version of the github.com/hashicorp/terraform-exec Go module. // This is a function to allow for future possible enhancement using debug.BuildInfo. diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go index 40d9e69b9f..6dfdb976c5 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go @@ -3,6 +3,7 @@ package tfexec import ( "context" "fmt" + "io" "os/exec" "strconv" ) @@ -99,6 +100,27 @@ func (tf *Terraform) Apply(ctx context.Context, opts ...ApplyOption) error { return tf.runTerraformCmd(ctx, cmd) } +// ApplyJSON represents the terraform apply subcommand with the `-json` flag. +// Using the `-json` flag will result in +// [machine-readable](https://developer.hashicorp.com/terraform/internals/machine-readable-ui) +// JSON being written to the supplied `io.Writer`. ApplyJSON is likely to be +// removed in a future major version in favour of Apply returning JSON by default. +func (tf *Terraform) ApplyJSON(ctx context.Context, w io.Writer, opts ...ApplyOption) error { + err := tf.compatible(ctx, tf0_15_3, nil) + if err != nil { + return fmt.Errorf("terraform apply -json was added in 0.15.3: %w", err) + } + + tf.SetStdout(w) + + cmd, err := tf.applyJSONCmd(ctx, opts...) 
+ if err != nil { + return err + } + + return tf.runTerraformCmd(ctx, cmd) +} + func (tf *Terraform) applyCmd(ctx context.Context, opts ...ApplyOption) (*exec.Cmd, error) { c := defaultApplyOptions @@ -106,6 +128,32 @@ func (tf *Terraform) applyCmd(ctx context.Context, opts ...ApplyOption) (*exec.C o.configureApply(&c) } + args, err := tf.buildApplyArgs(ctx, c) + if err != nil { + return nil, err + } + + return tf.buildApplyCmd(ctx, c, args) +} + +func (tf *Terraform) applyJSONCmd(ctx context.Context, opts ...ApplyOption) (*exec.Cmd, error) { + c := defaultApplyOptions + + for _, o := range opts { + o.configureApply(&c) + } + + args, err := tf.buildApplyArgs(ctx, c) + if err != nil { + return nil, err + } + + args = append(args, "-json") + + return tf.buildApplyCmd(ctx, c, args) +} + +func (tf *Terraform) buildApplyArgs(ctx context.Context, c applyConfig) ([]string, error) { args := []string{"apply", "-no-color", "-auto-approve", "-input=false"} // string opts: only pass if set @@ -151,6 +199,10 @@ func (tf *Terraform) applyCmd(ctx context.Context, opts ...ApplyOption) (*exec.C } } + return args, nil +} + +func (tf *Terraform) buildApplyCmd(ctx context.Context, c applyConfig, args []string) (*exec.Cmd, error) { // string argument: pass if set if c.dirOrPlan != "" { args = append(args, c.dirOrPlan) diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go index 6d7b768ee7..79dacc9333 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go @@ -5,6 +5,7 @@ package tfexec import ( "context" + "fmt" "os/exec" "strings" "sync" @@ -40,11 +41,14 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { } err = cmd.Start() - if err == nil && ctx.Err() != nil { - err = ctx.Err() + if ctx.Err() != nil { + return cmdErr{ + err: err, + ctxErr: ctx.Err(), + } } if err != nil { - return tf.wrapExitError(ctx, err, "") + return err } var errStdout, errStderr error @@ -66,19 +70,22 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { wg.Wait() err = cmd.Wait() - if err == nil && ctx.Err() != nil { - err = ctx.Err() + if ctx.Err() != nil { + return cmdErr{ + err: err, + ctxErr: ctx.Err(), + } } if err != nil { - return tf.wrapExitError(ctx, err, errBuf.String()) + return fmt.Errorf("%w\n%s", err, errBuf.String()) } // Return error if there was an issue reading the std out/err if errStdout != nil && ctx.Err() != nil { - return tf.wrapExitError(ctx, errStdout, errBuf.String()) + return fmt.Errorf("%w\n%s", errStdout, errBuf.String()) } if errStderr != nil && ctx.Err() != nil { - return tf.wrapExitError(ctx, errStderr, errBuf.String()) + return fmt.Errorf("%w\n%s", errStderr, errBuf.String()) } return nil diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go index 6fa40e0aa3..440fafe8f2 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go @@ -2,6 +2,7 @@ package tfexec import ( "context" + "fmt" "os/exec" "strings" "sync" @@ -45,11 +46,14 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { } err = cmd.Start() - if err == nil && ctx.Err() != nil { - err = ctx.Err() + if ctx.Err() != nil { + return cmdErr{ + err: err, + ctxErr: ctx.Err(), + } } if err != nil { - return 
tf.wrapExitError(ctx, err, "") + return err } var errStdout, errStderr error @@ -71,19 +75,22 @@ func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error { wg.Wait() err = cmd.Wait() - if err == nil && ctx.Err() != nil { - err = ctx.Err() + if ctx.Err() != nil { + return cmdErr{ + err: err, + ctxErr: ctx.Err(), + } } if err != nil { - return tf.wrapExitError(ctx, err, errBuf.String()) + return fmt.Errorf("%w\n%s", err, errBuf.String()) } // Return error if there was an issue reading the std out/err if errStdout != nil && ctx.Err() != nil { - return tf.wrapExitError(ctx, errStdout, errBuf.String()) + return fmt.Errorf("%w\n%s", errStdout, errBuf.String()) } if errStderr != nil && ctx.Err() != nil { - return tf.wrapExitError(ctx, errStderr, errBuf.String()) + return fmt.Errorf("%w\n%s", errStderr, errBuf.String()) } return nil diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go index 8011c0ba86..189db7e45f 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go @@ -3,6 +3,7 @@ package tfexec import ( "context" "fmt" + "io" "os/exec" "strconv" ) @@ -95,6 +96,27 @@ func (tf *Terraform) Destroy(ctx context.Context, opts ...DestroyOption) error { return tf.runTerraformCmd(ctx, cmd) } +// DestroyJSON represents the terraform destroy subcommand with the `-json` flag. +// Using the `-json` flag will result in +// [machine-readable](https://developer.hashicorp.com/terraform/internals/machine-readable-ui) +// JSON being written to the supplied `io.Writer`. DestroyJSON is likely to be +// removed in a future major version in favour of Destroy returning JSON by default. +func (tf *Terraform) DestroyJSON(ctx context.Context, w io.Writer, opts ...DestroyOption) error { + err := tf.compatible(ctx, tf0_15_3, nil) + if err != nil { + return fmt.Errorf("terraform destroy -json was added in 0.15.3: %w", err) + } + + tf.SetStdout(w) + + cmd, err := tf.destroyJSONCmd(ctx, opts...) 
+ if err != nil { + return err + } + + return tf.runTerraformCmd(ctx, cmd) +} + func (tf *Terraform) destroyCmd(ctx context.Context, opts ...DestroyOption) (*exec.Cmd, error) { c := defaultDestroyOptions @@ -102,6 +124,25 @@ func (tf *Terraform) destroyCmd(ctx context.Context, opts ...DestroyOption) (*ex o.configureDestroy(&c) } + args := tf.buildDestroyArgs(c) + + return tf.buildDestroyCmd(ctx, c, args) +} + +func (tf *Terraform) destroyJSONCmd(ctx context.Context, opts ...DestroyOption) (*exec.Cmd, error) { + c := defaultDestroyOptions + + for _, o := range opts { + o.configureDestroy(&c) + } + + args := tf.buildDestroyArgs(c) + args = append(args, "-json") + + return tf.buildDestroyCmd(ctx, c, args) +} + +func (tf *Terraform) buildDestroyArgs(c destroyConfig) []string { args := []string{"destroy", "-no-color", "-auto-approve", "-input=false"} // string opts: only pass if set @@ -138,6 +179,10 @@ func (tf *Terraform) destroyCmd(ctx context.Context, opts ...DestroyOption) (*ex } } + return args +} + +func (tf *Terraform) buildDestroyCmd(ctx context.Context, c destroyConfig, args []string) (*exec.Cmd, error) { // optional positional argument if c.dir != "" { args = append(args, c.dir) diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go index 7a32ef2f1f..3bbb431caa 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go @@ -1,6 +1,9 @@ package tfexec -import "fmt" +import ( + "context" + "fmt" +) // this file contains non-parsed exported errors @@ -37,3 +40,25 @@ type ErrManualEnvVar struct { func (err *ErrManualEnvVar) Error() string { return fmt.Sprintf("manual setting of env var %q detected", err.Name) } + +// cmdErr is a custom error type to be returned when a cmd exits with a context +// error such as context.Canceled or context.DeadlineExceeded. +// The type is specifically designed to respond true to errors.Is for these two +// errors. +// See https://github.com/golang/go/issues/21880 for why this is necessary. 
+type cmdErr struct { + err error + ctxErr error +} + +func (e cmdErr) Is(target error) bool { + switch target { + case context.DeadlineExceeded, context.Canceled: + return e.ctxErr == context.DeadlineExceeded || e.ctxErr == context.Canceled + } + return false +} + +func (e cmdErr) Error() string { + return e.err.Error() +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go deleted file mode 100644 index 9fc152dddd..0000000000 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/exit_errors.go +++ /dev/null @@ -1,344 +0,0 @@ -package tfexec - -import ( - "context" - "fmt" - "os/exec" - "regexp" - "strings" - "text/template" -) - -// this file contains errors parsed from stderr - -var ( - // The "Required variable not set:" case is for 0.11 - missingVarErrRegexp = regexp.MustCompile(`Error: No value for required variable|Error: Required variable not set:`) - missingVarNameRegexp = regexp.MustCompile(`The root module input variable\s"(.+)"\sis\snot\sset,\sand\shas\sno\sdefault|Error: Required variable not set: (.+)`) - - usageRegexp = regexp.MustCompile(`Too many command line arguments|^Usage: .*Options:.*|Error: Invalid -\d+ option`) - - noInitErrRegexp = regexp.MustCompile( - // UNINITIALISED PROVIDERS/MODULES - `Error: Could not satisfy plugin requirements|` + - `Error: Could not load plugin|` + // v0.13 - `Please run \"terraform init\"|` + // v1.1.0 early alpha versions (ref 89b05050) - `run:\s+terraform init|` + // v1.1.0 (ref df578afd) - `Run\s+\"terraform init\"|` + // v1.2.0 - - // UNINITIALISED BACKENDS - `Error: Initialization required.|` + // v0.13 - `Error: Backend initialization required, please run \"terraform init\"`, // v0.15 - ) - - noConfigErrRegexp = regexp.MustCompile(`Error: No configuration files`) - - workspaceDoesNotExistRegexp = regexp.MustCompile(`Workspace "(.+)" doesn't exist.`) - - workspaceAlreadyExistsRegexp = regexp.MustCompile(`Workspace "(.+)" already exists`) - - tfVersionMismatchErrRegexp = regexp.MustCompile(`Error: The currently running version of Terraform doesn't meet the|Error: Unsupported Terraform Core version`) - tfVersionMismatchConstraintRegexp = regexp.MustCompile(`required_version = "(.+)"|Required version: (.+)\b`) - configInvalidErrRegexp = regexp.MustCompile(`There are some problems with the configuration, described below.`) - - stateLockErrRegexp = regexp.MustCompile(`Error acquiring the state lock`) - stateLockInfoRegexp = regexp.MustCompile(`Lock Info:\n\s*ID:\s*([^\n]+)\n\s*Path:\s*([^\n]+)\n\s*Operation:\s*([^\n]+)\n\s*Who:\s*([^\n]+)\n\s*Version:\s*([^\n]+)\n\s*Created:\s*([^\n]+)\n`) - statePlanReadErrRegexp = regexp.MustCompile( - `Terraform couldn't read the given file as a state or plan file.|` + - `Error: Failed to read the given file as a state or plan file`) - lockIdInvalidErrRegexp = regexp.MustCompile(`Failed to unlock state: `) -) - -func (tf *Terraform) wrapExitError(ctx context.Context, err error, stderr string) error { - exitErr, ok := err.(*exec.ExitError) - if !ok { - // not an exit error, short circuit, nothing to wrap - return err - } - - ctxErr := ctx.Err() - - // nothing to parse, return early - errString := strings.TrimSpace(stderr) - if errString == "" { - return &unwrapper{exitErr, ctxErr} - } - - switch { - case tfVersionMismatchErrRegexp.MatchString(stderr): - constraint := "" - constraints := tfVersionMismatchConstraintRegexp.FindStringSubmatch(stderr) - for i := 1; i < len(constraints); i++ { - constraint = 
strings.TrimSpace(constraints[i]) - if constraint != "" { - break - } - } - - if constraint == "" { - // hardcode a value here for weird cases (incl. 0.12) - constraint = "unknown" - } - - // only set this if it happened to be cached already - ver := "" - if tf != nil && tf.execVersion != nil { - ver = tf.execVersion.String() - } - - return &ErrTFVersionMismatch{ - unwrapper: unwrapper{exitErr, ctxErr}, - - Constraint: constraint, - TFVersion: ver, - } - case missingVarErrRegexp.MatchString(stderr): - name := "" - names := missingVarNameRegexp.FindStringSubmatch(stderr) - for i := 1; i < len(names); i++ { - name = strings.TrimSpace(names[i]) - if name != "" { - break - } - } - - return &ErrMissingVar{ - unwrapper: unwrapper{exitErr, ctxErr}, - - VariableName: name, - } - case usageRegexp.MatchString(stderr): - return &ErrCLIUsage{ - unwrapper: unwrapper{exitErr, ctxErr}, - - stderr: stderr, - } - case noInitErrRegexp.MatchString(stderr): - return &ErrNoInit{ - unwrapper: unwrapper{exitErr, ctxErr}, - - stderr: stderr, - } - case noConfigErrRegexp.MatchString(stderr): - return &ErrNoConfig{ - unwrapper: unwrapper{exitErr, ctxErr}, - - stderr: stderr, - } - case workspaceDoesNotExistRegexp.MatchString(stderr): - submatches := workspaceDoesNotExistRegexp.FindStringSubmatch(stderr) - if len(submatches) == 2 { - return &ErrNoWorkspace{ - unwrapper: unwrapper{exitErr, ctxErr}, - - Name: submatches[1], - } - } - case workspaceAlreadyExistsRegexp.MatchString(stderr): - submatches := workspaceAlreadyExistsRegexp.FindStringSubmatch(stderr) - if len(submatches) == 2 { - return &ErrWorkspaceExists{ - unwrapper: unwrapper{exitErr, ctxErr}, - - Name: submatches[1], - } - } - case configInvalidErrRegexp.MatchString(stderr): - return &ErrConfigInvalid{stderr: stderr} - case stateLockErrRegexp.MatchString(stderr): - submatches := stateLockInfoRegexp.FindStringSubmatch(stderr) - if len(submatches) == 7 { - return &ErrStateLocked{ - unwrapper: unwrapper{exitErr, ctxErr}, - - ID: submatches[1], - Path: submatches[2], - Operation: submatches[3], - Who: submatches[4], - Version: submatches[5], - Created: submatches[6], - } - } - case statePlanReadErrRegexp.MatchString(stderr): - return &ErrStatePlanRead{stderr: stderr} - case lockIdInvalidErrRegexp.MatchString(stderr): - return &ErrLockIdInvalid{stderr: stderr} - } - - return fmt.Errorf("%w\n%s", &unwrapper{exitErr, ctxErr}, stderr) -} - -type unwrapper struct { - err error - ctxErr error -} - -func (u *unwrapper) Unwrap() error { - return u.err -} - -func (u *unwrapper) Is(target error) bool { - switch target { - case context.DeadlineExceeded, context.Canceled: - return u.ctxErr == context.DeadlineExceeded || - u.ctxErr == context.Canceled - } - return false -} - -func (u *unwrapper) Error() string { - return u.err.Error() -} - -type ErrConfigInvalid struct { - stderr string -} - -func (e *ErrConfigInvalid) Error() string { - return "configuration is invalid" -} - -type ErrMissingVar struct { - unwrapper - - VariableName string -} - -func (err *ErrMissingVar) Error() string { - return fmt.Sprintf("variable %q was required but not supplied", err.VariableName) -} - -type ErrNoWorkspace struct { - unwrapper - - Name string -} - -func (err *ErrNoWorkspace) Error() string { - return fmt.Sprintf("workspace %q does not exist", err.Name) -} - -// ErrWorkspaceExists is returned when creating a workspace that already exists -type ErrWorkspaceExists struct { - unwrapper - - Name string -} - -func (err *ErrWorkspaceExists) Error() string { - return fmt.Sprintf("workspace 
%q already exists", err.Name) -} - -type ErrNoInit struct { - unwrapper - - stderr string -} - -func (e *ErrNoInit) Error() string { - return e.stderr -} - -type ErrStatePlanRead struct { - unwrapper - - stderr string -} - -func (e *ErrStatePlanRead) Error() string { - return e.stderr -} - -type ErrNoConfig struct { - unwrapper - - stderr string -} - -func (e *ErrNoConfig) Error() string { - return e.stderr -} - -type ErrLockIdInvalid struct { - unwrapper - - stderr string -} - -func (e *ErrLockIdInvalid) Error() string { - return e.stderr -} - -// ErrCLIUsage is returned when the combination of flags or arguments is incorrect. -// -// CLI indicates usage errors in three different ways: either -// 1. Exit 1, with a custom error message on stderr. -// 2. Exit 1, with command usage logged to stderr. -// 3. Exit 127, with command usage logged to stdout. -// Currently cases 1 and 2 are handled. -// TODO KEM: Handle exit 127 case. How does this work on non-Unix platforms? -type ErrCLIUsage struct { - unwrapper - - stderr string -} - -func (e *ErrCLIUsage) Error() string { - return e.stderr -} - -// ErrTFVersionMismatch is returned when the running Terraform version is not compatible with the -// value specified for required_version in the terraform block. -type ErrTFVersionMismatch struct { - unwrapper - - TFVersion string - - // Constraint is not returned in the error messaging on 0.12 - Constraint string -} - -func (e *ErrTFVersionMismatch) Error() string { - version := "version" - if e.TFVersion != "" { - version = e.TFVersion - } - - requirement := "" - if e.Constraint != "" { - requirement = fmt.Sprintf(" (%s required)", e.Constraint) - } - - return fmt.Sprintf("terraform %s not supported by configuration%s", - version, requirement) -} - -// ErrStateLocked is returned when the state lock is already held by another process. -type ErrStateLocked struct { - unwrapper - - ID string - Path string - Operation string - Who string - Version string - Created string -} - -func (e *ErrStateLocked) Error() string { - tmpl := `Lock Info: - ID: {{.ID}} - Path: {{.Path}} - Operation: {{.Operation}} - Who: {{.Who}} - Version: {{.Version}} - Created: {{.Created}} -` - - t := template.Must(template.New("LockInfo").Parse(tmpl)) - var out strings.Builder - if err := t.Execute(&out, e); err != nil { - return "error acquiring the state lock" - } - return fmt.Sprintf("error acquiring the state lock: %v", out.String()) -} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/metadata_functions.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/metadata_functions.go new file mode 100644 index 0000000000..4577e062b4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/metadata_functions.go @@ -0,0 +1,34 @@ +package tfexec + +import ( + "context" + "fmt" + "os/exec" + + tfjson "github.com/hashicorp/terraform-json" +) + +// MetadataFunctions represents the terraform metadata functions -json subcommand. 
+func (tf *Terraform) MetadataFunctions(ctx context.Context) (*tfjson.MetadataFunctions, error) { + err := tf.compatible(ctx, tf1_4_0, nil) + if err != nil { + return nil, fmt.Errorf("terraform metadata functions was added in 1.4.0: %w", err) + } + + functionsCmd := tf.metadataFunctionsCmd(ctx) + + var ret tfjson.MetadataFunctions + err = tf.runTerraformCmdJSON(ctx, functionsCmd, &ret) + if err != nil { + return nil, err + } + + return &ret, nil +} + +func (tf *Terraform) metadataFunctionsCmd(ctx context.Context, args ...string) *exec.Cmd { + allArgs := []string{"metadata", "functions", "-json"} + allArgs = append(allArgs, args...) + + return tf.buildTerraformCmd(ctx, nil, allArgs...) +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go index bf41094bb8..5ea3155202 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go @@ -3,6 +3,7 @@ package tfexec import ( "context" "fmt" + "io" "os/exec" "strconv" ) @@ -108,6 +109,42 @@ func (tf *Terraform) Plan(ctx context.Context, opts ...PlanOption) (bool, error) return false, err } +// PlanJSON executes `terraform plan` with the specified options as well as the +// `-json` flag and waits for it to complete. +// +// Using the `-json` flag will result in +// [machine-readable](https://developer.hashicorp.com/terraform/internals/machine-readable-ui) +// JSON being written to the supplied `io.Writer`. +// +// The returned boolean is false when the plan diff is empty (no changes) and +// true when the plan diff is non-empty (changes present). +// +// The returned error is nil if `terraform plan` has been executed and exits +// with either 0 or 2. +// +// PlanJSON is likely to be removed in a future major version in favour of +// Plan returning JSON by default. +func (tf *Terraform) PlanJSON(ctx context.Context, w io.Writer, opts ...PlanOption) (bool, error) { + err := tf.compatible(ctx, tf0_15_3, nil) + if err != nil { + return false, fmt.Errorf("terraform plan -json was added in 0.15.3: %w", err) + } + + tf.SetStdout(w) + + cmd, err := tf.planJSONCmd(ctx, opts...) 
+ if err != nil { + return false, err + } + + err = tf.runTerraformCmd(ctx, cmd) + if err != nil && cmd.ProcessState.ExitCode() == 2 { + return true, nil + } + + return false, err +} + func (tf *Terraform) planCmd(ctx context.Context, opts ...PlanOption) (*exec.Cmd, error) { c := defaultPlanOptions @@ -115,6 +152,32 @@ func (tf *Terraform) planCmd(ctx context.Context, opts ...PlanOption) (*exec.Cmd o.configurePlan(&c) } + args, err := tf.buildPlanArgs(ctx, c) + if err != nil { + return nil, err + } + + return tf.buildPlanCmd(ctx, c, args) +} + +func (tf *Terraform) planJSONCmd(ctx context.Context, opts ...PlanOption) (*exec.Cmd, error) { + c := defaultPlanOptions + + for _, o := range opts { + o.configurePlan(&c) + } + + args, err := tf.buildPlanArgs(ctx, c) + if err != nil { + return nil, err + } + + args = append(args, "-json") + + return tf.buildPlanCmd(ctx, c, args) +} + +func (tf *Terraform) buildPlanArgs(ctx context.Context, c planConfig) ([]string, error) { args := []string{"plan", "-no-color", "-input=false", "-detailed-exitcode"} // string opts: only pass if set @@ -162,6 +225,10 @@ func (tf *Terraform) planCmd(ctx context.Context, opts ...PlanOption) (*exec.Cmd } } + return args, nil +} + +func (tf *Terraform) buildPlanCmd(ctx context.Context, c planConfig, args []string) (*exec.Cmd, error) { // optional positional argument if c.dir != "" { args = append(args, c.dir) diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go index 78f6b4b501..4bdd8960f5 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go @@ -2,6 +2,8 @@ package tfexec import ( "context" + "fmt" + "io" "os/exec" "strconv" ) @@ -78,6 +80,27 @@ func (tf *Terraform) Refresh(ctx context.Context, opts ...RefreshCmdOption) erro return tf.runTerraformCmd(ctx, cmd) } +// RefreshJSON represents the terraform refresh subcommand with the `-json` flag. +// Using the `-json` flag will result in +// [machine-readable](https://developer.hashicorp.com/terraform/internals/machine-readable-ui) +// JSON being written to the supplied `io.Writer`. RefreshJSON is likely to be +// removed in a future major version in favour of Refresh returning JSON by default. +func (tf *Terraform) RefreshJSON(ctx context.Context, w io.Writer, opts ...RefreshCmdOption) error { + err := tf.compatible(ctx, tf0_15_3, nil) + if err != nil { + return fmt.Errorf("terraform refresh -json was added in 0.15.3: %w", err) + } + + tf.SetStdout(w) + + cmd, err := tf.refreshJSONCmd(ctx, opts...) 
+ if err != nil { + return err + } + + return tf.runTerraformCmd(ctx, cmd) +} + func (tf *Terraform) refreshCmd(ctx context.Context, opts ...RefreshCmdOption) (*exec.Cmd, error) { c := defaultRefreshOptions @@ -85,6 +108,26 @@ func (tf *Terraform) refreshCmd(ctx context.Context, opts ...RefreshCmdOption) ( o.configureRefresh(&c) } + args := tf.buildRefreshArgs(c) + + return tf.buildRefreshCmd(ctx, c, args) + +} + +func (tf *Terraform) refreshJSONCmd(ctx context.Context, opts ...RefreshCmdOption) (*exec.Cmd, error) { + c := defaultRefreshOptions + + for _, o := range opts { + o.configureRefresh(&c) + } + + args := tf.buildRefreshArgs(c) + args = append(args, "-json") + + return tf.buildRefreshCmd(ctx, c, args) +} + +func (tf *Terraform) buildRefreshArgs(c refreshConfig) []string { args := []string{"refresh", "-no-color", "-input=false"} // string opts: only pass if set @@ -119,6 +162,10 @@ func (tf *Terraform) refreshCmd(ctx context.Context, opts ...RefreshCmdOption) ( } } + return args +} + +func (tf *Terraform) buildRefreshCmd(ctx context.Context, c refreshConfig, args []string) (*exec.Cmd, error) { // optional positional argument if c.dir != "" { args = append(args, c.dir) diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go index bb8be17d58..10d7d9adba 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go @@ -32,14 +32,14 @@ type printfer interface { // but it ignores certain environment variables that are managed within the code and prohibits // setting them through SetEnv: // -// - TF_APPEND_USER_AGENT -// - TF_IN_AUTOMATION -// - TF_INPUT -// - TF_LOG -// - TF_LOG_PATH -// - TF_REATTACH_PROVIDERS -// - TF_DISABLE_PLUGIN_TLS -// - TF_SKIP_PROVIDER_VERIFY +// - TF_APPEND_USER_AGENT +// - TF_IN_AUTOMATION +// - TF_INPUT +// - TF_LOG +// - TF_LOG_PATH +// - TF_REATTACH_PROVIDERS +// - TF_DISABLE_PLUGIN_TLS +// - TF_SKIP_PROVIDER_VERIFY type Terraform struct { execPath string workingDir string diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go index 9978ae28de..a2e973681f 100644 --- a/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go +++ b/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go @@ -25,7 +25,9 @@ var ( tf0_14_0 = version.Must(version.NewVersion("0.14.0")) tf0_15_0 = version.Must(version.NewVersion("0.15.0")) tf0_15_2 = version.Must(version.NewVersion("0.15.2")) + tf0_15_3 = version.Must(version.NewVersion("0.15.3")) tf1_1_0 = version.Must(version.NewVersion("1.1.0")) + tf1_4_0 = version.Must(version.NewVersion("1.4.0")) ) // Version returns structured output from the terraform version command including both the Terraform CLI version diff --git a/vendor/github.com/hashicorp/terraform-json/CODEOWNERS b/vendor/github.com/hashicorp/terraform-json/CODEOWNERS new file mode 100644 index 0000000000..a99f162a51 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/CODEOWNERS @@ -0,0 +1,2 @@ +# This codebase has shared ownership and responsibility. 
+* @hashicorp/terraform-core @hashicorp/terraform-devex @hashicorp/tf-editor-experience-engineers diff --git a/vendor/github.com/hashicorp/terraform-json/LICENSE b/vendor/github.com/hashicorp/terraform-json/LICENSE index a612ad9813..3b97eaf3c3 100644 --- a/vendor/github.com/hashicorp/terraform-json/LICENSE +++ b/vendor/github.com/hashicorp/terraform-json/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2019 HashiCorp, Inc. + Mozilla Public License Version 2.0 ================================== diff --git a/vendor/github.com/hashicorp/terraform-json/README.md b/vendor/github.com/hashicorp/terraform-json/README.md index fea0ba2609..4a9cd94a11 100644 --- a/vendor/github.com/hashicorp/terraform-json/README.md +++ b/vendor/github.com/hashicorp/terraform-json/README.md @@ -1,6 +1,5 @@ # terraform-json -[![CircleCI](https://circleci.com/gh/hashicorp/terraform-json/tree/main.svg?style=svg)](https://circleci.com/gh/hashicorp/terraform-json/tree/main) [![GoDoc](https://godoc.org/github.com/hashicorp/terraform-json?status.svg)](https://godoc.org/github.com/hashicorp/terraform-json) This repository houses data types designed to help parse the data produced by diff --git a/vendor/github.com/hashicorp/terraform-json/metadata.go b/vendor/github.com/hashicorp/terraform-json/metadata.go new file mode 100644 index 0000000000..70d7ce4380 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-json/metadata.go @@ -0,0 +1,104 @@ +package tfjson + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/hashicorp/go-version" + "github.com/zclconf/go-cty/cty" +) + +// MetadataFunctionsFormatVersionConstraints defines the versions of the JSON +// metadata functions format that are supported by this package. +var MetadataFunctionsFormatVersionConstraints = "~> 1.0" + +// MetadataFunctions is the top-level object returned when exporting function +// signatures +type MetadataFunctions struct { + // The version of the format. This should always match the + // MetadataFunctionsFormatVersionConstraints in this package, else + // unmarshaling will fail. + FormatVersion string `json:"format_version"` + + // The signatures of the functions available in a Terraform version. + Signatures map[string]*FunctionSignature `json:"function_signatures,omitempty"` +} + +// Validate checks to ensure that MetadataFunctions is present, and the +// version matches the version supported by this library. +func (f *MetadataFunctions) Validate() error { + if f == nil { + return errors.New("metadata functions data is nil") + } + + if f.FormatVersion == "" { + return errors.New("unexpected metadata functions data, format version is missing") + } + + constraint, err := version.NewConstraint(MetadataFunctionsFormatVersionConstraints) + if err != nil { + return fmt.Errorf("invalid version constraint: %w", err) + } + + version, err := version.NewVersion(f.FormatVersion) + if err != nil { + return fmt.Errorf("invalid format version %q: %w", f.FormatVersion, err) + } + + if !constraint.Check(version) { + return fmt.Errorf("unsupported metadata functions format version: %q does not satisfy %q", + version, constraint) + } + + return nil +} + +func (f *MetadataFunctions) UnmarshalJSON(b []byte) error { + type rawFunctions MetadataFunctions + var functions rawFunctions + + err := json.Unmarshal(b, &functions) + if err != nil { + return err + } + + *f = *(*MetadataFunctions)(&functions) + + return f.Validate() +} + +// FunctionSignature represents a function signature. 
+type FunctionSignature struct { + // Description is an optional human-readable description + // of the function + Description string `json:"description,omitempty"` + + // ReturnType is the ctyjson representation of the function's + // return types based on supplying all parameters using + // dynamic types. Functions can have dynamic return types. + ReturnType cty.Type `json:"return_type"` + + // Parameters describes the function's fixed positional parameters. + Parameters []*FunctionParameter `json:"parameters,omitempty"` + + // VariadicParameter describes the function's variadic + // parameter if it is supported. + VariadicParameter *FunctionParameter `json:"variadic_parameter,omitempty"` +} + +// FunctionParameter represents a parameter to a function. +type FunctionParameter struct { + // Name is an optional name for the argument. + Name string `json:"name,omitempty"` + + // Description is an optional human-readable description + // of the argument + Description string `json:"description,omitempty"` + + // IsNullable is true if null is acceptable value for the argument + IsNullable bool `json:"is_nullable,omitempty"` + + // A type that any argument for this parameter must conform to. + Type cty.Type `json:"type"` +} diff --git a/vendor/github.com/hashicorp/terraform-json/plan.go b/vendor/github.com/hashicorp/terraform-json/plan.go index 274006a018..6cfd533e99 100644 --- a/vendor/github.com/hashicorp/terraform-json/plan.go +++ b/vendor/github.com/hashicorp/terraform-json/plan.go @@ -42,6 +42,10 @@ type Plan struct { // this plan. PlannedValues *StateValues `json:"planned_values,omitempty"` + // The change operations for resources and data sources within this plan + // resulting from resource drift. + ResourceDrift []*ResourceChange `json:"resource_drift,omitempty"` + // The change operations for resources and data sources within this // plan. ResourceChanges []*ResourceChange `json:"resource_changes,omitempty"` diff --git a/vendor/github.com/hashicorp/terraform-json/schemas.go b/vendor/github.com/hashicorp/terraform-json/schemas.go index 027224b620..f7c8abd50f 100644 --- a/vendor/github.com/hashicorp/terraform-json/schemas.go +++ b/vendor/github.com/hashicorp/terraform-json/schemas.go @@ -39,7 +39,7 @@ func (p *ProviderSchemas) Validate() error { return errors.New("unexpected provider schema data, format version is missing") } - constraint, err := version.NewConstraint(PlanFormatVersionConstraints) + constraint, err := version.NewConstraint(ProviderSchemasFormatVersionConstraints) if err != nil { return fmt.Errorf("invalid version constraint: %w", err) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol_data.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol_data.go index 0e220f57cd..36dfcd764d 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol_data.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol_data.go @@ -122,7 +122,7 @@ func protocolDataDynamicValue6(_ context.Context, value *tfprotov6.DynamicValue) } func writeProtocolFile(ctx context.Context, dataDir string, rpc string, message string, field string, fileExtension string, fileContents []byte) { - fileName := fmt.Sprintf("%d_%s_%s_%s", time.Now().Unix(), rpc, message, field) + fileName := fmt.Sprintf("%d_%s_%s_%s", time.Now().UnixMilli(), rpc, message, field) if fileExtension != "" { fileName += "." 
+ fileExtension diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/diagnostic.go index 979896f778..81d692cef9 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/diagnostic.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/diagnostic.go @@ -1,6 +1,8 @@ package toproto import ( + "unicode/utf8" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" ) @@ -8,8 +10,8 @@ import ( func Diagnostic(in *tfprotov5.Diagnostic) (*tfplugin5.Diagnostic, error) { diag := &tfplugin5.Diagnostic{ Severity: Diagnostic_Severity(in.Severity), - Summary: in.Summary, - Detail: in.Detail, + Summary: forceValidUTF8(in.Summary), + Detail: forceValidUTF8(in.Detail), } if in.Attribute != nil { attr, err := AttributePath(in.Attribute) @@ -41,6 +43,59 @@ func Diagnostics(in []*tfprotov5.Diagnostic) ([]*tfplugin5.Diagnostic, error) { return diagnostics, nil } +// forceValidUTF8 returns a string guaranteed to be valid UTF-8 even if the +// input isn't, by replacing any invalid bytes with a valid UTF-8 encoding of +// the Unicode Replacement Character (\uFFFD). +// +// The protobuf serialization library will reject invalid UTF-8 with an +// unhelpful error message: +// +// string field contains invalid UTF-8 +// +// Passing a string result through this function makes invalid UTF-8 instead +// emerge as placeholder characters on the other side of the wire protocol, +// giving a better chance of still returning a partially-legible message +// instead of a generic character encoding error. +// +// This is intended for user-facing messages such as diagnostic summary and +// detail messages, where Terraform will just treat the value as opaque and +// it's ultimately up to the user and their terminal or web browser to +// interpret the result. Don't use this for strings that have machine-readable +// meaning. +func forceValidUTF8(s string) string { + // Most strings that pass through here will already be valid UTF-8 and + // utf8.ValidString has a fast path which will beat our rune-by-rune + // analysis below, so it's worth the cost of walking the string twice + // in the rarer invalid case. + if utf8.ValidString(s) { + return s + } + + // If we get down here then we know there's at least one invalid UTF-8 + // sequence in the string, so in this slow path we'll reconstruct the + // string one rune at a time, guaranteeing that we'll only write valid + // UTF-8 sequences into the resulting buffer. + // + // Any invalid string will grow at least a little larger as a result of + // this operation because we'll be replacing each invalid byte with + // the three-byte sequence \xEF\xBF\xBD, which is the UTF-8 encoding of + // the replacement character \uFFFD. 9 is a magic number giving room for + // three such expansions without any further allocation. + ret := make([]byte, 0, len(s)+9) + for { + // If the first byte in s is not the start of a valid UTF-8 sequence + // then the following will return utf8.RuneError, 1, where + // utf8.RuneError is the unicode replacement character. + r, advance := utf8.DecodeRuneInString(s) + if advance == 0 { + break + } + s = s[advance:] + ret = utf8.AppendRune(ret, r) + } + return string(ret) +} + // we have to say this next thing to get golint to stop yelling at us about the // underscores in the function names. 
We want the function names to match // actually-generated code, so it feels like fair play. It's just a shame we diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/diagnostic.go index 26ce3e9df7..4144222ce4 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/diagnostic.go +++ b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/diagnostic.go @@ -1,6 +1,8 @@ package toproto import ( + "unicode/utf8" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" ) @@ -8,8 +10,8 @@ import ( func Diagnostic(in *tfprotov6.Diagnostic) (*tfplugin6.Diagnostic, error) { diag := &tfplugin6.Diagnostic{ Severity: Diagnostic_Severity(in.Severity), - Summary: in.Summary, - Detail: in.Detail, + Summary: forceValidUTF8(in.Summary), + Detail: forceValidUTF8(in.Detail), } if in.Attribute != nil { attr, err := AttributePath(in.Attribute) @@ -41,6 +43,59 @@ func Diagnostics(in []*tfprotov6.Diagnostic) ([]*tfplugin6.Diagnostic, error) { return diagnostics, nil } +// forceValidUTF8 returns a string guaranteed to be valid UTF-8 even if the +// input isn't, by replacing any invalid bytes with a valid UTF-8 encoding of +// the Unicode Replacement Character (\uFFFD). +// +// The protobuf serialization library will reject invalid UTF-8 with an +// unhelpful error message: +// +// string field contains invalid UTF-8 +// +// Passing a string result through this function makes invalid UTF-8 instead +// emerge as placeholder characters on the other side of the wire protocol, +// giving a better chance of still returning a partially-legible message +// instead of a generic character encoding error. +// +// This is intended for user-facing messages such as diagnostic summary and +// detail messages, where Terraform will just treat the value as opaque and +// it's ultimately up to the user and their terminal or web browser to +// interpret the result. Don't use this for strings that have machine-readable +// meaning. +func forceValidUTF8(s string) string { + // Most strings that pass through here will already be valid UTF-8 and + // utf8.ValidString has a fast path which will beat our rune-by-rune + // analysis below, so it's worth the cost of walking the string twice + // in the rarer invalid case. + if utf8.ValidString(s) { + return s + } + + // If we get down here then we know there's at least one invalid UTF-8 + // sequence in the string, so in this slow path we'll reconstruct the + // string one rune at a time, guaranteeing that we'll only write valid + // UTF-8 sequences into the resulting buffer. + // + // Any invalid string will grow at least a little larger as a result of + // this operation because we'll be replacing each invalid byte with + // the three-byte sequence \xEF\xBF\xBD, which is the UTF-8 encoding of + // the replacement character \uFFFD. 9 is a magic number giving room for + // three such expansions without any further allocation. + ret := make([]byte, 0, len(s)+9) + for { + // If the first byte in s is not the start of a valid UTF-8 sequence + // then the following will return utf8.RuneError, 1, where + // utf8.RuneError is the unicode replacement character. 
+ r, advance := utf8.DecodeRuneInString(s) + if advance == 0 { + break + } + s = s[advance:] + ret = utf8.AppendRune(ret, r) + } + return string(ret) +} + // we have to say this next thing to get golint to stop yelling at us about the // underscores in the function names. We want the function names to match // actually-generated code, so it feels like fair play. It's just a shame we diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-log/LICENSE index c33dcc7c92..84cd064397 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/LICENSE +++ b/vendor/github.com/hashicorp/terraform-plugin-log/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2021 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go index 8040699e4f..583dc2cbf5 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/options.go @@ -157,6 +157,47 @@ type LoggerOpts struct { MaskMessageStrings []string } +// Copy creates a duplicate LoggerOpts. This should be used to ensure +// safe LoggerOpts modification when the LoggerOpts could be saved into a +// new context.Context. +func (o LoggerOpts) Copy() LoggerOpts { + result := LoggerOpts{ + AdditionalLocationOffset: o.AdditionalLocationOffset, + Fields: make(map[string]any, len(o.Fields)), + IncludeLocation: o.IncludeLocation, + IncludeRootFields: o.IncludeRootFields, + IncludeTime: o.IncludeTime, + Level: o.Level, + MaskAllFieldValuesRegexes: make([]*regexp.Regexp, len(o.MaskAllFieldValuesRegexes)), + MaskAllFieldValuesStrings: make([]string, len(o.MaskAllFieldValuesStrings)), + MaskFieldValuesWithFieldKeys: make([]string, len(o.MaskFieldValuesWithFieldKeys)), + MaskMessageRegexes: make([]*regexp.Regexp, len(o.MaskMessageRegexes)), + MaskMessageStrings: make([]string, len(o.MaskMessageStrings)), + Name: o.Name, + OmitLogWithFieldKeys: make([]string, len(o.OmitLogWithFieldKeys)), + OmitLogWithMessageRegexes: make([]*regexp.Regexp, len(o.OmitLogWithMessageRegexes)), + OmitLogWithMessageStrings: make([]string, len(o.OmitLogWithMessageStrings)), + Output: o.Output, + } + + // Copy all slice/map contents to prevent leaking memory references + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + for key, value := range o.Fields { + result.Fields[key] = value + } + + copy(result.MaskAllFieldValuesRegexes, o.MaskAllFieldValuesRegexes) + copy(result.MaskAllFieldValuesStrings, o.MaskAllFieldValuesStrings) + copy(result.MaskFieldValuesWithFieldKeys, o.MaskFieldValuesWithFieldKeys) + copy(result.MaskMessageRegexes, o.MaskMessageRegexes) + copy(result.MaskMessageStrings, o.MaskMessageStrings) + copy(result.OmitLogWithFieldKeys, o.OmitLogWithFieldKeys) + copy(result.OmitLogWithMessageRegexes, o.OmitLogWithMessageRegexes) + copy(result.OmitLogWithMessageStrings, o.OmitLogWithMessageStrings) + + return result +} + // ApplyLoggerOpts generates a LoggerOpts out of a list of Option // implementations. By default, AdditionalLocationOffset is 1, IncludeLocation // is true, IncludeTime is true, and Output is os.Stderr. 
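The `LoggerOpts.Copy` addition above, and the `lOpts.Copy()` call sites in the tflog/tfsdklog hunks that follow, exist because a `LoggerOpts` value stored in a `context.Context` can share slice and map backing storage with option sets derived from it; mutating those collections in place then leaks the change into sibling contexts (hashicorp/terraform-plugin-log#131). Below is a minimal, self-contained sketch of that slice-aliasing hazard and of the copy-before-append fix the patch adopts. It does not use terraform-plugin-log itself; `opts`, `withKeyInPlace`, and `withKeyCopied` are hypothetical names for illustration only.

    package main

    import "fmt"

    // opts is a stand-in for LoggerOpts: it holds a slice that may share a
    // backing array with other derived option sets.
    type opts struct {
        omitKeys []string
    }

    // withKeyInPlace appends without copying first; if two derived option
    // sets share the same backing array, the write can leak into a sibling.
    func withKeyInPlace(o opts, key string) opts {
        o.omitKeys = append(o.omitKeys, key)
        return o
    }

    // withKeyCopied copies the slice before appending, mirroring what
    // LoggerOpts.Copy does, so each derived option set owns its storage.
    func withKeyCopied(o opts, key string) opts {
        copied := make([]string, len(o.omitKeys), len(o.omitKeys)+1)
        copy(copied, o.omitKeys)
        o.omitKeys = append(copied, key)
        return o
    }

    func main() {
        // Parent options with spare capacity, as happens after earlier appends.
        parent := opts{omitKeys: make([]string, 1, 4)}
        parent.omitKeys[0] = "password"

        a := withKeyInPlace(parent, "token")  // writes into parent's backing array
        b := withKeyInPlace(parent, "secret") // reuses the slot "token" occupied

        fmt.Println(a.omitKeys) // [password secret] -- "token" was clobbered
        fmt.Println(b.omitKeys) // [password secret]

        c := withKeyCopied(parent, "token")
        d := withKeyCopied(parent, "secret")
        fmt.Println(c.omitKeys) // [password token]
        fmt.Println(d.omitKeys) // [password secret]
    }

Calling `Copy()` before applying each `Option`, as the tflog and tfsdklog hunks below do, gives every derived context its own backing storage for the mask/omit lists, at the cost of one extra allocation per option call.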
diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go index 0e6e8f50f0..c8f45a86ce 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/provider.go @@ -13,7 +13,12 @@ func GetProviderRootLogger(ctx context.Context) hclog.Logger { if logger == nil { return nil } - return logger.(hclog.Logger) + + hclogger, ok := logger.(hclog.Logger) + if !ok { + return nil + } + return hclogger } // GetProviderRootLoggerOptions returns the root logger options used for @@ -64,7 +69,13 @@ func GetProviderSubsystemLogger(ctx context.Context, subsystem string) hclog.Log if logger == nil { return nil } - return logger.(hclog.Logger) + + hclogger, ok := logger.(hclog.Logger) + if !ok { + return nil + } + + return hclogger } // SetProviderSubsystemLogger sets `logger` as the logger for the named diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go index 034e928272..217c83ed72 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sdk.go @@ -13,7 +13,13 @@ func GetSDKRootLogger(ctx context.Context) hclog.Logger { if logger == nil { return nil } - return logger.(hclog.Logger) + + hclogger, ok := logger.(hclog.Logger) + if !ok { + return nil + } + + return hclogger } // GetSDKRootLoggerOptions returns the root logger options used for @@ -80,7 +86,13 @@ func GetSDKSubsystemLogger(ctx context.Context, subsystem string) hclog.Logger { if logger == nil { return nil } - return logger.(hclog.Logger) + + hclogger, ok := logger.(hclog.Logger) + if !ok { + return nil + } + + return hclogger } // SetSDKSubsystemLogger sets `logger` as the logger for the named subsystem in diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sink.go b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sink.go index d48f3bf4c1..b56ce8bfe7 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sink.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/internal/logging/sink.go @@ -13,7 +13,13 @@ func GetSink(ctx context.Context) hclog.Logger { if logger == nil { return nil } - return logger.(hclog.Logger) + + hclogger, ok := logger.(hclog.Logger) + if !ok { + return nil + } + + return hclogger } // GetSinkOptions returns the root logger options used for diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go index bc3897e324..c1a1572ce0 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/provider.go @@ -15,7 +15,9 @@ import ( func SetField(ctx context.Context, key string, value interface{}) context.Context { lOpts := logging.GetProviderRootTFLoggerOpts(ctx) - lOpts = logging.WithField(key, value)(lOpts) + // Copy to prevent slice/map aliasing issues. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithField(key, value)(lOpts.Copy()) return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) } @@ -144,16 +146,17 @@ func Error(ctx context.Context, msg string, additionalFields ...map[string]inter // // Example: // -// configuration = `['foo', 'baz']` -// -// log1 = `{ msg = "...", fields = { 'foo': '...', 'bar': '...' }` -> omitted -// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> printed -// log3 = `{ msg = "...", fields = { 'baz': '...', 'boo': '...' }` -> omitted +// configuration = `['foo', 'baz']` // +// log1 = `{ msg = "...", fields = { 'foo': '...', 'bar': '...' }` -> omitted +// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> printed +// log3 = `{ msg = "...", fields = { 'baz': '...', 'boo': '...' }` -> omitted func OmitLogWithFieldKeys(ctx context.Context, keys ...string) context.Context { lOpts := logging.GetProviderRootTFLoggerOpts(ctx) - lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts.Copy()) return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) } @@ -167,16 +170,17 @@ func OmitLogWithFieldKeys(ctx context.Context, keys ...string) context.Context { // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted -// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed -// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted func OmitLogWithMessageRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context { lOpts := logging.GetProviderRootTFLoggerOpts(ctx) - lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts.Copy()) return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) } @@ -189,16 +193,17 @@ func OmitLogWithMessageRegexes(ctx context.Context, expressions ...*regexp.Regex // // Example: // -// configuration = `['foo', 'bar']` -// -// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted -// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed -// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// configuration = `['foo', 'bar']` // +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted func OmitLogWithMessageStrings(ctx context.Context, matchingStrings ...string) context.Context { lOpts := logging.GetProviderRootTFLoggerOpts(ctx) - lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts) + // Copy to prevent slice/map aliasing issues. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts.Copy()) return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) } @@ -212,16 +217,17 @@ func OmitLogWithMessageStrings(ctx context.Context, matchingStrings ...string) c // // Example: // -// configuration = `['foo', 'baz']` -// -// log1 = `{ msg = "...", fields = { 'foo': '***', 'bar': '...' }` -> masked value -// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> as-is value -// log3 = `{ msg = "...", fields = { 'baz': '***', 'boo': '...' }` -> masked value +// configuration = `['foo', 'baz']` // +// log1 = `{ msg = "...", fields = { 'foo': '***', 'bar': '...' }` -> masked value +// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> as-is value +// log3 = `{ msg = "...", fields = { 'baz': '***', 'boo': '...' }` -> masked value func MaskFieldValuesWithFieldKeys(ctx context.Context, keys ...string) context.Context { lOpts := logging.GetProviderRootTFLoggerOpts(ctx) - lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts.Copy()) return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) } @@ -237,16 +243,17 @@ func MaskFieldValuesWithFieldKeys(ctx context.Context, keys ...string) context.C // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value -// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value -// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value +// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value +// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value func MaskAllFieldValuesRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context { lOpts := logging.GetProviderRootTFLoggerOpts(ctx) - lOpts = logging.WithMaskAllFieldValuesRegexes(expressions...)(lOpts) + // Copy to prevent slice/map aliasing issues. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskAllFieldValuesRegexes(expressions...)(lOpts.Copy()) return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) } @@ -262,16 +269,17 @@ func MaskAllFieldValuesRegexes(ctx context.Context, expressions ...*regexp.Regex // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value -// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value -// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value +// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value +// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value func MaskAllFieldValuesStrings(ctx context.Context, matchingStrings ...string) context.Context { lOpts := logging.GetProviderRootTFLoggerOpts(ctx) - lOpts = logging.WithMaskAllFieldValuesStrings(matchingStrings...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskAllFieldValuesStrings(matchingStrings...)(lOpts.Copy()) return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) } @@ -285,16 +293,17 @@ func MaskAllFieldValuesStrings(ctx context.Context, matchingStrings ...string) c // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion -// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is -// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion func MaskMessageRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context { lOpts := logging.GetProviderRootTFLoggerOpts(ctx) - lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts.Copy()) return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) } @@ -308,16 +317,17 @@ func MaskMessageRegexes(ctx context.Context, expressions ...*regexp.Regexp) cont // // Example: // -// configuration = `['foo', 'bar']` -// -// log1 = `{ msg = "banana apple ***", fields = { 'k1': 'foo, bar, baz' }` -> masked portion -// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is -// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +// configuration = `['foo', 'bar']` // +// log1 = `{ msg = "banana apple ***", fields = { 'k1': 'foo, bar, baz' }` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion func MaskMessageStrings(ctx context.Context, matchingStrings ...string) context.Context { lOpts := logging.GetProviderRootTFLoggerOpts(ctx) - lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts) + // Copy to prevent slice/map aliasing issues. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts.Copy()) return logging.SetProviderRootTFLoggerOpts(ctx, lOpts) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go index b491958765..1f66e757ad 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tflog/subsystem.go @@ -84,7 +84,9 @@ func NewSubsystem(ctx context.Context, subsystem string, options ...logging.Opti func SubsystemSetField(ctx context.Context, subsystem, key string, value interface{}) context.Context { lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithField(key, value)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithField(key, value)(lOpts.Copy()) return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -223,16 +225,17 @@ func SubsystemError(ctx context.Context, subsystem, msg string, additionalFields // // Example: // -// configuration = `['foo', 'baz']` -// -// log1 = `{ msg = "...", fields = { 'foo': '...', 'bar': '...' }` -> omitted -// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> printed -// log3 = `{ msg = "...", fields = { 'baz': '...', 'boo': '...' }` -> omitted +// configuration = `['foo', 'baz']` // +// log1 = `{ msg = "...", fields = { 'foo': '...', 'bar': '...' }` -> omitted +// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> printed +// log3 = `{ msg = "...", fields = { 'baz': '...', 'boo': '...' }` -> omitted func SubsystemOmitLogWithFieldKeys(ctx context.Context, subsystem string, keys ...string) context.Context { lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts.Copy()) return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -246,16 +249,17 @@ func SubsystemOmitLogWithFieldKeys(ctx context.Context, subsystem string, keys . // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted -// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed -// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted func SubsystemOmitLogWithMessageRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context { lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts) + // Copy to prevent slice/map aliasing issues. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts.Copy()) return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -268,16 +272,17 @@ func SubsystemOmitLogWithMessageRegexes(ctx context.Context, subsystem string, e // // Example: // -// configuration = `['foo', 'bar']` -// -// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted -// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed -// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// configuration = `['foo', 'bar']` // +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted func SubsystemOmitLogWithMessageStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context { lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts.Copy()) return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -291,16 +296,17 @@ func SubsystemOmitLogWithMessageStrings(ctx context.Context, subsystem string, m // // Example: // -// configuration = `['foo', 'baz']` -// -// log1 = `{ msg = "...", fields = { 'foo': '***', 'bar': '...' }` -> masked value -// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> as-is value -// log3 = `{ msg = "...", fields = { 'baz': '***', 'boo': '...' }` -> masked value +// configuration = `['foo', 'baz']` // +// log1 = `{ msg = "...", fields = { 'foo': '***', 'bar': '...' }` -> masked value +// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> as-is value +// log3 = `{ msg = "...", fields = { 'baz': '***', 'boo': '...' }` -> masked value func SubsystemMaskFieldValuesWithFieldKeys(ctx context.Context, subsystem string, keys ...string) context.Context { lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts) + // Copy to prevent slice/map aliasing issues. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts.Copy()) return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -316,16 +322,17 @@ func SubsystemMaskFieldValuesWithFieldKeys(ctx context.Context, subsystem string // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value -// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value -// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value +// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value +// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value func SubsystemMaskAllFieldValuesRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context { lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithMaskAllFieldValuesRegexes(expressions...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskAllFieldValuesRegexes(expressions...)(lOpts.Copy()) return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -341,16 +348,17 @@ func SubsystemMaskAllFieldValuesRegexes(ctx context.Context, subsystem string, e // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value -// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value -// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value +// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value +// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value func SubsystemMaskAllFieldValuesStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context { lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithMaskAllFieldValuesStrings(matchingStrings...)(lOpts) + // Copy to prevent slice/map aliasing issues. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskAllFieldValuesStrings(matchingStrings...)(lOpts.Copy()) return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -364,16 +372,17 @@ func SubsystemMaskAllFieldValuesStrings(ctx context.Context, subsystem string, m // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion -// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is -// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion func SubsystemMaskMessageRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context { lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts.Copy()) return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -387,16 +396,17 @@ func SubsystemMaskMessageRegexes(ctx context.Context, subsystem string, expressi // // Example: // -// configuration = `['foo', 'bar']` -// -// log1 = `{ msg = "banana apple ***", fields = { 'k1': 'foo, bar, baz' }` -> masked portion -// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is -// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +// configuration = `['foo', 'bar']` // +// log1 = `{ msg = "banana apple ***", fields = { 'k1': 'foo, bar, baz' }` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion func SubsystemMaskMessageStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context { lOpts := logging.GetProviderSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts.Copy()) return logging.SetProviderSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go index 3a749834c7..2891ddf29c 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/sdk.go @@ -103,7 +103,9 @@ func NewRootProviderLogger(ctx context.Context, options ...logging.Option) conte func SetField(ctx context.Context, key string, value interface{}) context.Context { lOpts := logging.GetSDKRootTFLoggerOpts(ctx) - lOpts = logging.WithField(key, value)(lOpts) + // Copy to prevent slice/map aliasing issues. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithField(key, value)(lOpts.Copy()) return logging.SetSDKRootTFLoggerOpts(ctx, lOpts) } @@ -227,16 +229,17 @@ func Error(ctx context.Context, msg string, additionalFields ...map[string]inter // // Example: // -// configuration = `['foo', 'baz']` -// -// log1 = `{ msg = "...", fields = { 'foo': '...', 'bar': '...' }` -> omitted -// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> printed -// log3 = `{ msg = "...", fields = { 'baz': '...', 'boo': '...' }` -> omitted +// configuration = `['foo', 'baz']` // +// log1 = `{ msg = "...", fields = { 'foo': '...', 'bar': '...' }` -> omitted +// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> printed +// log3 = `{ msg = "...", fields = { 'baz': '...', 'boo': '...' }` -> omitted func OmitLogWithFieldKeys(ctx context.Context, keys ...string) context.Context { lOpts := logging.GetSDKRootTFLoggerOpts(ctx) - lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts.Copy()) return logging.SetSDKRootTFLoggerOpts(ctx, lOpts) } @@ -250,16 +253,17 @@ func OmitLogWithFieldKeys(ctx context.Context, keys ...string) context.Context { // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted -// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed -// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted func OmitLogWithMessageRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context { lOpts := logging.GetSDKRootTFLoggerOpts(ctx) - lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts.Copy()) return logging.SetSDKRootTFLoggerOpts(ctx, lOpts) } @@ -272,16 +276,17 @@ func OmitLogWithMessageRegexes(ctx context.Context, expressions ...*regexp.Regex // // Example: // -// configuration = `['foo', 'bar']` -// -// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted -// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed -// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// configuration = `['foo', 'bar']` // +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted func OmitLogWithMessageStrings(ctx context.Context, matchingStrings ...string) context.Context { lOpts := logging.GetSDKRootTFLoggerOpts(ctx) - lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts) + // Copy to prevent slice/map aliasing issues. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts.Copy()) return logging.SetSDKRootTFLoggerOpts(ctx, lOpts) } @@ -295,16 +300,17 @@ func OmitLogWithMessageStrings(ctx context.Context, matchingStrings ...string) c // // Example: // -// configuration = `['foo', 'baz']` -// -// log1 = `{ msg = "...", fields = { 'foo': '***', 'bar': '...' }` -> masked value -// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> as-is value -// log3 = `{ msg = "...", fields = { 'baz': '***', 'boo': '...' }` -> masked value +// configuration = `['foo', 'baz']` // +// log1 = `{ msg = "...", fields = { 'foo': '***', 'bar': '...' }` -> masked value +// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> as-is value +// log3 = `{ msg = "...", fields = { 'baz': '***', 'boo': '...' }` -> masked value func MaskFieldValuesWithFieldKeys(ctx context.Context, keys ...string) context.Context { lOpts := logging.GetSDKRootTFLoggerOpts(ctx) - lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts.Copy()) return logging.SetSDKRootTFLoggerOpts(ctx, lOpts) } @@ -320,16 +326,17 @@ func MaskFieldValuesWithFieldKeys(ctx context.Context, keys ...string) context.C // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value -// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value -// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value +// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value +// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value func MaskAllFieldValuesRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context { lOpts := logging.GetSDKRootTFLoggerOpts(ctx) - lOpts = logging.WithMaskAllFieldValuesRegexes(expressions...)(lOpts) + // Copy to prevent slice/map aliasing issues. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskAllFieldValuesRegexes(expressions...)(lOpts.Copy()) return logging.SetSDKRootTFLoggerOpts(ctx, lOpts) } @@ -345,16 +352,17 @@ func MaskAllFieldValuesRegexes(ctx context.Context, expressions ...*regexp.Regex // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value -// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value -// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value +// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value +// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value func MaskAllFieldValuesStrings(ctx context.Context, matchingStrings ...string) context.Context { lOpts := logging.GetSDKRootTFLoggerOpts(ctx) - lOpts = logging.WithMaskAllFieldValuesStrings(matchingStrings...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskAllFieldValuesStrings(matchingStrings...)(lOpts.Copy()) return logging.SetSDKRootTFLoggerOpts(ctx, lOpts) } @@ -368,16 +376,17 @@ func MaskAllFieldValuesStrings(ctx context.Context, matchingStrings ...string) c // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion -// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is -// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion func MaskMessageRegexes(ctx context.Context, expressions ...*regexp.Regexp) context.Context { lOpts := logging.GetSDKRootTFLoggerOpts(ctx) - lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts.Copy()) return logging.SetSDKRootTFLoggerOpts(ctx, lOpts) } @@ -391,16 +400,17 @@ func MaskMessageRegexes(ctx context.Context, expressions ...*regexp.Regexp) cont // // Example: // -// configuration = `['foo', 'bar']` -// -// log1 = `{ msg = "banana apple ***", fields = { 'k1': 'foo, bar, baz' }` -> masked portion -// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is -// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +// configuration = `['foo', 'bar']` // +// log1 = `{ msg = "banana apple ***", fields = { 'k1': 'foo, bar, baz' }` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion func MaskMessageStrings(ctx context.Context, matchingStrings ...string) context.Context { lOpts := logging.GetSDKRootTFLoggerOpts(ctx) - lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts) + // Copy to prevent slice/map aliasing issues. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts.Copy()) return logging.SetSDKRootTFLoggerOpts(ctx, lOpts) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go index bfad8d9264..4d2a1a1a54 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go +++ b/vendor/github.com/hashicorp/terraform-plugin-log/tfsdklog/subsystem.go @@ -84,7 +84,9 @@ func NewSubsystem(ctx context.Context, subsystem string, options ...logging.Opti func SubsystemSetField(ctx context.Context, subsystem, key string, value interface{}) context.Context { lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithField(key, value)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithField(key, value)(lOpts.Copy()) return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -223,16 +225,17 @@ func SubsystemError(ctx context.Context, subsystem, msg string, additionalFields // // Example: // -// configuration = `['foo', 'baz']` -// -// log1 = `{ msg = "...", fields = { 'foo': '...', 'bar': '...' }` -> omitted -// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> printed -// log3 = `{ msg = "...", fields = { 'baz': '...', 'boo': '...' }` -> omitted +// configuration = `['foo', 'baz']` // +// log1 = `{ msg = "...", fields = { 'foo': '...', 'bar': '...' }` -> omitted +// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> printed +// log3 = `{ msg = "...", fields = { 'baz': '...', 'boo': '...' }` -> omitted func SubsystemOmitLogWithFieldKeys(ctx context.Context, subsystem string, keys ...string) context.Context { lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithFieldKeys(keys...)(lOpts.Copy()) return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -246,16 +249,17 @@ func SubsystemOmitLogWithFieldKeys(ctx context.Context, subsystem string, keys . // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted -// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed -// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted func SubsystemOmitLogWithMessageRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context { lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts) + // Copy to prevent slice/map aliasing issues. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithMessageRegexes(expressions...)(lOpts.Copy()) return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -268,16 +272,17 @@ func SubsystemOmitLogWithMessageRegexes(ctx context.Context, subsystem string, e // // Example: // -// configuration = `['foo', 'bar']` -// -// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted -// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed -// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted +// configuration = `['foo', 'bar']` // +// log1 = `{ msg = "banana apple foo", fields = {...}` -> omitted +// log2 = `{ msg = "pineapple mango", fields = {...}` -> printed +// log3 = `{ msg = "pineapple mango bar", fields = {...}` -> omitted func SubsystemOmitLogWithMessageStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context { lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithOmitLogWithMessageStrings(matchingStrings...)(lOpts.Copy()) return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -291,16 +296,17 @@ func SubsystemOmitLogWithMessageStrings(ctx context.Context, subsystem string, m // // Example: // -// configuration = `['foo', 'baz']` -// -// log1 = `{ msg = "...", fields = { 'foo': '***', 'bar': '...' }` -> masked value -// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> as-is value -// log3 = `{ msg = "...", fields = { 'baz': '***', 'boo': '...' }` -> masked value +// configuration = `['foo', 'baz']` // +// log1 = `{ msg = "...", fields = { 'foo': '***', 'bar': '...' }` -> masked value +// log2 = `{ msg = "...", fields = { 'bar': '...' }` -> as-is value +// log3 = `{ msg = "...", fields = { 'baz': '***', 'boo': '...' }` -> masked value func SubsystemMaskFieldValuesWithFieldKeys(ctx context.Context, subsystem string, keys ...string) context.Context { lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts) + // Copy to prevent slice/map aliasing issues. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskFieldValuesWithFieldKeys(keys...)(lOpts.Copy()) return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -314,16 +320,17 @@ func SubsystemMaskFieldValuesWithFieldKeys(ctx context.Context, subsystem string // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value -// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value -// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value +// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value +// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value func SubsystemMaskAllFieldValuesRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context { lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithMaskAllFieldValuesRegexes(expressions...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskAllFieldValuesRegexes(expressions...)(lOpts.Copy()) return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -337,16 +344,17 @@ func SubsystemMaskAllFieldValuesRegexes(ctx context.Context, subsystem string, e // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value -// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value -// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "...", fields = { 'k1': '***', 'k2': '***', 'k3': 'baz' }` -> masked value +// log2 = `{ msg = "...", fields = { 'k1': 'boo', 'k2': 'far', 'k3': 'baz' }` -> as-is value +// log2 = `{ msg = "...", fields = { 'k1': '*** *** baz' }` -> masked value func SubsystemMaskAllFieldValuesStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context { lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithMaskAllFieldValuesStrings(matchingStrings...)(lOpts) + // Copy to prevent slice/map aliasing issues. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskAllFieldValuesStrings(matchingStrings...)(lOpts.Copy()) return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -360,16 +368,17 @@ func SubsystemMaskAllFieldValuesStrings(ctx context.Context, subsystem string, m // // Example: // -// configuration = `[regexp.MustCompile("(foo|bar)")]` -// -// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion -// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is -// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +// configuration = `[regexp.MustCompile("(foo|bar)")]` // +// log1 = `{ msg = "banana apple ***", fields = {...}` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion func SubsystemMaskMessageRegexes(ctx context.Context, subsystem string, expressions ...*regexp.Regexp) context.Context { lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskMessageRegexes(expressions...)(lOpts.Copy()) return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } @@ -383,16 +392,17 @@ func SubsystemMaskMessageRegexes(ctx context.Context, subsystem string, expressi // // Example: // -// configuration = `['foo', 'bar']` -// -// log1 = `{ msg = "banana apple ***", fields = { 'k1': 'foo, bar, baz' }` -> masked portion -// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is -// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion +// configuration = `['foo', 'bar']` // +// log1 = `{ msg = "banana apple ***", fields = { 'k1': 'foo, bar, baz' }` -> masked portion +// log2 = `{ msg = "pineapple mango", fields = {...}` -> as-is +// log3 = `{ msg = "pineapple mango ***", fields = {...}` -> masked portion func SubsystemMaskMessageStrings(ctx context.Context, subsystem string, matchingStrings ...string) context.Context { lOpts := logging.GetSDKSubsystemTFLoggerOpts(ctx, subsystem) - lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts) + // Copy to prevent slice/map aliasing issues. + // Reference: https://github.com/hashicorp/terraform-plugin-log/issues/131 + lOpts = logging.WithMaskMessageStrings(matchingStrings...)(lOpts.Copy()) return logging.SetSDKSubsystemTFLoggerOpts(ctx, subsystem, lOpts) } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go index 263a1ff576..cf8edb36fc 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest/random.go @@ -10,11 +10,10 @@ import ( "fmt" "math/big" "math/rand" - "net" + "net/netip" "strings" "time" - "github.com/apparentlymart/go-cidr/cidr" "golang.org/x/crypto/ssh" ) @@ -56,8 +55,16 @@ func RandStringFromCharSet(strlen int, charSet string) string { return string(result) } -// RandSSHKeyPair generates a public and private SSH key pair. The public key is -// returned in OpenSSH format, and the private key is PEM encoded. +// RandSSHKeyPair generates a random public and private SSH key pair. 
+// +// The public key is returned in OpenSSH authorized key format, for example: +// +// ssh-rsa XXX comment +// +// The private key is RSA algorithm, 1024 bits, PEM encoded, and has no +// passphrase. Testing with different or stricter security requirements should +// use the standard library [crypto] and [golang.org/x/crypto/ssh] packages +// directly. func RandSSHKeyPair(comment string) (string, string, error) { privateKey, privateKeyPEM, err := genPrivateKey() if err != nil { @@ -74,6 +81,16 @@ func RandSSHKeyPair(comment string) (string, string, error) { // RandTLSCert generates a self-signed TLS certificate with a newly created // private key, and returns both the cert and the private key PEM encoded. +// +// The private key uses RSA algorithm, 1024 bits, and has no passphrase. +// +// The certificate expires in 24 hours, has a random serial number, and is +// set for Encipherment, Digital Signature, and Server Auth key usage. +// Only the organization name of the subject is configurable. +// +// Testing with different or stricter security requirements should +// use the standard library [crypto] and [golang.org/x/crypto] packages +// directly. func RandTLSCert(orgName string) (string, string, error) { template := &x509.Certificate{ SerialNumber: big.NewInt(int64(RandInt())), @@ -108,34 +125,57 @@ func RandTLSCert(orgName string) (string, string, error) { // RandIpAddress returns a random IP address in the specified CIDR block. // The prefix length must be less than 31. func RandIpAddress(s string) (string, error) { - _, network, err := net.ParseCIDR(s) + prefix, err := netip.ParsePrefix(s) + if err != nil { return "", err } - firstIp, lastIp := cidr.AddressRange(network) - first := &big.Int{} - first.SetBytes([]byte(firstIp)) - last := &big.Int{} - last.SetBytes([]byte(lastIp)) - r := &big.Int{} - r.Sub(last, first) - if bitLen := r.BitLen(); bitLen > 31 { - return "", fmt.Errorf("CIDR range is too large: %d", bitLen) + if prefix.IsSingleIP() { + return prefix.Addr().String(), nil } - max := int(r.Int64()) - if max == 0 { - // panic: invalid argument to Int31n - return firstIp.String(), nil + prefixSizeExponent := uint(prefix.Addr().BitLen() - prefix.Bits()) + + if prefix.Addr().Is4() && prefixSizeExponent > 31 { + return "", fmt.Errorf("CIDR range is too large: %d", prefixSizeExponent) } - host, err := cidr.Host(network, RandIntRange(0, max)) - if err != nil { - return "", err + // Prevent panics with rand.Int63n(). + if prefix.Addr().Is6() && prefixSizeExponent > 63 { + return "", fmt.Errorf("CIDR range is too large: %d", prefixSizeExponent) + } + + // Calculate max random integer based on the prefix. + // Bit shift 1<.%s.%s", r.Type, r.Name) - } -} - -// resourceInstance is an address for a specific instance of a resource. -// When a resource is defined in configuration with "count" or "for_each" it -// produces zero or more instances, which can be addressed using this type. -type resourceInstance struct { - Resource resource - Key instanceKey -} - -func (r resourceInstance) ContainingResource() resource { - return r.Resource -} - -func (r resourceInstance) String() string { - if r.Key == NoKey { - return r.Resource.String() - } - return r.Resource.String() + r.Key.String() -} - -// absResource is an absolute address for a resource under a given module path. -type absResource struct { - Module ModuleInstance - Resource resource -} - -// Resource returns the address of a particular resource within the receiver. 
-func (m ModuleInstance) Resource(mode resourceMode, typeName string, name string) absResource { - return absResource{ - Module: m, - Resource: resource{ - Mode: mode, - Type: typeName, - Name: name, - }, - } -} - -func (r absResource) String() string { - if len(r.Module) == 0 { - return r.Resource.String() - } - return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) -} - -// absResourceInstance is an absolute address for a resource instance under a -// given module path. -type absResourceInstance struct { - Module ModuleInstance - Resource resourceInstance -} - -// ResourceInstance returns the address of a particular resource instance within the receiver. -func (m ModuleInstance) ResourceInstance(mode resourceMode, typeName string, name string, key instanceKey) absResourceInstance { - return absResourceInstance{ - Module: m, - Resource: resourceInstance{ - Resource: resource{ - Mode: mode, - Type: typeName, - Name: name, - }, - Key: key, - }, - } -} - -// ContainingResource returns the address of the resource that contains the -// receving resource instance. In other words, it discards the key portion -// of the address to produce an absResource value. -func (r absResourceInstance) ContainingResource() absResource { - return absResource{ - Module: r.Module, - Resource: r.Resource.ContainingResource(), - } -} - -func (r absResourceInstance) String() string { - if len(r.Module) == 0 { - return r.Resource.String() - } - return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) -} - -// resourceMode defines which lifecycle applies to a given resource. Each -// resource lifecycle has a slightly different address format. -type resourceMode rune - -//go:generate go run golang.org/x/tools/cmd/stringer -type resourceMode - -const ( - // InvalidResourceMode is the zero value of ResourceMode and is not - // a valid resource mode. - InvalidResourceMode resourceMode = 0 - - // ManagedResourceMode indicates a managed resource, as defined by - // "resource" blocks in configuration. - ManagedResourceMode resourceMode = 'M' - - // DataResourceMode indicates a data resource, as defined by - // "data" blocks in configuration. - DataResourceMode resourceMode = 'D' -) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resourcemode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resourcemode_string.go deleted file mode 100644 index 3b2e65c3ab..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resourcemode_string.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by "stringer -type resourceMode"; DO NOT EDIT. - -package addrs - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[InvalidResourceMode-0] - _ = x[ManagedResourceMode-77] - _ = x[DataResourceMode-68] -} - -const ( - _resourceMode_name_0 = "InvalidResourceMode" - _resourceMode_name_1 = "DataResourceMode" - _resourceMode_name_2 = "ManagedResourceMode" -) - -func (i resourceMode) String() string { - switch { - case i == 0: - return _resourceMode_name_0 - case i == 68: - return _resourceMode_name_1 - case i == 77: - return _resourceMode_name_2 - default: - return "resourceMode(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go index 6cddb9c63b..c41751b71c 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go @@ -108,7 +108,10 @@ type NestedBlock struct { // blocks. type NestingMode int -//go:generate go run golang.org/x/tools/cmd/stringer -type=NestingMode +// This code was previously generated with a go:generate directive calling: +// go run golang.org/x/tools/cmd/stringer -type=NestingMode +// However, it is now considered frozen and the tooling dependency has been +// removed. The String method can be manually updated if necessary. const ( nestingModeInvalid NestingMode = iota diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go index 159399350f..dba079eb26 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugintest/working_dir.go @@ -33,9 +33,6 @@ type WorkingDir struct { // was stored; empty until SetConfig is called. configFilename string - // baseArgs is arguments that should be appended to all commands - baseArgs []string - // tf is the instance of tfexec.Terraform used for running Terraform commands tf *tfexec.Terraform diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go index a36da370f8..21fb177262 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go @@ -7,7 +7,10 @@ type Diagnostic interface { type Severity rune -//go:generate go run golang.org/x/tools/cmd/stringer -type=Severity +// This code was previously generated with a go:generate directive calling: +// go run golang.org/x/tools/cmd/stringer -type=Severity +// However, it is now considered frozen and the tooling dependency has been +// removed. The String method can be manually updated if necessary. 
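Since the stringer output for these enums is now frozen, keeping a String method current is a manual task. A rough hand-written equivalent for the Severity type, assuming the conventional Warning = 'W' constant alongside Error (a sketch, not the vendored implementation):

    package tfdiags

    import "fmt"

    // String returns the constant name for a Severity, as the generated
    // stringer used to do for the named constants.
    func (s Severity) String() string {
    	switch s {
    	case Error:
    		return "Error"
    	case Warning:
    		return "Warning"
    	default:
    		return fmt.Sprintf("Severity(%c)", rune(s))
    	}
    }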
const ( Error Severity = 'E' diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go index 5d313e79dc..2e04b72a62 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go @@ -26,24 +26,6 @@ func (diags Diagnostics) HasErrors() bool { return false } -// ForRPC returns a version of the receiver that has been simplified so that -// it is friendly to RPC protocols. -// -// Currently this means that it can be serialized with encoding/gob and -// subsequently re-inflated. It may later grow to include other serialization -// formats. -// -// Note that this loses information about the original objects used to -// construct the diagnostics, so e.g. the errwrap API will not work as -// expected on an error-wrapped Diagnostics that came from ForRPC. -func (diags Diagnostics) ForRPC() Diagnostics { - ret := make(Diagnostics, len(diags)) - for i := range diags { - ret[i] = makeRPCFriendlyDiag(diags[i]) - } - return ret -} - // Err flattens a diagnostics list into a single Go error, or to nil // if the diagnostics list does not include any error-level diagnostics. // diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/rpc_friendly.go deleted file mode 100644 index 1b78887eb9..0000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/rpc_friendly.go +++ /dev/null @@ -1,41 +0,0 @@ -package tfdiags - -import ( - "encoding/gob" -) - -type rpcFriendlyDiag struct { - Severity_ Severity - Summary_ string - Detail_ string -} - -// rpcFriendlyDiag transforms a given diagnostic so that is more friendly to -// RPC. -// -// In particular, it currently returns an object that can be serialized and -// later re-inflated using gob. This definition may grow to include other -// serializations later. -func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic { - desc := diag.Description() - return &rpcFriendlyDiag{ - Severity_: diag.Severity(), - Summary_: desc.Summary, - Detail_: desc.Detail, - } -} - -func (d *rpcFriendlyDiag) Severity() Severity { - return d.Severity_ -} - -func (d *rpcFriendlyDiag) Description() Description { - return Description{ - Summary: d.Summary_, - Detail: d.Detail_, - } -} - -func init() { - gob.Register((*rpcFriendlyDiag)(nil)) -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go index b01e5a48df..c0d1288f00 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go @@ -1,6 +1,9 @@ package terraform -//go:generate go run golang.org/x/tools/cmd/stringer -type=instanceType instancetype.go +// This code was previously generated with a go:generate directive calling: +// go run golang.org/x/tools/cmd/stringer -type=instanceType instancetype.go +// However, it is now considered frozen and the tooling dependency has been +// removed. The String method can be manually updated if necessary. 
// instanceType is an enum of the various types of instances store in the State type instanceType int diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go index c83643a65c..d2a806c0ba 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go @@ -1,6 +1,9 @@ package terraform -//go:generate go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go +// This code was previously generated with a go:generate directive calling: +// go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go +// However, it is now considered frozen and the tooling dependency has been +// removed. The String method can be manually updated if necessary. // ResourceMode is deprecated, use addrs.ResourceMode instead. // It has been preserved for backwards compatibility. diff --git a/vendor/github.com/hashicorp/terraform-registry-address/LICENSE b/vendor/github.com/hashicorp/terraform-registry-address/LICENSE index c33dcc7c92..84cd064397 100644 --- a/vendor/github.com/hashicorp/terraform-registry-address/LICENSE +++ b/vendor/github.com/hashicorp/terraform-registry-address/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2021 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go index 541b9a4901..bc79df8cf3 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go @@ -43,7 +43,7 @@ func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion { out = out.WithoutOptionalAttributesDeep() if !isKnown { - return cty.UnknownVal(dynamicReplace(in.Type(), out)), nil + return prepareUnknownResult(in.Range(), dynamicReplace(in.Type(), out)), nil } if isNull { @@ -199,3 +199,64 @@ func retConversion(conv conversion) Conversion { return conv(in, cty.Path(nil)) } } + +// prepareUnknownResult can apply value refinements to a returned unknown value +// in certain cases where characteristics of the source value or type can +// transfer into range constraints on the result value. +func prepareUnknownResult(sourceRange cty.ValueRange, targetTy cty.Type) cty.Value { + sourceTy := sourceRange.TypeConstraint() + + ret := cty.UnknownVal(targetTy) + if sourceRange.DefinitelyNotNull() { + ret = ret.RefineNotNull() + } + + switch { + case sourceTy.IsObjectType() && targetTy.IsMapType(): + // A map built from an object type always has the same number of + // elements as the source type has attributes. + return ret.Refine().CollectionLength(len(sourceTy.AttributeTypes())).NewValue() + case sourceTy.IsTupleType() && targetTy.IsListType(): + // A list built from a typle type always has the same number of + // elements as the source type has elements. + return ret.Refine().CollectionLength(sourceTy.Length()).NewValue() + case sourceTy.IsTupleType() && targetTy.IsSetType(): + // When building a set from a tuple type we can't exactly constrain + // the length because some elements might coalesce, but we can + // guarantee an upper limit. We can also guarantee at least one + // element if the tuple isn't empty. 
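The refinements applied by prepareUnknownResult become visible to callers through the converted value's range. A small illustration of the object-to-map case using the public convert API (the attribute names are arbitrary):

    package main

    import (
    	"fmt"

    	"github.com/zclconf/go-cty/cty"
    	"github.com/zclconf/go-cty/cty/convert"
    )

    func main() {
    	// Converting an unknown two-attribute object to a map of string keeps
    	// the value unknown but refines its length to exactly two elements.
    	objTy := cty.Object(map[string]cty.Type{
    		"a": cty.String,
    		"b": cty.String,
    	})
    	got, err := convert.Convert(cty.UnknownVal(objTy), cty.Map(cty.String))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(got.IsKnown())                  // false
    	fmt.Println(got.Range().LengthLowerBound()) // 2
    	fmt.Println(got.Range().LengthUpperBound()) // 2
    }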
+ switch l := sourceTy.Length(); l { + case 0, 1: + return ret.Refine().CollectionLength(l).NewValue() + default: + return ret.Refine(). + CollectionLengthLowerBound(1). + CollectionLengthUpperBound(sourceTy.Length()). + NewValue() + } + case sourceTy.IsCollectionType() && targetTy.IsCollectionType(): + // NOTE: We only reach this function if there is an available + // conversion between the source and target type, so we don't + // need to repeat element type compatibility checks and such here. + // + // If the source value already has a refined length then we'll + // transfer those refinements to the result, because conversion + // does not change length (aside from set element coalescing). + b := ret.Refine() + if targetTy.IsSetType() { + if sourceRange.LengthLowerBound() > 0 { + // If the source has at least one element then the result + // must always have at least one too, because value coalescing + // cannot totally empty the set. + b = b.CollectionLengthLowerBound(1) + } + } else { + b = b.CollectionLengthLowerBound(sourceRange.LengthLowerBound()) + } + b = b.CollectionLengthUpperBound(sourceRange.LengthUpperBound()) + return b.NewValue() + default: + return ret + } + +} diff --git a/vendor/github.com/zclconf/go-cty/cty/ctystrings/doc.go b/vendor/github.com/zclconf/go-cty/cty/ctystrings/doc.go new file mode 100644 index 0000000000..0ea7f984e4 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/ctystrings/doc.go @@ -0,0 +1,26 @@ +// Package ctystrings is a collection of string manipulation utilities which +// intend to help application developers implement string-manipulation +// functionality in a way that respects the cty model of strings, even when +// they are working in the realm of Go strings. +// +// cty strings are, internally, NFC-normalized as defined in Unicode Standard +// Annex #15 and encoded as UTF-8. +// +// When working with [cty.Value] of string type cty manages this +// automatically as an implementation detail, but when applications call +// [Value.AsString] they will receive a value that has been subjected to that +// normalization, and so may need to take that normalization into account when +// manipulating the resulting string or comparing it with other Go strings +// that did not originate in a [cty.Value]. +// +// Although the core representation of [cty.String] only considers whole +// strings, it's also conventional in other locations such as the standard +// library functions to consider strings as being sequences of grapheme +// clusters as defined by Unicode Standard Annex #29, which adds further +// rules about combining multiple consecutive codepoints together into a +// single user-percieved character. Functions that work with substrings should +// always use grapheme clusters as their smallest unit of splitting strings, +// and never break strings in the middle of a grapheme cluster. The functions +// in this package respect that convention unless otherwise stated in their +// documentation. +package ctystrings diff --git a/vendor/github.com/zclconf/go-cty/cty/ctystrings/normalize.go b/vendor/github.com/zclconf/go-cty/cty/ctystrings/normalize.go new file mode 100644 index 0000000000..9b3bce9031 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/ctystrings/normalize.go @@ -0,0 +1,14 @@ +package ctystrings + +import ( + "golang.org/x/text/unicode/norm" +) + +// Normalize applies NFC normalization to the given string, returning the +// transformed string. 
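As the package documentation above describes, NFC folds combining sequences into their precomposed forms where possible; a brief usage sketch of the exported helper:

    package main

    import (
    	"fmt"

    	"github.com/zclconf/go-cty/cty/ctystrings"
    )

    func main() {
    	// "é" written as 'e' plus a combining acute accent (U+0301) normalizes
    	// to the single precomposed codepoint U+00E9 under NFC.
    	decomposed := "cafe\u0301"
    	fmt.Println(ctystrings.Normalize(decomposed) == "caf\u00e9") // true
    }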
+// +// This function achieves the same effect as wrapping a string in a value +// using [cty.StringVal] and then unwrapping it again using [Value.AsString]. +func Normalize(str string) string { + return norm.NFC.String(str) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/ctystrings/prefix.go b/vendor/github.com/zclconf/go-cty/cty/ctystrings/prefix.go new file mode 100644 index 0000000000..1d9f5c5a1f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/ctystrings/prefix.go @@ -0,0 +1,139 @@ +package ctystrings + +import ( + "fmt" + "unicode/utf8" + + "github.com/apparentlymart/go-textseg/v13/textseg" + "golang.org/x/text/unicode/norm" +) + +// SafeKnownPrefix takes a string intended to represent a known prefix of +// another string and modifies it so that it would be safe to use with +// byte-based prefix matching against another NFC-normalized string. It +// also takes into account grapheme cluster boundaries and trims off any +// suffix that could potentially be an incomplete grapheme cluster. +// +// Specifically, SafeKnownPrefix first applies NFC normalization to the prefix +// and then trims off one or more characters from the end of the string which +// could potentially be transformed into a different character if another +// string were appended to it. For example, a trailing latin letter will +// typically be trimmed because appending a combining diacritic mark would +// transform it into a different character. +// +// This transformation is important whenever the remainder of the string is +// arbitrary user input not directly controlled by the application. If an +// application can guarantee that the remainder of the string will not begin +// with combining marks then it is safe to instead just normalize the prefix +// string with [Normalize]. +// +// Note that this function only takes into account normalization boundaries +// and does _not_ take into account grapheme cluster boundaries as defined +// by Unicode Standard Annex #29. +func SafeKnownPrefix(prefix string) string { + prefix = Normalize(prefix) + + // Our starting approach here is essentially what a streaming parser would + // do when consuming a Unicode string in chunks and needing to determine + // what prefix of the current buffer is safe to process without waiting for + // more information, which is described in TR15 section 13.1 + // "Buffering with Unicode Normalization": + // https://unicode.org/reports/tr15/#Buffering_with_Unicode_Normalization + // + // The general idea here is to find the last character in the string that + // could potentially start a sequence of codepoints that would combine + // together, and then truncate the string to exclude that character and + // everything after it. + + form := norm.NFC + lastBoundary := form.LastBoundary([]byte(prefix)) + if lastBoundary != -1 && lastBoundary != len(prefix) { + prefix = prefix[:lastBoundary] + // If we get here then we've already shortened the prefix and so + // further analysis below is unnecessary because it would be relying + // on an incomplete prefix anyway. + return prefix + } + + // Now we'll use the textseg package's grapheme cluster scanner to scan + // as far through the string as we can without the scanner telling us + // that it would need more bytes to decide. + // + // This step is conservative because the grapheme cluster rules are not + // designed with prefix-matching in mind. 
In the base case we'll just + // always discard the last grapheme cluster, although we do have some + // special cases for trailing codepoints that can't possibly combine with + // subsequent codepoints to form a single grapheme cluster and which seem + // likely to arise often in practical use. + remain := []byte(prefix) + prevBoundary := 0 + thisBoundary := 0 + for len(remain) > 0 { + advance, _, err := textseg.ScanGraphemeClusters(remain, false) + if err != nil { + // ScanGraphemeClusters should never return an error because + // any sequence of valid UTF-8 encodings is valid input. + panic(fmt.Sprintf("textseg.ScanGraphemeClusters returned error: %s", err)) + } + if advance == 0 { + // If we have at least one byte remaining but the scanner cannot + // advance then that means the remainder might be an incomplete + // grapheme cluster and so we need to stop here, discarding the + // rest of the input. However, we do now know that we can safely + // include what we found on the previous iteration of this loop. + prevBoundary = thisBoundary + break + } + prevBoundary = thisBoundary + thisBoundary += advance + remain = remain[advance:] + } + + // This is our heuristic for detecting cases where we can be sure that + // the above algorithm was too conservative because the last segment + // we found is definitely not subject to the grapheme cluster "do not split" + // rules. + suspect := prefix[prevBoundary:thisBoundary] + if sequenceMustEndGraphemeCluster(suspect) { + prevBoundary = thisBoundary + } + + return prefix[:prevBoundary] +} + +// sequenceMustEndGraphemeCluster is a heuristic we use to avoid discarding +// the final grapheme cluster of a prefix in SafeKnownPrefix by recognizing +// that a particular sequence is one known to not be subject to any of +// the UAX29 "do not break" rules. +// +// If this function returns true then it is safe to include the given byte +// sequence at the end of a safe prefix. Otherwise we don't know whether or +// not it is safe. +func sequenceMustEndGraphemeCluster(s string) bool { + // For now we're only considering sequences that represent a single + // codepoint. We'll assume that any sequence of two or more codepoints + // that could be a grapheme cluster might be extendable. + if utf8.RuneCountInString(s) != 1 { + return false + } + + r, _ := utf8.DecodeRuneInString(s) + + // Our initial ruleset is focused on characters that are commonly used + // as delimiters in text intended for both human and machine use, such + // as JSON documents. + // + // We don't include any letters or digits of any script here intentionally + // because those are the ones most likely to be subject to combining rules + // in either current or future Unicode specifications. + // + // We can safely grow this set over time, but we should be very careful + // about shrinking it because it could cause value refinements to loosen + // and thus cause results that were once known to become unknown. + switch r { + case '-', '_', ':', ';', '/', '\\', ',', '.', '(', ')', '{', '}', '[', ']', '|', '?', '!', '~', ' ', '\t', '@', '#', '$', '%', '^', '&', '*', '+', '"', '\'': + return true + default: + return false + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/function.go b/vendor/github.com/zclconf/go-cty/cty/function/function.go index c4d99f6c91..6fc968282e 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/function.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/function.go @@ -39,6 +39,19 @@ type Spec struct { // depending on its arguments. 
Type TypeFunc + // RefineResult is an optional callback for describing additional + // refinements for the result value beyond what can be described using + // a type constraint. + // + // A refinement callback should always return the same builder it was + // given, typically after modifying it using the methods of + // [cty.RefinementBuilder]. + // + // Any refinements described by this callback must hold for the entire + // range of results from the function. For refinements that only apply + // to certain results, use direct refinement within [Impl] instead. + RefineResult func(*cty.RefinementBuilder) *cty.RefinementBuilder + // Impl is the ImplFunc that implements the function's behavior. // // Functions are expected to behave as pure functions, and not create @@ -109,20 +122,13 @@ func (f Function) ReturnType(argTypes []cty.Type) (cty.Type, error) { return f.ReturnTypeForValues(vals) } -// ReturnTypeForValues is similar to ReturnType but can be used if the caller -// already knows the values of some or all of the arguments, in which case -// the function may be able to determine a more definite result if its -// return type depends on the argument *values*. -// -// For any arguments whose values are not known, pass an Unknown value of -// the appropriate type. -func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) { +func (f Function) returnTypeForValues(args []cty.Value) (ty cty.Type, dynTypedArgs bool, err error) { var posArgs []cty.Value var varArgs []cty.Value if f.spec.VarParam == nil { if len(args) != len(f.spec.Params) { - return cty.Type{}, fmt.Errorf( + return cty.Type{}, false, fmt.Errorf( "wrong number of arguments (%d required; %d given)", len(f.spec.Params), len(args), ) @@ -132,7 +138,7 @@ func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) varArgs = nil } else { if len(args) < len(f.spec.Params) { - return cty.Type{}, fmt.Errorf( + return cty.Type{}, false, fmt.Errorf( "wrong number of arguments (at least %d required; %d given)", len(f.spec.Params), len(args), ) @@ -161,7 +167,7 @@ func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) } if val.IsNull() && !spec.AllowNull { - return cty.Type{}, NewArgErrorf(i, "argument must not be null") + return cty.Type{}, false, NewArgErrorf(i, "argument must not be null") } // AllowUnknown is ignored for type-checking, since we expect to be @@ -171,13 +177,13 @@ func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) if val.Type() == cty.DynamicPseudoType { if !spec.AllowDynamicType { - return cty.DynamicPseudoType, nil + return cty.DynamicPseudoType, true, nil } } else if errs := val.Type().TestConformance(spec.Type); errs != nil { // For now we'll just return the first error in the set, since // we don't have a good way to return the whole list here. // Would be good to do something better at some point... 
- return cty.Type{}, NewArgError(i, errs[0]) + return cty.Type{}, false, NewArgError(i, errs[0]) } } @@ -196,18 +202,18 @@ func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) } if val.IsNull() && !spec.AllowNull { - return cty.Type{}, NewArgErrorf(realI, "argument must not be null") + return cty.Type{}, false, NewArgErrorf(realI, "argument must not be null") } if val.Type() == cty.DynamicPseudoType { if !spec.AllowDynamicType { - return cty.DynamicPseudoType, nil + return cty.DynamicPseudoType, true, nil } } else if errs := val.Type().TestConformance(spec.Type); errs != nil { // For now we'll just return the first error in the set, since // we don't have a good way to return the whole list here. // Would be good to do something better at some point... - return cty.Type{}, NewArgError(i, errs[0]) + return cty.Type{}, false, NewArgError(i, errs[0]) } } } @@ -221,17 +227,53 @@ func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) } }() - return f.spec.Type(args) + ty, err = f.spec.Type(args) + return ty, false, err +} + +// ReturnTypeForValues is similar to ReturnType but can be used if the caller +// already knows the values of some or all of the arguments, in which case +// the function may be able to determine a more definite result if its +// return type depends on the argument *values*. +// +// For any arguments whose values are not known, pass an Unknown value of +// the appropriate type. +func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) { + ty, _, err = f.returnTypeForValues(args) + return ty, err } // Call actually calls the function with the given arguments, which must // conform to the function's parameter specification or an error will be // returned. func (f Function) Call(args []cty.Value) (val cty.Value, err error) { - expectedType, err := f.ReturnTypeForValues(args) + expectedType, dynTypeArgs, err := f.returnTypeForValues(args) if err != nil { return cty.NilVal, err } + if dynTypeArgs { + // returnTypeForValues sets this if any argument was inexactly typed + // and the corresponding parameter did not indicate it could deal with + // that. In that case we also avoid calling the implementation function + // because it will also typically not be ready to deal with that case. + return cty.UnknownVal(expectedType), nil + } + + if refineResult := f.spec.RefineResult; refineResult != nil { + // If this function has a refinement callback then we'll refine + // our result value in the same way regardless of how we return. + // It's the function author's responsibility to ensure that the + // refinements they specify are valid for the full range of possible + // return values from the function. If not, this will panic when + // detecting an inconsistency. 
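The refinement hook wired in here is what lets a function author declare result properties once in the Spec rather than in every Impl branch. A sketch of a caller-defined function using it (the names are illustrative, not part of the vendored stdlib):

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/zclconf/go-cty/cty"
    	"github.com/zclconf/go-cty/cty/function"
    )

    // UpperFunc always produces a non-null string, so the RefineResult hook
    // records that even when the argument (and therefore the result) is unknown.
    var UpperFunc = function.New(&function.Spec{
    	Params: []function.Parameter{
    		{Name: "str", Type: cty.String},
    	},
    	Type: function.StaticReturnType(cty.String),
    	RefineResult: func(b *cty.RefinementBuilder) *cty.RefinementBuilder {
    		return b.NotNull()
    	},
    	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
    		return cty.StringVal(strings.ToUpper(args[0].AsString())), nil
    	},
    })

    func main() {
    	v, _ := UpperFunc.Call([]cty.Value{cty.UnknownVal(cty.String)})
    	fmt.Println(v.IsKnown())                   // false
    	fmt.Println(v.Range().DefinitelyNotNull()) // true, via RefineResult
    }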
+ defer func() { + if val != cty.NilVal { + if val.IsKnown() || val.Type() != cty.DynamicPseudoType { + val = val.RefineWith(refineResult) + } + } + }() + } // Type checking already dealt with most situations relating to our // parameter specification, but we still need to deal with unknown diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go index 8192d8ce86..2826bf6eb0 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go @@ -15,7 +15,8 @@ var NotFunc = function.New(&function.Spec{ AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { return args[0].Not(), nil }, @@ -37,7 +38,8 @@ var AndFunc = function.New(&function.Spec{ AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { return args[0].And(args[1]), nil }, @@ -59,7 +61,8 @@ var OrFunc = function.New(&function.Spec{ AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { return args[0].Or(args[1]), nil }, diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go index 3fe600ff1a..fe67e6f3fe 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go @@ -38,7 +38,8 @@ var BytesLenFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { bufPtr := args[0].EncapsulatedValue().(*[]byte) return cty.NumberIntVal(int64(len(*bufPtr))), nil @@ -65,7 +66,8 @@ var BytesSliceFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(Bytes), + Type: function.StaticReturnType(Bytes), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { bufPtr := args[0].EncapsulatedValue().(*[]byte) diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go index 0573e74e3b..1816bb9c96 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go @@ -32,6 +32,7 @@ var HasIndexFunc = function.New(&function.Spec{ } return cty.Bool, nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].HasIndex(args[1]), nil }, @@ -114,6 +115,7 @@ var LengthFunc = function.New(&function.Spec{ Name: "collection", Type: cty.DynamicPseudoType, AllowDynamicType: true, + AllowUnknown: true, AllowMarked: true, }, }, @@ -124,6 +126,7 @@ var LengthFunc = function.New(&function.Spec{ } return cty.Number, nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].Length(), nil }, @@ -251,6 
+254,7 @@ var CoalesceListFunc = function.New(&function.Spec{ return last, nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { for _, arg := range args { if !arg.IsKnown() { @@ -283,7 +287,8 @@ var CompactFunc = function.New(&function.Spec{ Type: cty.List(cty.String), }, }, - Type: function.StaticReturnType(cty.List(cty.String)), + Type: function.StaticReturnType(cty.List(cty.String)), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { listVal := args[0] if !listVal.IsWhollyKnown() { @@ -324,7 +329,8 @@ var ContainsFunc = function.New(&function.Spec{ Type: cty.DynamicPseudoType, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { arg := args[0] ty := arg.Type() @@ -382,6 +388,7 @@ var DistinctFunc = function.New(&function.Spec{ Type: func(args []cty.Value) (cty.Type, error) { return args[0].Type(), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { listVal := args[0] @@ -426,6 +433,7 @@ var ChunklistFunc = function.New(&function.Spec{ Type: func(args []cty.Value) (cty.Type, error) { return cty.List(args[0].Type()), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { listVal := args[0] sizeVal := args[1] @@ -513,6 +521,7 @@ var FlattenFunc = function.New(&function.Spec{ } return cty.Tuple(tys), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { inputList := args[0] @@ -611,6 +620,7 @@ var KeysFunc = function.New(&function.Spec{ return cty.DynamicPseudoType, function.NewArgErrorf(0, "must have map or object type") } }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { // We must unmark the value before we can use ElementIterator on it, and // then re-apply the same marks (possibly none) when we return. Since we @@ -832,6 +842,7 @@ var MergeFunc = function.New(&function.Spec{ return cty.Object(attrs), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { outputMap := make(map[string]cty.Value) var markses []cty.ValueMarks // remember any marked maps/objects we find @@ -891,6 +902,7 @@ var ReverseListFunc = function.New(&function.Spec{ return cty.NilType, function.NewArgErrorf(0, "can only reverse list or tuple values, not %s", argTy.FriendlyName()) } }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { in, marks := args[0].Unmark() inVals := in.AsValueSlice() @@ -919,10 +931,11 @@ var SetProductFunc = function.New(&function.Spec{ Description: `Calculates the cartesian product of two or more sets.`, Params: []function.Parameter{}, VarParam: &function.Parameter{ - Name: "sets", - Description: "The sets to consider. Also accepts lists and tuples, and if all arguments are of list or tuple type then the result will preserve the input ordering", - Type: cty.DynamicPseudoType, - AllowMarked: true, + Name: "sets", + Description: "The sets to consider. 
Also accepts lists and tuples, and if all arguments are of list or tuple type then the result will preserve the input ordering", + Type: cty.DynamicPseudoType, + AllowMarked: true, + AllowUnknown: true, }, Type: func(args []cty.Value) (retType cty.Type, err error) { if len(args) < 2 { @@ -964,6 +977,7 @@ var SetProductFunc = function.New(&function.Spec{ } return cty.Set(cty.Tuple(elemTys)), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { ety := retType.ElementType() var retMarks cty.ValueMarks @@ -976,7 +990,7 @@ var SetProductFunc = function.New(&function.Spec{ // Continue processing after we find an argument with unknown // length to ensure that we cover all the marks - if !arg.Length().IsKnown() { + if !(arg.IsKnown() && arg.Length().IsKnown()) { hasUnknownLength = true continue } @@ -988,7 +1002,62 @@ var SetProductFunc = function.New(&function.Spec{ } if hasUnknownLength { - return cty.UnknownVal(retType).WithMarks(retMarks), nil + defer func() { + // We're definitely going to return from somewhere in this + // branch and however we do it we must reapply the marks + // on the way out. + ret = ret.WithMarks(retMarks) + }() + ret := cty.UnknownVal(retType) + + // Even if we don't know the exact length we may be able to + // constrain the upper and lower bounds of the resulting length. + maxLength := 1 + for _, arg := range args { + arg, _ := arg.Unmark() // safe to discard marks because "retMarks" already contains them all + argRng := arg.Range() + ty := argRng.TypeConstraint() + var argMaxLen int + if ty.IsCollectionType() { + argMaxLen = argRng.LengthUpperBound() + } else if ty.IsTupleType() { + argMaxLen = ty.Length() + } else { + // Should not get here but if we do then we'll just + // bail out with an unrefined unknown value. + return ret, nil + } + // The upper bound of a totally-unrefined collection is + // math.MaxInt, which will quickly get us to integer overflow + // here, and so out of pragmatism we'll just impose a reasonable + // upper limit on what is a useful bound to track and return + // unrefined for unusually-large input. + if argMaxLen > 1024 { // arbitrarily-decided threshold + return ret, nil + } + maxLength *= argMaxLen + if maxLength > 2048 { // arbitrarily-decided threshold + return ret, nil + } + if maxLength < 0 { // Seems like we already overflowed, then. + return ret, nil + } + } + + if maxLength == 0 { + // This refinement will typically allow the unknown value to + // collapse into a known empty collection. + ret = ret.Refine().CollectionLength(0).NewValue() + } else { + // If we know there's a nonzero maximum number of elements then + // set element coalescing cannot reduce to fewer than one + // element. + ret = ret.Refine(). + CollectionLengthLowerBound(1). + CollectionLengthUpperBound(maxLength). 
+ NewValue() + } + return ret, nil } if total == 0 { @@ -1101,6 +1170,7 @@ var SliceFunc = function.New(&function.Spec{ } return cty.Tuple(argTy.TupleElementTypes()[startIndex:endIndex]), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { inputList, marks := args[0].Unmark() @@ -1215,6 +1285,7 @@ var ValuesFunc = function.New(&function.Spec{ } return cty.NilType, errors.New("values() requires a map as the first argument") }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { mapVar := args[0] @@ -1303,6 +1374,7 @@ var ZipmapFunc = function.New(&function.Spec{ return cty.NilType, errors.New("values argument must be a list or tuple value") } }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { keys := args[0] values := args[1] diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go index f61b53409a..5d06a4519e 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/conversion.go @@ -87,3 +87,36 @@ func MakeToFunc(wantTy cty.Type) function.Function { }, }) } + +// AssertNotNullFunc is a function which does nothing except return an error +// if the argument given to it is null. +// +// This could be useful in some cases where the automatic refinment of +// nullability isn't precise enough, because the result is guaranteed to not +// be null and can therefore allow downstream comparisons to null to return +// a known value even if the value is otherwise unknown. +var AssertNotNullFunc = function.New(&function.Spec{ + Description: "Returns the given value varbatim if it is non-null, or raises an error if it's null.", + Params: []function.Parameter{ + { + Name: "v", + Type: cty.DynamicPseudoType, + // NOTE: We intentionally don't set AllowNull here, and so + // the function system will automatically reject a null argument + // for us before calling Impl. + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + return args[0].Type(), nil + }, + RefineResult: refineNonNull, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + // Our argument doesn't set AllowNull: true, so we're guaranteed to + // have a non-null value in args[0]. 
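	// Illustrative caller-side sketch (not part of the vendored change, and
	// assuming cty's ValueRange accessors): because null arguments are rejected
	// by the parameter spec and the result is refined via RefineResult, even an
	// unknown input yields a placeholder that is known to be non-null:
	//
	//	v, _ := stdlib.AssertNotNull(cty.UnknownVal(cty.String))
	//	v.IsKnown()                   // false
	//	v.Range().DefinitelyNotNull() // true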
+ return args[0], nil + }, +}) + +func AssertNotNull(v cty.Value) (cty.Value, error) { + return AssertNotNullFunc.Call([]cty.Value{v}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go index 20d82bcdba..e854e817b2 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go @@ -43,6 +43,7 @@ var CSVDecodeFunc = function.New(&function.Spec{ } return cty.List(cty.Object(atys)), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { ety := retType.ElementType() atys := ety.AttributeTypes() diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go index 6c0ee05e92..85f58d4cc7 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go @@ -23,7 +23,8 @@ var FormatDateFunc = function.New(&function.Spec{ Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { formatStr := args[0].AsString() timeStr := args[1].AsString() @@ -281,67 +282,6 @@ func FormatDate(format cty.Value, timestamp cty.Value) (cty.Value, error) { return FormatDateFunc.Call([]cty.Value{format, timestamp}) } -func parseTimestamp(ts string) (time.Time, error) { - t, err := time.Parse(time.RFC3339, ts) - if err != nil { - switch err := err.(type) { - case *time.ParseError: - // If err is s time.ParseError then its string representation is not - // appropriate since it relies on details of Go's strange date format - // representation, which a caller of our functions is not expected - // to be familiar with. - // - // Therefore we do some light transformation to get a more suitable - // error that should make more sense to our callers. These are - // still not awesome error messages, but at least they refer to - // the timestamp portions by name rather than by Go's example - // values. - if err.LayoutElem == "" && err.ValueElem == "" && err.Message != "" { - // For some reason err.Message is populated with a ": " prefix - // by the time package. - return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp%s", err.Message) - } - var what string - switch err.LayoutElem { - case "2006": - what = "year" - case "01": - what = "month" - case "02": - what = "day of month" - case "15": - what = "hour" - case "04": - what = "minute" - case "05": - what = "second" - case "Z07:00": - what = "UTC offset" - case "T": - return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: missing required time introducer 'T'") - case ":", "-": - if err.ValueElem == "" { - return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string where %q is expected", err.LayoutElem) - } else { - return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: found %q where %q is expected", err.ValueElem, err.LayoutElem) - } - default: - // Should never get here, because time.RFC3339 includes only the - // above portions, but since that might change in future we'll - // be robust here. 
- what = "timestamp segment" - } - if err.ValueElem == "" { - return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string before %s", what) - } else { - return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: cannot use %q as %s", err.ValueElem, what) - } - } - return time.Time{}, err - } - return t, nil -} - // splitDataFormat is a bufio.SplitFunc used to tokenize a date format. func splitDateFormat(data []byte, atEOF bool) (advance int, token []byte, err error) { if len(data) == 0 { @@ -418,6 +358,75 @@ func startsDateFormatVerb(b byte) bool { return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') } +func parseTimestamp(ts string) (time.Time, error) { + t, err := parseStrictRFC3339(ts) + if err != nil { + switch err := err.(type) { + case *time.ParseError: + // If err is s time.ParseError then its string representation is not + // appropriate since it relies on details of Go's strange date format + // representation, which a caller of our functions is not expected + // to be familiar with. + // + // Therefore we do some light transformation to get a more suitable + // error that should make more sense to our callers. These are + // still not awesome error messages, but at least they refer to + // the timestamp portions by name rather than by Go's example + // values. + if err.LayoutElem == "" && err.ValueElem == "" && err.Message != "" { + // For some reason err.Message is populated with a ": " prefix + // by the time package. + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp%s", err.Message) + } + var what string + switch err.LayoutElem { + case "2006": + what = "year" + case "01": + what = "month" + case "02": + what = "day of month" + case "15": + what = "hour" + case "04": + what = "minute" + case "05": + what = "second" + case "Z07:00": + what = "UTC offset" + case "T": + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: missing required time introducer 'T'") + case ":", "-": + if err.ValueElem == "" { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string where %q is expected", err.LayoutElem) + } else { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: found %q where %q is expected", err.ValueElem, err.LayoutElem) + } + default: + // Should never get here, because RFC3339 includes only the + // above portions. + what = "timestamp segment" + } + if err.ValueElem == "" { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string before %s", what) + } else { + switch { + case what == "hour" && strings.Contains(err.ValueElem, ":"): + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: hour must be between 0 and 23 inclusive") + case what == "hour" && len(err.ValueElem) != 2: + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: hour must have exactly two digits") + case what == "minute" && len(err.ValueElem) != 2: + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: minute must have exactly two digits") + default: + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: cannot use %q as %s", err.ValueElem, what) + } + } + } + return time.Time{}, err + } + return t, nil +} + // TimeAdd adds a duration to a timestamp, returning a new timestamp. 
// // In the HCL language, timestamps are conventionally represented as diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime_rfc3339.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime_rfc3339.go new file mode 100644 index 0000000000..687854f378 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime_rfc3339.go @@ -0,0 +1,219 @@ +package stdlib + +import ( + "errors" + "strconv" + "time" +) + +// This file inlines some RFC3339 parsing code that was added to the Go standard +// library's "time" package during the Go 1.20 development period but then +// reverted prior to release to follow the Go proposals process first. +// +// Our goal is to support only valid RFC3339 strings regardless of what version +// of Go is being used, because the Go stdlib is just an implementation detail +// of the cty stdlib and so these functions should not very their behavior +// significantly due to being compiled against a different Go version. +// +// These inline copies of the code from upstream should likely stay here +// indefinitely even if functionality like this _is_ accepted in a later version +// of Go, because this now defines cty's definition of RFC3339 parsing as +// intentionally independent of Go's. + +func parseStrictRFC3339(str string) (time.Time, error) { + t, ok := parseRFC3339(str) + if !ok { + // If parsing failed then we'll try to use time.Parse to gather up a + // helpful error object. + _, err := time.Parse(time.RFC3339, str) + if err != nil { + return time.Time{}, err + } + + // The parse template syntax cannot correctly validate RFC 3339. + // Explicitly check for cases that Parse is unable to validate for. + // See https://go.dev/issue/54580. + num2 := func(str string) byte { return 10*(str[0]-'0') + (str[1] - '0') } + switch { + case str[len("2006-01-02T")+1] == ':': // hour must be two digits + return time.Time{}, &time.ParseError{ + Layout: time.RFC3339, + Value: str, + LayoutElem: "15", + ValueElem: str[len("2006-01-02T"):][:1], + Message: ": hour must have two digits", + } + case str[len("2006-01-02T15:04:05")] == ',': // sub-second separator must be a period + return time.Time{}, &time.ParseError{ + Layout: time.RFC3339, + Value: str, + LayoutElem: ".", + ValueElem: ",", + Message: ": sub-second separator must be a period", + } + case str[len(str)-1] != 'Z': + switch { + case num2(str[len(str)-len("07:00"):]) >= 24: // timezone hour must be in range + return time.Time{}, &time.ParseError{ + Layout: time.RFC3339, + Value: str, + LayoutElem: "Z07:00", + ValueElem: str[len(str)-len("Z07:00"):], + Message: ": timezone hour out of range", + } + case num2(str[len(str)-len("00"):]) >= 60: // timezone minute must be in range + return time.Time{}, &time.ParseError{ + Layout: time.RFC3339, + Value: str, + LayoutElem: "Z07:00", + ValueElem: str[len(str)-len("Z07:00"):], + Message: ": timezone minute out of range", + } + } + default: // unknown error; should not occur + return time.Time{}, &time.ParseError{ + Layout: time.RFC3339, + Value: str, + LayoutElem: time.RFC3339, + ValueElem: str, + Message: "", + } + } + } + return t, nil +} + +func parseRFC3339(s string) (time.Time, bool) { + // parseUint parses s as an unsigned decimal integer and + // verifies that it is within some range. + // If it is invalid or out-of-range, + // it sets ok to false and returns the min value. 
+ ok := true + parseUint := func(s string, min, max int) (x int) { + for _, c := range []byte(s) { + if c < '0' || '9' < c { + ok = false + return min + } + x = x*10 + int(c) - '0' + } + if x < min || max < x { + ok = false + return min + } + return x + } + + // Parse the date and time. + if len(s) < len("2006-01-02T15:04:05") { + return time.Time{}, false + } + year := parseUint(s[0:4], 0, 9999) // e.g., 2006 + month := parseUint(s[5:7], 1, 12) // e.g., 01 + day := parseUint(s[8:10], 1, daysIn(time.Month(month), year)) // e.g., 02 + hour := parseUint(s[11:13], 0, 23) // e.g., 15 + min := parseUint(s[14:16], 0, 59) // e.g., 04 + sec := parseUint(s[17:19], 0, 59) // e.g., 05 + if !ok || !(s[4] == '-' && s[7] == '-' && s[10] == 'T' && s[13] == ':' && s[16] == ':') { + return time.Time{}, false + } + s = s[19:] + + // Parse the fractional second. + var nsec int + if len(s) >= 2 && s[0] == '.' && isDigit(s, 1) { + n := 2 + for ; n < len(s) && isDigit(s, n); n++ { + } + nsec, _, _ = parseNanoseconds(s, n) + s = s[n:] + } + + // Parse the time zone. + loc := time.UTC + if len(s) != 1 || s[0] != 'Z' { + if len(s) != len("-07:00") { + return time.Time{}, false + } + hr := parseUint(s[1:3], 0, 23) // e.g., 07 + mm := parseUint(s[4:6], 0, 59) // e.g., 00 + if !ok || !((s[0] == '-' || s[0] == '+') && s[3] == ':') { + return time.Time{}, false + } + zoneOffsetSecs := (hr*60 + mm) * 60 + if s[0] == '-' { + zoneOffsetSecs = -zoneOffsetSecs + } + loc = time.FixedZone("", zoneOffsetSecs) + } + t := time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc) + + return t, true +} + +func isDigit(s string, i int) bool { + if len(s) <= i { + return false + } + c := s[i] + return '0' <= c && c <= '9' +} + +func parseNanoseconds(value string, nbytes int) (ns int, rangeErrString string, err error) { + if value[0] != '.' && value[0] != ',' { + err = errBadTimestamp + return + } + if nbytes > 10 { + value = value[:10] + nbytes = 10 + } + if ns, err = strconv.Atoi(value[1:nbytes]); err != nil { + return + } + if ns < 0 { + rangeErrString = "fractional second" + return + } + // We need nanoseconds, which means scaling by the number + // of missing digits in the format, maximum length 10. + scaleDigits := 10 - nbytes + for i := 0; i < scaleDigits; i++ { + ns *= 10 + } + return +} + +// These are internal errors used by the date parsing code and are not ever +// returned by public functions. +var errBadTimestamp = errors.New("bad value for field") + +// daysBefore[m] counts the number of days in a non-leap year +// before month m begins. There is an entry for m=12, counting +// the number of days before January of next year (365). 
+var daysBefore = [...]int32{ + 0, + 31, + 31 + 28, + 31 + 28 + 31, + 31 + 28 + 31 + 30, + 31 + 28 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31, +} + +func daysIn(m time.Month, year int) int { + if m == time.February && isLeap(year) { + return 29 + } + return int(daysBefore[m] - daysBefore[m-1]) +} + +func isLeap(year int) bool { + return year%4 == 0 && (year%100 != 0 || year%400 == 0) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go index ca163a876d..d04a5eec78 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go @@ -26,17 +26,27 @@ var FormatFunc = function.New(&function.Spec{ }, }, VarParam: &function.Parameter{ - Name: "args", - Type: cty.DynamicPseudoType, - AllowNull: true, + Name: "args", + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowUnknown: true, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { for _, arg := range args[1:] { if !arg.IsWhollyKnown() { // We require all nested values to be known because the only // thing we can do for a collection/structural type is print // it as JSON and that requires it to be wholly known. + // However, we might be able to refine the result with a + // known prefix, if there are literal characters before the + // first formatting verb. + f := args[0].AsString() + if idx := strings.IndexByte(f, '%'); idx > 0 { + prefix := f[:idx] + return cty.UnknownVal(cty.String).Refine().StringPrefix(prefix).NewValue(), nil + } return cty.UnknownVal(cty.String), nil } } @@ -59,7 +69,8 @@ var FormatListFunc = function.New(&function.Spec{ AllowNull: true, AllowUnknown: true, }, - Type: function.StaticReturnType(cty.List(cty.String)), + Type: function.StaticReturnType(cty.List(cty.String)), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { fmtVal := args[0] args = args[1:] @@ -164,7 +175,7 @@ var FormatListFunc = function.New(&function.Spec{ // We require all nested values to be known because the only // thing we can do for a collection/structural type is print // it as JSON and that requires it to be wholly known. 
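			// Illustrative note (not part of the vendored change): for example,
			// formatlist("%v", cty.ListVal([]cty.Value{cty.UnknownVal(cty.String)}))
			// still yields an unknown placeholder for that row, but with the
			// change below the placeholder is refined as definitely non-null,
			// and the overall result is likewise refined via RefineResult above.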
- ret = append(ret, cty.UnknownVal(cty.String)) + ret = append(ret, cty.UnknownVal(cty.String).RefineNotNull()) continue Results } } diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go index 4f70fff94c..627b55a5cc 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go @@ -26,7 +26,8 @@ var EqualFunc = function.New(&function.Spec{ AllowNull: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].Equals(args[1]), nil }, @@ -50,7 +51,8 @@ var NotEqualFunc = function.New(&function.Spec{ AllowNull: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].Equals(args[1]).Not(), nil }, @@ -77,6 +79,7 @@ var CoalesceFunc = function.New(&function.Spec{ } return retType, nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { for _, argVal := range args { if !argVal.IsKnown() { @@ -92,6 +95,10 @@ var CoalesceFunc = function.New(&function.Spec{ }, }) +func refineNonNull(b *cty.RefinementBuilder) *cty.RefinementBuilder { + return b.NotNull() +} + // Equal determines whether the two given values are equal, returning a // bool value. func Equal(a cty.Value, b cty.Value) (cty.Value, error) { diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go index 63dd320e45..655977656f 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go @@ -1,6 +1,10 @@ package stdlib import ( + "bytes" + "strings" + "unicode/utf8" + "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/function" "github.com/zclconf/go-cty/cty/json" @@ -12,18 +16,40 @@ var JSONEncodeFunc = function.New(&function.Spec{ { Name: "val", Type: cty.DynamicPseudoType, + AllowUnknown: true, AllowDynamicType: true, AllowNull: true, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { val := args[0] if !val.IsWhollyKnown() { // We can't serialize unknowns, so if the value is unknown or // contains any _nested_ unknowns then our result must be - // unknown. - return cty.UnknownVal(retType), nil + // unknown. However, we might still be able to at least constrain + // the prefix of our string so that downstreams can sniff for + // whether it's valid JSON and what result types it could have. + + valRng := val.Range() + if valRng.CouldBeNull() { + // If null is possible then we can't constrain the result + // beyond the type constraint, because the very first character + // of the string is what distinguishes a null. 
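			// Illustrative note (not part of the vendored change): an unrefined
			// unknown string could still turn out to be null, and so could
			// encode to either `null` or a quoted string; only once the caller
			// refines the value as non-null can the `"` / `{` / `[` prefixes
			// below be promised.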
+ return cty.UnknownVal(retType), nil + } + b := cty.UnknownVal(retType).Refine() + ty := valRng.TypeConstraint() + switch { + case ty == cty.String: + b = b.StringPrefixFull(`"`) + case ty.IsObjectType() || ty.IsMapType(): + b = b.StringPrefixFull("{") + case ty.IsTupleType() || ty.IsListType() || ty.IsSetType(): + b = b.StringPrefixFull("[") + } + return b.NewValue(), nil } if val.IsNull() { @@ -35,6 +61,11 @@ var JSONEncodeFunc = function.New(&function.Spec{ return cty.NilVal, err } + // json.Marshal should already produce a trimmed string, but we'll + // make sure it always is because our unknown value refinements above + // assume there will be no leading whitespace before the value. + buf = bytes.TrimSpace(buf) + return cty.StringVal(string(buf)), nil }, }) @@ -50,6 +81,42 @@ var JSONDecodeFunc = function.New(&function.Spec{ Type: func(args []cty.Value) (cty.Type, error) { str := args[0] if !str.IsKnown() { + // If the string isn't known then we can't fully parse it, but + // if the value has been refined with a prefix then we may at + // least be able to reject obviously-invalid syntax and maybe + // even predict the result type. It's safe to return a specific + // result type only if parsing a full document with this prefix + // would return exactly that type or fail with a syntax error. + rng := str.Range() + if prefix := strings.TrimSpace(rng.StringPrefix()); prefix != "" { + // If we know at least one character then it should be one + // of the few characters that can introduce a JSON value. + switch r, _ := utf8.DecodeRuneInString(prefix); r { + case '{', '[': + // These can start object values and array values + // respectively, but we can't actually form a full + // object type constraint or tuple type constraint + // without knowing all of the attributes, so we + // will still return DynamicPseudoType in this case. + case '"': + // This means that the result will either be a string + // or parsing will fail. + return cty.String, nil + case 't', 'f': + // Must either be a boolean value or a syntax error. + return cty.Bool, nil + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.': + // These characters would all start the "number" production. + return cty.Number, nil + case 'n': + // n is valid to begin the keyword "null" but that doesn't + // give us any extra type information. + default: + // No other characters are valid as the beginning of a + // JSON value, so we can safely return an early error. 
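				// Illustrative note (not part of the vendored change): e.g. a
				// value refined with a string prefix of `"foo` commits this
				// function to cty.String, a prefix of `[` stays
				// DynamicPseudoType until the full document is known, and a
				// prefix like `hello` is rejected here because it cannot begin
				// a JSON value.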
+ return cty.NilType, function.NewArgErrorf(0, "a JSON document cannot begin with the character %q", r) + } + } return cty.DynamicPseudoType, nil } diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go index ce73751356..73ef32f14d 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go @@ -20,7 +20,8 @@ var AbsoluteFunc = function.New(&function.Spec{ AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { return args[0].Absolute(), nil }, @@ -40,7 +41,8 @@ var AddFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { // big.Float.Add can panic if the input values are opposing infinities, // so we must catch that here in order to remain within @@ -74,7 +76,8 @@ var SubtractFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { // big.Float.Sub can panic if the input values are infinities, // so we must catch that here in order to remain within @@ -108,7 +111,8 @@ var MultiplyFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { // big.Float.Mul can panic if the input values are both zero or both // infinity, so we must catch that here in order to remain within @@ -143,7 +147,8 @@ var DivideFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { // big.Float.Quo can panic if the input values are both zero or both // infinity, so we must catch that here in order to remain within @@ -178,7 +183,8 @@ var ModuloFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { // big.Float.Mul can panic if the input values are both zero or both // infinity, so we must catch that here in order to remain within @@ -205,17 +211,20 @@ var GreaterThanFunc = function.New(&function.Spec{ { Name: "a", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, { Name: "b", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].GreaterThan(args[1]), nil }, @@ -227,17 +236,20 @@ var GreaterThanOrEqualToFunc = function.New(&function.Spec{ { Name: "a", Type: cty.Number, + 
AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, { Name: "b", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].GreaterThanOrEqualTo(args[1]), nil }, @@ -249,17 +261,20 @@ var LessThanFunc = function.New(&function.Spec{ { Name: "a", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, { Name: "b", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].LessThan(args[1]), nil }, @@ -271,17 +286,20 @@ var LessThanOrEqualToFunc = function.New(&function.Spec{ { Name: "a", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, { Name: "b", Type: cty.Number, + AllowUnknown: true, AllowDynamicType: true, AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].LessThanOrEqualTo(args[1]), nil }, @@ -297,7 +315,8 @@ var NegateFunc = function.New(&function.Spec{ AllowMarked: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { return args[0].Negate(), nil }, @@ -311,7 +330,8 @@ var MinFunc = function.New(&function.Spec{ Type: cty.Number, AllowDynamicType: true, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { if len(args) == 0 { return cty.NilVal, fmt.Errorf("must pass at least one number") @@ -336,7 +356,8 @@ var MaxFunc = function.New(&function.Spec{ Type: cty.Number, AllowDynamicType: true, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { if len(args) == 0 { return cty.NilVal, fmt.Errorf("must pass at least one number") @@ -362,7 +383,8 @@ var IntFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { bf := args[0].AsBigFloat() if bf.IsInt() { @@ -384,7 +406,8 @@ var CeilFunc = function.New(&function.Spec{ Type: cty.Number, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { f := args[0].AsBigFloat() @@ -414,7 +437,8 @@ var FloorFunc = function.New(&function.Spec{ Type: cty.Number, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { f := args[0].AsBigFloat() @@ -447,7 +471,8 @@ var LogFunc = 
function.New(&function.Spec{ Type: cty.Number, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { var num float64 if err := gocty.FromCtyValue(args[0], &num); err != nil { @@ -476,7 +501,8 @@ var PowFunc = function.New(&function.Spec{ Type: cty.Number, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { var num float64 if err := gocty.FromCtyValue(args[0], &num); err != nil { @@ -502,7 +528,8 @@ var SignumFunc = function.New(&function.Spec{ Type: cty.Number, }, }, - Type: function.StaticReturnType(cty.Number), + Type: function.StaticReturnType(cty.Number), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { var num int if err := gocty.FromCtyValue(args[0], &num); err != nil { @@ -539,6 +566,7 @@ var ParseIntFunc = function.New(&function.Spec{ } return cty.Number, nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { var numstr string diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go index ab4257b67b..246544421c 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go @@ -33,6 +33,7 @@ var RegexFunc = function.New(&function.Spec{ } return retTy, err }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { if retType == cty.DynamicPseudoType { return cty.DynamicVal, nil @@ -79,6 +80,7 @@ var RegexAllFunc = function.New(&function.Spec{ } return cty.List(retTy), err }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { ety := retType.ElementType() if ety == cty.DynamicPseudoType { diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go index 6b2d97b4a5..009949d472 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go @@ -74,6 +74,7 @@ var ConcatFunc = function.New(&function.Spec{ } return cty.Tuple(etys), nil }, + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { switch { case retType.IsListType(): @@ -143,7 +144,8 @@ var RangeFunc = function.New(&function.Spec{ Name: "params", Type: cty.Number, }, - Type: function.StaticReturnType(cty.List(cty.Number)), + Type: function.StaticReturnType(cty.List(cty.Number)), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { var start, end, step cty.Value switch len(args) { diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go index 15f4c05e76..6da2291916 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go @@ -23,7 +23,8 @@ var SetHasElementFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.Bool), + Type: function.StaticReturnType(cty.Bool), + RefineResult: refineNonNull, 
Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return args[0].HasElement(args[1]), nil }, @@ -43,7 +44,8 @@ var SetUnionFunc = function.New(&function.Spec{ Type: cty.Set(cty.DynamicPseudoType), AllowDynamicType: true, }, - Type: setOperationReturnType, + Type: setOperationReturnType, + RefineResult: refineNonNull, Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet { return s1.Union(s2) }, true), @@ -63,7 +65,8 @@ var SetIntersectionFunc = function.New(&function.Spec{ Type: cty.Set(cty.DynamicPseudoType), AllowDynamicType: true, }, - Type: setOperationReturnType, + Type: setOperationReturnType, + RefineResult: refineNonNull, Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet { return s1.Intersection(s2) }, false), @@ -83,7 +86,8 @@ var SetSubtractFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: setOperationReturnType, + Type: setOperationReturnType, + RefineResult: refineNonNull, Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet { return s1.Subtract(s2) }, false), @@ -103,7 +107,8 @@ var SetSymmetricDifferenceFunc = function.New(&function.Spec{ Type: cty.Set(cty.DynamicPseudoType), AllowDynamicType: true, }, - Type: setOperationReturnType, + Type: setOperationReturnType, + RefineResult: refineNonNull, Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet { return s1.SymmetricDifference(s2) }, false), diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go index f340ef747b..57ebce1b4a 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go @@ -22,7 +22,8 @@ var UpperFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { in := args[0].AsString() out := strings.ToUpper(in) @@ -39,7 +40,8 @@ var LowerFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { in := args[0].AsString() out := strings.ToLower(in) @@ -56,7 +58,8 @@ var ReverseFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { in := []byte(args[0].AsString()) out := make([]byte, len(in)) @@ -81,25 +84,46 @@ var StrlenFunc = function.New(&function.Spec{ { Name: "str", Type: cty.String, + AllowUnknown: true, AllowDynamicType: true, }, }, Type: function.StaticReturnType(cty.Number), + RefineResult: func(b *cty.RefinementBuilder) *cty.RefinementBuilder { + // String length is never null and never negative. + // (We might refine the lower bound even more inside Impl.) 
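		// Illustrative note (not part of the vendored change): strlen of a
		// wholly unknown string therefore yields an unknown number already
		// known to be >= 0, and if the argument's refined range reports a
		// string prefix such as "abc-", the Impl below raises the lower bound
		// to that prefix's grapheme cluster count (4 in this example).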
+ return b.NotNull().NumberRangeLowerBound(cty.NumberIntVal(0), true) + }, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - in := args[0].AsString() - l := 0 - - inB := []byte(in) - for i := 0; i < len(in); { - d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true) - l++ - i += d + if !args[0].IsKnown() { + ret := cty.UnknownVal(cty.Number) + // We may be able to still return a constrained result based on the + // refined range of the unknown value. + inRng := args[0].Range() + if inRng.TypeConstraint() == cty.String { + prefixLen := int64(graphemeClusterCount(inRng.StringPrefix())) + ret = ret.Refine().NumberRangeLowerBound(cty.NumberIntVal(prefixLen), true).NewValue() + } + return ret, nil } + in := args[0].AsString() + l := graphemeClusterCount(in) return cty.NumberIntVal(int64(l)), nil }, }) +func graphemeClusterCount(in string) int { + l := 0 + inB := []byte(in) + for i := 0; i < len(in); { + d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true) + l++ + i += d + } + return l +} + var SubstrFunc = function.New(&function.Spec{ Description: "Extracts a substring from the given string.", Params: []function.Parameter{ @@ -122,7 +146,8 @@ var SubstrFunc = function.New(&function.Spec{ AllowDynamicType: true, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { in := []byte(args[0].AsString()) var offset, length int @@ -218,7 +243,8 @@ var JoinFunc = function.New(&function.Spec{ Description: "One or more lists of strings to join.", Type: cty.List(cty.String), }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { sep := args[0].AsString() listVals := args[1:] @@ -258,18 +284,29 @@ var SortFunc = function.New(&function.Spec{ Description: "Applies a lexicographic sort to the elements of the given list.", Params: []function.Parameter{ { - Name: "list", - Type: cty.List(cty.String), + Name: "list", + Type: cty.List(cty.String), + AllowUnknown: true, }, }, - Type: function.StaticReturnType(cty.List(cty.String)), + Type: function.StaticReturnType(cty.List(cty.String)), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { listVal := args[0] if !listVal.IsWhollyKnown() { // If some of the element values aren't known yet then we - // can't yet predict the order of the result. - return cty.UnknownVal(retType), nil + // can't yet predict the order of the result, but we can be + // sure that the length won't change. + ret := cty.UnknownVal(retType) + if listVal.Type().IsListType() { + rng := listVal.Range() + ret = ret.Refine(). + CollectionLengthLowerBound(rng.LengthLowerBound()). + CollectionLengthUpperBound(rng.LengthUpperBound()). 
+ NewValue() + } + return ret, nil } if listVal.LengthInt() == 0 { // Easy path return listVal, nil @@ -307,7 +344,8 @@ var SplitFunc = function.New(&function.Spec{ Type: cty.String, }, }, - Type: function.StaticReturnType(cty.List(cty.String)), + Type: function.StaticReturnType(cty.List(cty.String)), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { sep := args[0].AsString() str := args[1].AsString() @@ -333,7 +371,8 @@ var ChompFunc = function.New(&function.Spec{ Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`) return cty.StringVal(newlines.ReplaceAllString(args[0].AsString(), "")), nil @@ -356,7 +395,8 @@ var IndentFunc = function.New(&function.Spec{ Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { var spaces int if err := gocty.FromCtyValue(args[0], &spaces); err != nil { @@ -378,7 +418,8 @@ var TitleFunc = function.New(&function.Spec{ Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return cty.StringVal(strings.Title(args[0].AsString())), nil }, @@ -394,7 +435,8 @@ var TrimSpaceFunc = function.New(&function.Spec{ Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { return cty.StringVal(strings.TrimSpace(args[0].AsString())), nil }, @@ -416,7 +458,8 @@ var TrimFunc = function.New(&function.Spec{ Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { str := args[0].AsString() cutset := args[1].AsString() @@ -443,7 +486,8 @@ var TrimPrefixFunc = function.New(&function.Spec{ Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { str := args[0].AsString() prefix := args[1].AsString() @@ -467,7 +511,8 @@ var TrimSuffixFunc = function.New(&function.Spec{ Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { str := args[0].AsString() cutset := args[1].AsString() diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string_replace.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string_replace.go index 573083bcf0..25a821bbf0 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string_replace.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string_replace.go @@ -30,7 +30,8 @@ var ReplaceFunc = function.New(&function.Spec{ Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args 
[]cty.Value, retType cty.Type) (cty.Value, error) { str := args[0].AsString() substr := args[1].AsString() @@ -59,7 +60,8 @@ var RegexReplaceFunc = function.New(&function.Spec{ Type: cty.String, }, }, - Type: function.StaticReturnType(cty.String), + Type: function.StaticReturnType(cty.String), + RefineResult: refineNonNull, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { str := args[0].AsString() substr := args[1].AsString() diff --git a/vendor/github.com/zclconf/go-cty/cty/helper.go b/vendor/github.com/zclconf/go-cty/cty/helper.go index 1b88e9fa08..c342f13cba 100644 --- a/vendor/github.com/zclconf/go-cty/cty/helper.go +++ b/vendor/github.com/zclconf/go-cty/cty/helper.go @@ -8,7 +8,7 @@ import ( // unknowns, for operations that short-circuit to return unknown in that case. func anyUnknown(values ...Value) bool { for _, val := range values { - if val.v == unknown { + if _, unknown := val.v.(*unknownType); unknown { return true } } @@ -39,7 +39,7 @@ func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, e ) } - if val.v == unknown { + if _, unknown := val.v.(*unknownType); unknown { hasUnknown = true } } diff --git a/vendor/github.com/zclconf/go-cty/cty/marks.go b/vendor/github.com/zclconf/go-cty/cty/marks.go index b889e73fa6..e747503ea9 100644 --- a/vendor/github.com/zclconf/go-cty/cty/marks.go +++ b/vendor/github.com/zclconf/go-cty/cty/marks.go @@ -190,6 +190,9 @@ func (val Value) HasSameMarks(other Value) bool { // An application that never calls this method does not need to worry about // handling marked values. func (val Value) Mark(mark interface{}) Value { + if _, ok := mark.(ValueMarks); ok { + panic("cannot call Value.Mark with a ValueMarks value (use WithMarks instead)") + } var newMarker marker newMarker.realV = val.v if mr, ok := val.v.(marker); ok { diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown.go b/vendor/github.com/zclconf/go-cty/cty/unknown.go index 83893c0237..b3aefa4503 100644 --- a/vendor/github.com/zclconf/go-cty/cty/unknown.go +++ b/vendor/github.com/zclconf/go-cty/cty/unknown.go @@ -3,11 +3,19 @@ package cty // unknownType is the placeholder type used for the sigil value representing // "Unknown", to make it unambigiously distinct from any other possible value. type unknownType struct { + // refinement is an optional object which, if present, describes some + // additional constraints we know about the range of real values this + // unknown value could be a placeholder for. + refinement unknownValRefinement } -// unknown is a special value that can be used as the internal value of a -// Value to create a placeholder for a value that isn't yet known. -var unknown interface{} = &unknownType{} +// totallyUnknown is the representation a a value we know nothing about at +// all. Subsequent refinements of an unknown value will cause creation of +// other values of unknownType that can represent additional constraints +// on the unknown value, but all unknown values start as totally unknown +// and we will also typically lose all unknown value refinements when +// round-tripping through serialization formats. +var totallyUnknown interface{} = &unknownType{} // UnknownVal returns an Value that represents an unknown value of the given // type. 
Unknown values can be used to represent a value that is @@ -19,7 +27,7 @@ var unknown interface{} = &unknownType{} func UnknownVal(t Type) Value { return Value{ ty: t, - v: unknown, + v: totallyUnknown, } } @@ -80,6 +88,6 @@ func init() { } DynamicVal = Value{ ty: DynamicPseudoType, - v: unknown, + v: totallyUnknown, } } diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown_refinement.go b/vendor/github.com/zclconf/go-cty/cty/unknown_refinement.go new file mode 100644 index 0000000000..d90bcbc367 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/unknown_refinement.go @@ -0,0 +1,747 @@ +package cty + +import ( + "fmt" + "math" + "strings" + + "github.com/zclconf/go-cty/cty/ctystrings" +) + +// Refine creates a [RefinementBuilder] with which to annotate the reciever +// with zero or more additional refinements that constrain the range of +// the value. +// +// Calling methods on a RefinementBuilder for a known value essentially just +// serves as assertions about the range of that value, leading to panics if +// those assertions don't hold in practice. This is mainly supported just to +// make programs that rely on refinements automatically self-check by using +// the refinement codepath unconditionally on both placeholders and final +// values for those placeholders. It's always a bug to refine the range of +// an unknown value and then later substitute an exact value outside of the +// refined range. +// +// Calling methods on a RefinementBuilder for an unknown value is perhaps +// more useful because the newly-refined value will then be a placeholder for +// a smaller range of values and so it may be possible for other operations +// on the unknown value to return a known result despite the exact value not +// yet being known. +// +// It is never valid to refine [DynamicVal], because that value is a +// placeholder for a value about which we knkow absolutely nothing. A value +// must at least have a known root type before it can support further +// refinement. +func (v Value) Refine() *RefinementBuilder { + v, marks := v.Unmark() + if unk, isUnk := v.v.(*unknownType); isUnk && unk.refinement != nil { + // We're refining a value that's already been refined before, so + // we'll start from a copy of its existing refinements. + wip := unk.refinement.copy() + return &RefinementBuilder{v, marks, wip} + } + + ty := v.Type() + var wip unknownValRefinement + switch { + case ty == DynamicPseudoType && !v.IsKnown(): + panic("cannot refine an unknown value of an unknown type") + case ty == String: + wip = &refinementString{} + case ty == Number: + wip = &refinementNumber{} + case ty.IsCollectionType(): + wip = &refinementCollection{ + // A collection can never have a negative length, so we'll + // start with that already constrained. + minLen: 0, + maxLen: math.MaxInt, + } + case ty == Bool || ty.IsObjectType() || ty.IsTupleType() || ty.IsCapsuleType(): + // For other known types we'll just track nullability + wip = &refinementNullable{} + case ty == DynamicPseudoType && v.IsNull(): + // It's okay in principle to refine a null value of unknown type, + // although all we can refine about it is that it's definitely null and + // so this is pretty pointless and only supported to avoid callers + // always needing to treat this situation as a special case to avoid + // panic. 
+ wip = &refinementNullable{ + isNull: tristateTrue, + } + default: + // we leave "wip" as nil for all other types, representing that + // they don't support refinements at all and so any call on the + // RefinementBuilder should fail. + + // NOTE: We intentionally don't allow any refinements for + // cty.DynamicVal here, even though it could be nice in principle + // to at least track non-nullness for those, because it's historically + // been valid to directly compare values with cty.DynamicVal using + // the Go "==" operator and recording a refinement for an untyped + // unknown value would break existing code relying on that. + } + + return &RefinementBuilder{v, marks, wip} +} + +// RefineWith is a variant of Refine which uses callback functions instead of +// the builder pattern. +// +// The result is equivalent to passing the return value of [Value.Refine] to the +// first callback, and then continue passing the builder through any other +// callbacks in turn, and then calling [RefinementBuilder.NewValue] on the +// final result. +// +// The builder pattern approach of [Value.Refine] is more convenient for inline +// annotation of refinements when constructing a value, but this alternative +// approach may be more convenient when applying pre-defined collections of +// refinements, or when refinements are defined separately from the values +// they will apply to. +// +// Each refiner callback should return the same pointer that it was given, +// typically after having mutated it using the [RefinementBuilder] methods. +// It's invalid to return a different builder. +func (v Value) RefineWith(refiners ...func(*RefinementBuilder) *RefinementBuilder) Value { + if len(refiners) == 0 { + return v + } + origBuilder := v.Refine() + builder := origBuilder + for _, refiner := range refiners { + builder = refiner(builder) + if builder != origBuilder { + panic("refiner callback returned a different builder") + } + } + return builder.NewValue() +} + +// RefineNotNull is a shorthand for Value.Refine().NotNull().NewValue(), because +// declaring that a unknown value isn't null is by far the most common use of +// refinements. +func (v Value) RefineNotNull() Value { + return v.Refine().NotNull().NewValue() +} + +// RefinementBuilder is a supporting type for the [Value.Refine] method, +// using the builder pattern to apply zero or more constraints before +// constructing a new value with all of those constraints applied. +// +// Most of the methods of this type return the same reciever to allow +// for method call chaining. End call chains with a call to +// [RefinementBuilder.NewValue] to obtain the newly-refined value. +type RefinementBuilder struct { + orig Value + marks ValueMarks + wip unknownValRefinement +} + +func (b *RefinementBuilder) assertRefineable() { + if b.wip == nil { + panic(fmt.Sprintf("cannot refine a %#v value", b.orig.Type())) + } +} + +// NotNull constrains the value as definitely not being null. +// +// NotNull is valid when refining values of the following types: +// - number, boolean, and string values +// - list, set, or map types of any element type +// - values of object types +// - values of collection types +// - values of capsule types +// +// When refining any other type this function will panic. +// +// In particular note that it is not valid to constrain an untyped value +// -- a value whose type is `cty.DynamicPseudoType` -- as being non-null. +// An unknown value of an unknown type is always completely unconstrained. 
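//
// For example (illustrative caller-side sketch, not part of the vendored
// change, assuming cty's ValueRange accessors):
//
//	v := cty.UnknownVal(cty.List(cty.String)).Refine().
//		NotNull().
//		CollectionLengthLowerBound(1).
//		NewValue()
//
//	v.Range().DefinitelyNotNull() // true
//	v.Range().LengthLowerBound()  // 1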
+func (b *RefinementBuilder) NotNull() *RefinementBuilder { + b.assertRefineable() + + if b.orig.IsKnown() && b.orig.IsNull() { + panic("refining null value as non-null") + } + if b.wip.null() == tristateTrue { + panic("refining null value as non-null") + } + + b.wip.setNull(tristateFalse) + + return b +} + +// Null constrains the value as definitely null. +// +// Null is valid for the same types as [RefinementBuilder.NotNull]. +// When refining any other type this function will panic. +// +// Explicitly cnstraining a value to be null is strange because that suggests +// that the caller does actually know the value -- there is only one null +// value for each type constraint -- but this is here for symmetry with the +// fact that a [ValueRange] can also represent that a value is definitely null. +func (b *RefinementBuilder) Null() *RefinementBuilder { + b.assertRefineable() + + if b.orig.IsKnown() && !b.orig.IsNull() { + panic("refining non-null value as null") + } + if b.wip.null() == tristateFalse { + panic("refining non-null value as null") + } + + b.wip.setNull(tristateTrue) + + return b +} + +// NumericRange constrains the upper and/or lower bounds of a number value, +// or panics if this builder is not refining a number value. +// +// The two given values are interpreted as inclusive bounds and either one +// may be an unknown number if only one of the two bounds is currently known. +// If either of the given values is not a non-null number value then this +// function will panic. +func (b *RefinementBuilder) NumberRangeInclusive(min, max Value) *RefinementBuilder { + return b.NumberRangeLowerBound(min, true).NumberRangeUpperBound(max, true) +} + +// NumberRangeLowerBound constraints the lower bound of a number value, or +// panics if this builder is not refining a number value. +func (b *RefinementBuilder) NumberRangeLowerBound(min Value, inclusive bool) *RefinementBuilder { + b.assertRefineable() + + wip, ok := b.wip.(*refinementNumber) + if !ok { + panic(fmt.Sprintf("cannot refine numeric bounds for a %#v value", b.orig.Type())) + } + + if !min.IsKnown() { + // Nothing to do if the lower bound is unknown. + return b + } + if min.IsNull() { + panic("number range lower bound must not be null") + } + + if inclusive { + if gt := min.GreaterThan(b.orig); gt.IsKnown() && gt.True() { + panic(fmt.Sprintf("refining %#v to be >= %#v", b.orig, min)) + } + } else { + if gt := min.GreaterThanOrEqualTo(b.orig); gt.IsKnown() && gt.True() { + panic(fmt.Sprintf("refining %#v to be > %#v", b.orig, min)) + } + } + + if wip.min != NilVal { + var ok Value + if inclusive && !wip.minInc { + ok = min.GreaterThan(wip.min) + } else { + ok = min.GreaterThanOrEqualTo(wip.min) + } + if ok.IsKnown() && ok.False() { + return b // Our existing refinement is more constrained + } + } + + if min != NegativeInfinity { + wip.min = min + wip.minInc = inclusive + } + + wip.assertConsistentBounds() + return b +} + +// NumberRangeUpperBound constraints the upper bound of a number value, or +// panics if this builder is not refining a number value. +func (b *RefinementBuilder) NumberRangeUpperBound(max Value, inclusive bool) *RefinementBuilder { + b.assertRefineable() + + wip, ok := b.wip.(*refinementNumber) + if !ok { + panic(fmt.Sprintf("cannot refine numeric bounds for a %#v value", b.orig.Type())) + } + + if !max.IsKnown() { + // Nothing to do if the upper bound is unknown. 
+ return b + } + if max.IsNull() { + panic("number range upper bound must not be null") + } + + if inclusive { + if lt := max.LessThan(b.orig); lt.IsKnown() && lt.True() { + panic(fmt.Sprintf("refining %#v to be <= %#v", b.orig, max)) + } + } else { + if lt := max.LessThanOrEqualTo(b.orig); lt.IsKnown() && lt.True() { + panic(fmt.Sprintf("refining %#v to be < %#v", b.orig, max)) + } + } + + if wip.max != NilVal { + var ok Value + if inclusive && !wip.maxInc { + ok = max.LessThan(wip.max) + } else { + ok = max.LessThanOrEqualTo(wip.max) + } + if ok.IsKnown() && ok.False() { + return b // Our existing refinement is more constrained + } + } + + if max != PositiveInfinity { + wip.max = max + wip.maxInc = inclusive + } + + wip.assertConsistentBounds() + return b +} + +// CollectionLengthLowerBound constrains the lower bound of the length of a +// collection value, or panics if this builder is not refining a collection +// value. +func (b *RefinementBuilder) CollectionLengthLowerBound(min int) *RefinementBuilder { + b.assertRefineable() + + wip, ok := b.wip.(*refinementCollection) + if !ok { + panic(fmt.Sprintf("cannot refine collection length bounds for a %#v value", b.orig.Type())) + } + + minVal := NumberIntVal(int64(min)) + if b.orig.IsKnown() { + realLen := b.orig.Length() + if gt := minVal.GreaterThan(realLen); gt.IsKnown() && gt.True() { + panic(fmt.Sprintf("refining collection of length %#v with lower bound %#v", realLen, min)) + } + } + + if wip.minLen > min { + return b // Our existing refinement is more constrained + } + + wip.minLen = min + wip.assertConsistentLengthBounds() + + return b +} + +// CollectionLengthUpperBound constrains the upper bound of the length of a +// collection value, or panics if this builder is not refining a collection +// value. +// +// The upper bound must be a known, non-null number or this function will +// panic. +func (b *RefinementBuilder) CollectionLengthUpperBound(max int) *RefinementBuilder { + b.assertRefineable() + + wip, ok := b.wip.(*refinementCollection) + if !ok { + panic(fmt.Sprintf("cannot refine collection length bounds for a %#v value", b.orig.Type())) + } + + if b.orig.IsKnown() { + maxVal := NumberIntVal(int64(max)) + realLen := b.orig.Length() + if lt := maxVal.LessThan(realLen); lt.IsKnown() && lt.True() { + panic(fmt.Sprintf("refining collection of length %#v with upper bound %#v", realLen, max)) + } + } + + if wip.maxLen < max { + return b // Our existing refinement is more constrained + } + + wip.maxLen = max + wip.assertConsistentLengthBounds() + + return b +} + +// CollectionLength is a shorthand for passing the same length to both +// [CollectionLengthLowerBound] and [CollectionLengthUpperBound]. +// +// A collection with a refined length with equal bounds can sometimes collapse +// to a known value. Refining to length zero always produces a known value. +// The behavior for other lengths varies by collection type kind. +// +// If the unknown value is of a set type, it's only valid to use this method +// if the caller knows that there will be the given number of _unique_ values +// in the set. If any values might potentially coalesce together once known, +// use [CollectionLengthUpperBound] instead. +func (b *RefinementBuilder) CollectionLength(length int) *RefinementBuilder { + return b.CollectionLengthLowerBound(length).CollectionLengthUpperBound(length) +} + +// StringPrefix constrains the prefix of a string value, or panics if this +// builder is not refining a string value. 
+// +// The given prefix will be Unicode normalized in the same way that a +// cty.StringVal would be. +// +// Due to Unicode normalization and grapheme cluster rules, appending new +// characters to a string can change the meaning of earlier characters. +// StringPrefix may discard one or more characters from the end of the given +// prefix to avoid that problem. +// +// Although cty cannot check this automatically, applications should avoid +// relying on the discarding of the suffix for correctness. For example, if the +// prefix ends with an emoji base character then StringPrefix will discard it +// in case subsequent characters include emoji modifiers, but it's still +// incorrect for the final string to use an entirely different base character. +// +// Applications which fully control the final result and can guarantee the +// subsequent characters will not combine with the prefix may be able to use +// [RefinementBuilder.StringPrefixFull] instead, after carefully reviewing +// the constraints described in its documentation. +func (b *RefinementBuilder) StringPrefix(prefix string) *RefinementBuilder { + return b.StringPrefixFull(ctystrings.SafeKnownPrefix(prefix)) +} + +// StringPrefixFull is a variant of StringPrefix that will never shorten the +// given prefix to take into account the possibility of the next character +// combining with the end of the prefix. +// +// Applications which fully control the subsequent characters can use this +// as long as they guarantee that the characters added later cannot possibly +// combine with characters at the end of the prefix to form a single grapheme +// cluster. For example, it would be unsafe to use the full prefix "hello" if +// there is any chance that the final string will add a combining diacritic +// character after the "o", because that would then change the final character. +// +// Use [RefinementBuilder.StringPrefix] instead if an application cannot fully +// control the final result to avoid violating this rule. +func (b *RefinementBuilder) StringPrefixFull(prefix string) *RefinementBuilder { + b.assertRefineable() + + wip, ok := b.wip.(*refinementString) + if !ok { + panic(fmt.Sprintf("cannot refine string prefix for a %#v value", b.orig.Type())) + } + + // We must apply the same Unicode processing we'd normally use for a + // cty string so that the prefix will be comparable. + prefix = NormalizeString(prefix) + + // If we have a known string value then the given prefix must actually + // match it. + if b.orig.IsKnown() && !b.orig.IsNull() { + have := b.orig.AsString() + matchLen := len(have) + if l := len(prefix); l < matchLen { + matchLen = l + } + have = have[:matchLen] + new := prefix[:matchLen] + if have != new { + panic("refined prefix is inconsistent with known value") + } + } + + // If we already have a refined prefix then the overlapping parts of that + // and the new prefix must match. + { + matchLen := len(wip.prefix) + if l := len(prefix); l < matchLen { + matchLen = l + } + + have := wip.prefix[:matchLen] + new := prefix[:matchLen] + if have != new { + panic("refined prefix is inconsistent with previous refined prefix") + } + } + + // We'll only save the new prefix if it's longer than the one we already + // had. + if len(prefix) > len(wip.prefix) { + wip.prefix = prefix + } + + return b +} + +// NewValue completes the refinement process by constructing a new value +// that is guaranteed to meet all of the previously-specified refinements. 
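A sketch of how a refinement chain can collapse to a known value through NewValue (same assumed cty import, hypothetical values): constraining an unknown list to exactly zero elements yields a known empty list.

	v := cty.UnknownVal(cty.List(cty.String)).Refine().
		NotNull().
		CollectionLength(0).
		NewValue()
	// v is now cty.ListValEmpty(cty.String), a fully known value.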
+// +// If the original value being refined was known then the result is exactly +// that value, because otherwise the previous refinement calls would have +// panicked reporting the refinements as invalid for the value. +// +// If the original value was unknown then the result is typically also unknown +// but may have additional refinements compared to the original. If the applied +// refinements have reduced the range to a single exact value then the result +// might be that known value. +func (b *RefinementBuilder) NewValue() (ret Value) { + defer func() { + // Regardless of how we return, the new value should have the same + // marks as our original value. + ret = ret.WithMarks(b.marks) + }() + + if b.orig.IsKnown() { + return b.orig + } + + // We have a few cases where the value has been refined enough that we now + // know exactly what the value is, or at least we can produce a more + // detailed approximation of it. + switch b.wip.null() { + case tristateTrue: + // There is only one null value of each type so this is now known. + return NullVal(b.orig.Type()) + case tristateFalse: + // If we know it's definitely not null then we might have enough + // information to construct a known, non-null value. + if rfn, ok := b.wip.(*refinementNumber); ok { + // If both bounds are inclusive and equal then our value can + // only be the same number as the bounds. + if rfn.maxInc && rfn.minInc { + if rfn.min != NilVal && rfn.max != NilVal { + eq := rfn.min.Equals(rfn.max) + if eq.IsKnown() && eq.True() { + return rfn.min + } + } + } + } else if rfn, ok := b.wip.(*refinementCollection); ok { + // If both of the bounds are equal then we know the length is + // the same number as the bounds. + if rfn.minLen == rfn.maxLen { + knownLen := rfn.minLen + ty := b.orig.Type() + if knownLen == 0 { + // If we know the length is zero then we can construct + // a known value of any collection kind. + switch { + case ty.IsListType(): + return ListValEmpty(ty.ElementType()) + case ty.IsSetType(): + return SetValEmpty(ty.ElementType()) + case ty.IsMapType(): + return MapValEmpty(ty.ElementType()) + } + } else if ty.IsListType() { + // If we know the length of the list then we can + // create a known list with unknown elements instead + // of a wholly-unknown list. + elems := make([]Value, knownLen) + unk := UnknownVal(ty.ElementType()) + for i := range elems { + elems[i] = unk + } + return ListVal(elems) + } else if ty.IsSetType() && knownLen == 1 { + // If we know we have a one-element set then we + // know the one element can't possibly coalesce with + // anything else and so we can create a known set with + // an unknown element. + return SetVal([]Value{UnknownVal(ty.ElementType())}) + } + } + } + } + + return Value{ + ty: b.orig.ty, + v: &unknownType{refinement: b.wip}, + } +} + +// unknownValRefinment is an interface pretending to be a sum type representing +// the different kinds of unknown value refinements we support for different +// types of value. +type unknownValRefinement interface { + unknownValRefinementSigil() + copy() unknownValRefinement + null() tristateBool + setNull(tristateBool) + rawEqual(other unknownValRefinement) bool + GoString() string +} + +type refinementString struct { + refinementNullable + prefix string +} + +func (r *refinementString) unknownValRefinementSigil() {} + +func (r *refinementString) copy() unknownValRefinement { + ret := *r + // Everything in refinementString is immutable, so a shallow copy is sufficient. 
+ return &ret +} + +func (r *refinementString) rawEqual(other unknownValRefinement) bool { + { + other, ok := other.(*refinementString) + if !ok { + return false + } + return (r.refinementNullable.rawEqual(&other.refinementNullable) && + r.prefix == other.prefix) + } +} + +func (r *refinementString) GoString() string { + var b strings.Builder + b.WriteString(r.refinementNullable.GoString()) + if r.prefix != "" { + fmt.Fprintf(&b, ".StringPrefixFull(%q)", r.prefix) + } + return b.String() +} + +type refinementNumber struct { + refinementNullable + min, max Value + minInc, maxInc bool +} + +func (r *refinementNumber) unknownValRefinementSigil() {} + +func (r *refinementNumber) copy() unknownValRefinement { + ret := *r + // Everything in refinementNumber is immutable, so a shallow copy is sufficient. + return &ret +} + +func (r *refinementNumber) rawEqual(other unknownValRefinement) bool { + { + other, ok := other.(*refinementNumber) + if !ok { + return false + } + return (r.refinementNullable.rawEqual(&other.refinementNullable) && + r.min.RawEquals(other.min) && + r.max.RawEquals(other.max) && + r.minInc == other.minInc && + r.maxInc == other.maxInc) + } +} + +func (r *refinementNumber) GoString() string { + var b strings.Builder + b.WriteString(r.refinementNullable.GoString()) + if r.min != NilVal && r.min != NegativeInfinity { + fmt.Fprintf(&b, ".NumberLowerBound(%#v, %t)", r.min, r.minInc) + } + if r.max != NilVal && r.max != PositiveInfinity { + fmt.Fprintf(&b, ".NumberUpperBound(%#v, %t)", r.max, r.maxInc) + } + return b.String() +} + +func (r *refinementNumber) assertConsistentBounds() { + if r.min == NilVal || r.max == NilVal { + return // If only one bound is constrained then there's nothing to be inconsistent with + } + var ok Value + if r.minInc != r.maxInc { + ok = r.min.LessThan(r.max) + } else { + ok = r.min.LessThanOrEqualTo(r.max) + } + if ok.IsKnown() && ok.False() { + panic(fmt.Sprintf("number lower bound %#v is greater than upper bound %#v", r.min, r.max)) + } +} + +type refinementCollection struct { + refinementNullable + minLen, maxLen int +} + +func (r *refinementCollection) unknownValRefinementSigil() {} + +func (r *refinementCollection) copy() unknownValRefinement { + ret := *r + // Everything in refinementCollection is immutable, so a shallow copy is sufficient. + return &ret +} + +func (r *refinementCollection) rawEqual(other unknownValRefinement) bool { + { + other, ok := other.(*refinementCollection) + if !ok { + return false + } + return (r.refinementNullable.rawEqual(&other.refinementNullable) && + r.minLen == other.minLen && + r.maxLen == other.maxLen) + } +} + +func (r *refinementCollection) GoString() string { + var b strings.Builder + b.WriteString(r.refinementNullable.GoString()) + if r.minLen != 0 { + fmt.Fprintf(&b, ".CollectionLengthLowerBound(%d)", r.minLen) + } + if r.maxLen != math.MaxInt { + fmt.Fprintf(&b, ".CollectionLengthUpperBound(%d)", r.maxLen) + } + return b.String() +} + +func (r *refinementCollection) assertConsistentLengthBounds() { + if r.maxLen < r.minLen { + panic(fmt.Sprintf("collection length upper bound %d is less than lower bound %d", r.maxLen, r.minLen)) + } +} + +type refinementNullable struct { + isNull tristateBool +} + +func (r *refinementNullable) unknownValRefinementSigil() {} + +func (r *refinementNullable) copy() unknownValRefinement { + ret := *r + // Everything in refinementJustNull is immutable, so a shallow copy is sufficient. 
+ return &ret +} + +func (r *refinementNullable) null() tristateBool { + return r.isNull +} + +func (r *refinementNullable) setNull(v tristateBool) { + r.isNull = v +} + +func (r *refinementNullable) rawEqual(other unknownValRefinement) bool { + { + other, ok := other.(*refinementNullable) + if !ok { + return false + } + return r.isNull == other.isNull + } +} + +func (r *refinementNullable) GoString() string { + switch r.isNull { + case tristateFalse: + return ".NotNull()" + case tristateTrue: + return ".Null()" + default: + return "" + } +} + +type tristateBool rune + +const tristateTrue tristateBool = 'T' +const tristateFalse tristateBool = 'F' +const tristateUnknown tristateBool = 0 diff --git a/vendor/github.com/zclconf/go-cty/cty/value.go b/vendor/github.com/zclconf/go-cty/cty/value.go index f6a25ddef9..e5b29b603a 100644 --- a/vendor/github.com/zclconf/go-cty/cty/value.go +++ b/vendor/github.com/zclconf/go-cty/cty/value.go @@ -48,7 +48,8 @@ func (val Value) IsKnown() bool { if val.IsMarked() { return val.unmarkForce().IsKnown() } - return val.v != unknown + _, unknown := val.v.(*unknownType) + return !unknown } // IsNull returns true if the value is null. Values of any type can be diff --git a/vendor/github.com/zclconf/go-cty/cty/value_init.go b/vendor/github.com/zclconf/go-cty/cty/value_init.go index 6dcae273d9..a1743a09ef 100644 --- a/vendor/github.com/zclconf/go-cty/cty/value_init.go +++ b/vendor/github.com/zclconf/go-cty/cty/value_init.go @@ -5,8 +5,7 @@ import ( "math/big" "reflect" - "golang.org/x/text/unicode/norm" - + "github.com/zclconf/go-cty/cty/ctystrings" "github.com/zclconf/go-cty/cty/set" ) @@ -107,7 +106,7 @@ func StringVal(v string) Value { // A return value from this function can be meaningfully compared byte-for-byte // with a Value.AsString result. func NormalizeString(s string) string { - return norm.NFC.String(s) + return ctystrings.Normalize(s) } // ObjectVal returns a Value of an object type whose structure is defined diff --git a/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/vendor/github.com/zclconf/go-cty/cty/value_ops.go index 88b3637cf8..c4584bd936 100644 --- a/vendor/github.com/zclconf/go-cty/cty/value_ops.go +++ b/vendor/github.com/zclconf/go-cty/cty/value_ops.go @@ -33,7 +33,17 @@ func (val Value) GoString() string { return "cty.DynamicVal" } if !val.IsKnown() { - return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty) + rfn := val.v.(*unknownType).refinement + var suffix string + if rfn != nil { + calls := rfn.GoString() + if calls == ".NotNull()" { + suffix = ".RefineNotNull()" + } else { + suffix = ".Refine()" + rfn.GoString() + ".NewValue()" + } + } + return fmt.Sprintf("cty.UnknownVal(%#v)%s", val.ty, suffix) } // By the time we reach here we've dealt with all of the exceptions around @@ -125,13 +135,38 @@ func (val Value) Equals(other Value) Value { return val.Equals(other).WithMarks(valMarks, otherMarks) } - // Start by handling Unknown values before considering types. - // This needs to be done since Null values are always equal regardless of - // type. + // Some easy cases with comparisons to null. + switch { + case val.IsNull() && definitelyNotNull(other): + return False + case other.IsNull() && definitelyNotNull(val): + return False + } + // If we have one known value and one unknown value then we may be + // able to quickly disqualify equality based on the range of the unknown + // value. 
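A sketch of the range-based disqualification described above (hypothetical values, assuming the cty import): a known number that lies outside an unknown value's refined range can never equal it, so Equals can return a known false instead of an unknown bool.

	known := cty.NumberIntVal(5)
	unknown := cty.UnknownVal(cty.Number).Refine().
		NumberRangeLowerBound(cty.NumberIntVal(10), true).
		NewValue()
	eq := known.Equals(unknown) // cty.False, because 5 is below the refined lower bound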
+ if val.IsKnown() && !other.IsKnown() { + otherRng := other.Range() + if ok := otherRng.Includes(val); ok.IsKnown() && ok.False() { + return False + } + } else if other.IsKnown() && !val.IsKnown() { + valRng := val.Range() + if ok := valRng.Includes(other); ok.IsKnown() && ok.False() { + return False + } + } + + // We need to deal with unknown values before anything else with nulls + // because any unknown value that hasn't yet been refined as non-null + // could become null, and nulls of any types are equal to one another. + unknownResult := func() Value { + return UnknownVal(Bool).Refine().NotNull().NewValue() + } switch { case !val.IsKnown() && !other.IsKnown(): // both unknown - return UnknownVal(Bool) + return unknownResult() case val.IsKnown() && !other.IsKnown(): switch { case val.IsNull(), other.ty.HasDynamicTypes(): @@ -139,13 +174,13 @@ func (val Value) Equals(other Value) Value { // nulls of any type are equal. // An unknown with a dynamic type compares as unknown, which we need // to check before the type comparison below. - return UnknownVal(Bool) + return unknownResult() case !val.ty.Equals(other.ty): // There is no null comparison or dynamic types, so unequal types // will never be equal. return False default: - return UnknownVal(Bool) + return unknownResult() } case other.IsKnown() && !val.IsKnown(): switch { @@ -154,13 +189,13 @@ func (val Value) Equals(other Value) Value { // nulls of any type are equal. // An unknown with a dynamic type compares as unknown, which we need // to check before the type comparison below. - return UnknownVal(Bool) + return unknownResult() case !other.ty.Equals(val.ty): // There's no null comparison or dynamic types, so unequal types // will never be equal. return False default: - return UnknownVal(Bool) + return unknownResult() } } @@ -182,7 +217,7 @@ func (val Value) Equals(other Value) Value { return BoolVal(false) } - return UnknownVal(Bool) + return unknownResult() } if !val.ty.Equals(other.ty) { @@ -216,7 +251,7 @@ func (val Value) Equals(other Value) Value { } eq := lhs.Equals(rhs) if !eq.IsKnown() { - return UnknownVal(Bool) + return unknownResult() } if eq.False() { result = false @@ -237,7 +272,7 @@ func (val Value) Equals(other Value) Value { } eq := lhs.Equals(rhs) if !eq.IsKnown() { - return UnknownVal(Bool) + return unknownResult() } if eq.False() { result = false @@ -259,7 +294,7 @@ func (val Value) Equals(other Value) Value { } eq := lhs.Equals(rhs) if !eq.IsKnown() { - return UnknownVal(Bool) + return unknownResult() } if eq.False() { result = false @@ -276,8 +311,8 @@ func (val Value) Equals(other Value) Value { // in one are also in the other. 
for it := s1.Iterator(); it.Next(); { rv := it.Value() - if rv == unknown { // "unknown" is the internal representation of unknown-ness - return UnknownVal(Bool) + if _, unknown := rv.(*unknownType); unknown { // "*unknownType" is the internal representation of unknown-ness + return unknownResult() } if !s2.Has(rv) { equal = false @@ -285,8 +320,8 @@ func (val Value) Equals(other Value) Value { } for it := s2.Iterator(); it.Next(); { rv := it.Value() - if rv == unknown { // "unknown" is the internal representation of unknown-ness - return UnknownVal(Bool) + if _, unknown := rv.(*unknownType); unknown { // "*unknownType" is the internal representation of unknown-ness + return unknownResult() } if !s1.Has(rv) { equal = false @@ -313,7 +348,7 @@ func (val Value) Equals(other Value) Value { } eq := lhs.Equals(rhs) if !eq.IsKnown() { - return UnknownVal(Bool) + return unknownResult() } if eq.False() { result = false @@ -393,7 +428,17 @@ func (val Value) RawEquals(other Value) bool { other = other.unmarkForce() if (!val.IsKnown()) && (!other.IsKnown()) { - return true + // If either unknown value has refinements then they must match. + valRfn := val.v.(*unknownType).refinement + otherRfn := other.v.(*unknownType).refinement + switch { + case (valRfn == nil) != (otherRfn == nil): + return false + case valRfn != nil: + return valRfn.rawEqual(otherRfn) + default: + return true + } } if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) { return false @@ -548,7 +593,8 @@ func (val Value) Add(other Value) Value { if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { shortCircuit = forceShortCircuitType(shortCircuit, Number) - return *shortCircuit + ret := shortCircuit.RefineWith(numericRangeArithmetic(Value.Add, val.Range(), other.Range())) + return ret.RefineNotNull() } ret := new(big.Float) @@ -567,7 +613,8 @@ func (val Value) Subtract(other Value) Value { if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { shortCircuit = forceShortCircuitType(shortCircuit, Number) - return *shortCircuit + ret := shortCircuit.RefineWith(numericRangeArithmetic(Value.Subtract, val.Range(), other.Range())) + return ret.RefineNotNull() } return val.Add(other.Negate()) @@ -583,7 +630,7 @@ func (val Value) Negate() Value { if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { shortCircuit = forceShortCircuitType(shortCircuit, Number) - return *shortCircuit + return (*shortCircuit).RefineNotNull() } ret := new(big.Float).Neg(val.v.(*big.Float)) @@ -600,8 +647,14 @@ func (val Value) Multiply(other Value) Value { } if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + // If either value is exactly zero then the result must either be + // zero or an error. + if val == Zero || other == Zero { + return Zero + } shortCircuit = forceShortCircuitType(shortCircuit, Number) - return *shortCircuit + ret := shortCircuit.RefineWith(numericRangeArithmetic(Value.Multiply, val.Range(), other.Range())) + return ret.RefineNotNull() } // find the larger precision of the arguments @@ -646,7 +699,10 @@ func (val Value) Divide(other Value) Value { if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { shortCircuit = forceShortCircuitType(shortCircuit, Number) - return *shortCircuit + // TODO: We could potentially refine the range of the result here, but + // we don't right now because our division operation is not monotone + // if the denominator could potentially be zero. 
+ return (*shortCircuit).RefineNotNull() } ret := new(big.Float) @@ -678,7 +734,7 @@ func (val Value) Modulo(other Value) Value { if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { shortCircuit = forceShortCircuitType(shortCircuit, Number) - return *shortCircuit + return (*shortCircuit).RefineNotNull() } // We cheat a bit here with infinities, just abusing the Multiply operation @@ -716,7 +772,7 @@ func (val Value) Absolute() Value { if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { shortCircuit = forceShortCircuitType(shortCircuit, Number) - return *shortCircuit + return (*shortCircuit).Refine().NotNull().NumberRangeInclusive(Zero, UnknownVal(Number)).NewValue() } ret := (&big.Float{}).Abs(val.v.(*big.Float)) @@ -889,23 +945,23 @@ func (val Value) HasIndex(key Value) Value { } if val.ty == DynamicPseudoType { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } switch { case val.Type().IsListType(): if key.Type() == DynamicPseudoType { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } if key.Type() != Number { return False } if !key.IsKnown() { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } if !val.IsKnown() { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } index, accuracy := key.v.(*big.Float).Int64() @@ -916,17 +972,17 @@ func (val Value) HasIndex(key Value) Value { return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0) case val.Type().IsMapType(): if key.Type() == DynamicPseudoType { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } if key.Type() != String { return False } if !key.IsKnown() { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } if !val.IsKnown() { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } keyStr := key.v.(string) @@ -935,14 +991,14 @@ func (val Value) HasIndex(key Value) Value { return BoolVal(exists) case val.Type().IsTupleType(): if key.Type() == DynamicPseudoType { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } if key.Type() != Number { return False } if !key.IsKnown() { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } index, accuracy := key.v.(*big.Float).Int64() @@ -977,10 +1033,10 @@ func (val Value) HasElement(elem Value) Value { panic("not a set type") } if !val.IsKnown() || !elem.IsKnown() { - return UnknownVal(Bool) + return UnknownVal(Bool).RefineNotNull() } if val.IsNull() { - panic("can't call HasElement on a nil value") + panic("can't call HasElement on a null value") } if !ty.ElementType().Equals(elem.Type()) { return False @@ -1012,7 +1068,10 @@ func (val Value) Length() Value { } if !val.IsKnown() { - return UnknownVal(Number) + // If the whole collection isn't known then the length isn't known + // either, but we can still put some bounds on the range of the result. + rng := val.Range() + return UnknownVal(Number).RefineWith(valueRefineLengthResult(rng)) } if val.Type().IsSetType() { // The Length rules are a little different for sets because if any @@ -1030,13 +1089,26 @@ func (val Value) Length() Value { // unknown value cannot represent more than one known value. return NumberIntVal(storeLength) } - // Otherwise, we cannot predict the length. - return UnknownVal(Number) + // Otherwise, we cannot predict the length exactly but we can at + // least constrain both bounds of its range, because value coalescing + // can only ever reduce the number of elements in the set. 
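For example (hypothetical values, assuming the cty import), a known set whose elements are not all known now reports a refined length rather than a wholly unknown one:

	s := cty.SetVal([]cty.Value{
		cty.StringVal("a"),
		cty.UnknownVal(cty.String),
	})
	n := s.Length() // unknown cty.Number, refined to be non-null and between 1 and 2 inclusive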
+ return UnknownVal(Number).Refine().NotNull().NumberRangeInclusive(NumberIntVal(1), NumberIntVal(storeLength)).NewValue() } return NumberIntVal(int64(val.LengthInt())) } +func valueRefineLengthResult(collRng ValueRange) func(*RefinementBuilder) *RefinementBuilder { + return func(b *RefinementBuilder) *RefinementBuilder { + return b. + NotNull(). + NumberRangeInclusive( + NumberIntVal(int64(collRng.LengthLowerBound())), + NumberIntVal(int64(collRng.LengthUpperBound())), + ) + } +} + // LengthInt is like Length except it returns an int. It has the same behavior // as Length except that it will panic if the receiver is unknown. // @@ -1167,7 +1239,7 @@ func (val Value) Not() Value { if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil { shortCircuit = forceShortCircuitType(shortCircuit, Bool) - return *shortCircuit + return (*shortCircuit).RefineNotNull() } return BoolVal(!val.v.(bool)) @@ -1183,8 +1255,14 @@ func (val Value) And(other Value) Value { } if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + // If either value is known to be exactly False then it doesn't + // matter what the other value is, because the final result must + // either be False or an error. + if val == False || other == False { + return False + } shortCircuit = forceShortCircuitType(shortCircuit, Bool) - return *shortCircuit + return (*shortCircuit).RefineNotNull() } return BoolVal(val.v.(bool) && other.v.(bool)) @@ -1200,8 +1278,14 @@ func (val Value) Or(other Value) Value { } if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + // If either value is known to be exactly True then it doesn't + // matter what the other value is, because the final result must + // either be True or an error. + if val == True || other == True { + return True + } shortCircuit = forceShortCircuitType(shortCircuit, Bool) - return *shortCircuit + return (*shortCircuit).RefineNotNull() } return BoolVal(val.v.(bool) || other.v.(bool)) @@ -1217,8 +1301,30 @@ func (val Value) LessThan(other Value) Value { } if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + // We might be able to return a known answer even with unknown inputs. + // FIXME: This is more conservative than it needs to be, because it + // treats all bounds as exclusive bounds. + valRng := val.Range() + otherRng := other.Range() + if valRng.TypeConstraint() == Number && other.Range().TypeConstraint() == Number { + valMax, _ := valRng.NumberUpperBound() + otherMin, _ := otherRng.NumberLowerBound() + if valMax.IsKnown() && otherMin.IsKnown() { + if r := valMax.LessThan(otherMin); r.True() { + return True + } + } + valMin, _ := valRng.NumberLowerBound() + otherMax, _ := otherRng.NumberUpperBound() + if valMin.IsKnown() && otherMax.IsKnown() { + if r := valMin.GreaterThan(otherMax); r.True() { + return False + } + } + } + shortCircuit = forceShortCircuitType(shortCircuit, Bool) - return *shortCircuit + return (*shortCircuit).RefineNotNull() } return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0) @@ -1234,8 +1340,30 @@ func (val Value) GreaterThan(other Value) Value { } if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + // We might be able to return a known answer even with unknown inputs. + // FIXME: This is more conservative than it needs to be, because it + // treats all bounds as exclusive bounds. 
+ valRng := val.Range() + otherRng := other.Range() + if valRng.TypeConstraint() == Number && other.Range().TypeConstraint() == Number { + valMin, _ := valRng.NumberLowerBound() + otherMax, _ := otherRng.NumberUpperBound() + if valMin.IsKnown() && otherMax.IsKnown() { + if r := valMin.GreaterThan(otherMax); r.True() { + return True + } + } + valMax, _ := valRng.NumberUpperBound() + otherMin, _ := otherRng.NumberLowerBound() + if valMax.IsKnown() && otherMin.IsKnown() { + if r := valMax.LessThan(otherMin); r.True() { + return False + } + } + } + shortCircuit = forceShortCircuitType(shortCircuit, Bool) - return *shortCircuit + return (*shortCircuit).RefineNotNull() } return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0) diff --git a/vendor/github.com/zclconf/go-cty/cty/value_range.go b/vendor/github.com/zclconf/go-cty/cty/value_range.go new file mode 100644 index 0000000000..36f21946e3 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/value_range.go @@ -0,0 +1,408 @@ +package cty + +import ( + "fmt" + "math" + "strings" +) + +// Range returns an object that offers partial information about the range +// of the receiver. +// +// This is most relevant for unknown values, because it gives access to any +// optional additional constraints on the final value (specified by the source +// of the value using "refinements") beyond what we can assume from the value's +// type. +// +// Calling Range for a known value is a little strange, but it's supported by +// returning a [ValueRange] object that describes the exact value as closely +// as possible. Typically a caller should work directly with the exact value +// in that case, but some purposes might only need the level of detail +// offered by ranges and so can share code between both known and unknown +// values. +func (v Value) Range() ValueRange { + // For an unknown value we just use its own refinements. + if unk, isUnk := v.v.(*unknownType); isUnk { + refinement := unk.refinement + if refinement == nil { + // We'll generate an unconstrained refinement, just to + // simplify the code in ValueRange methods which can + // therefore assume that there's always a refinement. + refinement = &refinementNullable{isNull: tristateUnknown} + } + return ValueRange{v.Type(), refinement} + } + + if v.IsNull() { + // If we know a value is null then we'll just report that, + // since no other refinements make sense for a definitely-null value. + return ValueRange{ + v.Type(), + &refinementNullable{isNull: tristateTrue}, + } + } + + // For a known value we construct synthetic refinements that match + // the value, just as a convenience for callers that want to share + // codepaths between both known and unknown values. + ty := v.Type() + var synth unknownValRefinement + switch { + case ty == String: + synth = &refinementString{ + prefix: v.AsString(), + } + case ty == Number: + synth = &refinementNumber{ + min: v, + max: v, + minInc: true, + maxInc: true, + } + case ty.IsCollectionType(): + if lenVal := v.Length(); lenVal.IsKnown() { + l, _ := lenVal.AsBigFloat().Int64() + synth = &refinementCollection{ + minLen: int(l), + maxLen: int(l), + } + } else { + synth = &refinementCollection{ + minLen: 0, + maxLen: math.MaxInt, + } + } + + default: + // If we don't have anything else to say then we can at least + // guarantee that the value isn't null. 
+ synth = &refinementNullable{} + } + + // If we get down here then the value is definitely not null + synth.setNull(tristateFalse) + + return ValueRange{ty, synth} +} + +// ValueRange offers partial information about the range of a value. +// +// This is primarily interesting for unknown values, because it provides access +// to any additional known constraints (specified using "refinements") on the +// range of the value beyond what is represented by the value's type. +type ValueRange struct { + ty Type + raw unknownValRefinement +} + +// TypeConstraint returns a type constraint describing the value's type as +// precisely as possible with the available information. +func (r ValueRange) TypeConstraint() Type { + return r.ty +} + +// CouldBeNull returns true unless the value being described is definitely +// known to represent a non-null value. +func (r ValueRange) CouldBeNull() bool { + if r.raw == nil { + // A totally-unconstrained unknown value could be null + return true + } + return r.raw.null() != tristateFalse +} + +// DefinitelyNotNull returns true if there are no null values in the range. +func (r ValueRange) DefinitelyNotNull() bool { + if r.raw == nil { + // A totally-unconstrained unknown value could be null + return false + } + return r.raw.null() == tristateFalse +} + +// NumberLowerBound returns information about the lower bound of the range of +// a number value, or panics if the value is definitely not a number. +// +// If the value is nullable then the result represents the range of the number +// only if it turns out not to be null. +// +// The resulting value might itself be an unknown number if there is no +// known lower bound. In that case the "inclusive" flag is meaningless. +func (r ValueRange) NumberLowerBound() (min Value, inclusive bool) { + if r.ty == DynamicPseudoType { + // We don't even know if this is a number yet. + return UnknownVal(Number), false + } + if r.ty != Number { + panic(fmt.Sprintf("NumberLowerBound for %#v", r.ty)) + } + if rfn, ok := r.raw.(*refinementNumber); ok && rfn.min != NilVal { + if !rfn.min.IsKnown() { + return NegativeInfinity, true + } + return rfn.min, rfn.minInc + } + return NegativeInfinity, false +} + +// NumberUpperBound returns information about the upper bound of the range of +// a number value, or panics if the value is definitely not a number. +// +// If the value is nullable then the result represents the range of the number +// only if it turns out not to be null. +// +// The resulting value might itself be an unknown number if there is no +// known upper bound. In that case the "inclusive" flag is meaningless. +func (r ValueRange) NumberUpperBound() (max Value, inclusive bool) { + if r.ty == DynamicPseudoType { + // We don't even know if this is a number yet. + return UnknownVal(Number), false + } + if r.ty != Number { + panic(fmt.Sprintf("NumberUpperBound for %#v", r.ty)) + } + if rfn, ok := r.raw.(*refinementNumber); ok && rfn.max != NilVal { + if !rfn.max.IsKnown() { + return PositiveInfinity, true + } + return rfn.max, rfn.maxInc + } + return PositiveInfinity, false +} + +// StringPrefix returns a string that is guaranteed to be the prefix of +// the string value being described, or panics if the value is definitely not +// a string. +// +// If the value is nullable then the result represents the prefix of the string +// only if it turns out to not be null. +// +// If the resulting value is zero-length then the value could potentially be +// a string but it has no known prefix. 
+// +// cty.String values always contain normalized UTF-8 sequences; the result is +// also guaranteed to be a normalized UTF-8 sequence so the result also +// represents the exact bytes of the string value's prefix. +func (r ValueRange) StringPrefix() string { + if r.ty == DynamicPseudoType { + // We don't even know if this is a string yet. + return "" + } + if r.ty != String { + panic(fmt.Sprintf("StringPrefix for %#v", r.ty)) + } + if rfn, ok := r.raw.(*refinementString); ok { + return rfn.prefix + } + return "" +} + +// LengthLowerBound returns information about the lower bound of the length of +// a collection-typed value, or panics if the value is definitely not a +// collection. +// +// If the value is nullable then the result represents the range of the length +// only if the value turns out not to be null. +func (r ValueRange) LengthLowerBound() int { + if r.ty == DynamicPseudoType { + // We don't even know if this is a collection yet. + return 0 + } + if !r.ty.IsCollectionType() { + panic(fmt.Sprintf("LengthLowerBound for %#v", r.ty)) + } + if rfn, ok := r.raw.(*refinementCollection); ok { + return rfn.minLen + } + return 0 +} + +// LengthUpperBound returns information about the upper bound of the length of +// a collection-typed value, or panics if the value is definitely not a +// collection. +// +// If the value is nullable then the result represents the range of the length +// only if the value turns out not to be null. +// +// The resulting value might itself be an unknown number if there is no +// known upper bound. In that case the "inclusive" flag is meaningless. +func (r ValueRange) LengthUpperBound() int { + if r.ty == DynamicPseudoType { + // We don't even know if this is a collection yet. + return math.MaxInt + } + if !r.ty.IsCollectionType() { + panic(fmt.Sprintf("LengthUpperBound for %#v", r.ty)) + } + if rfn, ok := r.raw.(*refinementCollection); ok { + return rfn.maxLen + } + return math.MaxInt +} + +// Includes determines whether the given value is in the receiving range. +// +// It can return only three possible values: +// - [cty.True] if the range definitely includes the value +// - [cty.False] if the range definitely does not include the value +// - An unknown value of [cty.Bool] if there isn't enough information to decide. +// +// This function is not fully comprehensive: it may return an unknown value +// in some cases where a definitive value could be computed in principle, and +// those same situations may begin returning known values in later releases as +// the rules are refined to be more complete. Currently the rules focus mainly +// on answering [cty.False], because disproving membership tends to be more +// useful than proving membership. +func (r ValueRange) Includes(v Value) Value { + unknownResult := UnknownVal(Bool).RefineNotNull() + + if r.raw.null() == tristateTrue { + if v.IsNull() { + return True + } else { + return False + } + } + if r.raw.null() == tristateFalse { + if v.IsNull() { + return False + } + // A definitely-not-null value could potentially match + // but we won't know until we do some more checks below. + } + // If our range includes both null and non-null values and the value is + // null then it's definitely in range. + if v.IsNull() { + return True + } + if len(v.Type().TestConformance(r.TypeConstraint())) != 0 { + // If the value doesn't conform to the type constraint then it's + // definitely not in the range. 
+ return False + } + if v.Type() == DynamicPseudoType { + // If it's an unknown value of an unknown type then there's no + // further tests we can make. + return unknownResult + } + + switch r.raw.(type) { + case *refinementString: + if v.IsKnown() { + prefix := r.StringPrefix() + got := v.AsString() + + if !strings.HasPrefix(got, prefix) { + return False + } + } + case *refinementCollection: + lenVal := v.Length() + minLen := NumberIntVal(int64(r.LengthLowerBound())) + maxLen := NumberIntVal(int64(r.LengthUpperBound())) + if minOk := lenVal.GreaterThanOrEqualTo(minLen); minOk.IsKnown() && minOk.False() { + return False + } + if maxOk := lenVal.LessThanOrEqualTo(maxLen); maxOk.IsKnown() && maxOk.False() { + return False + } + case *refinementNumber: + minVal, minInc := r.NumberLowerBound() + maxVal, maxInc := r.NumberUpperBound() + var minOk, maxOk Value + if minInc { + minOk = v.GreaterThanOrEqualTo(minVal) + } else { + minOk = v.GreaterThan(minVal) + } + if maxInc { + maxOk = v.LessThanOrEqualTo(maxVal) + } else { + maxOk = v.LessThan(maxVal) + } + if minOk.IsKnown() && minOk.False() { + return False + } + if maxOk.IsKnown() && maxOk.False() { + return False + } + } + + // If we fall out here then we don't have enough information to decide. + return unknownResult +} + +// numericRangeArithmetic is a helper we use to calculate derived numeric ranges +// for arithmetic on refined numeric values. +// +// op must be a monotone operation. numericRangeArithmetic adapts that operation +// into the equivalent interval arithmetic operation. +// +// The result is a superset of the range of the given operation against the +// given input ranges, if it's possible to calculate that without encountering +// an invalid operation. Currently the result is inexact due to ignoring +// the inclusiveness of the input bounds and just always returning inclusive +// bounds. +func numericRangeArithmetic(op func(a, b Value) Value, a, b ValueRange) func(*RefinementBuilder) *RefinementBuilder { + wrapOp := func(a, b Value) (ret Value) { + // Our functions have various panicking edge cases involving incompatible + // uses of infinities. To keep things simple here we'll catch those + // and just return an unconstrained number. + defer func() { + if v := recover(); v != nil { + ret = UnknownVal(Number) + } + }() + return op(a, b) + } + + return func(builder *RefinementBuilder) *RefinementBuilder { + aMin, _ := a.NumberLowerBound() + aMax, _ := a.NumberUpperBound() + bMin, _ := b.NumberLowerBound() + bMax, _ := b.NumberUpperBound() + + v1 := wrapOp(aMin, bMin) + v2 := wrapOp(aMin, bMax) + v3 := wrapOp(aMax, bMin) + v4 := wrapOp(aMax, bMax) + + newMin := mostNumberValue(Value.LessThan, v1, v2, v3, v4) + newMax := mostNumberValue(Value.GreaterThan, v1, v2, v3, v4) + + if isInf := newMin.Equals(NegativeInfinity); isInf.IsKnown() && isInf.False() { + builder = builder.NumberRangeLowerBound(newMin, true) + } + if isInf := newMax.Equals(PositiveInfinity); isInf.IsKnown() && isInf.False() { + builder = builder.NumberRangeUpperBound(newMax, true) + } + return builder + } +} + +func mostNumberValue(op func(i, j Value) Value, v1 Value, vN ...Value) Value { + r := v1 + for _, v := range vN { + more := op(v, r) + if !more.IsKnown() { + return UnknownVal(Number) + } + if more.True() { + r = v + } + } + return r +} + +// definitelyNotNull is a convenient helper for the common situation of checking +// whether a value could possibly be null. 
+// +// Returns true if the given value is either a known value that isn't null +// or an unknown value that has been refined to exclude null values from its +// range. +func definitelyNotNull(v Value) bool { + if v.IsKnown() { + return !v.IsNull() + } + return v.Range().DefinitelyNotNull() +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go index bc62161d6e..00f963ea20 100644 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -5,71 +5,18 @@ // Package curve25519 provides an implementation of the X25519 function, which // performs scalar multiplication on the elliptic curve known as Curve25519. // See RFC 7748. +// +// Starting in Go 1.20, this package is a wrapper for the X25519 implementation +// in the crypto/ecdh package. package curve25519 // import "golang.org/x/crypto/curve25519" -import ( - "crypto/subtle" - "errors" - "strconv" - - "golang.org/x/crypto/curve25519/internal/field" -) - // ScalarMult sets dst to the product scalar * point. // // Deprecated: when provided a low-order point, ScalarMult will set dst to all // zeroes, irrespective of the scalar. Instead, use the X25519 function, which // will return an error. func ScalarMult(dst, scalar, point *[32]byte) { - var e [32]byte - - copy(e[:], scalar[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element - x1.SetBytes(point[:]) - x2.One() - x3.Set(&x1) - z3.One() - - swap := 0 - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int(b) - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - swap = int(b) - - tmp0.Subtract(&x3, &z3) - tmp1.Subtract(&x2, &z2) - x2.Add(&x2, &z2) - z2.Add(&x3, &z3) - z3.Multiply(&tmp0, &x2) - z2.Multiply(&z2, &tmp1) - tmp0.Square(&tmp1) - tmp1.Square(&x2) - x3.Add(&z3, &z2) - z2.Subtract(&z3, &z2) - x2.Multiply(&tmp1, &tmp0) - tmp1.Subtract(&tmp1, &tmp0) - z2.Square(&z2) - - z3.Mult32(&tmp1, 121666) - x3.Square(&x3) - tmp0.Add(&tmp0, &z3) - z3.Multiply(&x1, &z2) - z2.Multiply(&tmp1, &tmp0) - } - - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - - z2.Invert(&z2) - x2.Multiply(&x2, &z2) - copy(dst[:], x2.Bytes()) + scalarMult(dst, scalar, point) } // ScalarBaseMult sets dst to the product scalar * base where base is the @@ -78,7 +25,7 @@ func ScalarMult(dst, scalar, point *[32]byte) { // It is recommended to use the X25519 function with Basepoint instead, as // copying into fixed size arrays can lead to unexpected bugs. func ScalarBaseMult(dst, scalar *[32]byte) { - ScalarMult(dst, scalar, &basePoint) + scalarBaseMult(dst, scalar) } const ( @@ -91,21 +38,10 @@ const ( // Basepoint is the canonical Curve25519 generator. var Basepoint []byte -var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} +var basePoint = [32]byte{9} func init() { Basepoint = basePoint[:] } -func checkBasepoint() { - if subtle.ConstantTimeCompare(Basepoint, []byte{ - 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }) != 1 { - panic("curve25519: global Basepoint value was modified") - } -} - // X25519 returns the result of the scalar multiplication (scalar * point), // according to RFC 7748, Section 5. scalar, point and the return value are // slices of 32 bytes. 
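The exported X25519 API is unchanged by this refactoring; only the implementation moves (delegating to crypto/ecdh on Go 1.20+, see the new files below). A minimal, illustrative usage sketch:

	package main

	import (
		"crypto/rand"
		"fmt"

		"golang.org/x/crypto/curve25519"
	)

	func main() {
		// Generate a random private scalar and derive the matching public key
		// by multiplying it with the curve's base point.
		priv := make([]byte, 32)
		if _, err := rand.Read(priv); err != nil {
			panic(err)
		}
		pub, err := curve25519.X25519(priv, curve25519.Basepoint)
		if err != nil {
			panic(err)
		}
		fmt.Printf("public key: %x\n", pub)
	}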
@@ -121,26 +57,3 @@ func X25519(scalar, point []byte) ([]byte, error) { var dst [32]byte return x25519(&dst, scalar, point) } - -func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { - var in [32]byte - if l := len(scalar); l != 32 { - return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32") - } - if l := len(point); l != 32 { - return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32") - } - copy(in[:], scalar) - if &point[0] == &Basepoint[0] { - checkBasepoint() - ScalarBaseMult(dst, &in) - } else { - var base, zero [32]byte - copy(base[:], point) - ScalarMult(dst, &in, &base) - if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { - return nil, errors.New("bad input point: low order point") - } - } - return dst[:], nil -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go b/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go new file mode 100644 index 0000000000..ba647e8d77 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go @@ -0,0 +1,105 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.20 + +package curve25519 + +import ( + "crypto/subtle" + "errors" + "strconv" + + "golang.org/x/crypto/curve25519/internal/field" +) + +func scalarMult(dst, scalar, point *[32]byte) { + var e [32]byte + + copy(e[:], scalar[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element + x1.SetBytes(point[:]) + x2.One() + x3.Set(&x1) + z3.One() + + swap := 0 + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int(b) + x2.Swap(&x3, swap) + z2.Swap(&z3, swap) + swap = int(b) + + tmp0.Subtract(&x3, &z3) + tmp1.Subtract(&x2, &z2) + x2.Add(&x2, &z2) + z2.Add(&x3, &z3) + z3.Multiply(&tmp0, &x2) + z2.Multiply(&z2, &tmp1) + tmp0.Square(&tmp1) + tmp1.Square(&x2) + x3.Add(&z3, &z2) + z2.Subtract(&z3, &z2) + x2.Multiply(&tmp1, &tmp0) + tmp1.Subtract(&tmp1, &tmp0) + z2.Square(&z2) + + z3.Mult32(&tmp1, 121666) + x3.Square(&x3) + tmp0.Add(&tmp0, &z3) + z3.Multiply(&x1, &z2) + z2.Multiply(&tmp1, &tmp0) + } + + x2.Swap(&x3, swap) + z2.Swap(&z3, swap) + + z2.Invert(&z2) + x2.Multiply(&x2, &z2) + copy(dst[:], x2.Bytes()) +} + +func scalarBaseMult(dst, scalar *[32]byte) { + checkBasepoint() + scalarMult(dst, scalar, &basePoint) +} + +func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { + var in [32]byte + if l := len(scalar); l != 32 { + return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32") + } + if l := len(point); l != 32 { + return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32") + } + copy(in[:], scalar) + if &point[0] == &Basepoint[0] { + scalarBaseMult(dst, &in) + } else { + var base, zero [32]byte + copy(base[:], point) + scalarMult(dst, &in, &base) + if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { + return nil, errors.New("bad input point: low order point") + } + } + return dst[:], nil +} + +func checkBasepoint() { + if subtle.ConstantTimeCompare(Basepoint, []byte{ + 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }) != 1 { + panic("curve25519: global Basepoint value was modified") + } +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go 
b/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go new file mode 100644 index 0000000000..627df49727 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go @@ -0,0 +1,46 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 + +package curve25519 + +import "crypto/ecdh" + +func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { + curve := ecdh.X25519() + pub, err := curve.NewPublicKey(point) + if err != nil { + return nil, err + } + priv, err := curve.NewPrivateKey(scalar) + if err != nil { + return nil, err + } + out, err := priv.ECDH(pub) + if err != nil { + return nil, err + } + copy(dst[:], out) + return dst[:], nil +} + +func scalarMult(dst, scalar, point *[32]byte) { + if _, err := x25519(dst, scalar[:], point[:]); err != nil { + // The only error condition for x25519 when the inputs are 32 bytes long + // is if the output would have been the all-zero value. + for i := range dst { + dst[i] = 0 + } + } +} + +func scalarBaseMult(dst, scalar *[32]byte) { + curve := ecdh.X25519() + priv, err := curve.NewPrivateKey(scalar[:]) + if err != nil { + panic("curve25519: internal error: scalarBaseMult was not 32 bytes") + } + copy(dst[:], priv.PublicKey().Bytes()) +} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go index 7b5b78cbd6..2671217da5 100644 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go @@ -245,7 +245,7 @@ func feSquareGeneric(v, a *Element) { v.carryPropagate() } -// carryPropagate brings the limbs below 52 bits by applying the reduction +// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction // identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline func (v *Element) carryPropagateGeneric() *Element { c0 := v.l0 >> 51 diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go index faa2fb3693..d62f787e9d 100644 --- a/vendor/golang.org/x/crypto/openpgp/keys.go +++ b/vendor/golang.org/x/crypto/openpgp/keys.go @@ -61,7 +61,7 @@ type Key struct { type KeyRing interface { // KeysById returns the set of keys that have the given key id. KeysById(id uint64) []Key - // KeysByIdAndUsage returns the set of keys with the given id + // KeysByIdUsage returns the set of keys with the given id // that also meet the key usage given by requiredUsage. // The requiredUsage is expressed as the bitwise-OR of // packet.KeyFlag* values. @@ -183,7 +183,7 @@ func (el EntityList) KeysById(id uint64) (keys []Key) { return } -// KeysByIdAndUsage returns the set of keys with the given id that also meet +// KeysByIdUsage returns the set of keys with the given id that also meet // the key usage given by requiredUsage. The requiredUsage is expressed as // the bitwise-OR of packet.KeyFlag* values. 
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { diff --git a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go index e8f0b5caa7..353f945247 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go @@ -60,7 +60,7 @@ func (c *Compressed) parse(r io.Reader) error { return err } -// compressedWriterCloser represents the serialized compression stream +// compressedWriteCloser represents the serialized compression stream // header and the compressor. Its Close() method ensures that both the // compressor and serialized stream header are closed. Its Write() // method writes to the compressor. diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go index 87f48552ce..741e984f33 100644 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -114,7 +114,8 @@ var cipherModes = map[string]*cipherMode{ "arcfour": {16, 0, streamCipherMode(0, newRC4)}, // AEAD ciphers - gcmCipherID: {16, 12, newGCMCipher}, + gcm128CipherID: {16, 12, newGCMCipher}, + gcm256CipherID: {32, 12, newGCMCipher}, chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, // CBC mode is insecure and so is not included in the default config. diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index c7964275de..e6a77f26a0 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -28,7 +28,7 @@ const ( // supportedCiphers lists ciphers we support but might not recommend. var supportedCiphers = []string{ "aes128-ctr", "aes192-ctr", "aes256-ctr", - "aes128-gcm@openssh.com", + "aes128-gcm@openssh.com", gcm256CipherID, chacha20Poly1305ID, "arcfour256", "arcfour128", "arcfour", aes128cbcID, @@ -37,7 +37,7 @@ var supportedCiphers = []string{ // preferredCiphers specifies the default preference for ciphers. var preferredCiphers = []string{ - "aes128-gcm@openssh.com", + "aes128-gcm@openssh.com", gcm256CipherID, chacha20Poly1305ID, "aes128-ctr", "aes192-ctr", "aes256-ctr", } @@ -168,7 +168,7 @@ func (a *directionAlgorithms) rekeyBytes() int64 { // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is // 128. switch a.Cipher { - case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID: + case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcm128CipherID, gcm256CipherID, aes128cbcID: return 16 * (1 << 32) } @@ -178,7 +178,8 @@ func (a *directionAlgorithms) rekeyBytes() int64 { } var aeadCiphers = map[string]bool{ - gcmCipherID: true, + gcm128CipherID: true, + gcm256CipherID: true, chacha20Poly1305ID: true, } diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go index 35661a52be..8f345ee924 100644 --- a/vendor/golang.org/x/crypto/ssh/connection.go +++ b/vendor/golang.org/x/crypto/ssh/connection.go @@ -97,7 +97,7 @@ func (c *connection) Close() error { return c.sshConn.conn.Close() } -// sshconn provides net.Conn metadata, but disallows direct reads and +// sshConn provides net.Conn metadata, but disallows direct reads and // writes. 
type sshConn struct { conn net.Conn diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index 7296980413..dac8ee7244 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -1087,9 +1087,9 @@ func (*PassphraseMissingError) Error() string { return "ssh: this private key is passphrase protected" } -// ParseRawPrivateKey returns a private key from a PEM encoded private key. It -// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. If the -// private key is encrypted, it will return a PassphraseMissingError. +// ParseRawPrivateKey returns a private key from a PEM encoded private key. It supports +// RSA, DSA, ECDSA, and Ed25519 private keys in PKCS#1, PKCS#8, OpenSSL, and OpenSSH +// formats. If the private key is encrypted, it will return a PassphraseMissingError. func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { block, _ := pem.Decode(pemBytes) if block == nil { diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go index acf5a21bbb..da015801ea 100644 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -17,7 +17,8 @@ import ( const debugTransport = false const ( - gcmCipherID = "aes128-gcm@openssh.com" + gcm128CipherID = "aes128-gcm@openssh.com" + gcm256CipherID = "aes256-gcm@openssh.com" aes128cbcID = "aes128-cbc" tripledescbcID = "3des-cbc" ) diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/mod/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/mod/PATENTS b/vendor/golang.org/x/mod/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/mod/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go new file mode 100644 index 0000000000..2681af35af --- /dev/null +++ b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go @@ -0,0 +1,78 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lazyregexp is a thin wrapper over regexp, allowing the use of global +// regexp variables without forcing them to be compiled at init. +package lazyregexp + +import ( + "os" + "regexp" + "strings" + "sync" +) + +// Regexp is a wrapper around regexp.Regexp, where the underlying regexp will be +// compiled the first time it is needed. +type Regexp struct { + str string + once sync.Once + rx *regexp.Regexp +} + +func (r *Regexp) re() *regexp.Regexp { + r.once.Do(r.build) + return r.rx +} + +func (r *Regexp) build() { + r.rx = regexp.MustCompile(r.str) + r.str = "" +} + +func (r *Regexp) FindSubmatch(s []byte) [][]byte { + return r.re().FindSubmatch(s) +} + +func (r *Regexp) FindStringSubmatch(s string) []string { + return r.re().FindStringSubmatch(s) +} + +func (r *Regexp) FindStringSubmatchIndex(s string) []int { + return r.re().FindStringSubmatchIndex(s) +} + +func (r *Regexp) ReplaceAllString(src, repl string) string { + return r.re().ReplaceAllString(src, repl) +} + +func (r *Regexp) FindString(s string) string { + return r.re().FindString(s) +} + +func (r *Regexp) FindAllString(s string, n int) []string { + return r.re().FindAllString(s, n) +} + +func (r *Regexp) MatchString(s string) bool { + return r.re().MatchString(s) +} + +func (r *Regexp) SubexpNames() []string { + return r.re().SubexpNames() +} + +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + +// New creates a new lazy regexp, delaying the compiling work until it is first +// needed. If the code is being run as part of tests, the regexp compiling will +// happen immediately. +func New(str string) *Regexp { + lr := &Regexp{str: str} + if inTest { + // In tests, always compile the regexps early. 
+ lr.re() + } + return lr +} diff --git a/vendor/golang.org/x/mod/modfile/print.go b/vendor/golang.org/x/mod/modfile/print.go new file mode 100644 index 0000000000..524f93022a --- /dev/null +++ b/vendor/golang.org/x/mod/modfile/print.go @@ -0,0 +1,174 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Module file printer. + +package modfile + +import ( + "bytes" + "fmt" + "strings" +) + +// Format returns a go.mod file as a byte slice, formatted in standard style. +func Format(f *FileSyntax) []byte { + pr := &printer{} + pr.file(f) + return pr.Bytes() +} + +// A printer collects the state during printing of a file or expression. +type printer struct { + bytes.Buffer // output buffer + comment []Comment // pending end-of-line comments + margin int // left margin (indent), a number of tabs +} + +// printf prints to the buffer. +func (p *printer) printf(format string, args ...interface{}) { + fmt.Fprintf(p, format, args...) +} + +// indent returns the position on the current line, in bytes, 0-indexed. +func (p *printer) indent() int { + b := p.Bytes() + n := 0 + for n < len(b) && b[len(b)-1-n] != '\n' { + n++ + } + return n +} + +// newline ends the current line, flushing end-of-line comments. +func (p *printer) newline() { + if len(p.comment) > 0 { + p.printf(" ") + for i, com := range p.comment { + if i > 0 { + p.trim() + p.printf("\n") + for i := 0; i < p.margin; i++ { + p.printf("\t") + } + } + p.printf("%s", strings.TrimSpace(com.Token)) + } + p.comment = p.comment[:0] + } + + p.trim() + p.printf("\n") + for i := 0; i < p.margin; i++ { + p.printf("\t") + } +} + +// trim removes trailing spaces and tabs from the current line. +func (p *printer) trim() { + // Remove trailing spaces and tabs from line we're about to end. + b := p.Bytes() + n := len(b) + for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') { + n-- + } + p.Truncate(n) +} + +// file formats the given file into the print buffer. +func (p *printer) file(f *FileSyntax) { + for _, com := range f.Before { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + + for i, stmt := range f.Stmt { + switch x := stmt.(type) { + case *CommentBlock: + // comments already handled + p.expr(x) + + default: + p.expr(x) + p.newline() + } + + for _, com := range stmt.Comment().After { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + + if i+1 < len(f.Stmt) { + p.newline() + } + } +} + +func (p *printer) expr(x Expr) { + // Emit line-comments preceding this expression. + if before := x.Comment().Before; len(before) > 0 { + // Want to print a line comment. + // Line comments must be at the current margin. + p.trim() + if p.indent() > 0 { + // There's other text on the line. Start a new line. + p.printf("\n") + } + // Re-indent to margin. + for i := 0; i < p.margin; i++ { + p.printf("\t") + } + for _, com := range before { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + } + + switch x := x.(type) { + default: + panic(fmt.Errorf("printer: unexpected type %T", x)) + + case *CommentBlock: + // done + + case *LParen: + p.printf("(") + case *RParen: + p.printf(")") + + case *Line: + p.tokens(x.Token) + + case *LineBlock: + p.tokens(x.Token) + p.printf(" ") + p.expr(&x.LParen) + p.margin++ + for _, l := range x.Line { + p.newline() + p.expr(l) + } + p.margin-- + p.newline() + p.expr(&x.RParen) + } + + // Queue end-of-line comments for printing when we + // reach the end of the line. 
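// A standalone sketch of the lazy-compilation pattern used by the internal lazyregexp
// package above (internal packages cannot be imported directly); the variable names
// and pattern here are illustrative only.
package main

import (
	"fmt"
	"regexp"
	"sync"
)

var (
	semverOnce sync.Once
	semverRE   *regexp.Regexp
)

// semverRegexp compiles the pattern on first use, mirroring lazyregexp.Regexp.
func semverRegexp() *regexp.Regexp {
	semverOnce.Do(func() {
		semverRE = regexp.MustCompile(`^v?[0-9]+\.[0-9]+\.[0-9]+$`)
	})
	return semverRE
}

func main() {
	// No regexp work happens at init; compilation happens here, on first call.
	fmt.Println(semverRegexp().MatchString("v1.2.3")) // true
}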
+ p.comment = append(p.comment, x.Comment().Suffix...) +} + +func (p *printer) tokens(tokens []string) { + sep := "" + for _, t := range tokens { + if t == "," || t == ")" || t == "]" || t == "}" { + sep = "" + } + p.printf("%s%s", sep, t) + sep = " " + if t == "(" || t == "[" || t == "{" { + sep = "" + } + } +} diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go new file mode 100644 index 0000000000..a503bc2105 --- /dev/null +++ b/vendor/golang.org/x/mod/modfile/read.go @@ -0,0 +1,958 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfile + +import ( + "bytes" + "errors" + "fmt" + "os" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A Position describes an arbitrary source position in a file, including the +// file, line, column, and byte offset. +type Position struct { + Line int // line in input (starting at 1) + LineRune int // rune in line (starting at 1) + Byte int // byte in input (starting at 0) +} + +// add returns the position at the end of s, assuming it starts at p. +func (p Position) add(s string) Position { + p.Byte += len(s) + if n := strings.Count(s, "\n"); n > 0 { + p.Line += n + s = s[strings.LastIndex(s, "\n")+1:] + p.LineRune = 1 + } + p.LineRune += utf8.RuneCountInString(s) + return p +} + +// An Expr represents an input element. +type Expr interface { + // Span returns the start and end position of the expression, + // excluding leading or trailing comments. + Span() (start, end Position) + + // Comment returns the comments attached to the expression. + // This method would normally be named 'Comments' but that + // would interfere with embedding a type of the same name. + Comment() *Comments +} + +// A Comment represents a single // comment. +type Comment struct { + Start Position + Token string // without trailing newline + Suffix bool // an end of line (not whole line) comment +} + +// Comments collects the comments associated with an expression. +type Comments struct { + Before []Comment // whole-line comments before this expression + Suffix []Comment // end-of-line comments after this expression + + // For top-level expressions only, After lists whole-line + // comments following the expression. + After []Comment +} + +// Comment returns the receiver. This isn't useful by itself, but +// a Comments struct is embedded into all the expression +// implementation types, and this gives each of those a Comment +// method to satisfy the Expr interface. +func (c *Comments) Comment() *Comments { + return c +} + +// A FileSyntax represents an entire go.mod file. +type FileSyntax struct { + Name string // file path + Comments + Stmt []Expr +} + +func (x *FileSyntax) Span() (start, end Position) { + if len(x.Stmt) == 0 { + return + } + start, _ = x.Stmt[0].Span() + _, end = x.Stmt[len(x.Stmt)-1].Span() + return start, end +} + +// addLine adds a line containing the given tokens to the file. +// +// If the first token of the hint matches the first token of the +// line, the new line is added at the end of the block containing hint, +// extracting hint into a new block if it is not yet in one. +// +// If the hint is non-nil buts its first token does not match, +// the new line is added after the block containing hint +// (or hint itself, if not in a block). 
+// +// If no hint is provided, addLine appends the line to the end of +// the last block with a matching first token, +// or to the end of the file if no such block exists. +func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line { + if hint == nil { + // If no hint given, add to the last statement of the given type. + Loop: + for i := len(x.Stmt) - 1; i >= 0; i-- { + stmt := x.Stmt[i] + switch stmt := stmt.(type) { + case *Line: + if stmt.Token != nil && stmt.Token[0] == tokens[0] { + hint = stmt + break Loop + } + case *LineBlock: + if stmt.Token[0] == tokens[0] { + hint = stmt + break Loop + } + } + } + } + + newLineAfter := func(i int) *Line { + new := &Line{Token: tokens} + if i == len(x.Stmt) { + x.Stmt = append(x.Stmt, new) + } else { + x.Stmt = append(x.Stmt, nil) + copy(x.Stmt[i+2:], x.Stmt[i+1:]) + x.Stmt[i+1] = new + } + return new + } + + if hint != nil { + for i, stmt := range x.Stmt { + switch stmt := stmt.(type) { + case *Line: + if stmt == hint { + if stmt.Token == nil || stmt.Token[0] != tokens[0] { + return newLineAfter(i) + } + + // Convert line to line block. + stmt.InBlock = true + block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}} + stmt.Token = stmt.Token[1:] + x.Stmt[i] = block + new := &Line{Token: tokens[1:], InBlock: true} + block.Line = append(block.Line, new) + return new + } + + case *LineBlock: + if stmt == hint { + if stmt.Token[0] != tokens[0] { + return newLineAfter(i) + } + + new := &Line{Token: tokens[1:], InBlock: true} + stmt.Line = append(stmt.Line, new) + return new + } + + for j, line := range stmt.Line { + if line == hint { + if stmt.Token[0] != tokens[0] { + return newLineAfter(i) + } + + // Add new line after hint within the block. + stmt.Line = append(stmt.Line, nil) + copy(stmt.Line[j+2:], stmt.Line[j+1:]) + new := &Line{Token: tokens[1:], InBlock: true} + stmt.Line[j+1] = new + return new + } + } + } + } + } + + new := &Line{Token: tokens} + x.Stmt = append(x.Stmt, new) + return new +} + +func (x *FileSyntax) updateLine(line *Line, tokens ...string) { + if line.InBlock { + tokens = tokens[1:] + } + line.Token = tokens +} + +// markRemoved modifies line so that it (and its end-of-line comment, if any) +// will be dropped by (*FileSyntax).Cleanup. +func (line *Line) markRemoved() { + line.Token = nil + line.Comments.Suffix = nil +} + +// Cleanup cleans up the file syntax x after any edit operations. +// To avoid quadratic behavior, (*Line).markRemoved marks the line as dead +// by setting line.Token = nil but does not remove it from the slice +// in which it appears. After edits have all been indicated, +// calling Cleanup cleans out the dead lines. +func (x *FileSyntax) Cleanup() { + w := 0 + for _, stmt := range x.Stmt { + switch stmt := stmt.(type) { + case *Line: + if stmt.Token == nil { + continue + } + case *LineBlock: + ww := 0 + for _, line := range stmt.Line { + if line.Token != nil { + stmt.Line[ww] = line + ww++ + } + } + if ww == 0 { + continue + } + if ww == 1 { + // Collapse block into single line. + line := &Line{ + Comments: Comments{ + Before: commentsAdd(stmt.Before, stmt.Line[0].Before), + Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix), + After: commentsAdd(stmt.Line[0].After, stmt.After), + }, + Token: stringsAdd(stmt.Token, stmt.Line[0].Token), + } + x.Stmt[w] = line + w++ + continue + } + stmt.Line = stmt.Line[:ww] + } + x.Stmt[w] = stmt + w++ + } + x.Stmt = x.Stmt[:w] +} + +func commentsAdd(x, y []Comment) []Comment { + return append(x[:len(x):len(x)], y...) 
+} + +func stringsAdd(x, y []string) []string { + return append(x[:len(x):len(x)], y...) +} + +// A CommentBlock represents a top-level block of comments separate +// from any rule. +type CommentBlock struct { + Comments + Start Position +} + +func (x *CommentBlock) Span() (start, end Position) { + return x.Start, x.Start +} + +// A Line is a single line of tokens. +type Line struct { + Comments + Start Position + Token []string + InBlock bool + End Position +} + +func (x *Line) Span() (start, end Position) { + return x.Start, x.End +} + +// A LineBlock is a factored block of lines, like +// +// require ( +// "x" +// "y" +// ) +type LineBlock struct { + Comments + Start Position + LParen LParen + Token []string + Line []*Line + RParen RParen +} + +func (x *LineBlock) Span() (start, end Position) { + return x.Start, x.RParen.Pos.add(")") +} + +// An LParen represents the beginning of a parenthesized line block. +// It is a place to store suffix comments. +type LParen struct { + Comments + Pos Position +} + +func (x *LParen) Span() (start, end Position) { + return x.Pos, x.Pos.add(")") +} + +// An RParen represents the end of a parenthesized line block. +// It is a place to store whole-line (before) comments. +type RParen struct { + Comments + Pos Position +} + +func (x *RParen) Span() (start, end Position) { + return x.Pos, x.Pos.add(")") +} + +// An input represents a single input file being parsed. +type input struct { + // Lexing state. + filename string // name of input file, for errors + complete []byte // entire input + remaining []byte // remaining input + tokenStart []byte // token being scanned to end of input + token token // next token to be returned by lex, peek + pos Position // current input position + comments []Comment // accumulated comments + + // Parser state. + file *FileSyntax // returned top-level syntax tree + parseErrors ErrorList // errors encountered during parsing + + // Comment assignment state. + pre []Expr // all expressions, in preorder traversal + post []Expr // all expressions, in postorder traversal +} + +func newInput(filename string, data []byte) *input { + return &input{ + filename: filename, + complete: data, + remaining: data, + pos: Position{Line: 1, LineRune: 1, Byte: 0}, + } +} + +// parse parses the input file. +func parse(file string, data []byte) (f *FileSyntax, err error) { + // The parser panics for both routine errors like syntax errors + // and for programmer bugs like array index errors. + // Turn both into error returns. Catching bug panics is + // especially important when processing many files. + in := newInput(file, data) + defer func() { + if e := recover(); e != nil && e != &in.parseErrors { + in.parseErrors = append(in.parseErrors, Error{ + Filename: in.filename, + Pos: in.pos, + Err: fmt.Errorf("internal error: %v", e), + }) + } + if err == nil && len(in.parseErrors) > 0 { + err = in.parseErrors + } + }() + + // Prime the lexer by reading in the first token. It will be available + // in the next peek() or lex() call. + in.readToken() + + // Invoke the parser. + in.parseFile() + if len(in.parseErrors) > 0 { + return nil, in.parseErrors + } + in.file.Name = in.filename + + // Assign comments to nearby syntax. + in.assignComments() + + return in.file, nil +} + +// Error is called to report an error. +// Error does not return: it panics. 
+func (in *input) Error(s string) { + in.parseErrors = append(in.parseErrors, Error{ + Filename: in.filename, + Pos: in.pos, + Err: errors.New(s), + }) + panic(&in.parseErrors) +} + +// eof reports whether the input has reached end of file. +func (in *input) eof() bool { + return len(in.remaining) == 0 +} + +// peekRune returns the next rune in the input without consuming it. +func (in *input) peekRune() int { + if len(in.remaining) == 0 { + return 0 + } + r, _ := utf8.DecodeRune(in.remaining) + return int(r) +} + +// peekPrefix reports whether the remaining input begins with the given prefix. +func (in *input) peekPrefix(prefix string) bool { + // This is like bytes.HasPrefix(in.remaining, []byte(prefix)) + // but without the allocation of the []byte copy of prefix. + for i := 0; i < len(prefix); i++ { + if i >= len(in.remaining) || in.remaining[i] != prefix[i] { + return false + } + } + return true +} + +// readRune consumes and returns the next rune in the input. +func (in *input) readRune() int { + if len(in.remaining) == 0 { + in.Error("internal lexer error: readRune at EOF") + } + r, size := utf8.DecodeRune(in.remaining) + in.remaining = in.remaining[size:] + if r == '\n' { + in.pos.Line++ + in.pos.LineRune = 1 + } else { + in.pos.LineRune++ + } + in.pos.Byte += size + return int(r) +} + +type token struct { + kind tokenKind + pos Position + endPos Position + text string +} + +type tokenKind int + +const ( + _EOF tokenKind = -(iota + 1) + _EOLCOMMENT + _IDENT + _STRING + _COMMENT + + // newlines and punctuation tokens are allowed as ASCII codes. +) + +func (k tokenKind) isComment() bool { + return k == _COMMENT || k == _EOLCOMMENT +} + +// isEOL returns whether a token terminates a line. +func (k tokenKind) isEOL() bool { + return k == _EOF || k == _EOLCOMMENT || k == '\n' +} + +// startToken marks the beginning of the next input token. +// It must be followed by a call to endToken, once the token's text has +// been consumed using readRune. +func (in *input) startToken() { + in.tokenStart = in.remaining + in.token.text = "" + in.token.pos = in.pos +} + +// endToken marks the end of an input token. +// It records the actual token string in tok.text. +// A single trailing newline (LF or CRLF) will be removed from comment tokens. +func (in *input) endToken(kind tokenKind) { + in.token.kind = kind + text := string(in.tokenStart[:len(in.tokenStart)-len(in.remaining)]) + if kind.isComment() { + if strings.HasSuffix(text, "\r\n") { + text = text[:len(text)-2] + } else { + text = strings.TrimSuffix(text, "\n") + } + } + in.token.text = text + in.token.endPos = in.pos +} + +// peek returns the kind of the next token returned by lex. +func (in *input) peek() tokenKind { + return in.token.kind +} + +// lex is called from the parser to obtain the next input token. +func (in *input) lex() token { + tok := in.token + in.readToken() + return tok +} + +// readToken lexes the next token from the text and stores it in in.token. +func (in *input) readToken() { + // Skip past spaces, stopping at non-space or EOF. + for !in.eof() { + c := in.peekRune() + if c == ' ' || c == '\t' || c == '\r' { + in.readRune() + continue + } + + // Comment runs to end of line. + if in.peekPrefix("//") { + in.startToken() + + // Is this comment the only thing on its line? + // Find the last \n before this // and see if it's all + // spaces from there to here. 
+ i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n")) + suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0 + in.readRune() + in.readRune() + + // Consume comment. + for len(in.remaining) > 0 && in.readRune() != '\n' { + } + + // If we are at top level (not in a statement), hand the comment to + // the parser as a _COMMENT token. The grammar is written + // to handle top-level comments itself. + if !suffix { + in.endToken(_COMMENT) + return + } + + // Otherwise, save comment for later attachment to syntax tree. + in.endToken(_EOLCOMMENT) + in.comments = append(in.comments, Comment{in.token.pos, in.token.text, suffix}) + return + } + + if in.peekPrefix("/*") { + in.Error("mod files must use // comments (not /* */ comments)") + } + + // Found non-space non-comment. + break + } + + // Found the beginning of the next token. + in.startToken() + + // End of file. + if in.eof() { + in.endToken(_EOF) + return + } + + // Punctuation tokens. + switch c := in.peekRune(); c { + case '\n', '(', ')', '[', ']', '{', '}', ',': + in.readRune() + in.endToken(tokenKind(c)) + return + + case '"', '`': // quoted string + quote := c + in.readRune() + for { + if in.eof() { + in.pos = in.token.pos + in.Error("unexpected EOF in string") + } + if in.peekRune() == '\n' { + in.Error("unexpected newline in string") + } + c := in.readRune() + if c == quote { + break + } + if c == '\\' && quote != '`' { + if in.eof() { + in.pos = in.token.pos + in.Error("unexpected EOF in string") + } + in.readRune() + } + } + in.endToken(_STRING) + return + } + + // Checked all punctuation. Must be identifier token. + if c := in.peekRune(); !isIdent(c) { + in.Error(fmt.Sprintf("unexpected input character %#q", c)) + } + + // Scan over identifier. + for isIdent(in.peekRune()) { + if in.peekPrefix("//") { + break + } + if in.peekPrefix("/*") { + in.Error("mod files must use // comments (not /* */ comments)") + } + in.readRune() + } + in.endToken(_IDENT) +} + +// isIdent reports whether c is an identifier rune. +// We treat most printable runes as identifier runes, except for a handful of +// ASCII punctuation characters. +func isIdent(c int) bool { + switch r := rune(c); r { + case ' ', '(', ')', '[', ']', '{', '}', ',': + return false + default: + return !unicode.IsSpace(r) && unicode.IsPrint(r) + } +} + +// Comment assignment. +// We build two lists of all subexpressions, preorder and postorder. +// The preorder list is ordered by start location, with outer expressions first. +// The postorder list is ordered by end location, with outer expressions last. +// We use the preorder list to assign each whole-line comment to the syntax +// immediately following it, and we use the postorder list to assign each +// end-of-line comment to the syntax immediately preceding it. + +// order walks the expression adding it and its subexpressions to the +// preorder and postorder lists. +func (in *input) order(x Expr) { + if x != nil { + in.pre = append(in.pre, x) + } + switch x := x.(type) { + default: + panic(fmt.Errorf("order: unexpected type %T", x)) + case nil: + // nothing + case *LParen, *RParen: + // nothing + case *CommentBlock: + // nothing + case *Line: + // nothing + case *FileSyntax: + for _, stmt := range x.Stmt { + in.order(stmt) + } + case *LineBlock: + in.order(&x.LParen) + for _, l := range x.Line { + in.order(l) + } + in.order(&x.RParen) + } + if x != nil { + in.post = append(in.post, x) + } +} + +// assignComments attaches comments to nearby syntax. 
+func (in *input) assignComments() { + const debug = false + + // Generate preorder and postorder lists. + in.order(in.file) + + // Split into whole-line comments and suffix comments. + var line, suffix []Comment + for _, com := range in.comments { + if com.Suffix { + suffix = append(suffix, com) + } else { + line = append(line, com) + } + } + + if debug { + for _, c := range line { + fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) + } + } + + // Assign line comments to syntax immediately following. + for _, x := range in.pre { + start, _ := x.Span() + if debug { + fmt.Fprintf(os.Stderr, "pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte) + } + xcom := x.Comment() + for len(line) > 0 && start.Byte >= line[0].Start.Byte { + if debug { + fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte) + } + xcom.Before = append(xcom.Before, line[0]) + line = line[1:] + } + } + + // Remaining line comments go at end of file. + in.file.After = append(in.file.After, line...) + + if debug { + for _, c := range suffix { + fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) + } + } + + // Assign suffix comments to syntax immediately before. + for i := len(in.post) - 1; i >= 0; i-- { + x := in.post[i] + + start, end := x.Span() + if debug { + fmt.Fprintf(os.Stderr, "post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte) + } + + // Do not assign suffix comments to end of line block or whole file. + // Instead assign them to the last element inside. + switch x.(type) { + case *FileSyntax: + continue + } + + // Do not assign suffix comments to something that starts + // on an earlier line, so that in + // + // x ( y + // z ) // comment + // + // we assign the comment to z and not to x ( ... ). + if start.Line != end.Line { + continue + } + xcom := x.Comment() + for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte { + if debug { + fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte) + } + xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1]) + suffix = suffix[:len(suffix)-1] + } + } + + // We assigned suffix comments in reverse. + // If multiple suffix comments were appended to the same + // expression node, they are now in reverse. Fix that. + for _, x := range in.post { + reverseComments(x.Comment().Suffix) + } + + // Remaining suffix comments go at beginning of file. + in.file.Before = append(in.file.Before, suffix...) +} + +// reverseComments reverses the []Comment list. 
+func reverseComments(list []Comment) { + for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 { + list[i], list[j] = list[j], list[i] + } +} + +func (in *input) parseFile() { + in.file = new(FileSyntax) + var cb *CommentBlock + for { + switch in.peek() { + case '\n': + in.lex() + if cb != nil { + in.file.Stmt = append(in.file.Stmt, cb) + cb = nil + } + case _COMMENT: + tok := in.lex() + if cb == nil { + cb = &CommentBlock{Start: tok.pos} + } + com := cb.Comment() + com.Before = append(com.Before, Comment{Start: tok.pos, Token: tok.text}) + case _EOF: + if cb != nil { + in.file.Stmt = append(in.file.Stmt, cb) + } + return + default: + in.parseStmt() + if cb != nil { + in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before + cb = nil + } + } + } +} + +func (in *input) parseStmt() { + tok := in.lex() + start := tok.pos + end := tok.endPos + tokens := []string{tok.text} + for { + tok := in.lex() + switch { + case tok.kind.isEOL(): + in.file.Stmt = append(in.file.Stmt, &Line{ + Start: start, + Token: tokens, + End: end, + }) + return + + case tok.kind == '(': + if next := in.peek(); next.isEOL() { + // Start of block: no more tokens on this line. + in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, tokens, tok)) + return + } else if next == ')' { + rparen := in.lex() + if in.peek().isEOL() { + // Empty block. + in.lex() + in.file.Stmt = append(in.file.Stmt, &LineBlock{ + Start: start, + Token: tokens, + LParen: LParen{Pos: tok.pos}, + RParen: RParen{Pos: rparen.pos}, + }) + return + } + // '( )' in the middle of the line, not a block. + tokens = append(tokens, tok.text, rparen.text) + } else { + // '(' in the middle of the line, not a block. + tokens = append(tokens, tok.text) + } + + default: + tokens = append(tokens, tok.text) + end = tok.endPos + } + } +} + +func (in *input) parseLineBlock(start Position, token []string, lparen token) *LineBlock { + x := &LineBlock{ + Start: start, + Token: token, + LParen: LParen{Pos: lparen.pos}, + } + var comments []Comment + for { + switch in.peek() { + case _EOLCOMMENT: + // Suffix comment, will be attached later by assignComments. + in.lex() + case '\n': + // Blank line. Add an empty comment to preserve it. + in.lex() + if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" { + comments = append(comments, Comment{}) + } + case _COMMENT: + tok := in.lex() + comments = append(comments, Comment{Start: tok.pos, Token: tok.text}) + case _EOF: + in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune)) + case ')': + rparen := in.lex() + x.RParen.Before = comments + x.RParen.Pos = rparen.pos + if !in.peek().isEOL() { + in.Error("syntax error (expected newline after closing paren)") + } + in.lex() + return x + default: + l := in.parseLine() + x.Line = append(x.Line, l) + l.Comment().Before = comments + comments = nil + } + } +} + +func (in *input) parseLine() *Line { + tok := in.lex() + if tok.kind.isEOL() { + in.Error("internal parse error: parseLine at end of line") + } + start := tok.pos + end := tok.endPos + tokens := []string{tok.text} + for { + tok := in.lex() + if tok.kind.isEOL() { + return &Line{ + Start: start, + Token: tokens, + End: end, + InBlock: true, + } + } + tokens = append(tokens, tok.text) + end = tok.endPos + } +} + +var ( + slashSlash = []byte("//") + moduleStr = []byte("module") +) + +// ModulePath returns the module path from the gomod file text. 
+// If it cannot find a module path, it returns an empty string. +// It is tolerant of unrelated problems in the go.mod file. +func ModulePath(mod []byte) string { + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + return "" // missing module path +} diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go new file mode 100644 index 0000000000..6bcde8fabe --- /dev/null +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -0,0 +1,1559 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package modfile implements a parser and formatter for go.mod files. +// +// The go.mod syntax is described in +// https://golang.org/cmd/go/#hdr-The_go_mod_file. +// +// The Parse and ParseLax functions both parse a go.mod file and return an +// abstract syntax tree. ParseLax ignores unknown statements and may be used to +// parse go.mod files that may have been developed with newer versions of Go. +// +// The File struct returned by Parse and ParseLax represent an abstract +// go.mod file. File has several methods like AddNewRequire and DropReplace +// that can be used to programmatically edit a file. +// +// The Format function formats a File back to a byte slice which can be +// written to a file. +package modfile + +import ( + "errors" + "fmt" + "path/filepath" + "sort" + "strconv" + "strings" + "unicode" + + "golang.org/x/mod/internal/lazyregexp" + "golang.org/x/mod/module" + "golang.org/x/mod/semver" +) + +// A File is the parsed, interpreted form of a go.mod file. +type File struct { + Module *Module + Go *Go + Require []*Require + Exclude []*Exclude + Replace []*Replace + Retract []*Retract + + Syntax *FileSyntax +} + +// A Module is the module statement. +type Module struct { + Mod module.Version + Deprecated string + Syntax *Line +} + +// A Go is the go statement. +type Go struct { + Version string // "1.23" + Syntax *Line +} + +// An Exclude is a single exclude statement. +type Exclude struct { + Mod module.Version + Syntax *Line +} + +// A Replace is a single replace statement. +type Replace struct { + Old module.Version + New module.Version + Syntax *Line +} + +// A Retract is a single retract statement. +type Retract struct { + VersionInterval + Rationale string + Syntax *Line +} + +// A VersionInterval represents a range of versions with upper and lower bounds. +// Intervals are closed: both bounds are included. When Low is equal to High, +// the interval may refer to a single version ('v1.2.3') or an interval +// ('[v1.2.3, v1.2.3]'); both have the same representation. +type VersionInterval struct { + Low, High string +} + +// A Require is a single require statement. 
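// A minimal sketch of the ModulePath helper defined above, which extracts the module
// path from go.mod text without fully parsing the file; the file content is an example.
package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	data := []byte("module example.com/hello\n\ngo 1.20\n")
	fmt.Println(modfile.ModulePath(data)) // example.com/hello
}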
+type Require struct { + Mod module.Version + Indirect bool // has "// indirect" comment + Syntax *Line +} + +func (r *Require) markRemoved() { + r.Syntax.markRemoved() + *r = Require{} +} + +func (r *Require) setVersion(v string) { + r.Mod.Version = v + + if line := r.Syntax; len(line.Token) > 0 { + if line.InBlock { + // If the line is preceded by an empty line, remove it; see + // https://golang.org/issue/33779. + if len(line.Comments.Before) == 1 && len(line.Comments.Before[0].Token) == 0 { + line.Comments.Before = line.Comments.Before[:0] + } + if len(line.Token) >= 2 { // example.com v1.2.3 + line.Token[1] = v + } + } else { + if len(line.Token) >= 3 { // require example.com v1.2.3 + line.Token[2] = v + } + } + } +} + +// setIndirect sets line to have (or not have) a "// indirect" comment. +func (r *Require) setIndirect(indirect bool) { + r.Indirect = indirect + line := r.Syntax + if isIndirect(line) == indirect { + return + } + if indirect { + // Adding comment. + if len(line.Suffix) == 0 { + // New comment. + line.Suffix = []Comment{{Token: "// indirect", Suffix: true}} + return + } + + com := &line.Suffix[0] + text := strings.TrimSpace(strings.TrimPrefix(com.Token, string(slashSlash))) + if text == "" { + // Empty comment. + com.Token = "// indirect" + return + } + + // Insert at beginning of existing comment. + com.Token = "// indirect; " + text + return + } + + // Removing comment. + f := strings.TrimSpace(strings.TrimPrefix(line.Suffix[0].Token, string(slashSlash))) + if f == "indirect" { + // Remove whole comment. + line.Suffix = nil + return + } + + // Remove comment prefix. + com := &line.Suffix[0] + i := strings.Index(com.Token, "indirect;") + com.Token = "//" + com.Token[i+len("indirect;"):] +} + +// isIndirect reports whether line has a "// indirect" comment, +// meaning it is in go.mod only for its effect on indirect dependencies, +// so that it can be dropped entirely once the effective version of the +// indirect dependency reaches the given minimum version. +func isIndirect(line *Line) bool { + if len(line.Suffix) == 0 { + return false + } + f := strings.Fields(strings.TrimPrefix(line.Suffix[0].Token, string(slashSlash))) + return (len(f) == 1 && f[0] == "indirect" || len(f) > 1 && f[0] == "indirect;") +} + +func (f *File) AddModuleStmt(path string) error { + if f.Syntax == nil { + f.Syntax = new(FileSyntax) + } + if f.Module == nil { + f.Module = &Module{ + Mod: module.Version{Path: path}, + Syntax: f.Syntax.addLine(nil, "module", AutoQuote(path)), + } + } else { + f.Module.Mod.Path = path + f.Syntax.updateLine(f.Module.Syntax, "module", AutoQuote(path)) + } + return nil +} + +func (f *File) AddComment(text string) { + if f.Syntax == nil { + f.Syntax = new(FileSyntax) + } + f.Syntax.Stmt = append(f.Syntax.Stmt, &CommentBlock{ + Comments: Comments{ + Before: []Comment{ + { + Token: text, + }, + }, + }, + }) +} + +type VersionFixer func(path, version string) (string, error) + +// errDontFix is returned by a VersionFixer to indicate the version should be +// left alone, even if it's not canonical. +var dontFixRetract VersionFixer = func(_, vers string) (string, error) { + return vers, nil +} + +// Parse parses and returns a go.mod file. +// +// file is the name of the file, used in positions and errors. +// +// data is the content of the file. +// +// fix is an optional function that canonicalizes module versions. +// If fix is nil, all module versions must be canonical (module.CanonicalVersion +// must return the same string). 
+func Parse(file string, data []byte, fix VersionFixer) (*File, error) { + return parseToFile(file, data, fix, true) +} + +// ParseLax is like Parse but ignores unknown statements. +// It is used when parsing go.mod files other than the main module, +// under the theory that most statement types we add in the future will +// only apply in the main module, like exclude and replace, +// and so we get better gradual deployments if old go commands +// simply ignore those statements when found in go.mod files +// in dependencies. +func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) { + return parseToFile(file, data, fix, false) +} + +func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parsed *File, err error) { + fs, err := parse(file, data) + if err != nil { + return nil, err + } + f := &File{ + Syntax: fs, + } + var errs ErrorList + + // fix versions in retract directives after the file is parsed. + // We need the module path to fix versions, and it might be at the end. + defer func() { + oldLen := len(errs) + f.fixRetract(fix, &errs) + if len(errs) > oldLen { + parsed, err = nil, errs + } + }() + + for _, x := range fs.Stmt { + switch x := x.(type) { + case *Line: + f.add(&errs, nil, x, x.Token[0], x.Token[1:], fix, strict) + + case *LineBlock: + if len(x.Token) > 1 { + if strict { + errs = append(errs, Error{ + Filename: file, + Pos: x.Start, + Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), + }) + } + continue + } + switch x.Token[0] { + default: + if strict { + errs = append(errs, Error{ + Filename: file, + Pos: x.Start, + Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), + }) + } + continue + case "module", "require", "exclude", "replace", "retract": + for _, l := range x.Line { + f.add(&errs, x, l, x.Token[0], l.Token, fix, strict) + } + } + } + } + + if len(errs) > 0 { + return nil, errs + } + return f, nil +} + +var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)$`) +var laxGoVersionRE = lazyregexp.New(`^v?(([1-9][0-9]*)\.(0|[1-9][0-9]*))([^0-9].*)$`) + +func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args []string, fix VersionFixer, strict bool) { + // If strict is false, this module is a dependency. + // We ignore all unknown directives as well as main-module-only + // directives like replace and exclude. It will work better for + // forward compatibility if we can depend on modules that have unknown + // statements (presumed relevant only when acting as the main module) + // and simply ignore those statements. 
+ if !strict { + switch verb { + case "go", "module", "retract", "require": + // want these even for dependency go.mods + default: + return + } + } + + wrapModPathError := func(modPath string, err error) { + *errs = append(*errs, Error{ + Filename: f.Syntax.Name, + Pos: line.Start, + ModPath: modPath, + Verb: verb, + Err: err, + }) + } + wrapError := func(err error) { + *errs = append(*errs, Error{ + Filename: f.Syntax.Name, + Pos: line.Start, + Err: err, + }) + } + errorf := func(format string, args ...interface{}) { + wrapError(fmt.Errorf(format, args...)) + } + + switch verb { + default: + errorf("unknown directive: %s", verb) + + case "go": + if f.Go != nil { + errorf("repeated go statement") + return + } + if len(args) != 1 { + errorf("go directive expects exactly one argument") + return + } else if !GoVersionRE.MatchString(args[0]) { + fixed := false + if !strict { + if m := laxGoVersionRE.FindStringSubmatch(args[0]); m != nil { + args[0] = m[1] + fixed = true + } + } + if !fixed { + errorf("invalid go version '%s': must match format 1.23", args[0]) + return + } + } + + f.Go = &Go{Syntax: line} + f.Go.Version = args[0] + + case "module": + if f.Module != nil { + errorf("repeated module statement") + return + } + deprecated := parseDeprecation(block, line) + f.Module = &Module{ + Syntax: line, + Deprecated: deprecated, + } + if len(args) != 1 { + errorf("usage: module module/path") + return + } + s, err := parseString(&args[0]) + if err != nil { + errorf("invalid quoted string: %v", err) + return + } + f.Module.Mod = module.Version{Path: s} + + case "require", "exclude": + if len(args) != 2 { + errorf("usage: %s module/path v1.2.3", verb) + return + } + s, err := parseString(&args[0]) + if err != nil { + errorf("invalid quoted string: %v", err) + return + } + v, err := parseVersion(verb, s, &args[1], fix) + if err != nil { + wrapError(err) + return + } + pathMajor, err := modulePathMajor(s) + if err != nil { + wrapError(err) + return + } + if err := module.CheckPathMajor(v, pathMajor); err != nil { + wrapModPathError(s, err) + return + } + if verb == "require" { + f.Require = append(f.Require, &Require{ + Mod: module.Version{Path: s, Version: v}, + Syntax: line, + Indirect: isIndirect(line), + }) + } else { + f.Exclude = append(f.Exclude, &Exclude{ + Mod: module.Version{Path: s, Version: v}, + Syntax: line, + }) + } + + case "replace": + replace, wrappederr := parseReplace(f.Syntax.Name, line, verb, args, fix) + if wrappederr != nil { + *errs = append(*errs, *wrappederr) + return + } + f.Replace = append(f.Replace, replace) + + case "retract": + rationale := parseDirectiveComment(block, line) + vi, err := parseVersionInterval(verb, "", &args, dontFixRetract) + if err != nil { + if strict { + wrapError(err) + return + } else { + // Only report errors parsing intervals in the main module. We may + // support additional syntax in the future, such as open and half-open + // intervals. Those can't be supported now, because they break the + // go.mod parser, even in lax mode. + return + } + } + if len(args) > 0 && strict { + // In the future, there may be additional information after the version. 
+ errorf("unexpected token after version: %q", args[0]) + return + } + retract := &Retract{ + VersionInterval: vi, + Rationale: rationale, + Syntax: line, + } + f.Retract = append(f.Retract, retract) + } +} + +func parseReplace(filename string, line *Line, verb string, args []string, fix VersionFixer) (*Replace, *Error) { + wrapModPathError := func(modPath string, err error) *Error { + return &Error{ + Filename: filename, + Pos: line.Start, + ModPath: modPath, + Verb: verb, + Err: err, + } + } + wrapError := func(err error) *Error { + return &Error{ + Filename: filename, + Pos: line.Start, + Err: err, + } + } + errorf := func(format string, args ...interface{}) *Error { + return wrapError(fmt.Errorf(format, args...)) + } + + arrow := 2 + if len(args) >= 2 && args[1] == "=>" { + arrow = 1 + } + if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" { + return nil, errorf("usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory", verb, verb) + } + s, err := parseString(&args[0]) + if err != nil { + return nil, errorf("invalid quoted string: %v", err) + } + pathMajor, err := modulePathMajor(s) + if err != nil { + return nil, wrapModPathError(s, err) + + } + var v string + if arrow == 2 { + v, err = parseVersion(verb, s, &args[1], fix) + if err != nil { + return nil, wrapError(err) + } + if err := module.CheckPathMajor(v, pathMajor); err != nil { + return nil, wrapModPathError(s, err) + } + } + ns, err := parseString(&args[arrow+1]) + if err != nil { + return nil, errorf("invalid quoted string: %v", err) + } + nv := "" + if len(args) == arrow+2 { + if !IsDirectoryPath(ns) { + if strings.Contains(ns, "@") { + return nil, errorf("replacement module must match format 'path version', not 'path@version'") + } + return nil, errorf("replacement module without version must be directory path (rooted or starting with ./ or ../)") + } + if filepath.Separator == '/' && strings.Contains(ns, `\`) { + return nil, errorf("replacement directory appears to be Windows path (on a non-windows system)") + } + } + if len(args) == arrow+3 { + nv, err = parseVersion(verb, ns, &args[arrow+2], fix) + if err != nil { + return nil, wrapError(err) + } + if IsDirectoryPath(ns) { + return nil, errorf("replacement module directory path %q cannot have version", ns) + + } + } + return &Replace{ + Old: module.Version{Path: s, Version: v}, + New: module.Version{Path: ns, Version: nv}, + Syntax: line, + }, nil +} + +// fixRetract applies fix to each retract directive in f, appending any errors +// to errs. +// +// Most versions are fixed as we parse the file, but for retract directives, +// the relevant module path is the one specified with the module directive, +// and that might appear at the end of the file (or not at all). 
+func (f *File) fixRetract(fix VersionFixer, errs *ErrorList) { + if fix == nil { + return + } + path := "" + if f.Module != nil { + path = f.Module.Mod.Path + } + var r *Retract + wrapError := func(err error) { + *errs = append(*errs, Error{ + Filename: f.Syntax.Name, + Pos: r.Syntax.Start, + Err: err, + }) + } + + for _, r = range f.Retract { + if path == "" { + wrapError(errors.New("no module directive found, so retract cannot be used")) + return // only print the first one of these + } + + args := r.Syntax.Token + if args[0] == "retract" { + args = args[1:] + } + vi, err := parseVersionInterval("retract", path, &args, fix) + if err != nil { + wrapError(err) + } + r.VersionInterval = vi + } +} + +func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, fix VersionFixer) { + wrapError := func(err error) { + *errs = append(*errs, Error{ + Filename: f.Syntax.Name, + Pos: line.Start, + Err: err, + }) + } + errorf := func(format string, args ...interface{}) { + wrapError(fmt.Errorf(format, args...)) + } + + switch verb { + default: + errorf("unknown directive: %s", verb) + + case "go": + if f.Go != nil { + errorf("repeated go statement") + return + } + if len(args) != 1 { + errorf("go directive expects exactly one argument") + return + } else if !GoVersionRE.MatchString(args[0]) { + errorf("invalid go version '%s': must match format 1.23", args[0]) + return + } + + f.Go = &Go{Syntax: line} + f.Go.Version = args[0] + + case "use": + if len(args) != 1 { + errorf("usage: %s local/dir", verb) + return + } + s, err := parseString(&args[0]) + if err != nil { + errorf("invalid quoted string: %v", err) + return + } + f.Use = append(f.Use, &Use{ + Path: s, + Syntax: line, + }) + + case "replace": + replace, wrappederr := parseReplace(f.Syntax.Name, line, verb, args, fix) + if wrappederr != nil { + *errs = append(*errs, *wrappederr) + return + } + f.Replace = append(f.Replace, replace) + } +} + +// IsDirectoryPath reports whether the given path should be interpreted +// as a directory path. Just like on the go command line, relative paths +// and rooted paths are directory paths; the rest are module paths. +func IsDirectoryPath(ns string) bool { + // Because go.mod files can move from one system to another, + // we check all known path syntaxes, both Unix and Windows. + return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") || + strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) || + len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':' +} + +// MustQuote reports whether s must be quoted in order to appear as +// a single token in a go.mod line. +func MustQuote(s string) bool { + for _, r := range s { + switch r { + case ' ', '"', '\'', '`': + return true + + case '(', ')', '[', ']', '{', '}', ',': + if len(s) > 1 { + return true + } + + default: + if !unicode.IsPrint(r) { + return true + } + } + } + return s == "" || strings.Contains(s, "//") || strings.Contains(s, "/*") +} + +// AutoQuote returns s or, if quoting is required for s to appear in a go.mod, +// the quotation of s. 
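// A small sketch of the quoting helpers documented above; the inputs are examples.
package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	fmt.Println(modfile.AutoQuote("example.com/hello"))  // example.com/hello (no quoting needed)
	fmt.Println(modfile.AutoQuote("a path with spaces")) // "a path with spaces"
	fmt.Println(modfile.MustQuote("contains\"quote"))    // true
}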
+func AutoQuote(s string) string { + if MustQuote(s) { + return strconv.Quote(s) + } + return s +} + +func parseVersionInterval(verb string, path string, args *[]string, fix VersionFixer) (VersionInterval, error) { + toks := *args + if len(toks) == 0 || toks[0] == "(" { + return VersionInterval{}, fmt.Errorf("expected '[' or version") + } + if toks[0] != "[" { + v, err := parseVersion(verb, path, &toks[0], fix) + if err != nil { + return VersionInterval{}, err + } + *args = toks[1:] + return VersionInterval{Low: v, High: v}, nil + } + toks = toks[1:] + + if len(toks) == 0 { + return VersionInterval{}, fmt.Errorf("expected version after '['") + } + low, err := parseVersion(verb, path, &toks[0], fix) + if err != nil { + return VersionInterval{}, err + } + toks = toks[1:] + + if len(toks) == 0 || toks[0] != "," { + return VersionInterval{}, fmt.Errorf("expected ',' after version") + } + toks = toks[1:] + + if len(toks) == 0 { + return VersionInterval{}, fmt.Errorf("expected version after ','") + } + high, err := parseVersion(verb, path, &toks[0], fix) + if err != nil { + return VersionInterval{}, err + } + toks = toks[1:] + + if len(toks) == 0 || toks[0] != "]" { + return VersionInterval{}, fmt.Errorf("expected ']' after version") + } + toks = toks[1:] + + *args = toks + return VersionInterval{Low: low, High: high}, nil +} + +func parseString(s *string) (string, error) { + t := *s + if strings.HasPrefix(t, `"`) { + var err error + if t, err = strconv.Unquote(t); err != nil { + return "", err + } + } else if strings.ContainsAny(t, "\"'`") { + // Other quotes are reserved both for possible future expansion + // and to avoid confusion. For example if someone types 'x' + // we want that to be a syntax error and not a literal x in literal quotation marks. + return "", fmt.Errorf("unquoted string cannot contain quote") + } + *s = AutoQuote(t) + return t, nil +} + +var deprecatedRE = lazyregexp.New(`(?s)(?:^|\n\n)Deprecated: *(.*?)(?:$|\n\n)`) + +// parseDeprecation extracts the text of comments on a "module" directive and +// extracts a deprecation message from that. +// +// A deprecation message is contained in a paragraph within a block of comments +// that starts with "Deprecated:" (case sensitive). The message runs until the +// end of the paragraph and does not include the "Deprecated:" prefix. If the +// comment block has multiple paragraphs that start with "Deprecated:", +// parseDeprecation returns the message from the first. +func parseDeprecation(block *LineBlock, line *Line) string { + text := parseDirectiveComment(block, line) + m := deprecatedRE.FindStringSubmatch(text) + if m == nil { + return "" + } + return m[1] +} + +// parseDirectiveComment extracts the text of comments on a directive. +// If the directive's line does not have comments and is part of a block that +// does have comments, the block's comments are used. 
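// A minimal sketch of how the deprecation-comment handling above surfaces through the
// public API; the module path and message are examples, not from the vendored sources.
package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/modfile"
)

func main() {
	data := []byte("// Deprecated: use example.com/hello/v2 instead.\nmodule example.com/hello\n\ngo 1.20\n")
	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		log.Fatal(err)
	}
	// The "Deprecated:" paragraph on the module directive is exposed on Module.
	fmt.Println(f.Module.Deprecated) // use example.com/hello/v2 instead.
}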
+func parseDirectiveComment(block *LineBlock, line *Line) string { + comments := line.Comment() + if block != nil && len(comments.Before) == 0 && len(comments.Suffix) == 0 { + comments = block.Comment() + } + groups := [][]Comment{comments.Before, comments.Suffix} + var lines []string + for _, g := range groups { + for _, c := range g { + if !strings.HasPrefix(c.Token, "//") { + continue // blank line + } + lines = append(lines, strings.TrimSpace(strings.TrimPrefix(c.Token, "//"))) + } + } + return strings.Join(lines, "\n") +} + +type ErrorList []Error + +func (e ErrorList) Error() string { + errStrs := make([]string, len(e)) + for i, err := range e { + errStrs[i] = err.Error() + } + return strings.Join(errStrs, "\n") +} + +type Error struct { + Filename string + Pos Position + Verb string + ModPath string + Err error +} + +func (e *Error) Error() string { + var pos string + if e.Pos.LineRune > 1 { + // Don't print LineRune if it's 1 (beginning of line). + // It's always 1 except in scanner errors, which are rare. + pos = fmt.Sprintf("%s:%d:%d: ", e.Filename, e.Pos.Line, e.Pos.LineRune) + } else if e.Pos.Line > 0 { + pos = fmt.Sprintf("%s:%d: ", e.Filename, e.Pos.Line) + } else if e.Filename != "" { + pos = fmt.Sprintf("%s: ", e.Filename) + } + + var directive string + if e.ModPath != "" { + directive = fmt.Sprintf("%s %s: ", e.Verb, e.ModPath) + } else if e.Verb != "" { + directive = fmt.Sprintf("%s: ", e.Verb) + } + + return pos + directive + e.Err.Error() +} + +func (e *Error) Unwrap() error { return e.Err } + +func parseVersion(verb string, path string, s *string, fix VersionFixer) (string, error) { + t, err := parseString(s) + if err != nil { + return "", &Error{ + Verb: verb, + ModPath: path, + Err: &module.InvalidVersionError{ + Version: *s, + Err: err, + }, + } + } + if fix != nil { + fixed, err := fix(path, t) + if err != nil { + if err, ok := err.(*module.ModuleError); ok { + return "", &Error{ + Verb: verb, + ModPath: path, + Err: err.Err, + } + } + return "", err + } + t = fixed + } else { + cv := module.CanonicalVersion(t) + if cv == "" { + return "", &Error{ + Verb: verb, + ModPath: path, + Err: &module.InvalidVersionError{ + Version: t, + Err: errors.New("must be of the form v1.2.3"), + }, + } + } + t = cv + } + *s = t + return *s, nil +} + +func modulePathMajor(path string) (string, error) { + _, major, ok := module.SplitPathVersion(path) + if !ok { + return "", fmt.Errorf("invalid module path") + } + return major, nil +} + +func (f *File) Format() ([]byte, error) { + return Format(f.Syntax), nil +} + +// Cleanup cleans up the file f after any edit operations. +// To avoid quadratic behavior, modifications like DropRequire +// clear the entry but do not remove it from the slice. +// Cleanup cleans out all the cleared entries. 
+func (f *File) Cleanup() { + w := 0 + for _, r := range f.Require { + if r.Mod.Path != "" { + f.Require[w] = r + w++ + } + } + f.Require = f.Require[:w] + + w = 0 + for _, x := range f.Exclude { + if x.Mod.Path != "" { + f.Exclude[w] = x + w++ + } + } + f.Exclude = f.Exclude[:w] + + w = 0 + for _, r := range f.Replace { + if r.Old.Path != "" { + f.Replace[w] = r + w++ + } + } + f.Replace = f.Replace[:w] + + w = 0 + for _, r := range f.Retract { + if r.Low != "" || r.High != "" { + f.Retract[w] = r + w++ + } + } + f.Retract = f.Retract[:w] + + f.Syntax.Cleanup() +} + +func (f *File) AddGoStmt(version string) error { + if !GoVersionRE.MatchString(version) { + return fmt.Errorf("invalid language version string %q", version) + } + if f.Go == nil { + var hint Expr + if f.Module != nil && f.Module.Syntax != nil { + hint = f.Module.Syntax + } + f.Go = &Go{ + Version: version, + Syntax: f.Syntax.addLine(hint, "go", version), + } + } else { + f.Go.Version = version + f.Syntax.updateLine(f.Go.Syntax, "go", version) + } + return nil +} + +// AddRequire sets the first require line for path to version vers, +// preserving any existing comments for that line and removing all +// other lines for path. +// +// If no line currently exists for path, AddRequire adds a new line +// at the end of the last require block. +func (f *File) AddRequire(path, vers string) error { + need := true + for _, r := range f.Require { + if r.Mod.Path == path { + if need { + r.Mod.Version = vers + f.Syntax.updateLine(r.Syntax, "require", AutoQuote(path), vers) + need = false + } else { + r.Syntax.markRemoved() + *r = Require{} + } + } + } + + if need { + f.AddNewRequire(path, vers, false) + } + return nil +} + +// AddNewRequire adds a new require line for path at version vers at the end of +// the last require block, regardless of any existing require lines for path. +func (f *File) AddNewRequire(path, vers string, indirect bool) { + line := f.Syntax.addLine(nil, "require", AutoQuote(path), vers) + r := &Require{ + Mod: module.Version{Path: path, Version: vers}, + Syntax: line, + } + r.setIndirect(indirect) + f.Require = append(f.Require, r) +} + +// SetRequire updates the requirements of f to contain exactly req, preserving +// the existing block structure and line comment contents (except for 'indirect' +// markings) for the first requirement on each named module path. +// +// The Syntax field is ignored for the requirements in req. +// +// Any requirements not already present in the file are added to the block +// containing the last require line. +// +// The requirements in req must specify at most one distinct version for each +// module path. +// +// If any existing requirements may be removed, the caller should call Cleanup +// after all edits are complete. +func (f *File) SetRequire(req []*Require) { + type elem struct { + version string + indirect bool + } + need := make(map[string]elem) + for _, r := range req { + if prev, dup := need[r.Mod.Path]; dup && prev.version != r.Mod.Version { + panic(fmt.Errorf("SetRequire called with conflicting versions for path %s (%s and %s)", r.Mod.Path, prev.version, r.Mod.Version)) + } + need[r.Mod.Path] = elem{r.Mod.Version, r.Indirect} + } + + // Update or delete the existing Require entries to preserve + // only the first for each module path in req. 
+ for _, r := range f.Require { + e, ok := need[r.Mod.Path] + if ok { + r.setVersion(e.version) + r.setIndirect(e.indirect) + } else { + r.markRemoved() + } + delete(need, r.Mod.Path) + } + + // Add new entries in the last block of the file for any paths that weren't + // already present. + // + // This step is nondeterministic, but the final result will be deterministic + // because we will sort the block. + for path, e := range need { + f.AddNewRequire(path, e.version, e.indirect) + } + + f.SortBlocks() +} + +// SetRequireSeparateIndirect updates the requirements of f to contain the given +// requirements. Comment contents (except for 'indirect' markings) are retained +// from the first existing requirement for each module path. Like SetRequire, +// SetRequireSeparateIndirect adds requirements for new paths in req, +// updates the version and "// indirect" comment on existing requirements, +// and deletes requirements on paths not in req. Existing duplicate requirements +// are deleted. +// +// As its name suggests, SetRequireSeparateIndirect puts direct and indirect +// requirements into two separate blocks, one containing only direct +// requirements, and the other containing only indirect requirements. +// SetRequireSeparateIndirect may move requirements between these two blocks +// when their indirect markings change. However, SetRequireSeparateIndirect +// won't move requirements from other blocks, especially blocks with comments. +// +// If the file initially has one uncommented block of requirements, +// SetRequireSeparateIndirect will split it into a direct-only and indirect-only +// block. This aids in the transition to separate blocks. +func (f *File) SetRequireSeparateIndirect(req []*Require) { + // hasComments returns whether a line or block has comments + // other than "indirect". + hasComments := func(c Comments) bool { + return len(c.Before) > 0 || len(c.After) > 0 || len(c.Suffix) > 1 || + (len(c.Suffix) == 1 && + strings.TrimSpace(strings.TrimPrefix(c.Suffix[0].Token, string(slashSlash))) != "indirect") + } + + // moveReq adds r to block. If r was in another block, moveReq deletes + // it from that block and transfers its comments. + moveReq := func(r *Require, block *LineBlock) { + var line *Line + if r.Syntax == nil { + line = &Line{Token: []string{AutoQuote(r.Mod.Path), r.Mod.Version}} + r.Syntax = line + if r.Indirect { + r.setIndirect(true) + } + } else { + line = new(Line) + *line = *r.Syntax + if !line.InBlock && len(line.Token) > 0 && line.Token[0] == "require" { + line.Token = line.Token[1:] + } + r.Syntax.Token = nil // Cleanup will delete the old line. + r.Syntax = line + } + line.InBlock = true + block.Line = append(block.Line, line) + } + + // Examine existing require lines and blocks. + var ( + // We may insert new requirements into the last uncommented + // direct-only and indirect-only blocks. We may also move requirements + // to the opposite block if their indirect markings change. + lastDirectIndex = -1 + lastIndirectIndex = -1 + + // If there are no direct-only or indirect-only blocks, a new block may + // be inserted after the last require line or block. + lastRequireIndex = -1 + + // If there's only one require line or block, and it's uncommented, + // we'll move its requirements to the direct-only or indirect-only blocks. + requireLineOrBlockCount = 0 + + // Track the block each requirement belongs to (if any) so we can + // move them later. 
+ lineToBlock = make(map[*Line]*LineBlock) + ) + for i, stmt := range f.Syntax.Stmt { + switch stmt := stmt.(type) { + case *Line: + if len(stmt.Token) == 0 || stmt.Token[0] != "require" { + continue + } + lastRequireIndex = i + requireLineOrBlockCount++ + if !hasComments(stmt.Comments) { + if isIndirect(stmt) { + lastIndirectIndex = i + } else { + lastDirectIndex = i + } + } + + case *LineBlock: + if len(stmt.Token) == 0 || stmt.Token[0] != "require" { + continue + } + lastRequireIndex = i + requireLineOrBlockCount++ + allDirect := len(stmt.Line) > 0 && !hasComments(stmt.Comments) + allIndirect := len(stmt.Line) > 0 && !hasComments(stmt.Comments) + for _, line := range stmt.Line { + lineToBlock[line] = stmt + if hasComments(line.Comments) { + allDirect = false + allIndirect = false + } else if isIndirect(line) { + allDirect = false + } else { + allIndirect = false + } + } + if allDirect { + lastDirectIndex = i + } + if allIndirect { + lastIndirectIndex = i + } + } + } + + oneFlatUncommentedBlock := requireLineOrBlockCount == 1 && + !hasComments(*f.Syntax.Stmt[lastRequireIndex].Comment()) + + // Create direct and indirect blocks if needed. Convert lines into blocks + // if needed. If we end up with an empty block or a one-line block, + // Cleanup will delete it or convert it to a line later. + insertBlock := func(i int) *LineBlock { + block := &LineBlock{Token: []string{"require"}} + f.Syntax.Stmt = append(f.Syntax.Stmt, nil) + copy(f.Syntax.Stmt[i+1:], f.Syntax.Stmt[i:]) + f.Syntax.Stmt[i] = block + return block + } + + ensureBlock := func(i int) *LineBlock { + switch stmt := f.Syntax.Stmt[i].(type) { + case *LineBlock: + return stmt + case *Line: + block := &LineBlock{ + Token: []string{"require"}, + Line: []*Line{stmt}, + } + stmt.Token = stmt.Token[1:] // remove "require" + stmt.InBlock = true + f.Syntax.Stmt[i] = block + return block + default: + panic(fmt.Sprintf("unexpected statement: %v", stmt)) + } + } + + var lastDirectBlock *LineBlock + if lastDirectIndex < 0 { + if lastIndirectIndex >= 0 { + lastDirectIndex = lastIndirectIndex + lastIndirectIndex++ + } else if lastRequireIndex >= 0 { + lastDirectIndex = lastRequireIndex + 1 + } else { + lastDirectIndex = len(f.Syntax.Stmt) + } + lastDirectBlock = insertBlock(lastDirectIndex) + } else { + lastDirectBlock = ensureBlock(lastDirectIndex) + } + + var lastIndirectBlock *LineBlock + if lastIndirectIndex < 0 { + lastIndirectIndex = lastDirectIndex + 1 + lastIndirectBlock = insertBlock(lastIndirectIndex) + } else { + lastIndirectBlock = ensureBlock(lastIndirectIndex) + } + + // Delete requirements we don't want anymore. + // Update versions and indirect comments on requirements we want to keep. + // If a requirement is in last{Direct,Indirect}Block with the wrong + // indirect marking after this, or if the requirement is in an single + // uncommented mixed block (oneFlatUncommentedBlock), move it to the + // correct block. + // + // Some blocks may be empty after this. Cleanup will remove them. + need := make(map[string]*Require) + for _, r := range req { + need[r.Mod.Path] = r + } + have := make(map[string]*Require) + for _, r := range f.Require { + path := r.Mod.Path + if need[path] == nil || have[path] != nil { + // Requirement not needed, or duplicate requirement. Delete. 
+ r.markRemoved() + continue + } + have[r.Mod.Path] = r + r.setVersion(need[path].Mod.Version) + r.setIndirect(need[path].Indirect) + if need[path].Indirect && + (oneFlatUncommentedBlock || lineToBlock[r.Syntax] == lastDirectBlock) { + moveReq(r, lastIndirectBlock) + } else if !need[path].Indirect && + (oneFlatUncommentedBlock || lineToBlock[r.Syntax] == lastIndirectBlock) { + moveReq(r, lastDirectBlock) + } + } + + // Add new requirements. + for path, r := range need { + if have[path] == nil { + if r.Indirect { + moveReq(r, lastIndirectBlock) + } else { + moveReq(r, lastDirectBlock) + } + f.Require = append(f.Require, r) + } + } + + f.SortBlocks() +} + +func (f *File) DropRequire(path string) error { + for _, r := range f.Require { + if r.Mod.Path == path { + r.Syntax.markRemoved() + *r = Require{} + } + } + return nil +} + +// AddExclude adds a exclude statement to the mod file. Errors if the provided +// version is not a canonical version string +func (f *File) AddExclude(path, vers string) error { + if err := checkCanonicalVersion(path, vers); err != nil { + return err + } + + var hint *Line + for _, x := range f.Exclude { + if x.Mod.Path == path && x.Mod.Version == vers { + return nil + } + if x.Mod.Path == path { + hint = x.Syntax + } + } + + f.Exclude = append(f.Exclude, &Exclude{Mod: module.Version{Path: path, Version: vers}, Syntax: f.Syntax.addLine(hint, "exclude", AutoQuote(path), vers)}) + return nil +} + +func (f *File) DropExclude(path, vers string) error { + for _, x := range f.Exclude { + if x.Mod.Path == path && x.Mod.Version == vers { + x.Syntax.markRemoved() + *x = Exclude{} + } + } + return nil +} + +func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error { + return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers) +} + +func addReplace(syntax *FileSyntax, replace *[]*Replace, oldPath, oldVers, newPath, newVers string) error { + need := true + old := module.Version{Path: oldPath, Version: oldVers} + new := module.Version{Path: newPath, Version: newVers} + tokens := []string{"replace", AutoQuote(oldPath)} + if oldVers != "" { + tokens = append(tokens, oldVers) + } + tokens = append(tokens, "=>", AutoQuote(newPath)) + if newVers != "" { + tokens = append(tokens, newVers) + } + + var hint *Line + for _, r := range *replace { + if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) { + if need { + // Found replacement for old; update to use new. + r.New = new + syntax.updateLine(r.Syntax, tokens...) + need = false + continue + } + // Already added; delete other replacements for same. + r.Syntax.markRemoved() + *r = Replace{} + } + if r.Old.Path == oldPath { + hint = r.Syntax + } + } + if need { + *replace = append(*replace, &Replace{Old: old, New: new, Syntax: syntax.addLine(hint, tokens...)}) + } + return nil +} + +func (f *File) DropReplace(oldPath, oldVers string) error { + for _, r := range f.Replace { + if r.Old.Path == oldPath && r.Old.Version == oldVers { + r.Syntax.markRemoved() + *r = Replace{} + } + } + return nil +} + +// AddRetract adds a retract statement to the mod file. 
Errors if the provided +// version interval does not consist of canonical version strings +func (f *File) AddRetract(vi VersionInterval, rationale string) error { + var path string + if f.Module != nil { + path = f.Module.Mod.Path + } + if err := checkCanonicalVersion(path, vi.High); err != nil { + return err + } + if err := checkCanonicalVersion(path, vi.Low); err != nil { + return err + } + + r := &Retract{ + VersionInterval: vi, + } + if vi.Low == vi.High { + r.Syntax = f.Syntax.addLine(nil, "retract", AutoQuote(vi.Low)) + } else { + r.Syntax = f.Syntax.addLine(nil, "retract", "[", AutoQuote(vi.Low), ",", AutoQuote(vi.High), "]") + } + if rationale != "" { + for _, line := range strings.Split(rationale, "\n") { + com := Comment{Token: "// " + line} + r.Syntax.Comment().Before = append(r.Syntax.Comment().Before, com) + } + } + return nil +} + +func (f *File) DropRetract(vi VersionInterval) error { + for _, r := range f.Retract { + if r.VersionInterval == vi { + r.Syntax.markRemoved() + *r = Retract{} + } + } + return nil +} + +func (f *File) SortBlocks() { + f.removeDups() // otherwise sorting is unsafe + + for _, stmt := range f.Syntax.Stmt { + block, ok := stmt.(*LineBlock) + if !ok { + continue + } + less := lineLess + if block.Token[0] == "retract" { + less = lineRetractLess + } + sort.SliceStable(block.Line, func(i, j int) bool { + return less(block.Line[i], block.Line[j]) + }) + } +} + +// removeDups removes duplicate exclude and replace directives. +// +// Earlier exclude directives take priority. +// +// Later replace directives take priority. +// +// require directives are not de-duplicated. That's left up to higher-level +// logic (MVS). +// +// retract directives are not de-duplicated since comments are +// meaningful, and versions may be retracted multiple times. +func (f *File) removeDups() { + removeDups(f.Syntax, &f.Exclude, &f.Replace) +} + +func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) { + kill := make(map[*Line]bool) + + // Remove duplicate excludes. + if exclude != nil { + haveExclude := make(map[module.Version]bool) + for _, x := range *exclude { + if haveExclude[x.Mod] { + kill[x.Syntax] = true + continue + } + haveExclude[x.Mod] = true + } + var excl []*Exclude + for _, x := range *exclude { + if !kill[x.Syntax] { + excl = append(excl, x) + } + } + *exclude = excl + } + + // Remove duplicate replacements. + // Later replacements take priority over earlier ones. + haveReplace := make(map[module.Version]bool) + for i := len(*replace) - 1; i >= 0; i-- { + x := (*replace)[i] + if haveReplace[x.Old] { + kill[x.Syntax] = true + continue + } + haveReplace[x.Old] = true + } + var repl []*Replace + for _, x := range *replace { + if !kill[x.Syntax] { + repl = append(repl, x) + } + } + *replace = repl + + // Duplicate require and retract directives are not removed. + + // Drop killed statements from the syntax tree. + var stmts []Expr + for _, stmt := range syntax.Stmt { + switch stmt := stmt.(type) { + case *Line: + if kill[stmt] { + continue + } + case *LineBlock: + var lines []*Line + for _, line := range stmt.Line { + if !kill[line] { + lines = append(lines, line) + } + } + stmt.Line = lines + if len(lines) == 0 { + continue + } + } + stmts = append(stmts, stmt) + } + syntax.Stmt = stmts +} + +// lineLess returns whether li should be sorted before lj. It sorts +// lexicographically without assigning any special meaning to tokens. 
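Editorial sketch of the high-level editing flow these methods support (hypothetical module and versions; not part of the vendored diff): parse, edit, sort, clean up, then format.

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/modfile"
)

func main() {
	f, err := modfile.Parse("go.mod", []byte("module example.com/m\n\ngo 1.19\n"), nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := f.AddRequire("example.com/dep", "v1.2.3"); err != nil {
		log.Fatal(err)
	}
	// Retract a broken release of this module, recording the rationale as a comment.
	if err := f.AddRetract(modfile.VersionInterval{Low: "v1.0.0", High: "v1.0.1"}, "published prematurely"); err != nil {
		log.Fatal(err)
	}
	f.SortBlocks()
	f.Cleanup() // drops entries cleared by any Drop*/Set* edits
	out, err := f.Format()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}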
+func lineLess(li, lj *Line) bool { + for k := 0; k < len(li.Token) && k < len(lj.Token); k++ { + if li.Token[k] != lj.Token[k] { + return li.Token[k] < lj.Token[k] + } + } + return len(li.Token) < len(lj.Token) +} + +// lineRetractLess returns whether li should be sorted before lj for lines in +// a "retract" block. It treats each line as a version interval. Single versions +// are compared as if they were intervals with the same low and high version. +// Intervals are sorted in descending order, first by low version, then by +// high version, using semver.Compare. +func lineRetractLess(li, lj *Line) bool { + interval := func(l *Line) VersionInterval { + if len(l.Token) == 1 { + return VersionInterval{Low: l.Token[0], High: l.Token[0]} + } else if len(l.Token) == 5 && l.Token[0] == "[" && l.Token[2] == "," && l.Token[4] == "]" { + return VersionInterval{Low: l.Token[1], High: l.Token[3]} + } else { + // Line in unknown format. Treat as an invalid version. + return VersionInterval{} + } + } + vii := interval(li) + vij := interval(lj) + if cmp := semver.Compare(vii.Low, vij.Low); cmp != 0 { + return cmp > 0 + } + return semver.Compare(vii.High, vij.High) > 0 +} + +// checkCanonicalVersion returns a non-nil error if vers is not a canonical +// version string or does not match the major version of path. +// +// If path is non-empty, the error text suggests a format with a major version +// corresponding to the path. +func checkCanonicalVersion(path, vers string) error { + _, pathMajor, pathMajorOk := module.SplitPathVersion(path) + + if vers == "" || vers != module.CanonicalVersion(vers) { + if pathMajor == "" { + return &module.InvalidVersionError{ + Version: vers, + Err: fmt.Errorf("must be of the form v1.2.3"), + } + } + return &module.InvalidVersionError{ + Version: vers, + Err: fmt.Errorf("must be of the form %s.2.3", module.PathMajorPrefix(pathMajor)), + } + } + + if pathMajorOk { + if err := module.CheckPathMajor(vers, pathMajor); err != nil { + if pathMajor == "" { + // In this context, the user probably wrote "v2.3.4" when they meant + // "v2.3.4+incompatible". Suggest that instead of "v0 or v1". + return &module.InvalidVersionError{ + Version: vers, + Err: fmt.Errorf("should be %s+incompatible (or module %s/%v)", vers, path, semver.Major(vers)), + } + } + return err + } + } + + return nil +} diff --git a/vendor/golang.org/x/mod/modfile/work.go b/vendor/golang.org/x/mod/modfile/work.go new file mode 100644 index 0000000000..0c0e521525 --- /dev/null +++ b/vendor/golang.org/x/mod/modfile/work.go @@ -0,0 +1,234 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfile + +import ( + "fmt" + "sort" + "strings" +) + +// A WorkFile is the parsed, interpreted form of a go.work file. +type WorkFile struct { + Go *Go + Use []*Use + Replace []*Replace + + Syntax *FileSyntax +} + +// A Use is a single directory statement. +type Use struct { + Path string // Use path of module. + ModulePath string // Module path in the comment. + Syntax *Line +} + +// ParseWork parses and returns a go.work file. +// +// file is the name of the file, used in positions and errors. +// +// data is the content of the file. +// +// fix is an optional function that canonicalizes module versions. +// If fix is nil, all module versions must be canonical (module.CanonicalVersion +// must return the same string). 
+func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) { + fs, err := parse(file, data) + if err != nil { + return nil, err + } + f := &WorkFile{ + Syntax: fs, + } + var errs ErrorList + + for _, x := range fs.Stmt { + switch x := x.(type) { + case *Line: + f.add(&errs, x, x.Token[0], x.Token[1:], fix) + + case *LineBlock: + if len(x.Token) > 1 { + errs = append(errs, Error{ + Filename: file, + Pos: x.Start, + Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), + }) + continue + } + switch x.Token[0] { + default: + errs = append(errs, Error{ + Filename: file, + Pos: x.Start, + Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), + }) + continue + case "use", "replace": + for _, l := range x.Line { + f.add(&errs, l, x.Token[0], l.Token, fix) + } + } + } + } + + if len(errs) > 0 { + return nil, errs + } + return f, nil +} + +// Cleanup cleans up the file f after any edit operations. +// To avoid quadratic behavior, modifications like DropRequire +// clear the entry but do not remove it from the slice. +// Cleanup cleans out all the cleared entries. +func (f *WorkFile) Cleanup() { + w := 0 + for _, r := range f.Use { + if r.Path != "" { + f.Use[w] = r + w++ + } + } + f.Use = f.Use[:w] + + w = 0 + for _, r := range f.Replace { + if r.Old.Path != "" { + f.Replace[w] = r + w++ + } + } + f.Replace = f.Replace[:w] + + f.Syntax.Cleanup() +} + +func (f *WorkFile) AddGoStmt(version string) error { + if !GoVersionRE.MatchString(version) { + return fmt.Errorf("invalid language version string %q", version) + } + if f.Go == nil { + stmt := &Line{Token: []string{"go", version}} + f.Go = &Go{ + Version: version, + Syntax: stmt, + } + // Find the first non-comment-only block that's and add + // the go statement before it. That will keep file comments at the top. + i := 0 + for i = 0; i < len(f.Syntax.Stmt); i++ { + if _, ok := f.Syntax.Stmt[i].(*CommentBlock); !ok { + break + } + } + f.Syntax.Stmt = append(append(f.Syntax.Stmt[:i:i], stmt), f.Syntax.Stmt[i:]...) + } else { + f.Go.Version = version + f.Syntax.updateLine(f.Go.Syntax, "go", version) + } + return nil +} + +func (f *WorkFile) AddUse(diskPath, modulePath string) error { + need := true + for _, d := range f.Use { + if d.Path == diskPath { + if need { + d.ModulePath = modulePath + f.Syntax.updateLine(d.Syntax, "use", AutoQuote(diskPath)) + need = false + } else { + d.Syntax.markRemoved() + *d = Use{} + } + } + } + + if need { + f.AddNewUse(diskPath, modulePath) + } + return nil +} + +func (f *WorkFile) AddNewUse(diskPath, modulePath string) { + line := f.Syntax.addLine(nil, "use", AutoQuote(diskPath)) + f.Use = append(f.Use, &Use{Path: diskPath, ModulePath: modulePath, Syntax: line}) +} + +func (f *WorkFile) SetUse(dirs []*Use) { + need := make(map[string]string) + for _, d := range dirs { + need[d.Path] = d.ModulePath + } + + for _, d := range f.Use { + if modulePath, ok := need[d.Path]; ok { + d.ModulePath = modulePath + } else { + d.Syntax.markRemoved() + *d = Use{} + } + } + + // TODO(#45713): Add module path to comment. 
+ + for diskPath, modulePath := range need { + f.AddNewUse(diskPath, modulePath) + } + f.SortBlocks() +} + +func (f *WorkFile) DropUse(path string) error { + for _, d := range f.Use { + if d.Path == path { + d.Syntax.markRemoved() + *d = Use{} + } + } + return nil +} + +func (f *WorkFile) AddReplace(oldPath, oldVers, newPath, newVers string) error { + return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers) +} + +func (f *WorkFile) DropReplace(oldPath, oldVers string) error { + for _, r := range f.Replace { + if r.Old.Path == oldPath && r.Old.Version == oldVers { + r.Syntax.markRemoved() + *r = Replace{} + } + } + return nil +} + +func (f *WorkFile) SortBlocks() { + f.removeDups() // otherwise sorting is unsafe + + for _, stmt := range f.Syntax.Stmt { + block, ok := stmt.(*LineBlock) + if !ok { + continue + } + sort.SliceStable(block.Line, func(i, j int) bool { + return lineLess(block.Line[i], block.Line[j]) + }) + } +} + +// removeDups removes duplicate replace directives. +// +// Later replace directives take priority. +// +// require directives are not de-duplicated. That's left up to higher-level +// logic (MVS). +// +// retract directives are not de-duplicated since comments are +// meaningful, and versions may be retracted multiple times. +func (f *WorkFile) removeDups() { + removeDups(f.Syntax, nil, &f.Replace) +} diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go new file mode 100644 index 0000000000..e9dec6e614 --- /dev/null +++ b/vendor/golang.org/x/mod/module/module.go @@ -0,0 +1,841 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package module defines the module.Version type along with support code. +// +// The module.Version type is a simple Path, Version pair: +// +// type Version struct { +// Path string +// Version string +// } +// +// There are no restrictions imposed directly by use of this structure, +// but additional checking functions, most notably Check, verify that +// a particular path, version pair is valid. +// +// # Escaped Paths +// +// Module paths appear as substrings of file system paths +// (in the download cache) and of web server URLs in the proxy protocol. +// In general we cannot rely on file systems to be case-sensitive, +// nor can we rely on web servers, since they read from file systems. +// That is, we cannot rely on the file system to keep rsc.io/QUOTE +// and rsc.io/quote separate. Windows and macOS don't. +// Instead, we must never require two different casings of a file path. +// Because we want the download cache to match the proxy protocol, +// and because we want the proxy protocol to be possible to serve +// from a tree of static files (which might be stored on a case-insensitive +// file system), the proxy protocol must never require two different casings +// of a URL path either. +// +// One possibility would be to make the escaped form be the lowercase +// hexadecimal encoding of the actual path bytes. This would avoid ever +// needing different casings of a file path, but it would be fairly illegible +// to most programmers when those paths appeared in the file system +// (including in file paths in compiler errors and stack traces) +// in web server logs, and so on. Instead, we want a safe escaped form that +// leaves most paths unaltered. 
+// +// The safe escaped form is to replace every uppercase letter +// with an exclamation mark followed by the letter's lowercase equivalent. +// +// For example, +// +// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. +// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy +// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. +// +// Import paths that avoid upper-case letters are left unchanged. +// Note that because import paths are ASCII-only and avoid various +// problematic punctuation (like : < and >), the escaped form is also ASCII-only +// and avoids the same problematic punctuation. +// +// Import paths have never allowed exclamation marks, so there is no +// need to define how to escape a literal !. +// +// # Unicode Restrictions +// +// Today, paths are disallowed from using Unicode. +// +// Although paths are currently disallowed from using Unicode, +// we would like at some point to allow Unicode letters as well, to assume that +// file systems and URLs are Unicode-safe (storing UTF-8), and apply +// the !-for-uppercase convention for escaping them in the file system. +// But there are at least two subtle considerations. +// +// First, note that not all case-fold equivalent distinct runes +// form an upper/lower pair. +// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) +// are three distinct runes that case-fold to each other. +// When we do add Unicode letters, we must not assume that upper/lower +// are the only case-equivalent pairs. +// Perhaps the Kelvin symbol would be disallowed entirely, for example. +// Or perhaps it would escape as "!!k", or perhaps as "(212A)". +// +// Second, it would be nice to allow Unicode marks as well as letters, +// but marks include combining marks, and then we must deal not +// only with case folding but also normalization: both U+00E9 ('é') +// and U+0065 U+0301 ('e' followed by combining acute accent) +// look the same on the page and are treated by some file systems +// as the same path. If we do allow Unicode marks in paths, there +// must be some kind of normalization to allow only one canonical +// encoding of any character used in an import path. +package module + +// IMPORTANT NOTE +// +// This file essentially defines the set of valid import paths for the go command. +// There are many subtle considerations, including Unicode ambiguity, +// security, network, and file system representations. +// +// This file also defines the set of valid module path and version combinations, +// another topic with many subtle considerations. +// +// Changes to the semantics in this file require approval from rsc. + +import ( + "errors" + "fmt" + "path" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/mod/semver" +) + +// A Version (for clients, a module.Version) is defined by a module path and version pair. +// These are stored in their plain (unescaped) form. +type Version struct { + // Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2". + Path string + + // Version is usually a semantic version in canonical form. + // There are three exceptions to this general rule. + // First, the top-level target of a build has no specific version + // and uses Version = "". + // Second, during MVS calculations the version "none" is used + // to represent the decision to take no version of a given module. + // Third, filesystem paths found in "replace" directives are + // represented by a path with an empty version. 
+ Version string `json:",omitempty"` +} + +// String returns a representation of the Version suitable for logging +// (Path@Version, or just Path if Version is empty). +func (m Version) String() string { + if m.Version == "" { + return m.Path + } + return m.Path + "@" + m.Version +} + +// A ModuleError indicates an error specific to a module. +type ModuleError struct { + Path string + Version string + Err error +} + +// VersionError returns a ModuleError derived from a Version and error, +// or err itself if it is already such an error. +func VersionError(v Version, err error) error { + var mErr *ModuleError + if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version { + return err + } + return &ModuleError{ + Path: v.Path, + Version: v.Version, + Err: err, + } +} + +func (e *ModuleError) Error() string { + if v, ok := e.Err.(*InvalidVersionError); ok { + return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err) + } + if e.Version != "" { + return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err) + } + return fmt.Sprintf("module %s: %v", e.Path, e.Err) +} + +func (e *ModuleError) Unwrap() error { return e.Err } + +// An InvalidVersionError indicates an error specific to a version, with the +// module path unknown or specified externally. +// +// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError +// must not wrap a ModuleError. +type InvalidVersionError struct { + Version string + Pseudo bool + Err error +} + +// noun returns either "version" or "pseudo-version", depending on whether +// e.Version is a pseudo-version. +func (e *InvalidVersionError) noun() string { + if e.Pseudo { + return "pseudo-version" + } + return "version" +} + +func (e *InvalidVersionError) Error() string { + return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err) +} + +func (e *InvalidVersionError) Unwrap() error { return e.Err } + +// An InvalidPathError indicates a module, import, or file path doesn't +// satisfy all naming constraints. See CheckPath, CheckImportPath, +// and CheckFilePath for specific restrictions. +type InvalidPathError struct { + Kind string // "module", "import", or "file" + Path string + Err error +} + +func (e *InvalidPathError) Error() string { + return fmt.Sprintf("malformed %s path %q: %v", e.Kind, e.Path, e.Err) +} + +func (e *InvalidPathError) Unwrap() error { return e.Err } + +// Check checks that a given module path, version pair is valid. +// In addition to the path being a valid module path +// and the version being a valid semantic version, +// the two must correspond. +// For example, the path "yaml/v2" only corresponds to +// semantic versions beginning with "v2.". +func Check(path, version string) error { + if err := CheckPath(path); err != nil { + return err + } + if !semver.IsValid(version) { + return &ModuleError{ + Path: path, + Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")}, + } + } + _, pathMajor, _ := SplitPathVersion(path) + if err := CheckPathMajor(version, pathMajor); err != nil { + return &ModuleError{Path: path, Err: err} + } + return nil +} + +// firstPathOK reports whether r can appear in the first element of a module path. +// The first element of the path must be an LDH domain name, at least for now. +// To avoid case ambiguity, the domain name must be entirely lower case. +func firstPathOK(r rune) bool { + return r == '-' || r == '.' 
|| + '0' <= r && r <= '9' || + 'a' <= r && r <= 'z' +} + +// modPathOK reports whether r can appear in a module path element. +// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. +// +// This matches what "go get" has historically recognized in import paths, +// and avoids confusing sequences like '%20' or '+' that would change meaning +// if used in a URL. +// +// TODO(rsc): We would like to allow Unicode letters, but that requires additional +// care in the safe encoding (see "escaped paths" above). +func modPathOK(r rune) bool { + if r < utf8.RuneSelf { + return r == '-' || r == '.' || r == '_' || r == '~' || + '0' <= r && r <= '9' || + 'A' <= r && r <= 'Z' || + 'a' <= r && r <= 'z' + } + return false +} + +// importPathOK reports whether r can appear in a package import path element. +// +// Import paths are intermediate between module paths and file paths: we allow +// disallow characters that would be confusing or ambiguous as arguments to +// 'go get' (such as '@' and ' ' ), but allow certain characters that are +// otherwise-unambiguous on the command line and historically used for some +// binary names (such as '++' as a suffix for compiler binaries and wrappers). +func importPathOK(r rune) bool { + return modPathOK(r) || r == '+' +} + +// fileNameOK reports whether r can appear in a file name. +// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. +// If we expand the set of allowed characters here, we have to +// work harder at detecting potential case-folding and normalization collisions. +// See note about "escaped paths" above. +func fileNameOK(r rune) bool { + if r < utf8.RuneSelf { + // Entire set of ASCII punctuation, from which we remove characters: + // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~ + // We disallow some shell special characters: " ' * < > ? ` | + // (Note that some of those are disallowed by the Windows file system as well.) + // We also disallow path separators / : and \ (fileNameOK is only called on path element characters). + // We allow spaces (U+0020) in file names. + const allowed = "!#$%&()+,-.=@[]^_{}~ " + if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' { + return true + } + return strings.ContainsRune(allowed, r) + } + // It may be OK to add more ASCII punctuation here, but only carefully. + // For example Windows disallows < > \, and macOS disallows :, so we must not allow those. + return unicode.IsLetter(r) +} + +// CheckPath checks that a module path is valid. +// A valid module path is a valid import path, as checked by CheckImportPath, +// with three additional constraints. +// First, the leading path element (up to the first slash, if any), +// by convention a domain name, must contain only lower-case ASCII letters, +// ASCII digits, dots (U+002E), and dashes (U+002D); +// it must contain at least one dot and cannot start with a dash. +// Second, for a final path element of the form /vN, where N looks numeric +// (ASCII digits and dots) must not begin with a leading zero, must not be /v1, +// and must not contain any dots. For paths beginning with "gopkg.in/", +// this second requirement is replaced by a requirement that the path +// follow the gopkg.in server's conventions. +// Third, no path element may begin with a dot. 
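Editorial sketch of the path rules documented above (hypothetical paths; not part of the vendored diff): CheckPath enforces the lower-case, dot-containing first element and the /vN suffix rules, and Check additionally ties a version to the path's major version.

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	fmt.Println(module.CheckPath("github.com/user/repo/v2")) // <nil>
	// Upper-case letters are rejected in the first (domain-like) element.
	fmt.Println(module.CheckPath("GitHub.com/user/repo"))
	// A v2+ version requires a matching /vN path suffix.
	fmt.Println(module.Check("example.com/m/v2", "v2.0.1")) // <nil>
	fmt.Println(module.Check("example.com/m", "v2.0.1"))    // error: should be v0 or v1, not v2
}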
+func CheckPath(path string) (err error) { + defer func() { + if err != nil { + err = &InvalidPathError{Kind: "module", Path: path, Err: err} + } + }() + + if err := checkPath(path, modulePath); err != nil { + return err + } + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + if i == 0 { + return fmt.Errorf("leading slash") + } + if !strings.Contains(path[:i], ".") { + return fmt.Errorf("missing dot in first path element") + } + if path[0] == '-' { + return fmt.Errorf("leading dash in first path element") + } + for _, r := range path[:i] { + if !firstPathOK(r) { + return fmt.Errorf("invalid char %q in first path element", r) + } + } + if _, _, ok := SplitPathVersion(path); !ok { + return fmt.Errorf("invalid version") + } + return nil +} + +// CheckImportPath checks that an import path is valid. +// +// A valid import path consists of one or more valid path elements +// separated by slashes (U+002F). (It must not begin with nor end in a slash.) +// +// A valid path element is a non-empty string made up of +// ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. +// It must not end with a dot (U+002E), nor contain two dots in a row. +// +// The element prefix up to the first dot must not be a reserved file name +// on Windows, regardless of case (CON, com1, NuL, and so on). The element +// must not have a suffix of a tilde followed by one or more ASCII digits +// (to exclude paths elements that look like Windows short-names). +// +// CheckImportPath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. +func CheckImportPath(path string) error { + if err := checkPath(path, importPath); err != nil { + return &InvalidPathError{Kind: "import", Path: path, Err: err} + } + return nil +} + +// pathKind indicates what kind of path we're checking. Module paths, +// import paths, and file paths have different restrictions. +type pathKind int + +const ( + modulePath pathKind = iota + importPath + filePath +) + +// checkPath checks that a general path is valid. kind indicates what +// specific constraints should be applied. +// +// checkPath returns an error describing why the path is not valid. +// Because these checks apply to module, import, and file paths, +// and because other checks may be applied, the caller is expected to wrap +// this error with InvalidPathError. +func checkPath(path string, kind pathKind) error { + if !utf8.ValidString(path) { + return fmt.Errorf("invalid UTF-8") + } + if path == "" { + return fmt.Errorf("empty string") + } + if path[0] == '-' && kind != filePath { + return fmt.Errorf("leading dash") + } + if strings.Contains(path, "//") { + return fmt.Errorf("double slash") + } + if path[len(path)-1] == '/' { + return fmt.Errorf("trailing slash") + } + elemStart := 0 + for i, r := range path { + if r == '/' { + if err := checkElem(path[elemStart:i], kind); err != nil { + return err + } + elemStart = i + 1 + } + } + if err := checkElem(path[elemStart:], kind); err != nil { + return err + } + return nil +} + +// checkElem checks whether an individual path element is valid. +func checkElem(elem string, kind pathKind) error { + if elem == "" { + return fmt.Errorf("empty path element") + } + if strings.Count(elem, ".") == len(elem) { + return fmt.Errorf("invalid path element %q", elem) + } + if elem[0] == '.' && kind == modulePath { + return fmt.Errorf("leading dot in path element") + } + if elem[len(elem)-1] == '.' 
{ + return fmt.Errorf("trailing dot in path element") + } + for _, r := range elem { + ok := false + switch kind { + case modulePath: + ok = modPathOK(r) + case importPath: + ok = importPathOK(r) + case filePath: + ok = fileNameOK(r) + default: + panic(fmt.Sprintf("internal error: invalid kind %v", kind)) + } + if !ok { + return fmt.Errorf("invalid char %q", r) + } + } + + // Windows disallows a bunch of path elements, sadly. + // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file + short := elem + if i := strings.Index(short, "."); i >= 0 { + short = short[:i] + } + for _, bad := range badWindowsNames { + if strings.EqualFold(bad, short) { + return fmt.Errorf("%q disallowed as path element component on Windows", short) + } + } + + if kind == filePath { + // don't check for Windows short-names in file names. They're + // only an issue for import paths. + return nil + } + + // Reject path components that look like Windows short-names. + // Those usually end in a tilde followed by one or more ASCII digits. + if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 { + suffix := short[tilde+1:] + suffixIsDigits := true + for _, r := range suffix { + if r < '0' || r > '9' { + suffixIsDigits = false + break + } + } + if suffixIsDigits { + return fmt.Errorf("trailing tilde and digits in path element") + } + } + + return nil +} + +// CheckFilePath checks that a slash-separated file path is valid. +// The definition of a valid file path is the same as the definition +// of a valid import path except that the set of allowed characters is larger: +// all Unicode letters, ASCII digits, the ASCII space character (U+0020), +// and the ASCII punctuation characters +// “!#$%&()+,-.=@[]^_{}~”. +// (The excluded punctuation characters, " * < > ? ` ' | / \ and :, +// have special meanings in certain shells or operating systems.) +// +// CheckFilePath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. +func CheckFilePath(path string) error { + if err := checkPath(path, filePath); err != nil { + return &InvalidPathError{Kind: "file", Path: path, Err: err} + } + return nil +} + +// badWindowsNames are the reserved file path elements on Windows. +// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file +var badWindowsNames = []string{ + "CON", + "PRN", + "AUX", + "NUL", + "COM1", + "COM2", + "COM3", + "COM4", + "COM5", + "COM6", + "COM7", + "COM8", + "COM9", + "LPT1", + "LPT2", + "LPT3", + "LPT4", + "LPT5", + "LPT6", + "LPT7", + "LPT8", + "LPT9", +} + +// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path +// and version is either empty or "/vN" for N >= 2. +// As a special case, gopkg.in paths are recognized directly; +// they require ".vN" instead of "/vN", and for all N, not just N >= 2. +// SplitPathVersion returns with ok = false when presented with +// a path whose last path element does not satisfy the constraints +// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2". +func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { + if strings.HasPrefix(path, "gopkg.in/") { + return splitGopkgIn(path) + } + + i := len(path) + dot := false + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') { + if path[i-1] == '.' 
{ + dot = true + } + i-- + } + if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' { + return path, "", true + } + prefix, pathMajor = path[:i-2], path[i-2:] + if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" { + return path, "", false + } + return prefix, pathMajor, true +} + +// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths. +func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { + if !strings.HasPrefix(path, "gopkg.in/") { + return path, "", false + } + i := len(path) + if strings.HasSuffix(path, "-unstable") { + i -= len("-unstable") + } + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') { + i-- + } + if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' { + // All gopkg.in paths must end in vN for some N. + return path, "", false + } + prefix, pathMajor = path[:i-2], path[i-2:] + if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" { + return path, "", false + } + return prefix, pathMajor, true +} + +// MatchPathMajor reports whether the semantic version v +// matches the path major version pathMajor. +// +// MatchPathMajor returns true if and only if CheckPathMajor returns nil. +func MatchPathMajor(v, pathMajor string) bool { + return CheckPathMajor(v, pathMajor) == nil +} + +// CheckPathMajor returns a non-nil error if the semantic version v +// does not match the path major version pathMajor. +func CheckPathMajor(v, pathMajor string) error { + // TODO(jayconrod): return errors or panic for invalid inputs. This function + // (and others) was covered by integration tests for cmd/go, and surrounding + // code protected against invalid inputs like non-canonical versions. + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" { + // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1. + // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405. + return nil + } + m := semver.Major(v) + if pathMajor == "" { + if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" { + return nil + } + pathMajor = "v0 or v1" + } else if pathMajor[0] == '/' || pathMajor[0] == '.' { + if m == pathMajor[1:] { + return nil + } + pathMajor = pathMajor[1:] + } + return &InvalidVersionError{ + Version: v, + Err: fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)), + } +} + +// PathMajorPrefix returns the major-version tag prefix implied by pathMajor. +// An empty PathMajorPrefix allows either v0 or v1. +// +// Note that MatchPathMajor may accept some versions that do not actually begin +// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1' +// pathMajor, even though that pathMajor implies 'v1' tagging. +func PathMajorPrefix(pathMajor string) string { + if pathMajor == "" { + return "" + } + if pathMajor[0] != '/' && pathMajor[0] != '.' { + panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator") + } + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + m := pathMajor[1:] + if m != semver.Major(m) { + panic("pathMajor suffix " + pathMajor + "passed to PathMajorPrefix is not a valid major version") + } + return m +} + +// CanonicalVersion returns the canonical form of the version string v. 
+// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible". +func CanonicalVersion(v string) string { + cv := semver.Canonical(v) + if semver.Build(v) == "+incompatible" { + cv += "+incompatible" + } + return cv +} + +// Sort sorts the list by Path, breaking ties by comparing Version fields. +// The Version fields are interpreted as semantic versions (using semver.Compare) +// optionally followed by a tie-breaking suffix introduced by a slash character, +// like in "v0.0.1/go.mod". +func Sort(list []Version) { + sort.Slice(list, func(i, j int) bool { + mi := list[i] + mj := list[j] + if mi.Path != mj.Path { + return mi.Path < mj.Path + } + // To help go.sum formatting, allow version/file. + // Compare semver prefix by semver rules, + // file by string order. + vi := mi.Version + vj := mj.Version + var fi, fj string + if k := strings.Index(vi, "/"); k >= 0 { + vi, fi = vi[:k], vi[k:] + } + if k := strings.Index(vj, "/"); k >= 0 { + vj, fj = vj[:k], vj[k:] + } + if vi != vj { + return semver.Compare(vi, vj) < 0 + } + return fi < fj + }) +} + +// EscapePath returns the escaped form of the given module path. +// It fails if the module path is invalid. +func EscapePath(path string) (escaped string, err error) { + if err := CheckPath(path); err != nil { + return "", err + } + + return escapeString(path) +} + +// EscapeVersion returns the escaped form of the given module version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func EscapeVersion(v string) (escaped string, err error) { + if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") { + return "", &InvalidVersionError{ + Version: v, + Err: fmt.Errorf("disallowed version string"), + } + } + return escapeString(v) +} + +func escapeString(s string) (escaped string, err error) { + haveUpper := false + for _, r := range s { + if r == '!' || r >= utf8.RuneSelf { + // This should be disallowed by CheckPath, but diagnose anyway. + // The correctness of the escaping loop below depends on it. + return "", fmt.Errorf("internal error: inconsistency in EscapePath") + } + if 'A' <= r && r <= 'Z' { + haveUpper = true + } + } + + if !haveUpper { + return s, nil + } + + var buf []byte + for _, r := range s { + if 'A' <= r && r <= 'Z' { + buf = append(buf, '!', byte(r+'a'-'A')) + } else { + buf = append(buf, byte(r)) + } + } + return string(buf), nil +} + +// UnescapePath returns the module path for the given escaped path. +// It fails if the escaped path is invalid or describes an invalid path. +func UnescapePath(escaped string) (path string, err error) { + path, ok := unescapeString(escaped) + if !ok { + return "", fmt.Errorf("invalid escaped module path %q", escaped) + } + if err := CheckPath(path); err != nil { + return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err) + } + return path, nil +} + +// UnescapeVersion returns the version string for the given escaped version. +// It fails if the escaped form is invalid or describes an invalid version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. 
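Editorial sketch of the escaping scheme from the package comment (not part of the vendored diff): EscapePath applies the !-for-uppercase encoding used by the module cache and proxy protocol, and UnescapePath reverses it.

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/module"
)

func main() {
	esc, err := module.EscapePath("github.com/Azure/azure-sdk-for-go")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(esc) // github.com/!azure/azure-sdk-for-go

	back, err := module.UnescapePath(esc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back) // github.com/Azure/azure-sdk-for-go
}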
+func UnescapeVersion(escaped string) (v string, err error) { + v, ok := unescapeString(escaped) + if !ok { + return "", fmt.Errorf("invalid escaped version %q", escaped) + } + if err := checkElem(v, filePath); err != nil { + return "", fmt.Errorf("invalid escaped version %q: %v", v, err) + } + return v, nil +} + +func unescapeString(escaped string) (string, bool) { + var buf []byte + + bang := false + for _, r := range escaped { + if r >= utf8.RuneSelf { + return "", false + } + if bang { + bang = false + if r < 'a' || 'z' < r { + return "", false + } + buf = append(buf, byte(r+'A'-'a')) + continue + } + if r == '!' { + bang = true + continue + } + if 'A' <= r && r <= 'Z' { + return "", false + } + buf = append(buf, byte(r)) + } + if bang { + return "", false + } + return string(buf), true +} + +// MatchPrefixPatterns reports whether any path prefix of target matches one of +// the glob patterns (as defined by path.Match) in the comma-separated globs +// list. This implements the algorithm used when matching a module path to the +// GOPRIVATE environment variable, as described by 'go help module-private'. +// +// It ignores any empty or malformed patterns in the list. +// Trailing slashes on patterns are ignored. +func MatchPrefixPatterns(globs, target string) bool { + for globs != "" { + // Extract next non-empty glob in comma-separated list. + var glob string + if i := strings.Index(globs, ","); i >= 0 { + glob, globs = globs[:i], globs[i+1:] + } else { + glob, globs = globs, "" + } + glob = strings.TrimSuffix(glob, "/") + if glob == "" { + continue + } + + // A glob with N+1 path elements (N slashes) needs to be matched + // against the first N+1 path elements of target, + // which end just before the N+1'th slash. + n := strings.Count(glob, "/") + prefix := target + // Walk target, counting slashes, truncating at the N+1'th slash. + for i := 0; i < len(target); i++ { + if target[i] == '/' { + if n == 0 { + prefix = target[:i] + break + } + n-- + } + } + if n > 0 { + // Not enough prefix elements. + continue + } + matched, _ := path.Match(glob, prefix) + if matched { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/mod/module/pseudo.go b/vendor/golang.org/x/mod/module/pseudo.go new file mode 100644 index 0000000000..f04ad37886 --- /dev/null +++ b/vendor/golang.org/x/mod/module/pseudo.go @@ -0,0 +1,250 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Pseudo-versions +// +// Code authors are expected to tag the revisions they want users to use, +// including prereleases. However, not all authors tag versions at all, +// and not all commits a user might want to try will have tags. +// A pseudo-version is a version with a special form that allows us to +// address an untagged commit and order that version with respect to +// other versions we might encounter. +// +// A pseudo-version takes one of the general forms: +// +// (1) vX.0.0-yyyymmddhhmmss-abcdef123456 +// (2) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 +// (3) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible +// (4) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 +// (5) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible +// +// If there is no recently tagged version with the right major version vX, +// then form (1) is used, creating a space of pseudo-versions at the bottom +// of the vX version range, less than any tagged version, including the unlikely v0.0.0. 
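Editorial sketch of MatchPrefixPatterns, defined just above in module.go (hypothetical GOPRIVATE-style patterns; not part of the vendored diff): each comma-separated glob is matched against the target's prefix with the same number of path elements.

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	globs := "*.corp.example.com,github.com/myorg/*"
	fmt.Println(module.MatchPrefixPatterns(globs, "git.corp.example.com/team/repo")) // true
	fmt.Println(module.MatchPrefixPatterns(globs, "github.com/myorg/tool/cmd"))      // true
	fmt.Println(module.MatchPrefixPatterns(globs, "github.com/otherorg/tool"))       // false
}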
+// +// If the most recent tagged version before the target commit is vX.Y.Z or vX.Y.Z+incompatible, +// then the pseudo-version uses form (2) or (3), making it a prerelease for the next +// possible semantic version after vX.Y.Z. The leading 0 segment in the prerelease string +// ensures that the pseudo-version compares less than possible future explicit prereleases +// like vX.Y.(Z+1)-rc1 or vX.Y.(Z+1)-1. +// +// If the most recent tagged version before the target commit is vX.Y.Z-pre or vX.Y.Z-pre+incompatible, +// then the pseudo-version uses form (4) or (5), making it a slightly later prerelease. + +package module + +import ( + "errors" + "fmt" + "strings" + "time" + + "golang.org/x/mod/internal/lazyregexp" + "golang.org/x/mod/semver" +) + +var pseudoVersionRE = lazyregexp.New(`^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)\d{14}-[A-Za-z0-9]+(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$`) + +const PseudoVersionTimestampFormat = "20060102150405" + +// PseudoVersion returns a pseudo-version for the given major version ("v1") +// preexisting older tagged version ("" or "v1.2.3" or "v1.2.3-pre"), revision time, +// and revision identifier (usually a 12-byte commit hash prefix). +func PseudoVersion(major, older string, t time.Time, rev string) string { + if major == "" { + major = "v0" + } + segment := fmt.Sprintf("%s-%s", t.UTC().Format(PseudoVersionTimestampFormat), rev) + build := semver.Build(older) + older = semver.Canonical(older) + if older == "" { + return major + ".0.0-" + segment // form (1) + } + if semver.Prerelease(older) != "" { + return older + ".0." + segment + build // form (4), (5) + } + + // Form (2), (3). + // Extract patch from vMAJOR.MINOR.PATCH + i := strings.LastIndex(older, ".") + 1 + v, patch := older[:i], older[i:] + + // Reassemble. + return v + incDecimal(patch) + "-0." + segment + build +} + +// ZeroPseudoVersion returns a pseudo-version with a zero timestamp and +// revision, which may be used as a placeholder. +func ZeroPseudoVersion(major string) string { + return PseudoVersion(major, "", time.Time{}, "000000000000") +} + +// incDecimal returns the decimal string incremented by 1. +func incDecimal(decimal string) string { + // Scan right to left turning 9s to 0s until you find a digit to increment. + digits := []byte(decimal) + i := len(digits) - 1 + for ; i >= 0 && digits[i] == '9'; i-- { + digits[i] = '0' + } + if i >= 0 { + digits[i]++ + } else { + // digits is all zeros + digits[0] = '1' + digits = append(digits, '0') + } + return string(digits) +} + +// decDecimal returns the decimal string decremented by 1, or the empty string +// if the decimal is all zeroes. +func decDecimal(decimal string) string { + // Scan right to left turning 0s to 9s until you find a digit to decrement. + digits := []byte(decimal) + i := len(digits) - 1 + for ; i >= 0 && digits[i] == '0'; i-- { + digits[i] = '9' + } + if i < 0 { + // decimal is all zeros + return "" + } + if i == 0 && digits[i] == '1' && len(digits) > 1 { + digits = digits[1:] + } else { + digits[i]-- + } + return string(digits) +} + +// IsPseudoVersion reports whether v is a pseudo-version. +func IsPseudoVersion(v string) bool { + return strings.Count(v, "-") >= 2 && semver.IsValid(v) && pseudoVersionRE.MatchString(v) +} + +// IsZeroPseudoVersion returns whether v is a pseudo-version with a zero base, +// timestamp, and revision, as returned by ZeroPseudoVersion. 
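Editorial sketch of pseudo-version forms (1) and (2) described in the comment above (hypothetical commit hash and timestamp; not part of the vendored diff):

package main

import (
	"fmt"
	"time"

	"golang.org/x/mod/module"
)

func main() {
	t := time.Date(2023, 5, 11, 9, 45, 7, 0, time.UTC)

	// No tagged release yet: form (1), below every tagged v0 version.
	fmt.Println(module.PseudoVersion("v0", "", t, "abcdef123456"))
	// v0.0.0-20230511094507-abcdef123456

	// Most recent tag v1.2.3: form (2), a pre-release of the next patch version.
	fmt.Println(module.PseudoVersion("v1", "v1.2.3", t, "abcdef123456"))
	// v1.2.4-0.20230511094507-abcdef123456

	fmt.Println(module.IsPseudoVersion("v1.2.4-0.20230511094507-abcdef123456")) // true
}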
+func IsZeroPseudoVersion(v string) bool { + return v == ZeroPseudoVersion(semver.Major(v)) +} + +// PseudoVersionTime returns the time stamp of the pseudo-version v. +// It returns an error if v is not a pseudo-version or if the time stamp +// embedded in the pseudo-version is not a valid time. +func PseudoVersionTime(v string) (time.Time, error) { + _, timestamp, _, _, err := parsePseudoVersion(v) + if err != nil { + return time.Time{}, err + } + t, err := time.Parse("20060102150405", timestamp) + if err != nil { + return time.Time{}, &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("malformed time %q", timestamp), + } + } + return t, nil +} + +// PseudoVersionRev returns the revision identifier of the pseudo-version v. +// It returns an error if v is not a pseudo-version. +func PseudoVersionRev(v string) (rev string, err error) { + _, _, rev, _, err = parsePseudoVersion(v) + return +} + +// PseudoVersionBase returns the canonical parent version, if any, upon which +// the pseudo-version v is based. +// +// If v has no parent version (that is, if it is "vX.0.0-[…]"), +// PseudoVersionBase returns the empty string and a nil error. +func PseudoVersionBase(v string) (string, error) { + base, _, _, build, err := parsePseudoVersion(v) + if err != nil { + return "", err + } + + switch pre := semver.Prerelease(base); pre { + case "": + // vX.0.0-yyyymmddhhmmss-abcdef123456 → "" + if build != "" { + // Pseudo-versions of the form vX.0.0-yyyymmddhhmmss-abcdef123456+incompatible + // are nonsensical: the "vX.0.0-" prefix implies that there is no parent tag, + // but the "+incompatible" suffix implies that the major version of + // the parent tag is not compatible with the module's import path. + // + // There are a few such entries in the index generated by proxy.golang.org, + // but we believe those entries were generated by the proxy itself. + return "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("lacks base version, but has build metadata %q", build), + } + } + return "", nil + + case "-0": + // vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z + // vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z+incompatible + base = strings.TrimSuffix(base, pre) + i := strings.LastIndexByte(base, '.') + if i < 0 { + panic("base from parsePseudoVersion missing patch number: " + base) + } + patch := decDecimal(base[i+1:]) + if patch == "" { + // vX.0.0-0 is invalid, but has been observed in the wild in the index + // generated by requests to proxy.golang.org. + // + // NOTE(bcmills): I cannot find a historical bug that accounts for + // pseudo-versions of this form, nor have I seen such versions in any + // actual go.mod files. If we find actual examples of this form and a + // reasonable theory of how they came into existence, it seems fine to + // treat them as equivalent to vX.0.0 (especially since the invalid + // pseudo-versions have lower precedence than the real ones). For now, we + // reject them. 
+ return "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: fmt.Errorf("version before %s would have negative patch number", base), + } + } + return base[:i+1] + patch + build, nil + + default: + // vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z-pre + // vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z-pre+incompatible + if !strings.HasSuffix(base, ".0") { + panic(`base from parsePseudoVersion missing ".0" before date: ` + base) + } + return strings.TrimSuffix(base, ".0") + build, nil + } +} + +var errPseudoSyntax = errors.New("syntax error") + +func parsePseudoVersion(v string) (base, timestamp, rev, build string, err error) { + if !IsPseudoVersion(v) { + return "", "", "", "", &InvalidVersionError{ + Version: v, + Pseudo: true, + Err: errPseudoSyntax, + } + } + build = semver.Build(v) + v = strings.TrimSuffix(v, build) + j := strings.LastIndex(v, "-") + v, rev = v[:j], v[j+1:] + i := strings.LastIndex(v, "-") + if j := strings.LastIndex(v, "."); j > i { + base = v[:j] // "vX.Y.Z-pre.0" or "vX.Y.(Z+1)-0" + timestamp = v[j+1:] + } else { + base = v[:i] // "vX.0.0" + timestamp = v[i+1:] + } + return base, timestamp, rev, build, nil +} diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go new file mode 100644 index 0000000000..a30a22bf20 --- /dev/null +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -0,0 +1,401 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. +// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +import "sort" + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string +} + +// IsValid reports whether v is a valid semantic version string. +func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". 
+// If v is an invalid semantic version string, Major returns the empty string. +func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." + pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other. +func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +// +// Deprecated: use Compare instead. In most cases, returning a canonicalized +// version is not expected or desired. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +// ByVersion implements sort.Interface for sorting semantic version strings. +type ByVersion []string + +func (vs ByVersion) Len() int { return len(vs) } +func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } +func (vs ByVersion) Less(i, j int) bool { + cmp := Compare(vs[i], vs[j]) + if cmp != 0 { + return cmp < 0 + } + return vs[i] < vs[j] +} + +// Sort sorts a list of semantic version strings using ByVersion. +func Sort(list []string) { + sort.Sort(ByVersion(list)) +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' 
{ + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + return + } + } + if v != "" { + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . 
+ var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index c15b8a7719..684d984fd9 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -88,13 +88,9 @@ func (p *pipe) Write(d []byte) (n int, err error) { p.c.L = &p.mu } defer p.c.Signal() - if p.err != nil { + if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } - if p.breakErr != nil { - p.unread += len(d) - return len(d), nil // discard when there is no reader - } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 8cb14f3c97..cd057f3982 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -1822,15 +1822,18 @@ func (sc *serverConn) processData(f *DataFrame) error { } if len(data) > 0 { + st.bodyBytes += int64(len(data)) wrote, err := st.body.Write(data) if err != nil { + // The handler has closed the request body. + // Return the connection-level flow control for the discarded data, + // but not the stream-level flow control. sc.sendWindowUpdate(nil, int(f.Length)-wrote) - return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed)) + return nil } if wrote != len(data) { panic("internal error: bad Writer") } - st.bodyBytes += int64(len(data)) } // Return any padded flow control now, since we won't diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 05ba23d3d9..ac90a2631c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -560,10 +560,11 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { + roundTripErr := err if req, err = shouldRetryRequest(req, err); err == nil { // After the first retry, do exponential backoff with 10% jitter. if retry == 0 { - t.vlogf("RoundTrip retrying after failure: %v", err) + t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue } backoff := float64(uint(1) << (uint(retry) - 1)) @@ -572,7 +573,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res timer := backoffNewTimer(d) select { case <-timer.C: - t.vlogf("RoundTrip retrying after failure: %v", err) + t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): timer.Stop() @@ -1265,6 +1266,27 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return res, nil } + cancelRequest := func(cs *clientStream, err error) error { + cs.cc.mu.Lock() + defer cs.cc.mu.Unlock() + cs.abortStreamLocked(err) + if cs.ID != 0 { + // This request may have failed because of a problem with the connection, + // or for some unrelated reason. (For example, the user might have canceled + // the request without waiting for a response.) 
Mark the connection as + // not reusable, since trying to reuse a dead connection is worse than + // unnecessarily creating a new one. + // + // If cs.ID is 0, then the request was never allocated a stream ID and + // whatever went wrong was unrelated to the connection. We might have + // timed out waiting for a stream slot when StrictMaxConcurrentStreams + // is set, for example, in which case retrying on a different connection + // will not help. + cs.cc.doNotReuse = true + } + return err + } + for { select { case <-cs.respHeaderRecv: @@ -1279,15 +1301,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return handleResponseHeaders() default: waitDone() - return nil, cs.abortErr + return nil, cancelRequest(cs, cs.abortErr) } case <-ctx.Done(): - err := ctx.Err() - cs.abortStream(err) - return nil, err + return nil, cancelRequest(cs, ctx.Err()) case <-cs.reqCancel: - cs.abortStream(errRequestCanceled) - return nil, errRequestCanceled + return nil, cancelRequest(cs, errRequestCanceled) } } } @@ -2555,6 +2574,9 @@ func (b transportResponseBody) Close() error { cs := b.cs cc := cs.cc + cs.bufPipe.BreakWithError(errClosedResponseBody) + cs.abortStream(errClosedResponseBody) + unread := cs.bufPipe.Len() if unread > 0 { cc.mu.Lock() @@ -2573,9 +2595,6 @@ func (b transportResponseBody) Close() error { cc.wmu.Unlock() } - cs.bufPipe.BreakWithError(errClosedResponseBody) - cs.abortStream(errClosedResponseBody) - select { case <-cs.donec: case <-cs.ctx.Done(): diff --git a/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/vendor/golang.org/x/sys/cpu/hwcap_linux.go index f3baa37932..1d9d91f3ed 100644 --- a/vendor/golang.org/x/sys/cpu/hwcap_linux.go +++ b/vendor/golang.org/x/sys/cpu/hwcap_linux.go @@ -24,6 +24,21 @@ var hwCap uint var hwCap2 uint func readHWCAP() error { + // For Go 1.21+, get auxv from the Go runtime. + if a := getAuxv(); len(a) > 0 { + for len(a) >= 2 { + tag, val := a[0], uint(a[1]) + a = a[2:] + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil + } + buf, err := ioutil.ReadFile(procAuxv) if err != nil { // e.g. on android /proc/self/auxv is not accessible, so silently diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/vendor/golang.org/x/sys/cpu/runtime_auxv.go new file mode 100644 index 0000000000..5f92ac9a2e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init) +// on platforms that use auxv. +var getAuxvFn func() []uintptr + +func getAuxv() []uintptr { + if getAuxvFn == nil { + return nil + } + return getAuxvFn() +} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go new file mode 100644 index 0000000000..b975ea2a04 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.21 +// +build go1.21 + +package cpu + +import ( + _ "unsafe" // for linkname +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +func init() { + getAuxvFn = runtime_getAuxv +} diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go new file mode 100644 index 0000000000..7def9580e6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -0,0 +1,70 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || solaris +// +build aix solaris + +package unix + +import ( + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req int, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +// IoctlSetPointerInt performs an ioctl operation which sets an +// integer value on fd, using the specified request number. The ioctl +// argument is called with a pointer to the integer value, rather than +// passing the integer value directly. +func IoctlSetPointerInt(fd int, req int, value int) error { + v := int32(value) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) +} + +// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. +// +// To change fd's window size, the req argument should be TIOCSWINSZ. +func IoctlSetWinsize(fd int, req int, value *Winsize) error { + // TODO: if we get the chance, remove the req parameter and + // hardcode TIOCSWINSZ. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlSetTermios performs an ioctl on fd with a *Termios. +// +// The req value will usually be TCSETA or TIOCSETA. +func IoctlSetTermios(fd int, req int, value *Termios) error { + // TODO: if we get the chance, remove the req parameter. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. +func IoctlGetInt(fd int, req int) (int, error) { + var value int + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return value, err +} + +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { + var value Winsize + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} + +func IoctlGetTermios(fd int, req int) (*Termios, error) { + var value Termios + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go similarity index 76% rename from vendor/golang.org/x/sys/unix/ioctl.go rename to vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 1c51b0ec2b..649913d1ea 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -2,13 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build aix || darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd hurd linux netbsd openbsd solaris +//go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd +// +build darwin dragonfly freebsd hurd linux netbsd openbsd package unix import ( - "runtime" "unsafe" ) @@ -27,7 +26,7 @@ func IoctlSetInt(fd int, req uint, value int) error { // passing the integer value directly. func IoctlSetPointerInt(fd int, req uint, value int) error { v := int32(value) - return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) } // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. @@ -36,9 +35,7 @@ func IoctlSetPointerInt(fd int, req uint, value int) error { func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlSetTermios performs an ioctl on fd with a *Termios. @@ -46,9 +43,7 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // The req value will usually be TCSETA or TIOCSETA. func IoctlSetTermios(fd int, req uint, value *Termios) error { // TODO: if we get the chance, remove the req parameter. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlGetInt performs an ioctl operation which gets an integer value @@ -58,18 +53,18 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // for those, IoctlRetInt should be used instead of this function. func IoctlGetInt(fd int, req uint) (int, error) { var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } func IoctlGetTermios(fd int, req uint) (*Termios, error) { var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go index 5384e7d91d..cdc21bf76d 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -17,25 +17,23 @@ import ( // IoctlSetInt performs an ioctl operation which sets an integer value // on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { +func IoctlSetInt(fd int, req int, value int) error { return ioctl(fd, req, uintptr(value)) } // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // // To change fd's window size, the req argument should be TIOCSWINSZ. -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { +func IoctlSetWinsize(fd int, req int, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlSetTermios performs an ioctl on fd with a *Termios. 
// // The req value is expected to be TCSETS, TCSETSW, or TCSETSF -func IoctlSetTermios(fd int, req uint, value *Termios) error { +func IoctlSetTermios(fd int, req int, value *Termios) error { if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) { return ENOSYS } @@ -49,22 +47,22 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // // A few ioctl requests use the return value as an output parameter; // for those, IoctlRetInt should be used instead of this function. -func IoctlGetInt(fd int, req uint) (int, error) { +func IoctlGetInt(fd int, req int) (int, error) { var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } // IoctlGetTermios performs an ioctl on fd with a *Termios. // // The req value is expected to be TCGETS -func IoctlGetTermios(fd int, req uint) (*Termios, error) { +func IoctlGetTermios(fd int, req int) (*Termios, error) { var value Termios if req != TCGETS { return &value, ENOSYS diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 7456d9ddde..be0423e685 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -66,6 +66,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -203,6 +204,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -517,10 +519,11 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || $2 ~ /^RAW_PAYLOAD_/ || + $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || $2 ~ /^ICMPV?6?_(FILTER|SEC)/ || diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 463c3eff7f..39dba6ca6a 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -7,6 +7,12 @@ package unix +import "unsafe" + func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ptrace1(request, pid, addr, data) } + +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) error { + return ptrace1Ptr(request, pid, addr, data) +} diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index ed0509a011..9ea66330a9 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -7,6 +7,12 @@ package unix +import "unsafe" + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { return ENOTSUP } + +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + return ENOTSUP +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 2db1b51e99..c406ae00f4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ 
b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -292,9 +292,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -410,7 +408,8 @@ func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return -1 } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req int, arg uintptr) (err error) +//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = ioctl // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index e92a0be163..f2871fa953 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -8,7 +8,6 @@ package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = getrlimit64 -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) = setrlimit64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek64 //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 16eed17098..75718ec0f1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -8,7 +8,6 @@ package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) = mmap64 diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index eda42671f1..7705c3270b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -245,8 +245,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 192b071b3d..206921504c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -14,7 +14,6 @@ package unix import ( "fmt" - "runtime" "syscall" "unsafe" ) @@ -376,11 +375,10 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) { func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL func IoctlCtlInfo(fd int, ctlInfo *CtlInfo) error { - err := ioctl(fd, CTLIOCGINFO, uintptr(unsafe.Pointer(ctlInfo))) - runtime.KeepAlive(ctlInfo) - return err + return ioctlPtr(fd, CTLIOCGINFO, unsafe.Pointer(ctlInfo)) } // IfreqMTU is struct ifreq used to get or set a network device's MTU. 
@@ -394,16 +392,14 @@ type IfreqMTU struct { func IoctlGetIfreqMTU(fd int, ifname string) (*IfreqMTU, error) { var ifreq IfreqMTU copy(ifreq.Name[:], ifname) - err := ioctl(fd, SIOCGIFMTU, uintptr(unsafe.Pointer(&ifreq))) + err := ioctlPtr(fd, SIOCGIFMTU, unsafe.Pointer(&ifreq)) return &ifreq, err } // IoctlSetIfreqMTU performs the SIOCSIFMTU ioctl operation on fd to set the MTU // of the network device specified by ifreq.Name. func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { - err := ioctl(fd, SIOCSIFMTU, uintptr(unsafe.Pointer(ifreq))) - runtime.KeepAlive(ifreq) - return err + return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL @@ -617,6 +613,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) +//sys Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) //sys Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) @@ -626,7 +623,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Setprivexec(flag int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -680,7 +676,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { // Kqueue_from_portset_np // Kqueue_portset // Getattrlist -// Setattrlist // Getdirentriesattr // Searchfs // Delete diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index b37310ce9b..9fa879806b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace +//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index d51ec99630..f17b8c526a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT //sys Lstat(path string, stat *Stat_t) (err error) //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace +//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err 
error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index a41111a794..d4ce988e72 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -172,6 +172,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -325,7 +326,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index d50b9dc250..afb10106f6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -161,7 +161,8 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -253,6 +254,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } //sys ptrace(request int, pid int, addr uintptr, data int) (err error) +//sys ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) = SYS_PTRACE func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) @@ -267,19 +269,36 @@ func PtraceDetach(pid int) (err error) { } func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) { - return ptrace(PT_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) + return ptracePtr(PT_GETFPREGS, pid, unsafe.Pointer(fpregsout), 0) } func PtraceGetRegs(pid int, regsout *Reg) (err error) { - return ptrace(PT_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) + return ptracePtr(PT_GETREGS, pid, unsafe.Pointer(regsout), 0) +} + +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + } + if countin > 0 { + _ = out[:countin] // check bounds + ioDesc.Addr = &out[0] + } else if out != nil { + ioDesc.Addr = (*byte)(unsafe.Pointer(&_zero)) + } + ioDesc.SetLen(countin) + + err = ptracePtr(PT_IO, pid, unsafe.Pointer(&ioDesc), 0) + return int(ioDesc.Len), err } func PtraceLwpEvents(pid int, enable int) (err error) { return ptrace(PT_LWP_EVENTS, pid, 0, enable) } -func PtraceLwpInfo(pid int, info uintptr) (err error) { - return ptrace(PT_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) +func PtraceLwpInfo(pid int, info *PtraceLwpInfoStruct) (err error) { + return ptracePtr(PT_LWPINFO, pid, unsafe.Pointer(info), int(unsafe.Sizeof(*info))) } func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { @@ -299,13 +318,25 @@ func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceSetRegs(pid 
int, regs *Reg) (err error) { - return ptrace(PT_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) + return ptracePtr(PT_SETREGS, pid, unsafe.Pointer(regs), 0) } func PtraceSingleStep(pid int) (err error) { return ptrace(PT_STEP, pid, 1, 0) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ @@ -402,7 +433,6 @@ func PtraceSingleStep(pid int) (err error) { //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 6a91d471d0..b8da510043 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint32(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) @@ -57,16 +61,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) -} - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
- Len: uint32(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err + return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 48110a0abb..47155c4839 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -57,16 +61,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) -} - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. - Len: uint64(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err + return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 52f1d4b75a..08932093fa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint32(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) @@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
- Len: uint32(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index 5537ee4f2e..d151a0d0e5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. - Len: uint64(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index 164abd5d21..d5cd64b378 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
- Len: uint64(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index 4ffb64808d..381fd4673b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -20,3 +20,11 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { } return } + +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(uintptr(arg))) + if r0 == -1 && er != nil { + err = er + } + return +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 5443dddd48..fbaeb5fff1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1015,8 +1015,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -1365,6 +1364,10 @@ func SetsockoptTCPRepairOpt(fd, level, opt int, o []TCPRepairOpt) (err error) { return setsockopt(fd, level, opt, unsafe.Pointer(&o[0]), uintptr(SizeofTCPRepairOpt*len(o))) } +func SetsockoptTCPMD5Sig(fd, level, opt int, s *TCPMD5Sig) error { + return setsockopt(fd, level, opt, unsafe.Pointer(s), unsafe.Sizeof(*s)) +} + // Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html) // KeyctlInt calls keyctl commands in which each argument is an int. @@ -1579,6 +1582,7 @@ func BindToDevice(fd int, device string) (err error) { } //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +//sys ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) = SYS_PTRACE func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) { // The peek requests are machine-size oriented, so we wrap it @@ -1596,7 +1600,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro // boundary. n := 0 if addr%SizeofPtr != 0 { - err = ptrace(req, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(req, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0])) if err != nil { return 0, err } @@ -1608,7 +1612,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro for len(out) > 0 { // We use an internal buffer to guarantee alignment. // It's not documented if this is necessary, but we're paranoid. - err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(req, pid, addr+uintptr(n), unsafe.Pointer(&buf[0])) if err != nil { return n, err } @@ -1640,7 +1644,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c n := 0 if addr%SizeofPtr != 0 { var buf [SizeofPtr]byte - err = ptrace(peekReq, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(peekReq, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0])) if err != nil { return 0, err } @@ -1667,7 +1671,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c // Trailing edge. 
if len(data) > 0 { var buf [SizeofPtr]byte - err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(peekReq, pid, addr+uintptr(n), unsafe.Pointer(&buf[0])) if err != nil { return n, err } @@ -1696,11 +1700,11 @@ func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } func PtraceSetOptions(pid int, options int) (err error) { @@ -1709,7 +1713,7 @@ func PtraceSetOptions(pid int, options int) (err error) { func PtraceGetEventMsg(pid int) (msg uint, err error) { var data _C_long - err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data))) + err = ptracePtr(PTRACE_GETEVENTMSG, pid, 0, unsafe.Pointer(&data)) msg = uint(data) return } @@ -1869,7 +1873,6 @@ func Getpgrp() (pid int) { //sys OpenTree(dfd int, fileName string, flags uint) (r int, err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys read(fd int, p []byte) (n int, err error) @@ -1883,6 +1886,15 @@ func Getpgrp() (pid int) { //sysnb Settimeofday(tv *Timeval) (err error) //sys Setns(fd int, nstype int) (err error) +//go:linkname syscall_prlimit syscall.prlimit +func syscall_prlimit(pid, resource int, newlimit, old *syscall.Rlimit) error + +func Prlimit(pid, resource int, newlimit, old *Rlimit) error { + // Just call the syscall version, because as of Go 1.21 + // it will affect starting a new process. + return syscall_prlimit(pid, resource, (*syscall.Rlimit)(newlimit), (*syscall.Rlimit)(old)) +} + // PrctlRetInt performs a prctl operation specified by option and further // optional arguments arg2 through arg5 depending on option. It returns a // non-negative integer that is returned by the prctl syscall. 
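Not part of the vendored diff: a minimal usage sketch of the reworked Prlimit wrapper above, assuming Linux and the golang.org/x/sys/unix import path; the resource and values are illustrative.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// pid 0 targets the calling process; a nil newlimit only queries.
	var cur unix.Rlimit
	if err := unix.Prlimit(0, unix.RLIMIT_NOFILE, nil, &cur); err != nil {
		panic(err)
	}
	fmt.Printf("RLIMIT_NOFILE: cur=%d max=%d\n", cur.Cur, cur.Max)

	// Raise the soft limit to the hard limit. Per the comment in the wrapper,
	// the call now goes through syscall.prlimit, so on Go 1.21+ the updated
	// limit also carries over when this process starts children.
	raised := unix.Rlimit{Cur: cur.Max, Max: cur.Max}
	if err := unix.Prlimit(0, unix.RLIMIT_NOFILE, &raised, nil); err != nil {
		panic(err)
	}
}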
@@ -2154,6 +2166,14 @@ func isGroupMember(gid int) bool { return false } +func isCapDacOverrideSet() bool { + hdr := CapUserHeader{Version: LINUX_CAPABILITY_VERSION_3} + data := [2]CapUserData{} + err := Capget(&hdr, &data[0]) + + return err == nil && data[0].Effective&(1< 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2115,20 +2148,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) @@ -2502,6 +2521,14 @@ func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { return } +func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 95fe4c0eb9..4baaed0bc1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -705,6 +705,11 @@ TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) @@ -759,12 +764,6 @@ TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 26a0fdc505..51d6f3fb25 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -725,6 +725,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" @@ -1984,6 +1992,31 @@ var libc_select_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(attrBuf) > 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2115,20 +2148,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) @@ -2502,6 +2521,14 @@ func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { return } +func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index efa5b4c987..c3b82c0379 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -705,6 +705,11 @@ TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) @@ -759,12 +764,6 @@ TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, 
$libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 54749f9c5e..0eabac7ade 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -436,6 +436,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1400,16 +1410,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 77479d4581..ee313eb007 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1625,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 2e966d4d7a..4c986e448e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1625,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index d65a7c0fa6..555216944a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1625,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), 
uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 6f0b97c6db..67a226fbf5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1625,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index e1c23b5272..f0b9ddaaa2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ 
-1625,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 36ea3a55b7..da63d9d782 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -379,6 +379,16 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(arg) @@ -1336,16 +1346,6 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index c81b0ad477..07b549cc25 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -411,16 +411,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 2206bce7f4..5f481bf83f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -334,16 +334,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - 
func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index edf6b39f16..824cd52c7f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -578,16 +578,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func armSyncFileRange(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_ARM_SYNC_FILE_RANGE, uintptr(fd), uintptr(flags), uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 190609f214..e77aecfe98 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -289,16 +289,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 5f984cbb1c..961a3afb7b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -644,16 +644,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 46fc380a40..ed05005e91 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -278,16 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index cbd0d4dadb..d365b718f3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -278,16 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 0c13d15f07..c3f1b8bbde 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -644,16 +644,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index e01432aed5..a6574cf98b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -624,16 +624,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off>>32), uintptr(off), uintptr(n>>32), uintptr(n)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 13c7ee7baf..f40990264f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -349,16 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 02d0c0fd61..9dfcc29974 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -349,16 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 9fee3b1d23..0b29239583 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -269,16 +269,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 647bbfecd6..6cde32237d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -319,16 +319,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index ada057f891..5253d65bf1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -329,16 +329,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 79f7389963..cdb2af5ae0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1597,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index fb161f3a26..9d25f76b0b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1597,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 4c8ac993a8..d3f8035169 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1597,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 76dd8ec4fd..887188a529 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -1597,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index caeb807bd4..6699a783e1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" @@ -1886,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 087444250c..04f0de34b2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go 
b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index a05e5f4fff..1e775fe057 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" @@ -1886,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 5782cd1084..27b6f4df74 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index b2da8e50cc..7f6427899a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" @@ -1886,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s 
b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index cf310420c9..b797045fd2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 048b2655e6..756ef7b173 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" @@ -1886,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index 484bb42e0a..a871266221 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 6f33e37e72..7bc2e24eb9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr 
//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" @@ -1886,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index 55af27263a..05d4bffd79 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 330cf7f7ac..739be6217a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" @@ -1886,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 4028255b0d..74a25f8d64 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -687,12 +687,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - CALL libc_setrlimit(SB) - RET -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, 
$libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_setrtable(SB) RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 5f24de0d9d..7d95a19780 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" @@ -1886,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index e1fbd4dfa8..990be24574 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 78d4a4240e..609d1c598a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -110,7 +110,6 @@ import ( //go:cgo_import_dynamic libc_setpriority setpriority "libc.so" //go:cgo_import_dynamic libc_setregid setregid "libc.so" //go:cgo_import_dynamic libc_setreuid setreuid "libc.so" -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" //go:cgo_import_dynamic libc_setsid setsid "libc.so" //go:cgo_import_dynamic libc_setuid setuid "libc.so" //go:cgo_import_dynamic libc_shutdown shutdown "libsocket.so" @@ -250,7 +249,6 @@ import ( //go:linkname procSetpriority libc_setpriority //go:linkname procSetregid libc_setregid //go:linkname procSetreuid libc_setreuid -//go:linkname procSetrlimit libc_setrlimit //go:linkname procSetsid libc_setsid //go:linkname procSetuid libc_setuid //go:linkname procshutdown libc_shutdown @@ -391,7 +389,6 @@ var ( procSetpriority, procSetregid, procSetreuid, - procSetrlimit, procSetsid, procSetuid, procshutdown, @@ -646,7 +643,18 @@ func __minor(version int, dev uint64) (val uint) { // THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { +func ioctlRet(fd int, req int, arg uintptr) (ret int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) + ret = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { @@ -1639,16 +1647,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index f2079457c6..c31681743c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -257,7 +257,17 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { + _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index e2a64f0991..690cefc3d0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -151,6 +151,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -610,6 +620,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 34aa775219..5bffc10eac 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -151,6 +151,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -610,6 +620,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 
d9c78cdcbc..29dc483378 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -362,7 +362,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 26991b1655..0a89b28906 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -367,7 +367,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index f8324e7e7f..c8666bb152 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -350,7 +350,7 @@ type FpExtendedPrecision struct { type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 4220411f34..88fb48a887 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -347,7 +347,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index 0660fd45c7..698dc975e9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -348,7 +348,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 7d9fc8f1c9..ca84727cfe 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -456,36 +456,60 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + 
Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 } type CanFilter struct { @@ -528,7 +552,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0x68 + SizeofTCPInfo = 0xf0 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -1043,6 +1067,7 @@ const ( PerfBitCommExec = CBitFieldMaskBit24 PerfBitUseClockID = CBitFieldMaskBit25 PerfBitContextSwitch = CBitFieldMaskBit26 + PerfBitWriteBackward = CBitFieldMaskBit27 ) const ( @@ -1239,7 +1264,7 @@ type TCPMD5Sig struct { Flags uint8 Prefixlen uint8 Keylen uint16 - _ uint32 + Ifindex int32 Key [80]uint8 } @@ -1939,7 +1964,11 @@ const ( NFT_MSG_GETOBJ = 0x13 NFT_MSG_DELOBJ = 0x14 NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 + NFT_MSG_NEWFLOWTABLE = 0x16 + NFT_MSG_GETFLOWTABLE = 0x17 + NFT_MSG_DELFLOWTABLE = 0x18 + NFT_MSG_GETRULE_RESET = 0x19 + NFT_MSG_MAX = 0x1a NFTA_LIST_UNSPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 @@ -2443,9 +2472,11 @@ const ( SOF_TIMESTAMPING_OPT_STATS = 0x1000 SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + SOF_TIMESTAMPING_BIND_PHC = 0x8000 + SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x8000 - SOF_TIMESTAMPING_MASK = 0xffff + SOF_TIMESTAMPING_LAST = 0x10000 + SOF_TIMESTAMPING_MASK = 0x1ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3265,7 +3296,7 @@ const ( DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES = 0xae DEVLINK_ATTR_NESTED_DEVLINK = 0xaf DEVLINK_ATTR_SELFTESTS = 0xb0 - DEVLINK_ATTR_MAX = 0xb0 + DEVLINK_ATTR_MAX = 0xb3 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3281,7 +3312,8 @@ const ( DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 0x1 DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x3 + DEVLINK_PORT_FN_ATTR_CAPS = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 ) type FsverityDigest struct { @@ -3572,7 +3604,8 @@ const ( ETHTOOL_MSG_MODULE_SET = 0x23 ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 - ETHTOOL_MSG_USER_MAX = 0x25 + ETHTOOL_MSG_RSS_GET = 0x26 + ETHTOOL_MSG_USER_MAX = 0x26 ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3611,7 +3644,8 @@ const ( ETHTOOL_MSG_MODULE_GET_REPLY = 0x23 ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 - ETHTOOL_MSG_KERNEL_MAX = 0x25 + ETHTOOL_MSG_RSS_GET_REPLY = 0x26 + ETHTOOL_MSG_KERNEL_MAX = 0x26 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3679,7 +3713,8 @@ const ( ETHTOOL_A_LINKSTATE_SQI_MAX = 0x4 ETHTOOL_A_LINKSTATE_EXT_STATE = 0x5 ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 0x6 - ETHTOOL_A_LINKSTATE_MAX = 0x6 + ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT = 0x7 + ETHTOOL_A_LINKSTATE_MAX = 0x7 ETHTOOL_A_DEBUG_UNSPEC = 0x0 ETHTOOL_A_DEBUG_HEADER = 0x1 ETHTOOL_A_DEBUG_MSGMASK = 0x2 @@ -4409,7 +4444,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x140 + NL80211_ATTR_MAX = 0x141 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4552,6 +4587,7 @@ const ( NL80211_ATTR_SUPPORT_MESH_AUTH = 0x73 NL80211_ATTR_SURVEY_INFO = 0x54 
NL80211_ATTR_SURVEY_RADIO_STATS = 0xda + NL80211_ATTR_TD_BITMAP = 0x141 NL80211_ATTR_TDLS_ACTION = 0x88 NL80211_ATTR_TDLS_DIALOG_TOKEN = 0x89 NL80211_ATTR_TDLS_EXTERNAL_SETUP = 0x8c @@ -5752,3 +5788,25 @@ const ( AUDIT_NLGRP_NONE = 0x0 AUDIT_NLGRP_READLOG = 0x1 ) + +const ( + TUN_F_CSUM = 0x1 + TUN_F_TSO4 = 0x2 + TUN_F_TSO6 = 0x4 + TUN_F_TSO_ECN = 0x8 + TUN_F_UFO = 0x10 +) + +const ( + VIRTIO_NET_HDR_F_NEEDS_CSUM = 0x1 + VIRTIO_NET_HDR_F_DATA_VALID = 0x2 + VIRTIO_NET_HDR_F_RSC_INFO = 0x4 +) + +const ( + VIRTIO_NET_HDR_GSO_NONE = 0x0 + VIRTIO_NET_HDR_GSO_TCPV4 = 0x1 + VIRTIO_NET_HDR_GSO_UDP = 0x3 + VIRTIO_NET_HDR_GSO_TCPV6 = 0x4 + VIRTIO_NET_HDR_GSO_ECN = 0x80 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 89c516a29a..4ecc1495cd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -414,7 +414,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 62b4fb2699..34fddff964 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -427,7 +427,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index e86b35893e..3b14a6031f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -405,7 +405,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]uint8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 6c6be4c911..0517651ab3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -406,7 +406,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 4982ea355a..3b0c518134 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -407,7 +407,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 173141a670..fccdf4dd0f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -410,7 +410,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 93ae4c5167..500de8fc07 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -409,7 +409,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 4e4e510ca5..d0434cd2c6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ 
b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -409,7 +409,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 3f5ba013d9..84206ba534 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -410,7 +410,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 71dfe7cdb4..ab078cf1f5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -417,7 +417,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]uint8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 3a2b7f0a66..42eb2c4cef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -416,7 +416,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index a52d627563..31304a4e8b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -416,7 +416,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index dfc007d8a6..c311f9612d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -434,7 +434,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index b53cb9103d..bba3cefac1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -429,7 +429,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index fe0aa35472..ad8a013804 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -411,7 +411,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/text/unicode/norm/forminfo.go b/vendor/golang.org/x/text/unicode/norm/forminfo.go index d69ccb4f97..487335d14d 100644 --- a/vendor/golang.org/x/text/unicode/norm/forminfo.go +++ b/vendor/golang.org/x/text/unicode/norm/forminfo.go @@ -13,7 +13,7 @@ import "encoding/binary" // a rune to a uint16. The values take two forms. For v >= 0x8000: // bits // 15: 1 (inverse of NFD_QC bit of qcInfo) -// 13..7: qcInfo (see below). isYesD is always true (no decompostion). +// 13..7: qcInfo (see below). isYesD is always true (no decomposition). // 6..0: ccc (compressed CCC value). 
// For v < 0x8000, the respective rune has a decomposition and v is an index // into a byte array of UTF-8 decomposition sequences and additional info and diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index ae13ddac14..02f5dc5318 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -19,7 +19,7 @@ // Package attributes defines a generic key/value store used in various gRPC // components. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index 542594f5cc..29475e31c9 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -48,7 +48,7 @@ type BackoffConfig struct { // here for more details: // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index f4f9408f38..392b21fb2d 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -110,6 +110,11 @@ type SubConn interface { UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) } // NewSubConnOptions contains options to create new SubConn. @@ -371,3 +376,21 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as interface{} to avoid a + // dependency cycle. Should also return a close function that will be + // called when all references to the Producer have been given up. + Build(grpcClientConnInterface interface{}) (p Producer, close func()) +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer interface { +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index e8dfc828aa..3929c26d31 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -157,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error { // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. 
The picker is -// - errPicker if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { b.picker = NewErrPicker(b.mergeErrors()) diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go index a87b6809af..c334135810 100644 --- a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go +++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -34,10 +34,10 @@ type ConnectivityStateEvaluator struct { // RecordTransition records state change happening in subConn and based on that // it evaluates what aggregated state should be. // -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else if at least one SubConn is Idle, the aggregated state is Idle; -// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. // // Shutdown is not considered. func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { @@ -55,7 +55,11 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne cse.numIdle += updateVal } } + return cse.CurrentState() +} +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { // Evaluate. if cse.numReady > 0 { return connectivity.Ready diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index b1c23eaae0..0359956d36 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -19,17 +19,20 @@ package grpc import ( + "context" "fmt" "strings" "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) // ccBalancerWrapper sits between the ClientConn and the Balancer. @@ -305,7 +308,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac} + acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() @@ -359,8 +362,9 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. 
type acBalancerWrapper struct { - mu sync.Mutex - ac *addrConn + mu sync.Mutex + ac *addrConn + producers map[balancer.ProducerBuilder]*refCountedProducer } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { @@ -414,3 +418,64 @@ func (acbw *acBalancerWrapper) getAddrConn() *addrConn { defer acbw.mu.Unlock() return acbw.ac } + +var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, returns errSubConnNotReady. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, errSubConnNotReady + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. + unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ed75290cdf..64a232f281 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -261,6 +261,7 @@ type GrpcLogEntry struct { // according to the type of the log entry. // // Types that are assignable to Payload: + // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader // *GrpcLogEntry_Message @@ -694,12 +695,12 @@ func (x *Message) GetData() []byte { // Header keys added by gRPC are omitted. 
To be more specific, // implementations will not log the following entries, and this is // not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials // // Implementations must always log grpc-trace-bin if it is present. // Practically speaking it will only be visible on server side because diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go index a220c47c59..32b7fa5794 100644 --- a/vendor/google.golang.org/grpc/channelz/channelz.go +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -23,7 +23,7 @@ // https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by // the `internal/channelz` package. // -// Experimental +// # Experimental // // Notice: All APIs in this package are experimental and may be removed in a // later release. diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 779b03bca1..422639c79d 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -503,7 +503,7 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -522,7 +522,7 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec // GetState returns the connectivity.State of ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. @@ -534,7 +534,7 @@ func (cc *ClientConn) GetState() connectivity.State { // the channel is idle. Does not wait for the connection attempts to begin // before returning. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. @@ -761,7 +761,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { // Target returns the target string of the ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -831,9 +831,9 @@ func equalAddresses(a, b []resolver.Address) bool { // // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. 
func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() @@ -998,7 +998,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { // However, if a previously unavailable network becomes available, this may be // used to trigger an immediate reconnect. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1228,38 +1228,33 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T // address was not successfully connected, or updates ac appropriately with the // new transport. func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { - // TODO: Delete prefaceReceived and move the logic to wait for it into the - // transport. - prefaceReceived := grpcsync.NewEvent() - connClosed := grpcsync.NewEvent() - addr.ServerName = ac.cc.getServerName(addr) hctx, hcancel := context.WithCancel(ac.ctx) - hcStarted := false // protected by ac.mu - onClose := func() { + onClose := grpcsync.OnceFunc(func() { ac.mu.Lock() defer ac.mu.Unlock() - defer connClosed.Fire() - defer hcancel() - if !hcStarted || hctx.Err() != nil { - // We didn't start the health check or set the state to READY, so - // no need to do anything else here. - // - // OR, we have already cancelled the health check context, meaning - // we have already called onClose once for this transport. In this - // case it would be dangerous to clear the transport and update the - // state, since there may be a new transport in this addrConn. + if ac.state == connectivity.Shutdown { + // Already shut down. tearDown() already cleared the transport and + // canceled hctx via ac.ctx, and we expected this connection to be + // closed, so do nothing here. + return + } + hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. return } ac.transport = nil - // Refresh the name resolver + // Refresh the name resolver on any connection loss. ac.cc.resolveNow(resolver.ResolveNowOptions{}) - if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - } - + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. + ac.updateConnectivityState(connectivity.Idle, nil) + }) onGoAway := func(r transport.GoAwayReason) { ac.mu.Lock() ac.adjustParams(r) @@ -1271,7 +1266,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne defer cancel() copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. hcancel() @@ -1279,60 +1274,34 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne return err } - select { - case <-connectCtx.Done(): - // We didn't get the preface in time. + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Shutdown { + // This can happen if the subConn was removed while in `Connecting` + // state. 
tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. - newTr.Close(transport.ErrConnClosing) - if connectCtx.Err() == context.DeadlineExceeded { - err := errors.New("failed to receive server preface within timeout") - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err) - return err - } + go newTr.Close(transport.ErrConnClosing) return nil - case <-prefaceReceived.Done(): - // We got the preface - huzzah! things are good. - ac.mu.Lock() - defer ac.mu.Unlock() - if connClosed.HasFired() { - // onClose called first; go idle but do nothing else. - if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - return nil - } - if ac.state == connectivity.Shutdown { - // This can happen if the subConn was removed while in `Connecting` - // state. tearDown() would have set the state to `Shutdown`, but - // would not have closed the transport since ac.transport would not - // been set at that point. - // - // We run this in a goroutine because newTr.Close() calls onClose() - // inline, which requires locking ac.mu. - // - // The error we pass to Close() is immaterial since there are no open - // streams at this point, so no trailers with error details will be sent - // out. We just need to pass a non-nil error. - go newTr.Close(transport.ErrConnClosing) - return nil - } - ac.curAddr = addr - ac.transport = newTr - hcStarted = true - ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + } + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) return nil - case <-connClosed.Done(): - // The transport has already closed. If we received the preface, too, - // this is not an error. - select { - case <-prefaceReceived.Done(): - return nil - default: - return errors.New("connection closed before server preface received") - } } + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. 
+ return nil } // startHealthCheck starts the health checking stream (RPC) to watch the health @@ -1583,7 +1552,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.Scheme) + rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget return rb, nil @@ -1604,9 +1573,9 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { return nil, err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.Scheme) + rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme) + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget return rb, nil diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 96ff1877e7..5feac3aa0e 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -36,16 +36,16 @@ import ( // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. If a status code is returned, it will be used as the status - // for the RPC. uri is the URI of the entry point for the request. - // When supported by the underlying implementation, ctx can be used for - // timeout and cancellation. Additionally, RequestInfo data will be - // available via ctx to this call. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. 
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 784822d056..ce2bbc10a1 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error // TLSChannelzSecurityValue defines the struct that TLS protocol should return // from GetSecurityValue(), containing security info like cipher and certificate used. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 18e530fc90..711763d54f 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -19,7 +19,7 @@ // Package encoding defines the interface for the compressor and codec, and // functions to register and retrieve compressors and codecs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. @@ -28,6 +28,8 @@ package encoding import ( "io" "strings" + + "google.golang.org/grpc/internal/grpcutil" ) // Identity specifies the optional encoding for uncompressed streams. @@ -73,6 +75,7 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) } // GetCompressor returns Compressor for the given compressor name. diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 7c1f664090..b5560b47ec 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -242,7 +242,7 @@ func (g *loggerT) V(l int) bool { // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index c5579e6506..f9e80e27ab 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -30,15 +30,15 @@ import ( // to build a new logger and assign it to binarylog.Logger. // // Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. -// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. -// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. 
+// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. // // If two configs exist for one certain method or service, the one specified // later overrides the previous config. diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index ad0ce4dabf..7b2f350e2e 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the channel itself from the channelz database. // The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. func (c *channel) deleteSelfIfReady() { if !c.deleteSelfFromTree() { return @@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the subchannel itself from the channelz database. // The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. func (sc *subChannel) deleteSelfIfReady() { if !sc.deleteSelfFromTree() { return diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 6f02725431..7edd196bd3 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -25,11 +25,15 @@ import ( ) const ( - prefix = "GRPC_GO_" - txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" + prefix = "GRPC_GO_" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" + advertiseCompressorsStr = prefix + "ADVERTISE_COMPRESSORS" ) var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") + // AdvertiseCompressors is set if registered compressor should be advertised + // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). 
+ AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index 30a3b4258f..b68e26a364 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -110,7 +110,7 @@ type LoggerV2 interface { // This is a copy of the DepthLoggerV2 defined in the external grpclog package. // It is defined here to avoid a circular dependency. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go new file mode 100644 index 0000000000..6635f7bca9 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "sync" +) + +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 0000000000..9f40909679 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. 
+func RegisteredCompressors() string { + if !envconfig.AdvertiseCompressors { + return "" + } + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index e9c4af6483..ec62b4775e 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -25,7 +25,6 @@ import ( // ParseMethod splits service and method from the input. It expects format // "/service/method". -// func ParseMethod(methodName string) (service, method string, _ error) { if !strings.HasPrefix(methodName, "/") { return "", "", errors.New("invalid method name: should start with /") diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index badbdbf597..51e733e495 100644 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { // ServiceConfig contains a list of loadBalancingConfigs, each with a name and // config. This method iterates through that list in order, and stops at the // first policy that is supported. -// - If the config for the first supported policy is invalid, the whole service -// config is invalid. -// - If the list doesn't contain any supported policy, the whole service config -// is invalid. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { var ir intermediateBalancerConfig err := json.Unmarshal(b, &ir) diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index e5c6513edd..b0ead4f54f 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool { } return proto.Equal(e.s.s, tse.s.s) } + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. +func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 090120925b..fb272235d8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -442,10 +442,10 @@ func (ht *serverHandlerTransport) Drain() { // mapRecvMsgError returns the non-nil err into the appropriate // error value as expected by callers of *grpc.parser.recvMsg. 
// In particular, in can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 5c2f35b24e..d518b07e16 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -38,8 +38,10 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" @@ -99,16 +101,13 @@ type http2Client struct { maxSendHeaderListSize *uint32 bdpEst *bdpEstimator - // onPrefaceReceipt is a callback that client transport calls upon - // receiving server preface to signal that a succefull HTTP2 - // connection was established. - onPrefaceReceipt func() maxConcurrentStreams uint32 streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 nextID uint32 + registeredCompressors string // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables @@ -194,7 +193,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -216,12 +215,35 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) } + // Any further errors will close the underlying connection defer func(conn net.Conn) { if err != nil { conn.Close() } }(conn) + + // The following defer and goroutine monitor the connectCtx for cancelation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. 
+ <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if connectCtx.Err() != nil { + // connectCtx expired before exiting the function. Hard close the connection. + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams // Validate keepalive parameters. if kp.Time == 0 { @@ -253,15 +275,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - rawConn := conn - // Pull the deadline from the connectCtx, which will be used for - // timeouts in the authentication protocol handshake. Can ignore the - // boolean as the deadline will return the zero value, which will make - // the conn not timeout on I/O operations. - deadline, _ := connectCtx.Deadline() - rawConn.SetDeadline(deadline) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) - rawConn.SetDeadline(time.Time{}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } @@ -299,6 +313,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ctxDone: ctx.Done(), // Cache Done chan. cancel: cancel, userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -315,16 +330,15 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts kp: kp, statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, - onPrefaceReceipt: onPrefaceReceipt, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), onGoAway: onGoAway, - onClose: onClose, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), + onClose: onClose, } // Add peer information to the http2client context. t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -363,21 +377,32 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } - // Start the reader goroutine for incoming message. Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. Then it - // dispatches the frame to the corresponding stream entity. - go t.reader() + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err == nil { + err = <-readerErrCh + } + if err != nil { + t.Close(err) + } + }() // Send connection preface to server. 
n, err := t.conn.Write(clientPreface) if err != nil { err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) - t.Close(err) return nil, err } if n != len(clientPreface) { err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - t.Close(err) return nil, err } var ss []http2.Setting @@ -397,14 +422,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts err = t.framer.fr.WriteSettings(ss...) if err != nil { err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) - t.Close(err) return nil, err } // Adjust the connection flow control window if needed. if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) - t.Close(err) return nil, err } } @@ -507,9 +530,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } + registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. + if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. @@ -589,7 +625,11 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } return nil, err } @@ -618,7 +658,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { - return nil, status.Errorf(codes.Internal, "transport: %v", err) + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. 
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) } callAuthData = make(map[string]string, len(data)) for k, v := range data { @@ -634,13 +681,13 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // NewStream errors result in transparent retry, as they mean nothing went onto // the wire. However, there are two notable exceptions: // -// 1. If the stream headers violate the max header list size allowed by the -// server. It's possible this could succeed on another transport, even if -// it's unlikely, but do not transparently retry. -// 2. If the credentials errored when requesting their headers. In this case, -// it's possible a retry can fix the problem, but indefinitely transparently -// retrying is not appropriate as it is likely the credentials, if they can -// eventually succeed, would need I/O to do so. +// 1. If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. type NewStreamError struct { Err error @@ -880,19 +927,15 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. -// -// This method blocks until the addrConn that initiated this transport is -// re-connected. This happens because t.onClose() begins reconnect logic at the -// addrConn level and blocks until the addrConn is successfully connected. func (t *http2Client) Close(err error) { t.mu.Lock() - // Make sure we only Close once. + // Make sure we only close once. if t.state == closing { t.mu.Unlock() return } - // Call t.onClose before setting the state to closing to prevent the client - // from attempting to create new streams ASAP. + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. t.onClose() t.state = closing streams := t.activeStreams @@ -1482,33 +1525,35 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } -// reader runs as a separate goroutine in charge of reading data from network -// connection. -// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - defer close(t.readerDone) - // Check the validity of server preface. +// readServerPreface reads and handles the initial settings frame from the +// server. 
+func (t *http2Client) readServerPreface() error { frame, err := t.framer.fr.ReadFrame() if err != nil { - err = connectionErrorf(true, err, "error reading server preface: %v", err) - t.Close(err) // this kicks off resetTransport, so must be last before return - return - } - t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) - if t.keepaliveEnabled { - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + return connectionErrorf(true, err, "error reading server preface: %v", err) } sf, ok := frame.(*http2.SettingsFrame) if !ok { - // this kicks off resetTransport, so must be last before return - t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) - return + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) } - t.onPrefaceReceipt() t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } // loop to keep reading incoming messages on this transport. for { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 6c3ba85159..2e615ee20c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -43,6 +43,10 @@ import ( "google.golang.org/grpc/tap" ) +// ErrNoHeaders is used as a signal that a trailers only response was received, +// and is not a real error. +var ErrNoHeaders = errors.New("stream has no headers") + const logLevel = 2 type bufferPool struct { @@ -366,9 +370,15 @@ func (s *Stream) Header() (metadata.MD, error) { return s.header.Copy(), nil } s.waitOnHeader() + if !s.headerValid { return nil, s.status.Err() } + + if s.noHeaders { + return nil, ErrNoHeaders + } + return s.header.Copy(), nil } @@ -573,8 +583,8 @@ type ConnectOptions struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onGoAway, onClose) } // Options provides additional hints and information for message diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 98d62e0675..fb4a88f59b 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -41,10 +41,11 @@ type MD map[string][]string // New creates an MD from a given key-value map. 
// // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -62,10 +63,11 @@ func New(m map[string]string) MD { // Pairs panics if len(kv) is odd. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -196,7 +198,7 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // ValueFromIncomingContext returns the metadata value corresponding to the metadata // key from the incoming metadata if it exists. Key must be lower-case. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 843633c910..a5d5516ee0 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/channelz" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) @@ -129,8 +130,12 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if err == balancer.ErrNoSubConnAvailable { continue } - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. + // First restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) + } return nil, nil, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 0a1e975ad9..cd45547854 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -25,7 +25,7 @@ import ( // PreparedMsg is responsible for creating a Marshalled and Compressed object. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 1f859f7648..c22f9a52db 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -53,6 +53,7 @@ type ServerReflectionRequest struct { // defined field and then handles them using corresponding methods. 
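The key rules restated for New and Pairs above are easiest to see in use: keys may contain ASCII digits, letters and -_., uppercase is folded to lowercase, and repeated keys passed to Pairs accumulate values. A short usage example (the key names are arbitrary):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.New(map[string]string{"X-Request-ID": "abc123"})
	fmt.Println(md.Get("x-request-id")) // [abc123]; the key was lowercased

	md = metadata.Pairs("trace-id", "1", "trace-id", "2") // repeated keys accumulate
	fmt.Println(md.Get("trace-id"))                       // [1 2]

	// Attach the metadata to an outgoing RPC context.
	ctx := metadata.NewOutgoingContext(context.Background(), md)
	_ = ctx
}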
// // Types that are assignable to MessageRequest: + // // *ServerReflectionRequest_FileByFilename // *ServerReflectionRequest_FileContainingSymbol // *ServerReflectionRequest_FileContainingExtension @@ -263,6 +264,7 @@ type ServerReflectionResponse struct { // message_request in the request. // // Types that are assignable to MessageResponse: + // // *ServerReflectionResponse_FileDescriptorResponse // *ServerReflectionResponse_AllExtensionNumbersResponse // *ServerReflectionResponse_ListServicesResponse diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index 81344abd77..0b41783aa5 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -23,6 +23,7 @@ The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. To register server reflection on a gRPC server: + import "google.golang.org/grpc/reflection" s := grpc.NewServer() @@ -32,7 +33,6 @@ To register server reflection on a gRPC server: reflection.Register(s) s.Serve(lis) - */ package reflection // import "google.golang.org/grpc/reflection" @@ -74,7 +74,7 @@ func Register(s GRPCServer) { // for a custom implementation to return zero values for the // grpc.ServiceInfo values in the map. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -85,7 +85,7 @@ type ServiceInfoProvider interface { // ExtensionResolver is the interface used to query details about extensions. // This interface is satisfied by protoregistry.GlobalTypes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -96,7 +96,7 @@ type ExtensionResolver interface { // ServerOptions represents the options used to construct a reflection server. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -120,7 +120,7 @@ type ServerOptions struct { // This can be used to customize behavior of the reflection service. Most usages // should prefer to use Register instead. // -// Experimental +// # Experimental // // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index ca2e35a359..967cbc7373 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -96,7 +96,7 @@ const ( // Address represents a server the client connects to. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
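The reflection package comment above already carries the canonical registration snippet; expanded into a minimal runnable server it looks like the following (the listen address is arbitrary):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	s := grpc.NewServer()
	// Register the reflection service on the gRPC server, as shown in the
	// package documentation.
	reflection.Register(s)
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}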
@@ -236,12 +236,12 @@ type ClientConn interface { // // Examples: // -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { // Deprecated: use URL.Scheme instead. Scheme string diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 5d407b004b..934fc1aa01 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -198,7 +198,7 @@ func Header(md *metadata.MD) CallOption { // HeaderCallOption is a CallOption for collecting response header metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -220,7 +220,7 @@ func Trailer(md *metadata.MD) CallOption { // TrailerCallOption is a CallOption for collecting response trailer metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -242,7 +242,7 @@ func Peer(p *peer.Peer) CallOption { // PeerCallOption is a CallOption for collecting the identity of the remote // peer. The peer field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -282,7 +282,7 @@ func FailFast(failFast bool) CallOption { // FailFastCallOption is a CallOption for indicating whether an RPC should fail // fast or not. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -305,7 +305,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption { // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can receive. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -328,7 +328,7 @@ func MaxCallSendMsgSize(bytes int) CallOption { // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can send. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -351,7 +351,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { // PerRPCCredsCallOption is a CallOption that indicates the per-RPC // credentials to use for the call. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -369,7 +369,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // sending the request. If WithCompressor is also set, UseCompressor has // higher priority. 
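The call options documented in this file are applied per RPC. A usage sketch combining several of them on a raw ClientConn.Invoke call; the method path, message types and the availability of a registered gzip compressor are assumptions made for illustration:

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
)

// callWithMetadata shows how several CallOptions combine on a single RPC.
func callWithMetadata(ctx context.Context, cc *grpc.ClientConn, req, resp interface{}) (metadata.MD, metadata.MD, error) {
	var (
		hdr, trlr metadata.MD
		p         peer.Peer
	)
	err := cc.Invoke(ctx, "/example.Echo/Say", req, resp,
		grpc.Header(&hdr),              // response header metadata, populated after the RPC
		grpc.Trailer(&trlr),            // response trailer metadata, populated after the RPC
		grpc.Peer(&p),                  // identity of the remote peer
		grpc.MaxCallRecvMsgSize(4<<20), // cap received message size at 4 MiB
		grpc.UseCompressor("gzip"),     // assumes the gzip compressor has been registered
	)
	return hdr, trlr, err
}

func main() {}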
// -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -379,7 +379,7 @@ func UseCompressor(name string) CallOption { // CompressorCallOption is a CallOption that indicates the compressor to use. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -416,7 +416,7 @@ func CallContentSubtype(contentSubtype string) CallOption { // ContentSubtypeCallOption is a CallOption that indicates the content-subtype // used for marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -444,7 +444,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -455,7 +455,7 @@ func ForceCodec(codec encoding.Codec) CallOption { // ForceCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -480,7 +480,7 @@ func CallCustomCodec(codec Codec) CallOption { // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -497,7 +497,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -508,7 +508,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption { // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of // memory to be used for caching this RPC for retry purposes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -548,10 +548,11 @@ type parser struct { // format. The caller owns the returned msg memory. // // If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go index 73a2f92661..35e7a20a04 100644 --- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -19,7 +19,7 @@ // Package serviceconfig defines types and methods for operating on gRPC // service configs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. 
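The parser's documented error contract above (io.EOF, io.ErrUnexpectedEOF, a transport.ConnectionError, or a status error) is what ultimately shapes client-side receive loops. A sketch against a hypothetical stream stub, assuming only that Recv follows that contract:

package main

import (
	"io"
	"log"

	"google.golang.org/grpc/status"
)

// recvStream is a stand-in for a generated client-streaming stub; only the
// Recv signature matters for this sketch.
type recvStream interface {
	Recv() (string, error)
}

// drain reads until the stream ends: io.EOF means the stream ended cleanly,
// while any other error carries (or is wrapped into) a gRPC status.
func drain(s recvStream) {
	for {
		msg, err := s.Recv()
		if err == io.EOF {
			return // normal end of stream
		}
		if err != nil {
			st, _ := status.FromError(err) // non-status errors come back as codes.Unknown
			log.Printf("stream failed: code=%v msg=%q", st.Code(), st.Message())
			return
		}
		log.Printf("received: %s", msg)
	}
}

func main() {}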
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 6d163b6e38..623be39f26 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -76,14 +76,14 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // -// - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, the appropriate Status is returned. +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status`, the appropriate Status is returned. // -// - If err is nil, a Status is returned with codes.OK and no message. +// - If err is nil, a Status is returned with codes.OK and no message. // -// - Otherwise, err is an error not compatible with this package. In this -// case, a Status is returned with codes.Unknown and err's Error() message, -// and ok is false. +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 0c16cfb2ea..960c3e33df 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -39,6 +39,7 @@ import ( imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -195,6 +196,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } return nil, toRPCErr(err) } @@ -744,17 +752,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD + noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() + if err == transport.ErrNoHeaders { + noHeader = true + return nil + } return toRPCErr(err) }, cs.commitAttemptLocked) + if err != nil { cs.finish(err) return nil, err } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { - // Only log if binary log is on and header has not been logged. + + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + // Only log if binary log is on and header has not been logged, and + // there is actually headers to log. logEntry := &binarylog.ServerHeader{ OnClientSide: true, Header: m, diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index dbf34e6bb5..bfa5dfa40e 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -19,7 +19,7 @@ // Package tap defines the function handles which are executed on the transport // layer of gRPC-Go and related information. 
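The three FromError cases spelled out above, as a small worked example:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Case 1: error produced by the status package -> original code preserved.
	s, ok := status.FromError(status.Error(codes.NotFound, "no such widget"))
	fmt.Println(s.Code(), ok) // NotFound true

	// Case 2: nil error -> OK status.
	s, ok = status.FromError(nil)
	fmt.Println(s.Code(), ok) // OK true

	// Case 3: unrelated error -> Unknown, with ok == false.
	s, ok = status.FromError(errors.New("disk on fire"))
	fmt.Println(s.Code(), ok) // Unknown false
}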
// -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index d472ca6430..2198e7098d 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.50.1" +const Version = "1.51.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index c3fc8253b1..bd8e0cdb33 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -67,7 +67,9 @@ elif [[ "$#" -ne 0 ]]; then fi # - Ensure all source files contain a copyright message. -not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' +# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) +(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. not grep 'func Test[^(]' *_test.go @@ -81,7 +83,7 @@ not git grep -l 'x/net/context' -- "*.go" git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' # - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' +git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' # - Ensure all ptypes proto packages are renamed when importing. 
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" diff --git a/vendor/modules.txt b/vendor/modules.txt index 39081c5270..4fbb3a6084 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,15 +1,9 @@ # github.com/agext/levenshtein v1.2.3 ## explicit github.com/agext/levenshtein -# github.com/apparentlymart/go-cidr v1.1.0 -## explicit -github.com/apparentlymart/go-cidr/cidr # github.com/apparentlymart/go-textseg/v13 v13.0.0 ## explicit; go 1.16 github.com/apparentlymart/go-textseg/v13/textseg -# github.com/davecgh/go-spew v1.1.1 -## explicit -github.com/davecgh/go-spew/spew # github.com/fatih/color v1.13.0 ## explicit; go 1.13 github.com/fatih/color @@ -32,10 +26,10 @@ github.com/google/go-cmp/cmp/internal/value # github.com/hashicorp/errwrap v1.1.0 ## explicit github.com/hashicorp/errwrap -# github.com/hashicorp/go-azure-helpers v0.55.0 +# github.com/hashicorp/go-azure-helpers v0.56.0 ## explicit; go 1.19 github.com/hashicorp/go-azure-helpers/lang/pointer -# github.com/hashicorp/go-azure-sdk v0.20230331.1143618 +# github.com/hashicorp/go-azure-sdk v0.20230511.1094507 ## explicit; go 1.19 github.com/hashicorp/go-azure-sdk/sdk/auth github.com/hashicorp/go-azure-sdk/sdk/claims @@ -57,13 +51,13 @@ github.com/hashicorp/go-cty/cty/gocty github.com/hashicorp/go-cty/cty/json github.com/hashicorp/go-cty/cty/msgpack github.com/hashicorp/go-cty/cty/set -# github.com/hashicorp/go-hclog v1.2.1 +# github.com/hashicorp/go-hclog v1.4.0 ## explicit; go 1.13 github.com/hashicorp/go-hclog # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-plugin v1.4.6 +# github.com/hashicorp/go-plugin v1.4.8 ## explicit; go 1.17 github.com/hashicorp/go-plugin github.com/hashicorp/go-plugin/internal/plugin @@ -76,7 +70,7 @@ github.com/hashicorp/go-uuid # github.com/hashicorp/go-version v1.6.0 ## explicit github.com/hashicorp/go-version -# github.com/hashicorp/hc-install v0.4.0 +# github.com/hashicorp/hc-install v0.5.0 ## explicit; go 1.16 github.com/hashicorp/hc-install github.com/hashicorp/hc-install/checkpoint @@ -88,11 +82,11 @@ github.com/hashicorp/hc-install/internal/pubkey github.com/hashicorp/hc-install/internal/releasesjson github.com/hashicorp/hc-install/internal/src github.com/hashicorp/hc-install/internal/validators -github.com/hashicorp/hc-install/internal/version github.com/hashicorp/hc-install/product github.com/hashicorp/hc-install/releases github.com/hashicorp/hc-install/src -# github.com/hashicorp/hcl/v2 v2.15.0 +github.com/hashicorp/hc-install/version +# github.com/hashicorp/hcl/v2 v2.16.2 ## explicit; go 1.18 github.com/hashicorp/hcl/v2 github.com/hashicorp/hcl/v2/ext/customdecode @@ -100,14 +94,14 @@ github.com/hashicorp/hcl/v2/hclsyntax # github.com/hashicorp/logutils v1.0.0 ## explicit github.com/hashicorp/logutils -# github.com/hashicorp/terraform-exec v0.17.3 +# github.com/hashicorp/terraform-exec v0.18.1 ## explicit; go 1.18 github.com/hashicorp/terraform-exec/internal/version github.com/hashicorp/terraform-exec/tfexec -# github.com/hashicorp/terraform-json v0.14.0 -## explicit; go 1.13 +# github.com/hashicorp/terraform-json v0.16.0 +## explicit; go 1.18 github.com/hashicorp/terraform-json -# github.com/hashicorp/terraform-plugin-go v0.14.1 +# github.com/hashicorp/terraform-plugin-go v0.14.3 ## explicit; go 1.18 github.com/hashicorp/terraform-plugin-go/internal/logging github.com/hashicorp/terraform-plugin-go/tfprotov5 @@ -125,19 +119,21 @@ 
github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6 github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server github.com/hashicorp/terraform-plugin-go/tftypes -# github.com/hashicorp/terraform-plugin-log v0.7.0 -## explicit; go 1.17 +# github.com/hashicorp/terraform-plugin-log v0.8.0 +## explicit; go 1.18 github.com/hashicorp/terraform-plugin-log/internal/fieldutils github.com/hashicorp/terraform-plugin-log/internal/hclogutils github.com/hashicorp/terraform-plugin-log/internal/logging github.com/hashicorp/terraform-plugin-log/tflog github.com/hashicorp/terraform-plugin-log/tfsdklog -# github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 -## explicit; go 1.18 +# github.com/hashicorp/terraform-plugin-sdk/v2 v2.26.1 +## explicit; go 1.19 github.com/hashicorp/terraform-plugin-sdk/v2/diag github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest +github.com/hashicorp/terraform-plugin-sdk/v2/helper/id github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource +github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation @@ -153,7 +149,7 @@ github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags github.com/hashicorp/terraform-plugin-sdk/v2/meta github.com/hashicorp/terraform-plugin-sdk/v2/plugin github.com/hashicorp/terraform-plugin-sdk/v2/terraform -# github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c +# github.com/hashicorp/terraform-registry-address v0.1.0 ## explicit; go 1.14 github.com/hashicorp/terraform-registry-address # github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 @@ -204,16 +200,17 @@ github.com/vmihailenco/msgpack/v4/codes github.com/vmihailenco/tagparser github.com/vmihailenco/tagparser/internal github.com/vmihailenco/tagparser/internal/parser -# github.com/zclconf/go-cty v1.12.1 +# github.com/zclconf/go-cty v1.13.1 ## explicit; go 1.18 github.com/zclconf/go-cty/cty github.com/zclconf/go-cty/cty/convert +github.com/zclconf/go-cty/cty/ctystrings github.com/zclconf/go-cty/cty/function github.com/zclconf/go-cty/cty/function/stdlib github.com/zclconf/go-cty/cty/gocty github.com/zclconf/go-cty/cty/json github.com/zclconf/go-cty/cty/set -# golang.org/x/crypto v0.5.0 +# golang.org/x/crypto v0.9.0 ## explicit; go 1.17 golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 @@ -232,7 +229,13 @@ golang.org/x/crypto/openpgp/s2k golang.org/x/crypto/pbkdf2 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf -# golang.org/x/net v0.7.0 +# golang.org/x/mod v0.8.0 +## explicit; go 1.17 +golang.org/x/mod/internal/lazyregexp +golang.org/x/mod/modfile +golang.org/x/mod/module +golang.org/x/mod/semver +# golang.org/x/net v0.10.0 ## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -246,11 +249,11 @@ golang.org/x/net/trace ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sys v0.5.0 +# golang.org/x/sys v0.8.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/unix -# golang.org/x/text v0.7.0 +# golang.org/x/text v0.9.0 ## explicit; go 1.17 golang.org/x/text/internal/language golang.org/x/text/internal/language/compact @@ -278,7 +281,7 @@ google.golang.org/appengine/urlfetch # google.golang.org/genproto 
v0.0.0-20220407144326-9054f6ed7bac ## explicit; go 1.15 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.50.1 +# google.golang.org/grpc v1.51.0 ## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes