diff --git a/go.mod b/go.mod index 7f8c0a3457..9fe472b89f 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/Azure/go-autorest v11.5.1+incompatible // indirect github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5 github.com/NYTimes/gziphandler v1.1.1 + github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 github.com/aws/aws-sdk-go v1.23.12 github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect github.com/blang/semver v3.5.0+incompatible @@ -15,12 +16,10 @@ require ( github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 github.com/cenkalti/backoff v1.0.0 // indirect github.com/cespare/xxhash v1.1.0 - github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d // indirect github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect github.com/cznic/ql v1.2.0 // indirect - github.com/dustin/go-humanize v1.0.0 // indirect github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/fluent/fluent-logger-golang v1.2.1 // indirect github.com/fsouza/fake-gcs-server v1.3.0 @@ -48,6 +47,7 @@ require ( github.com/lib/pq v1.0.0 github.com/mattes/migrate v1.3.1 github.com/mattn/go-sqlite3 v1.10.0 // indirect + github.com/oklog/ulid v1.3.1 github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02 github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9 github.com/opentracing/opentracing-go v1.1.0 @@ -62,11 +62,10 @@ require ( github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e github.com/sercand/kuberesolver v2.1.0+incompatible // indirect github.com/stretchr/testify v1.4.0 + github.com/thanos-io/thanos v0.7.0 github.com/tinylib/msgp v0.0.0-20161221055906-38a6f61a768d // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect - github.com/uber-go/atomic v1.3.2 // indirect github.com/uber/jaeger-client-go v2.16.0+incompatible - github.com/uber/jaeger-lib v2.0.0+incompatible // indirect github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1 github.com/weaveworks/common v0.0.0-20190822150010-afb9996716e4 github.com/weaveworks/promrus v1.2.0 // indirect diff --git a/go.sum b/go.sum index 0861b2947b..bdb63bba7f 100644 --- a/go.sum +++ b/go.sum @@ -7,9 +7,13 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A contrib.go.opencensus.io/exporter/ocagent v0.6.0 h1:Z1n6UAyr0QwM284yUuh5Zd8JlvxUGAhFZcgMJkMPrGM= contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs= contrib.go.opencensus.io/exporter/stackdriver v0.6.0/go.mod h1:QeFzMJDAw8TXt5+aRaSuE8l5BwaMIOIlaVkBOPRuMuw= +github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo= +github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-sdk-for-go v23.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v26.3.0+incompatible h1:w/tfbWIy9a8SSNJFwcapWeOfknQXDYBVjh5UkuIr+NA= github.com/Azure/azure-sdk-for-go v26.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-storage-blob-go v0.7.0 h1:MuueVOYkufCxJw5YZzF842DY2MBsp+hLuh2apKY0mck= +github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= github.com/Azure/go-autorest 
v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v11.2.8+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v11.5.1+incompatible h1:tdB6TZ8w2B7+F8wD6eTQSXXQo31zKKL55b6uqNDAGKw= @@ -32,7 +36,9 @@ github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/a8m/mark v0.1.1-0.20170507133748-44f2db618845/go.mod h1:c8Mh99Cw82nrsAnPgxQSZHkswVOJF7/MqZb1ZdvriLM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= @@ -134,12 +140,16 @@ github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structtag v1.0.0/go.mod h1:IKitwq45uXL/yqi5mYghiD3w9H6eTOvI9vnk8tXMphA= github.com/fluent/fluent-logger-golang v1.2.1 h1:CMA+mw2zMiOGEOarZtaqM3GBWT1IVLNncNi0nKELtmU= github.com/fluent/fluent-logger-golang v1.2.1/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsouza/fake-gcs-server v1.3.0 h1:f2mbomatUsbw8NRY7rzqiiWNn4BRM+Jredz0Pt70Usg= github.com/fsouza/fake-gcs-server v1.3.0/go.mod h1:Lq+43m2znsXfDKHnQMfdA0HpYYAEJsfizsbpk5k3TLo= +github.com/gernest/wow v0.1.0/go.mod h1:dEPabJRi5BneI1Nev1VWo0ZlcTWibHWp43qxKms4elY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= @@ -252,6 +262,8 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring 
v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= @@ -267,6 +279,8 @@ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= +github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -277,6 +291,8 @@ github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTV github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= github.com/gophercloud/gophercloud v0.3.0 h1:6sjpKIpVwRIIwmcEGp+WwNovNsem+c+2vm6oxshRpL8= github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= @@ -285,8 +301,10 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v0.0.0-20181025070259-68e3a13e4117/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= @@ -356,6 +374,8 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= 
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -380,8 +400,11 @@ github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939 h1:yZJImkCmVI6d1uJ9KR github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/leanovate/gopter v0.2.4 h1:U4YLBggDFhJdqQsG4Na2zX7joVTky9vHaj/AGEwSuXU= +github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -391,9 +414,14 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN github.com/mattes/migrate v1.3.1 h1:kaUHjsvvmhGIkt9WVaEn36Z08+CaHAcXWcnZj3JpSaY= github.com/mattes/migrate v1.3.1/go.mod h1:LJcqgpj1jQoxv3m2VXd3drv0suK5CbN/RCX7MXwgnVI= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw= +github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -402,6 +430,9 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.15 
h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/minio/cli v1.20.0/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY= +github.com/minio/minio-go/v6 v6.0.27-0.20190529152532-de69c0e465ed h1:g3DRJpu22jEjs14fSeJ7Crn9vdreiRsn4RtrEsXH/6A= +github.com/minio/minio-go/v6 v6.0.27-0.20190529152532-de69c0e465ed/go.mod h1:vaNT59cWULS37E+E9zkuN/BVnKHyXtVGS+b04Boc66Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -421,6 +452,10 @@ github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lN github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mozillazg/go-cos v0.12.0 h1:b9hUd5HjrDe10BUfkyiLYI1+z4M2kAgKasktszx9pO4= +github.com/mozillazg/go-cos v0.12.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= +github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= +github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= @@ -432,6 +467,7 @@ github.com/oklog/ulid v0.0.0-20170117200651-66bb6560562f/go.mod h1:CirwcVhetQ6Lv github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= @@ -445,6 +481,7 @@ github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02 h1:0R5 github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9 h1:QsgXACQhd9QJhEmRumbsMQQvBtmdS0mafoVEBplWXEg= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= @@ -495,6 +532,7 @@ github.com/prometheus/procfs v0.0.3/go.mod 
h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f h1:7C9G4yUogM8QP85pmf11vlBPuV6u2mPbqvbjPVKcNis= github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f/go.mod h1:rMTlmxGCvukf2KMu3fClMDKLLoJ5hl61MhcJ7xKakf0= +github.com/prometheus/prometheus v1.8.2-0.20190819201610-48b2c9c8eae2/go.mod h1:rMTlmxGCvukf2KMu3fClMDKLLoJ5hl61MhcJ7xKakf0= github.com/prometheus/prometheus v1.8.2-0.20190918104050-8744afdd1ea0 h1:W4dTblzSVIBNfDimJhh70OpZQQMwLVpwK50scXdH94w= github.com/prometheus/prometheus v1.8.2-0.20190918104050-8744afdd1ea0/go.mod h1:elNqjVbwD3sCZJqKzyN7uEuwGcCpeJvv67D6BrHsDbw= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1 h1:+kGqA4dNN5hn7WwvKdzHl0rdN5AEkbNZd0VjRltAiZg= @@ -525,6 +563,10 @@ github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjM github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -545,6 +587,8 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/thanos-io/thanos v0.7.0 h1:zAHD2IVQUBS4QbAHum0IFqXVFUZm39Q34OAe8p4AqtU= +github.com/thanos-io/thanos v0.7.0/go.mod h1:dF35hGQW3F8iBXwc2/F7e29dsNGW0RGmKYC3D+0F83Y= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v0.0.0-20161221055906-38a6f61a768d h1:Ninez2SUm08xpmnw7kVxCeOc3DahF6IuMuRMCdM4wTQ= @@ -553,8 +597,8 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/uber-go/atomic v1.3.2 h1:Azu9lPBWRNKzYXSIwRfgRuDuS0YKsK4NFhiQv98gkxo= -github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= +github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o= 
+github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQGFt7E53bPYqEgug/AoBtY= github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw= @@ -582,6 +626,9 @@ go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -591,9 +638,11 @@ golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -627,6 +676,7 @@ golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -653,6 +703,7 @@ golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190116161447-11f53e031339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -693,6 +744,7 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -735,6 +787,7 @@ google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.22.1 h1:/7cs52RnTJmD43s3uxzlq2U7nqVTd/37viQwMrMNlOM= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= @@ -750,9 +803,12 @@ gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKW gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= diff --git a/pkg/chunk/storage/factory.go b/pkg/chunk/storage/factory.go index cce20b93e1..bb106675bb 100644 --- 
a/pkg/chunk/storage/factory.go +++ b/pkg/chunk/storage/factory.go @@ -18,6 +18,12 @@ import ( "github.com/pkg/errors" ) +// Supported storage engines +const ( + StorageEngineChunks = "chunks" + StorageEngineTSDB = "tsdb" +) + // StoreLimits helps get Limits specific to Queries for Stores type StoreLimits interface { CardinalityLimit(userID string) int @@ -27,6 +33,7 @@ type StoreLimits interface { // Config chooses which storage client to use. type Config struct { + Engine string `yaml:"engine"` AWSStorageConfig aws.StorageConfig `yaml:"aws"` GCPStorageConfig gcp.Config `yaml:"bigtable"` GCSConfig gcp.GCSConfig `yaml:"gcs"` @@ -48,10 +55,20 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.BoltDBConfig.RegisterFlags(f) cfg.FSConfig.RegisterFlags(f) + f.StringVar(&cfg.Engine, "store.engine", "chunks", "The storage engine to use: chunks or tsdb. Be aware that tsdb is experimental and shouldn't be used in production.") cfg.IndexQueriesCacheConfig.RegisterFlagsWithPrefix("store.index-cache-read.", "Cache config for index entry reading. ", f) f.DurationVar(&cfg.IndexCacheValidity, "store.index-cache-validity", 5*time.Minute, "Cache validity for active index entries. Should be no higher than -ingester.max-chunk-idle.") } +// Validate validates the config and returns an error on failure +func (cfg *Config) Validate() error { + if cfg.Engine != StorageEngineChunks && cfg.Engine != StorageEngineTSDB { + return errors.New("unsupported storage engine") + } + + return nil +} + // NewStore makes the storage clients based on the configuration. func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConfig, limits StoreLimits) (chunk.Store, error) { tieredCache, err := cache.New(cfg.IndexQueriesCacheConfig) diff --git a/pkg/cortex/cortex.go b/pkg/cortex/cortex.go index 196579d73a..ffbd320a97 100644 --- a/pkg/cortex/cortex.go +++ b/pkg/cortex/cortex.go @@ -27,6 +27,7 @@ import ( "github.com/cortexproject/cortex/pkg/querier/frontend" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ruler" + "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -69,6 +70,7 @@ type Config struct { Frontend frontend.Config `yaml:"frontend,omitempty"` TableManager chunk.TableManagerConfig `yaml:"table_manager,omitempty"` Encoding encoding.Config `yaml:"-"` // No yaml for this, it only works with flags.
+ TSDB tsdb.Config `yaml:"tsdb"` Ruler ruler.Config `yaml:"ruler,omitempty"` ConfigDB db.Config `yaml:"configdb,omitempty"` @@ -100,6 +102,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.Frontend.RegisterFlags(f) c.TableManager.RegisterFlags(f) c.Encoding.RegisterFlags(f) + c.TSDB.RegisterFlags(f) c.Ruler.RegisterFlags(f) c.ConfigDB.RegisterFlags(f) @@ -119,6 +122,14 @@ func (c *Config) Validate() error { if err := c.Encoding.Validate(); err != nil { return errors.Wrap(err, "invalid encoding config") } + + if err := c.Storage.Validate(); err != nil { + return errors.Wrap(err, "invalid storage config") + } + + if err := c.TSDB.Validate(); err != nil { + return errors.Wrap(err, "invalid TSDB config") + } return nil } diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go index bcf5a3e0ed..312b71ae06 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -12,6 +12,7 @@ import ( "github.com/prometheus/common/route" "github.com/prometheus/prometheus/config" v1 "github.com/prometheus/prometheus/web/api/v1" + "github.com/thanos-io/thanos/pkg/objstore/s3" httpgrpc_server "github.com/weaveworks/common/httpgrpc/server" "github.com/weaveworks/common/middleware" "github.com/weaveworks/common/server" @@ -193,7 +194,25 @@ func (t *Cortex) initQuerier(cfg *Config) (err error) { return } - queryable, engine := querier.New(cfg.Querier, t.distributor, t.store) + var store querier.ChunkStore + + if cfg.Storage.Engine == storage.StorageEngineTSDB { + s3cfg := s3.Config{ + Bucket: cfg.TSDB.S3.BucketName, + Endpoint: cfg.TSDB.S3.Endpoint, + AccessKey: cfg.TSDB.S3.AccessKeyID, + SecretKey: cfg.TSDB.S3.SecretAccessKey, + Insecure: cfg.TSDB.S3.Insecure, + } + store, err = querier.NewBlockQuerier(s3cfg, cfg.TSDB.SyncDir, prometheus.DefaultRegisterer) + if err != nil { + return err + } + } else { + store = t.store + } + + queryable, engine := querier.New(cfg.Querier, t.distributor, store) api := v1.NewAPI( engine, queryable, @@ -228,6 +247,9 @@ func (t *Cortex) stopQuerier() error { func (t *Cortex) initIngester(cfg *Config) (err error) { cfg.Ingester.LifecyclerConfig.ListenPort = &cfg.Server.GRPCListenPort + cfg.Ingester.TSDBEnabled = cfg.Storage.Engine == storage.StorageEngineTSDB + cfg.Ingester.TSDBConfig = cfg.TSDB + t.ingester, err = ingester.New(cfg.Ingester, cfg.IngesterClient, t.overrides, t.store, prometheus.DefaultRegisterer) if err != nil { return @@ -246,6 +268,9 @@ func (t *Cortex) stopIngester() error { } func (t *Cortex) initStore(cfg *Config) (err error) { + if cfg.Storage.Engine == storage.StorageEngineTSDB { + return nil + } err = cfg.Schema.Load() if err != nil { return @@ -256,7 +281,9 @@ func (t *Cortex) initStore(cfg *Config) (err error) { } func (t *Cortex) stopStore() error { - t.store.Stop() + if t.store != nil { + t.store.Stop() + } return nil } @@ -281,6 +308,10 @@ func (t *Cortex) stopQueryFrontend() (err error) { } func (t *Cortex) initTableManager(cfg *Config) error { + if cfg.Storage.Engine == storage.StorageEngineTSDB { + return nil // the table manager isn't used with the TSDB storage engine + } + err := cfg.Schema.Load() if err != nil { return err @@ -319,7 +350,10 @@ func (t *Cortex) initTableManager(cfg *Config) error { } func (t *Cortex) stopTableManager() error { - t.tableManager.Stop() + if t.tableManager != nil { + t.tableManager.Stop() + } + return nil } diff --git a/pkg/ingester/bucket.go b/pkg/ingester/bucket.go new file mode 100644 index 0000000000..90fb4d1fac --- /dev/null +++ b/pkg/ingester/bucket.go @@ -0,0 +1,68 @@ +package ingester + +import ( + "fmt" + "io" + "strings"
+ + "github.com/thanos-io/thanos/pkg/objstore" + "golang.org/x/net/context" +) + +// Bucket is a wrapper around a objstore.Bucket that prepends writes with a userID +type Bucket struct { + UserID string + Bucket objstore.Bucket +} + +func (b *Bucket) fullName(name string) string { + return fmt.Sprintf("%s/%s", b.UserID, name) +} + +// Close implements io.Closer +func (b *Bucket) Close() error { return b.Bucket.Close() } + +// Upload the contents of the reader as an object into the bucket. +func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { + return b.Bucket.Upload(ctx, b.fullName(name), r) +} + +// Delete removes the object with the given name. +func (b *Bucket) Delete(ctx context.Context, name string) error { + return b.Bucket.Delete(ctx, b.fullName(name)) +} + +// Name returns the bucket name for the provider. +func (b *Bucket) Name() string { return b.Bucket.Name() } + +// Iter calls f for each entry in the given directory (not recursive.). The argument to f is the full +// object name including the prefix of the inspected directory. +func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error) error { + return b.Bucket.Iter(ctx, b.fullName(dir), func(s string) error { + /* + Since all objects are prefixed with the userID we need to strip the userID + upon passing to the processing function + */ + return f(strings.Join(strings.Split(s, "/")[1:], "/")) + }) +} + +// Get returns a reader for the given object name. +func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { + return b.Bucket.Get(ctx, b.fullName(name)) +} + +// GetRange returns a new range reader for the given object name and range. +func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { + return b.Bucket.GetRange(ctx, b.fullName(name), off, length) +} + +// Exists checks if the given object exists in the bucket. +func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) { + return b.Bucket.Exists(ctx, b.fullName(name)) +} + +// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. 
+func (b *Bucket) IsObjNotFoundErr(err error) bool { + return b.Bucket.IsObjNotFoundErr(err) +} diff --git a/pkg/ingester/client/cortex.pb.go b/pkg/ingester/client/cortex.pb.go index 0024675d96..c97908a367 100644 --- a/pkg/ingester/client/cortex.pb.go +++ b/pkg/ingester/client/cortex.pb.go @@ -1297,6 +1297,108 @@ func (m *LabelMatcher) GetValue() string { return "" } +type TimeSeriesFile struct { + FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` + UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + Filename string `protobuf:"bytes,3,opt,name=filename,proto3" json:"filename,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *TimeSeriesFile) Reset() { *m = TimeSeriesFile{} } +func (*TimeSeriesFile) ProtoMessage() {} +func (*TimeSeriesFile) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{26} +} +func (m *TimeSeriesFile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeriesFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeriesFile.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeSeriesFile) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeriesFile.Merge(m, src) +} +func (m *TimeSeriesFile) XXX_Size() int { + return m.Size() +} +func (m *TimeSeriesFile) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeriesFile.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeriesFile proto.InternalMessageInfo + +func (m *TimeSeriesFile) GetFromIngesterId() string { + if m != nil { + return m.FromIngesterId + } + return "" +} + +func (m *TimeSeriesFile) GetUserId() string { + if m != nil { + return m.UserId + } + return "" +} + +func (m *TimeSeriesFile) GetFilename() string { + if m != nil { + return m.Filename + } + return "" +} + +func (m *TimeSeriesFile) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type TransferTSDBResponse struct { +} + +func (m *TransferTSDBResponse) Reset() { *m = TransferTSDBResponse{} } +func (*TransferTSDBResponse) ProtoMessage() {} +func (*TransferTSDBResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{27} +} +func (m *TransferTSDBResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TransferTSDBResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TransferTSDBResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TransferTSDBResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransferTSDBResponse.Merge(m, src) +} +func (m *TransferTSDBResponse) XXX_Size() int { + return m.Size() +} +func (m *TransferTSDBResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TransferTSDBResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TransferTSDBResponse proto.InternalMessageInfo + func init() { proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value) proto.RegisterEnum("cortex.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value) @@ -1326,88 +1428,94 @@ func init() { proto.RegisterType((*LabelMatchers)(nil), "cortex.LabelMatchers") 
proto.RegisterType((*Metric)(nil), "cortex.Metric") proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher") + proto.RegisterType((*TimeSeriesFile)(nil), "cortex.TimeSeriesFile") + proto.RegisterType((*TransferTSDBResponse)(nil), "cortex.TransferTSDBResponse") } func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } var fileDescriptor_893a47d0a749d749 = []byte{ - // 1205 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xdf, 0x8d, 0xff, 0x24, 0x7e, 0x5e, 0xbb, 0xce, 0x24, 0xa5, 0xae, 0x2b, 0xd6, 0x65, 0xa4, - 0x96, 0x08, 0xa8, 0x5b, 0x52, 0x15, 0x7a, 0xa0, 0xaa, 0x9c, 0xd6, 0x6d, 0x8d, 0x92, 0x34, 0x1d, - 0xbb, 0x80, 0x90, 0x90, 0xb5, 0xb1, 0xa7, 0xc9, 0x8a, 0xfd, 0xe3, 0xee, 0xcc, 0x22, 0x7a, 0x40, - 0xe2, 0x1b, 0xc0, 0x11, 0xbe, 0x01, 0x67, 0x2e, 0x70, 0xe6, 0xd4, 0x63, 0x8e, 0x15, 0x87, 0x8a, - 0x38, 0x17, 0x8e, 0xfd, 0x08, 0x68, 0x67, 0x66, 0xd7, 0xbb, 0xae, 0x2d, 0x22, 0x50, 0x6f, 0x9e, - 0xf7, 0x7e, 0xef, 0x37, 0x6f, 0xde, 0xdf, 0x35, 0x18, 0x43, 0x3f, 0xe0, 0xf4, 0xdb, 0xd6, 0x38, - 0xf0, 0xb9, 0x8f, 0x8a, 0xf2, 0xd4, 0xb8, 0x72, 0x60, 0xf3, 0xc3, 0x70, 0xbf, 0x35, 0xf4, 0xdd, - 0xab, 0x07, 0xfe, 0x81, 0x7f, 0x55, 0xa8, 0xf7, 0xc3, 0x27, 0xe2, 0x24, 0x0e, 0xe2, 0x97, 0x34, - 0xc3, 0xbf, 0xeb, 0x60, 0x7c, 0x1e, 0xd8, 0x9c, 0x12, 0xfa, 0x34, 0xa4, 0x8c, 0xa3, 0x5d, 0x00, - 0x6e, 0xbb, 0x94, 0xd1, 0xc0, 0xa6, 0xac, 0xae, 0x5f, 0xcc, 0x6d, 0x94, 0x37, 0x51, 0x4b, 0x5d, - 0xd5, 0xb7, 0x5d, 0xda, 0x13, 0x9a, 0xad, 0xc6, 0xf3, 0x97, 0x4d, 0xed, 0xcf, 0x97, 0x4d, 0xb4, - 0x17, 0x50, 0xcb, 0x71, 0xfc, 0x61, 0x3f, 0xb1, 0x22, 0x29, 0x06, 0xf4, 0x31, 0x14, 0x7b, 0x7e, - 0x18, 0x0c, 0x69, 0x7d, 0xe9, 0xa2, 0xbe, 0x51, 0xdd, 0x6c, 0xc6, 0x5c, 0xe9, 0x5b, 0x5b, 0x12, - 0xd2, 0xf1, 0x42, 0x97, 0x14, 0x99, 0xf8, 0x8d, 0x9b, 0x00, 0x53, 0x29, 0x5a, 0x86, 0x5c, 0x7b, - 0xaf, 0x5b, 0xd3, 0xd0, 0x0a, 0xe4, 0xc9, 0xe3, 0xed, 0x4e, 0x4d, 0xc7, 0x67, 0xa0, 0xa2, 0x38, - 0xd8, 0xd8, 0xf7, 0x18, 0xc5, 0xb7, 0xa0, 0x4c, 0xa8, 0x35, 0x8a, 0x5f, 0xd2, 0x82, 0xe5, 0xa7, - 0x61, 0xfa, 0x19, 0xeb, 0xf1, 0xd5, 0x8f, 0x42, 0x1a, 0x3c, 0x53, 0x30, 0x12, 0x83, 0xf0, 0x6d, - 0x30, 0xa4, 0xb9, 0xa4, 0x43, 0x57, 0x61, 0x39, 0xa0, 0x2c, 0x74, 0x78, 0x6c, 0x7f, 0x76, 0xc6, - 0x5e, 0xe2, 0x48, 0x8c, 0xc2, 0x3f, 0xe9, 0x60, 0xa4, 0xa9, 0xd1, 0x07, 0x80, 0x18, 0xb7, 0x02, - 0x3e, 0x10, 0xf1, 0xe0, 0x96, 0x3b, 0x1e, 0xb8, 0x11, 0x99, 0xbe, 0x91, 0x23, 0x35, 0xa1, 0xe9, - 0xc7, 0x8a, 0x1d, 0x86, 0x36, 0xa0, 0x46, 0xbd, 0x51, 0x16, 0xbb, 0x24, 0xb0, 0x55, 0xea, 0x8d, - 0xd2, 0xc8, 0x6b, 0xb0, 0xe2, 0x5a, 0x7c, 0x78, 0x48, 0x03, 0x56, 0xcf, 0x65, 0x9f, 0xb6, 0x6d, - 0xed, 0x53, 0x67, 0x47, 0x2a, 0x49, 0x82, 0xc2, 0x5d, 0xa8, 0x64, 0x9c, 0x46, 0x37, 0x4f, 0x99, - 0xe6, 0x7c, 0x94, 0xe6, 0x74, 0x42, 0x71, 0x1f, 0xd6, 0x04, 0x55, 0x8f, 0x07, 0xd4, 0x72, 0x13, - 0xc2, 0x5b, 0x73, 0x08, 0xcf, 0xbd, 0x4e, 0x78, 0xe7, 0x30, 0xf4, 0xbe, 0x9e, 0xc3, 0x7a, 0x1d, - 0x90, 0x70, 0xfd, 0x33, 0xcb, 0x09, 0x29, 0x8b, 0x03, 0xf8, 0x36, 0x80, 0x13, 0x49, 0x07, 0x9e, - 0xe5, 0x52, 0x11, 0xb8, 0x12, 0x29, 0x09, 0xc9, 0xae, 0xe5, 0x52, 0x7c, 0x13, 0xd6, 0x32, 0x46, - 0xca, 0x95, 0x77, 0xc0, 0x90, 0x56, 0xdf, 0x08, 0xb9, 0x70, 0xa6, 0x44, 0xca, 0xce, 0x14, 0x8a, - 0xd7, 0x60, 0x75, 0x3b, 0xa6, 0x89, 0x6f, 0xc3, 0x37, 0x94, 0x0f, 0x4a, 0xa8, 0xd8, 0x9a, 0x50, - 0x9e, 0xfa, 0x10, 0x93, 0x41, 0xe2, 0x04, 0xc3, 0x08, 0x6a, 0x8f, 0x19, 0x0d, 0x7a, 0xdc, 0xe2, - 0x09, 0xd5, 0x6f, 0x3a, 0xac, 0xa6, 0x84, 0x8a, 0xea, 0x12, 0x54, 
0x6d, 0xef, 0x80, 0x32, 0x6e, - 0xfb, 0xde, 0x20, 0xb0, 0xb8, 0x7c, 0x92, 0x4e, 0x2a, 0x89, 0x94, 0x58, 0x9c, 0x46, 0xaf, 0xf6, - 0x42, 0x77, 0xa0, 0x42, 0x19, 0x95, 0x40, 0x9e, 0x94, 0xbc, 0xd0, 0x95, 0x11, 0x8c, 0xaa, 0xca, - 0x1a, 0xdb, 0x83, 0x19, 0xa6, 0x9c, 0x60, 0xaa, 0x59, 0x63, 0xbb, 0x9b, 0x21, 0x6b, 0xc1, 0x5a, - 0x10, 0x3a, 0x74, 0x16, 0x9e, 0x17, 0xf0, 0xd5, 0x48, 0x95, 0xc1, 0xe3, 0xaf, 0x60, 0x2d, 0x72, - 0xbc, 0x7b, 0x37, 0xeb, 0xfa, 0x39, 0x58, 0x0e, 0x19, 0x0d, 0x06, 0xf6, 0x48, 0xa5, 0xa1, 0x18, - 0x1d, 0xbb, 0x23, 0x74, 0x05, 0xf2, 0x23, 0x8b, 0x5b, 0xc2, 0xcd, 0xf2, 0xe6, 0xf9, 0x38, 0xe3, - 0xaf, 0x3d, 0x9e, 0x08, 0x18, 0xbe, 0x0f, 0x28, 0x52, 0xb1, 0x2c, 0xfb, 0x87, 0x50, 0x60, 0x91, - 0x40, 0xd5, 0xcd, 0x85, 0x34, 0xcb, 0x8c, 0x27, 0x44, 0x22, 0xf1, 0xaf, 0x3a, 0x98, 0x3b, 0x94, - 0x07, 0xf6, 0x90, 0xdd, 0xf3, 0x83, 0x74, 0xd9, 0xb3, 0x37, 0xdd, 0x7e, 0x37, 0xc1, 0x88, 0x1b, - 0x6b, 0xc0, 0x28, 0x57, 0x2d, 0x78, 0x76, 0x5e, 0x0b, 0x32, 0x52, 0x8e, 0xa1, 0x3d, 0xca, 0x71, - 0x17, 0x9a, 0x0b, 0x7d, 0x56, 0xa1, 0xb8, 0x0c, 0x45, 0x57, 0x40, 0x54, 0x2c, 0xaa, 0x31, 0xad, - 0x34, 0x24, 0x4a, 0x8b, 0xff, 0xd0, 0xe1, 0xcc, 0x4c, 0x5b, 0x45, 0x4f, 0x78, 0x12, 0xf8, 0xae, - 0xca, 0x75, 0x3a, 0x5b, 0xd5, 0x48, 0xde, 0x55, 0xe2, 0xee, 0x28, 0x9d, 0xce, 0xa5, 0x4c, 0x3a, - 0x6f, 0x43, 0x51, 0x94, 0x76, 0x3c, 0x58, 0x56, 0x33, 0xaf, 0xda, 0xb3, 0xec, 0x60, 0x6b, 0x5d, - 0x4d, 0x7e, 0x43, 0x88, 0xda, 0x23, 0x6b, 0xcc, 0x69, 0x40, 0x94, 0x19, 0x7a, 0x1f, 0x8a, 0xc3, - 0xc8, 0x19, 0x56, 0xcf, 0x0b, 0x82, 0x4a, 0x4c, 0x90, 0xee, 0x7c, 0x05, 0xc1, 0x3f, 0xe8, 0x50, - 0x90, 0xae, 0xbf, 0xa9, 0x5c, 0x35, 0x60, 0x85, 0x7a, 0x43, 0x7f, 0x64, 0x7b, 0x07, 0xa2, 0x45, - 0x0a, 0x24, 0x39, 0x23, 0xa4, 0x4a, 0x37, 0xea, 0x05, 0x43, 0xd5, 0x67, 0x1d, 0xde, 0xea, 0x07, - 0x96, 0xc7, 0x9e, 0xd0, 0x40, 0x38, 0x96, 0x24, 0x06, 0x7f, 0x07, 0x30, 0x8d, 0x77, 0x2a, 0x4e, - 0xfa, 0x7f, 0x8b, 0x53, 0x0b, 0x96, 0x99, 0xe5, 0x8e, 0x1d, 0xd1, 0xe1, 0x99, 0x44, 0xf7, 0x84, - 0x58, 0x45, 0x2a, 0x06, 0xe1, 0x1b, 0x50, 0x4a, 0xa8, 0x23, 0xcf, 0x93, 0x89, 0x68, 0x10, 0xf1, - 0x1b, 0xad, 0x43, 0x41, 0xcc, 0x3b, 0x11, 0x08, 0x83, 0xc8, 0x03, 0x6e, 0x43, 0x51, 0xf2, 0x4d, - 0xf5, 0x72, 0xe6, 0xc8, 0x43, 0x34, 0x2b, 0xe7, 0x44, 0xb1, 0xcc, 0xa7, 0x21, 0xc4, 0x6d, 0xa8, - 0x64, 0x4a, 0x35, 0xb3, 0x7e, 0xf4, 0x53, 0xae, 0x9f, 0xa2, 0x2c, 0xdf, 0xff, 0x1d, 0x37, 0x3c, - 0x00, 0x23, 0x7d, 0x09, 0xba, 0x04, 0x79, 0xfe, 0x6c, 0x2c, 0x5f, 0x55, 0x9d, 0xd2, 0x09, 0x75, - 0xff, 0xd9, 0x98, 0x12, 0xa1, 0x4e, 0x22, 0x26, 0xab, 0x7d, 0x26, 0x62, 0x39, 0x21, 0x94, 0x87, - 0xf7, 0x3e, 0x85, 0x52, 0x62, 0x8c, 0x4a, 0x50, 0xe8, 0x3c, 0x7a, 0xdc, 0xde, 0xae, 0x69, 0xa8, - 0x02, 0xa5, 0xdd, 0x87, 0xfd, 0x81, 0x3c, 0xea, 0xe8, 0x0c, 0x94, 0x49, 0xe7, 0x7e, 0xe7, 0x8b, - 0xc1, 0x4e, 0xbb, 0x7f, 0xe7, 0x41, 0x6d, 0x09, 0x21, 0xa8, 0x4a, 0xc1, 0xee, 0x43, 0x25, 0xcb, - 0x6d, 0xfe, 0x5c, 0x80, 0x95, 0xb8, 0xeb, 0xd0, 0x0d, 0xc8, 0xef, 0x85, 0xec, 0x10, 0xad, 0xcf, - 0xfb, 0x02, 0x6a, 0x9c, 0x9d, 0x91, 0xaa, 0xaa, 0xd3, 0xd0, 0x47, 0x50, 0x10, 0xfb, 0x16, 0xcd, - 0xfd, 0x7c, 0x69, 0xcc, 0xff, 0x28, 0xc1, 0x1a, 0xba, 0x0b, 0xe5, 0xd4, 0x9e, 0x5e, 0x60, 0x7d, - 0x21, 0x23, 0xcd, 0xae, 0x74, 0xac, 0x5d, 0xd3, 0xd1, 0x03, 0x28, 0xa7, 0x56, 0x2c, 0x6a, 0x64, - 0xd2, 0x95, 0x59, 0xd6, 0x53, 0xae, 0x39, 0x3b, 0x19, 0x6b, 0xa8, 0x03, 0x30, 0xdd, 0xae, 0xe8, - 0x7c, 0x06, 0x9c, 0x5e, 0xc3, 0x8d, 0xc6, 0x3c, 0x55, 0x42, 0xb3, 0x05, 0xa5, 0x64, 0xb7, 0xa0, - 0xfa, 0x9c, 0x75, 0x23, 0x49, 0x16, 0x2f, 0x22, 0xac, 0xa1, 0x7b, 0x60, 0xb4, 0x1d, 0xe7, 
0x34, - 0x34, 0x8d, 0xb4, 0x86, 0xcd, 0xf2, 0x38, 0x70, 0x6e, 0xc1, 0x38, 0x47, 0x97, 0xb3, 0x63, 0x7b, - 0xd1, 0x8e, 0x6a, 0xbc, 0xfb, 0xaf, 0xb8, 0xe4, 0xb6, 0x1d, 0xa8, 0x66, 0x47, 0x13, 0x5a, 0xf4, - 0x7d, 0xd5, 0x30, 0x13, 0xc5, 0xfc, 0x59, 0xa6, 0x6d, 0xe8, 0x5b, 0x9f, 0x1c, 0x1d, 0x9b, 0xda, - 0x8b, 0x63, 0x53, 0x7b, 0x75, 0x6c, 0xea, 0xdf, 0x4f, 0x4c, 0xfd, 0x97, 0x89, 0xa9, 0x3f, 0x9f, - 0x98, 0xfa, 0xd1, 0xc4, 0xd4, 0xff, 0x9a, 0x98, 0xfa, 0xdf, 0x13, 0x53, 0x7b, 0x35, 0x31, 0xf5, - 0x1f, 0x4f, 0x4c, 0xed, 0xe8, 0xc4, 0xd4, 0x5e, 0x9c, 0x98, 0xda, 0x97, 0xc5, 0xa1, 0x63, 0x53, - 0x8f, 0xef, 0x17, 0xc5, 0xdf, 0x87, 0xeb, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb4, 0x9b, 0x35, - 0x18, 0x85, 0x0c, 0x00, 0x00, + // 1266 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4d, 0x6f, 0x1b, 0xc5, + 0x1b, 0xdf, 0x8d, 0x5f, 0x12, 0x3f, 0xde, 0xb8, 0xce, 0x24, 0x6d, 0xd3, 0xed, 0xff, 0xbf, 0x29, + 0x23, 0xb5, 0x44, 0x40, 0xdd, 0x92, 0xaa, 0xd0, 0x03, 0x55, 0xe5, 0xb4, 0x69, 0x6b, 0x94, 0xa4, + 0xe9, 0xd8, 0x05, 0x84, 0x84, 0xac, 0x8d, 0x3d, 0x49, 0x56, 0xec, 0x8b, 0xbb, 0x33, 0x8b, 0xe8, + 0x01, 0x09, 0x89, 0x0f, 0x00, 0x47, 0x3e, 0x02, 0x67, 0x2e, 0x70, 0xe6, 0xd4, 0x63, 0x8f, 0x15, + 0x87, 0x8a, 0x3a, 0x17, 0x8e, 0xfd, 0x08, 0x68, 0x67, 0x66, 0xd7, 0xbb, 0xee, 0x5a, 0x44, 0x40, + 0x6f, 0x3b, 0xcf, 0xf3, 0x9b, 0xdf, 0x3c, 0xaf, 0x33, 0xcf, 0x82, 0x31, 0x08, 0x42, 0x4e, 0xbf, + 0x6e, 0x8d, 0xc2, 0x80, 0x07, 0xa8, 0x2a, 0x57, 0xe6, 0xe5, 0x43, 0x87, 0x1f, 0x45, 0xfb, 0xad, + 0x41, 0xe0, 0x5d, 0x39, 0x0c, 0x0e, 0x83, 0x2b, 0x42, 0xbd, 0x1f, 0x1d, 0x88, 0x95, 0x58, 0x88, + 0x2f, 0xb9, 0x0d, 0xff, 0xaa, 0x83, 0xf1, 0x69, 0xe8, 0x70, 0x4a, 0xe8, 0xe3, 0x88, 0x32, 0x8e, + 0x76, 0x01, 0xb8, 0xe3, 0x51, 0x46, 0x43, 0x87, 0xb2, 0x55, 0xfd, 0x42, 0x69, 0xbd, 0xbe, 0x81, + 0x5a, 0xea, 0xa8, 0x9e, 0xe3, 0xd1, 0xae, 0xd0, 0x6c, 0x9a, 0x4f, 0x5f, 0xac, 0x69, 0xbf, 0xbf, + 0x58, 0x43, 0x7b, 0x21, 0xb5, 0x5d, 0x37, 0x18, 0xf4, 0xd2, 0x5d, 0x24, 0xc3, 0x80, 0x3e, 0x84, + 0x6a, 0x37, 0x88, 0xc2, 0x01, 0x5d, 0x9d, 0xbb, 0xa0, 0xaf, 0x37, 0x36, 0xd6, 0x12, 0xae, 0xec, + 0xa9, 0x2d, 0x09, 0xd9, 0xf2, 0x23, 0x8f, 0x54, 0x99, 0xf8, 0xc6, 0x6b, 0x00, 0x13, 0x29, 0x9a, + 0x87, 0x52, 0x7b, 0xaf, 0xd3, 0xd4, 0xd0, 0x02, 0x94, 0xc9, 0xa3, 0xed, 0xad, 0xa6, 0x8e, 0x4f, + 0xc1, 0xa2, 0xe2, 0x60, 0xa3, 0xc0, 0x67, 0x14, 0xdf, 0x84, 0x3a, 0xa1, 0xf6, 0x30, 0xf1, 0xa4, + 0x05, 0xf3, 0x8f, 0xa3, 0xac, 0x1b, 0x2b, 0xc9, 0xd1, 0x0f, 0x23, 0x1a, 0x3e, 0x51, 0x30, 0x92, + 0x80, 0xf0, 0x2d, 0x30, 0xe4, 0x76, 0x49, 0x87, 0xae, 0xc0, 0x7c, 0x48, 0x59, 0xe4, 0xf2, 0x64, + 0xff, 0xe9, 0xa9, 0xfd, 0x12, 0x47, 0x12, 0x14, 0xfe, 0x51, 0x07, 0x23, 0x4b, 0x8d, 0xde, 0x03, + 0xc4, 0xb8, 0x1d, 0xf2, 0xbe, 0x88, 0x07, 0xb7, 0xbd, 0x51, 0xdf, 0x8b, 0xc9, 0xf4, 0xf5, 0x12, + 0x69, 0x0a, 0x4d, 0x2f, 0x51, 0xec, 0x30, 0xb4, 0x0e, 0x4d, 0xea, 0x0f, 0xf3, 0xd8, 0x39, 0x81, + 0x6d, 0x50, 0x7f, 0x98, 0x45, 0x5e, 0x85, 0x05, 0xcf, 0xe6, 0x83, 0x23, 0x1a, 0xb2, 0xd5, 0x52, + 0xde, 0xb5, 0x6d, 0x7b, 0x9f, 0xba, 0x3b, 0x52, 0x49, 0x52, 0x14, 0xee, 0xc0, 0x62, 0xce, 0x68, + 0x74, 0xe3, 0x84, 0x69, 0x2e, 0xc7, 0x69, 0xce, 0x26, 0x14, 0xf7, 0x60, 0x59, 0x50, 0x75, 0x79, + 0x48, 0x6d, 0x2f, 0x25, 0xbc, 0x59, 0x40, 0x78, 0xf6, 0x75, 0xc2, 0xdb, 0x47, 0x91, 0xff, 0x65, + 0x01, 0xeb, 0x35, 0x40, 0xc2, 0xf4, 0x4f, 0x6c, 0x37, 0xa2, 0x2c, 0x09, 0xe0, 0xff, 0x01, 0xdc, + 0x58, 0xda, 0xf7, 0x6d, 0x8f, 0x8a, 0xc0, 0xd5, 0x48, 0x4d, 0x48, 0x76, 0x6d, 0x8f, 0xe2, 0x1b, + 0xb0, 0x9c, 0xdb, 0xa4, 0x4c, 0x79, 
0x0b, 0x0c, 0xb9, 0xeb, 0x2b, 0x21, 0x17, 0xc6, 0xd4, 0x48, + 0xdd, 0x9d, 0x40, 0xf1, 0x32, 0x2c, 0x6d, 0x27, 0x34, 0xc9, 0x69, 0xf8, 0xba, 0xb2, 0x41, 0x09, + 0x15, 0xdb, 0x1a, 0xd4, 0x27, 0x36, 0x24, 0x64, 0x90, 0x1a, 0xc1, 0x30, 0x82, 0xe6, 0x23, 0x46, + 0xc3, 0x2e, 0xb7, 0x79, 0x4a, 0xf5, 0x8b, 0x0e, 0x4b, 0x19, 0xa1, 0xa2, 0xba, 0x08, 0x0d, 0xc7, + 0x3f, 0xa4, 0x8c, 0x3b, 0x81, 0xdf, 0x0f, 0x6d, 0x2e, 0x5d, 0xd2, 0xc9, 0x62, 0x2a, 0x25, 0x36, + 0xa7, 0xb1, 0xd7, 0x7e, 0xe4, 0xf5, 0x55, 0x28, 0xe3, 0x12, 0x28, 0x93, 0x9a, 0x1f, 0x79, 0x32, + 0x82, 0x71, 0x55, 0xd9, 0x23, 0xa7, 0x3f, 0xc5, 0x54, 0x12, 0x4c, 0x4d, 0x7b, 0xe4, 0x74, 0x72, + 0x64, 0x2d, 0x58, 0x0e, 0x23, 0x97, 0x4e, 0xc3, 0xcb, 0x02, 0xbe, 0x14, 0xab, 0x72, 0x78, 0xfc, + 0x05, 0x2c, 0xc7, 0x86, 0x77, 0xee, 0xe4, 0x4d, 0x3f, 0x0b, 0xf3, 0x11, 0xa3, 0x61, 0xdf, 0x19, + 0xaa, 0x34, 0x54, 0xe3, 0x65, 0x67, 0x88, 0x2e, 0x43, 0x79, 0x68, 0x73, 0x5b, 0x98, 0x59, 0xdf, + 0x38, 0x97, 0x64, 0xfc, 0x35, 0xe7, 0x89, 0x80, 0xe1, 0x7b, 0x80, 0x62, 0x15, 0xcb, 0xb3, 0xbf, + 0x0f, 0x15, 0x16, 0x0b, 0x54, 0xdd, 0x9c, 0xcf, 0xb2, 0x4c, 0x59, 0x42, 0x24, 0x12, 0xff, 0xac, + 0x83, 0xb5, 0x43, 0x79, 0xe8, 0x0c, 0xd8, 0xdd, 0x20, 0xcc, 0x96, 0x3d, 0x7b, 0xd3, 0xed, 0x77, + 0x03, 0x8c, 0xa4, 0xb1, 0xfa, 0x8c, 0x72, 0xd5, 0x82, 0xa7, 0x8b, 0x5a, 0x90, 0x91, 0x7a, 0x02, + 0xed, 0x52, 0x8e, 0x3b, 0xb0, 0x36, 0xd3, 0x66, 0x15, 0x8a, 0x4b, 0x50, 0xf5, 0x04, 0x44, 0xc5, + 0xa2, 0x91, 0xd0, 0xca, 0x8d, 0x44, 0x69, 0xf1, 0x6f, 0x3a, 0x9c, 0x9a, 0x6a, 0xab, 0xd8, 0x85, + 0x83, 0x30, 0xf0, 0x54, 0xae, 0xb3, 0xd9, 0x6a, 0xc4, 0xf2, 0x8e, 0x12, 0x77, 0x86, 0xd9, 0x74, + 0xce, 0xe5, 0xd2, 0x79, 0x0b, 0xaa, 0xa2, 0xb4, 0x93, 0x8b, 0x65, 0x29, 0xe7, 0xd5, 0x9e, 0xed, + 0x84, 0x9b, 0x2b, 0xea, 0xe6, 0x37, 0x84, 0xa8, 0x3d, 0xb4, 0x47, 0x9c, 0x86, 0x44, 0x6d, 0x43, + 0xef, 0x42, 0x75, 0x10, 0x1b, 0xc3, 0x56, 0xcb, 0x82, 0x60, 0x31, 0x21, 0xc8, 0x76, 0xbe, 0x82, + 0xe0, 0xef, 0x75, 0xa8, 0x48, 0xd3, 0xdf, 0x54, 0xae, 0x4c, 0x58, 0xa0, 0xfe, 0x20, 0x18, 0x3a, + 0xfe, 0xa1, 0x68, 0x91, 0x0a, 0x49, 0xd7, 0x08, 0xa9, 0xd2, 0x8d, 0x7b, 0xc1, 0x50, 0xf5, 0xb9, + 0x0a, 0x67, 0x7a, 0xa1, 0xed, 0xb3, 0x03, 0x1a, 0x0a, 0xc3, 0xd2, 0xc4, 0xe0, 0x6f, 0x00, 0x26, + 0xf1, 0xce, 0xc4, 0x49, 0xff, 0x67, 0x71, 0x6a, 0xc1, 0x3c, 0xb3, 0xbd, 0x91, 0x2b, 0x3a, 0x3c, + 0x97, 0xe8, 0xae, 0x10, 0xab, 0x48, 0x25, 0x20, 0x7c, 0x1d, 0x6a, 0x29, 0x75, 0x6c, 0x79, 0x7a, + 0x23, 0x1a, 0x44, 0x7c, 0xa3, 0x15, 0xa8, 0x88, 0xfb, 0x4e, 0x04, 0xc2, 0x20, 0x72, 0x81, 0xdb, + 0x50, 0x95, 0x7c, 0x13, 0xbd, 0xbc, 0x73, 0xe4, 0x22, 0xbe, 0x2b, 0x0b, 0xa2, 0x58, 0xe7, 0x93, + 0x10, 0xe2, 0x36, 0x2c, 0xe6, 0x4a, 0x35, 0xf7, 0xfc, 0xe8, 0x27, 0x7c, 0x7e, 0xaa, 0xb2, 0x7c, + 0xff, 0x75, 0xdc, 0x70, 0x1f, 0x8c, 0xec, 0x21, 0xe8, 0x22, 0x94, 0xf9, 0x93, 0x91, 0xf4, 0xaa, + 0x31, 0xa1, 0x13, 0xea, 0xde, 0x93, 0x11, 0x25, 0x42, 0x9d, 0x46, 0x4c, 0x56, 0xfb, 0x54, 0xc4, + 0x4a, 0x42, 0xa8, 0x22, 0xf6, 0x9d, 0x0e, 0x8d, 0x49, 0xa2, 0xef, 0x3a, 0x2e, 0xfd, 0x2f, 0xfa, + 0xca, 0x84, 0x85, 0x03, 0xc7, 0xa5, 0xc2, 0x06, 0x79, 0x5c, 0xba, 0x2e, 0xac, 0xc3, 0x33, 0xb0, + 0x92, 0xd4, 0x61, 0xaf, 0x7b, 0x67, 0x33, 0xa9, 0xc2, 0x77, 0x3e, 0x86, 0x5a, 0xea, 0x1a, 0xaa, + 0x41, 0x65, 0xeb, 0xe1, 0xa3, 0xf6, 0x76, 0x53, 0x43, 0x8b, 0x50, 0xdb, 0x7d, 0xd0, 0xeb, 0xcb, + 0xa5, 0x8e, 0x4e, 0x41, 0x9d, 0x6c, 0xdd, 0xdb, 0xfa, 0xac, 0xbf, 0xd3, 0xee, 0xdd, 0xbe, 0xdf, + 0x9c, 0x43, 0x08, 0x1a, 0x52, 0xb0, 0xfb, 0x40, 0xc9, 0x4a, 0x1b, 0xc7, 0x15, 0x58, 0x48, 0x6c, + 0x47, 0xd7, 0xa1, 0xbc, 0x17, 0xb1, 0x23, 0xb4, 0x52, 0x34, 
0x9f, 0x99, 0xa7, 0xa7, 0xa4, 0xaa, + 0x27, 0x34, 0xf4, 0x01, 0x54, 0xc4, 0x34, 0x80, 0x0a, 0x87, 0x2b, 0xb3, 0x78, 0x64, 0xc2, 0x1a, + 0xba, 0x03, 0xf5, 0xcc, 0x14, 0x31, 0x63, 0xf7, 0xf9, 0x9c, 0x34, 0x3f, 0x70, 0x60, 0xed, 0xaa, + 0x8e, 0xee, 0x43, 0x3d, 0x33, 0x00, 0x20, 0x33, 0x57, 0x4c, 0xb9, 0x51, 0x62, 0xc2, 0x55, 0x30, + 0x31, 0x60, 0x0d, 0x6d, 0x01, 0x4c, 0xde, 0x7e, 0x74, 0x2e, 0x07, 0xce, 0x0e, 0x09, 0xa6, 0x59, + 0xa4, 0x4a, 0x69, 0x36, 0xa1, 0x96, 0xbe, 0x7c, 0x68, 0xb5, 0xe0, 0x31, 0x94, 0x24, 0xb3, 0x9f, + 0x49, 0xac, 0xa1, 0xbb, 0x60, 0xb4, 0x5d, 0xf7, 0x24, 0x34, 0x66, 0x56, 0xc3, 0xa6, 0x79, 0x5c, + 0x38, 0x3b, 0xe3, 0xb1, 0x41, 0x97, 0xf2, 0x8f, 0xca, 0xac, 0x17, 0xd4, 0x7c, 0xfb, 0x6f, 0x71, + 0xe9, 0x69, 0x3b, 0xd0, 0xc8, 0x5f, 0x9c, 0x68, 0xd6, 0xf4, 0x67, 0x5a, 0xa9, 0xa2, 0xf8, 0xa6, + 0xd5, 0xd6, 0xe3, 0xcc, 0x1a, 0xd9, 0xfa, 0x47, 0x67, 0x5e, 0x27, 0x8b, 0x5b, 0xd3, 0xfc, 0xdf, + 0x34, 0x57, 0xb6, 0x5b, 0x62, 0xa6, 0xcd, 0x8f, 0x9e, 0xbd, 0xb4, 0xb4, 0xe7, 0x2f, 0x2d, 0xed, + 0xd5, 0x4b, 0x4b, 0xff, 0x76, 0x6c, 0xe9, 0x3f, 0x8d, 0x2d, 0xfd, 0xe9, 0xd8, 0xd2, 0x9f, 0x8d, + 0x2d, 0xfd, 0x8f, 0xb1, 0xa5, 0xff, 0x39, 0xb6, 0xb4, 0x57, 0x63, 0x4b, 0xff, 0xe1, 0xd8, 0xd2, + 0x9e, 0x1d, 0x5b, 0xda, 0xf3, 0x63, 0x4b, 0xfb, 0xbc, 0x3a, 0x70, 0x1d, 0xea, 0xf3, 0xfd, 0xaa, + 0xf8, 0x4d, 0xba, 0xf6, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x9e, 0x33, 0x55, 0x6d, 0x0d, + 0x00, 0x00, } func (x MatchType) String() string { @@ -2181,6 +2289,60 @@ func (this *LabelMatcher) Equal(that interface{}) bool { } return true } +func (this *TimeSeriesFile) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TimeSeriesFile) + if !ok { + that2, ok := that.(TimeSeriesFile) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FromIngesterId != that1.FromIngesterId { + return false + } + if this.UserId != that1.UserId { + return false + } + if this.Filename != that1.Filename { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *TransferTSDBResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TransferTSDBResponse) + if !ok { + that2, ok := that.(TransferTSDBResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} func (this *WriteRequest) GoString() string { if this == nil { return "nil" @@ -2497,6 +2659,28 @@ func (this *LabelMatcher) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *TimeSeriesFile) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&client.TimeSeriesFile{") + s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n") + s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") + s = append(s, "Filename: "+fmt.Sprintf("%#v", this.Filename)+",\n") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TransferTSDBResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&client.TransferTSDBResponse{") + s = append(s, "}") + return strings.Join(s, "") +} func valueToGoStringCortex(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -2528,6 
+2712,8 @@ type IngesterClient interface { MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) + // TransferTSDB transfers all files of a tsdb to a joining ingester + TransferTSDB(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferTSDBClient, error) } type ingesterClient struct { @@ -2667,6 +2853,40 @@ func (x *ingesterTransferChunksClient) CloseAndRecv() (*TransferChunksResponse, return m, nil } +func (c *ingesterClient) TransferTSDB(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferTSDBClient, error) { + stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[2], "/cortex.Ingester/TransferTSDB", opts...) + if err != nil { + return nil, err + } + x := &ingesterTransferTSDBClient{stream} + return x, nil +} + +type Ingester_TransferTSDBClient interface { + Send(*TimeSeriesFile) error + CloseAndRecv() (*TransferTSDBResponse, error) + grpc.ClientStream +} + +type ingesterTransferTSDBClient struct { + grpc.ClientStream +} + +func (x *ingesterTransferTSDBClient) Send(m *TimeSeriesFile) error { + return x.ClientStream.SendMsg(m) +} + +func (x *ingesterTransferTSDBClient) CloseAndRecv() (*TransferTSDBResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(TransferTSDBResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // IngesterServer is the server API for Ingester service. type IngesterServer interface { Push(context.Context, *WriteRequest) (*WriteResponse, error) @@ -2679,6 +2899,8 @@ type IngesterServer interface { MetricsForLabelMatchers(context.Context, *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). 
TransferChunks(Ingester_TransferChunksServer) error + // TransferTSDB transfers all files of a tsdb to a joining ingester + TransferTSDB(Ingester_TransferTSDBServer) error } func RegisterIngesterServer(s *grpc.Server, srv IngesterServer) { @@ -2858,6 +3080,32 @@ func (x *ingesterTransferChunksServer) Recv() (*TimeSeriesChunk, error) { return m, nil } +func _Ingester_TransferTSDB_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IngesterServer).TransferTSDB(&ingesterTransferTSDBServer{stream}) +} + +type Ingester_TransferTSDBServer interface { + SendAndClose(*TransferTSDBResponse) error + Recv() (*TimeSeriesFile, error) + grpc.ServerStream +} + +type ingesterTransferTSDBServer struct { + grpc.ServerStream +} + +func (x *ingesterTransferTSDBServer) SendAndClose(m *TransferTSDBResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *ingesterTransferTSDBServer) Recv() (*TimeSeriesFile, error) { + m := new(TimeSeriesFile) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + var _Ingester_serviceDesc = grpc.ServiceDesc{ ServiceName: "cortex.Ingester", HandlerType: (*IngesterServer)(nil), @@ -2902,6 +3150,11 @@ var _Ingester_serviceDesc = grpc.ServiceDesc{ Handler: _Ingester_TransferChunks_Handler, ClientStreams: true, }, + { + StreamName: "TransferTSDB", + Handler: _Ingester_TransferTSDB_Handler, + ClientStreams: true, + }, }, Metadata: "cortex.proto", } @@ -3727,6 +3980,66 @@ func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *TimeSeriesFile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeriesFile) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.FromIngesterId) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.FromIngesterId))) + i += copy(dAtA[i:], m.FromIngesterId) + } + if len(m.UserId) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.UserId))) + i += copy(dAtA[i:], m.UserId) + } + if len(m.Filename) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.Filename))) + i += copy(dAtA[i:], m.Filename) + } + if len(m.Data) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintCortex(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func (m *TransferTSDBResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransferTSDBResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + func encodeVarintCortex(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -4157,6 +4470,40 @@ func (m *LabelMatcher) Size() (n int) { return n } +func (m *TimeSeriesFile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FromIngesterId) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.UserId) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.Filename) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovCortex(uint64(l)) + } + return n +} + +func (m *TransferTSDBResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + func sovCortex(x uint64) 
(n int) { for { n++ @@ -4446,6 +4793,28 @@ func (this *LabelMatcher) String() string { }, "") return s } +func (this *TimeSeriesFile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TimeSeriesFile{`, + `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, + `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, + `Filename:` + fmt.Sprintf("%v", this.Filename) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *TransferTSDBResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TransferTSDBResponse{`, + `}`, + }, "") + return s +} func valueToStringCortex(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -6989,6 +7358,242 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { } return nil } +func (m *TimeSeriesFile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeriesFile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeriesFile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromIngesterId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FromIngesterId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filename = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TransferTSDBResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TransferTSDBResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TransferTSDBResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipCortex(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/pkg/ingester/client/cortex.proto b/pkg/ingester/client/cortex.proto index e165991992..f1c83305d2 100644 --- a/pkg/ingester/client/cortex.proto +++ b/pkg/ingester/client/cortex.proto @@ -22,6 +22,9 @@ service Ingester { // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). 
rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {}; + + // TransferTSDB transfers all files of a tsdb to a joining ingester + rpc TransferTSDB(stream TimeSeriesFile) returns (TransferTSDBResponse) {}; } message WriteRequest { @@ -154,3 +157,12 @@ message LabelMatcher { string name = 2; string value = 3; } + +message TimeSeriesFile { + string from_ingester_id = 1; + string user_id = 2; + string filename = 3; + bytes data = 4; +} + +message TransferTSDBResponse {} diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 4f5cc0ecbe..22e90c3130 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -22,6 +22,7 @@ import ( cortex_chunk "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" @@ -116,6 +117,10 @@ type Config struct { RateUpdatePeriod time.Duration + // Use tsdb block storage + TSDBEnabled bool `yaml:"-"` + TSDBConfig tsdb.Config `yaml:"-"` + // For testing, you can override the address and ID of this ingester. ingesterClientFactory func(addr string, cfg client.Config) (client.HealthAndIngesterClient, error) } @@ -162,6 +167,9 @@ type Ingester struct { // Hook for injecting behaviour from tests. preFlushUserSeries func() + + // Prometheus block storage + TSDBState TSDBState } // ChunkStore is the interface we need to store chunks @@ -175,6 +183,10 @@ func New(cfg Config, clientConfig client.Config, limits *validation.Overrides, c cfg.ingesterClientFactory = client.MakeIngesterClient } + if cfg.TSDBEnabled { + return NewV2(cfg, clientConfig, limits, chunkStore, registerer) + } + i := &Ingester{ cfg: cfg, clientConfig: clientConfig, @@ -249,6 +261,10 @@ func (i *Ingester) StopIncomingRequests() { // Push implements client.IngesterServer func (i *Ingester) Push(ctx old_ctx.Context, req *client.WriteRequest) (*client.WriteResponse, error) { + if i.cfg.TSDBEnabled { + return i.v2Push(ctx, req) + } + userID, err := user.ExtractOrgID(ctx) if err != nil { return nil, fmt.Errorf("no user id") @@ -344,6 +360,10 @@ func (i *Ingester) append(ctx context.Context, userID string, labels labelPairs, // Query implements service.IngesterServer func (i *Ingester) Query(ctx old_ctx.Context, req *client.QueryRequest) (*client.QueryResponse, error) { + if i.cfg.TSDBEnabled { + return i.v2Query(ctx, req) + } + userID, err := user.ExtractOrgID(ctx) if err != nil { return nil, err @@ -403,6 +423,10 @@ func (i *Ingester) Query(ctx old_ctx.Context, req *client.QueryRequest) (*client // QueryStream implements service.IngesterServer func (i *Ingester) QueryStream(req *client.QueryRequest, stream client.Ingester_QueryStreamServer) error { + if i.cfg.TSDBEnabled { + return fmt.Errorf("Unimplemented for V2") + } + log, ctx := spanlogger.New(stream.Context(), "QueryStream") from, through, matchers, err := client.FromQueryRequest(req) @@ -475,6 +499,10 @@ func (i *Ingester) QueryStream(req *client.QueryRequest, stream client.Ingester_ // LabelValues returns all label values that are associated with a given label name. 
func (i *Ingester) LabelValues(ctx old_ctx.Context, req *client.LabelValuesRequest) (*client.LabelValuesResponse, error) { + if i.cfg.TSDBEnabled { + return i.v2LabelValues(ctx, req) + } + i.userStatesMtx.RLock() defer i.userStatesMtx.RUnlock() state, ok, err := i.userStates.getViaContext(ctx) @@ -492,6 +520,10 @@ func (i *Ingester) LabelValues(ctx old_ctx.Context, req *client.LabelValuesReque // LabelNames return all the label names. func (i *Ingester) LabelNames(ctx old_ctx.Context, req *client.LabelNamesRequest) (*client.LabelNamesResponse, error) { + if i.cfg.TSDBEnabled { + return i.v2LabelNames(ctx, req) + } + i.userStatesMtx.RLock() defer i.userStatesMtx.RUnlock() state, ok, err := i.userStates.getViaContext(ctx) diff --git a/pkg/ingester/ingester_v2.go b/pkg/ingester/ingester_v2.go new file mode 100644 index 0000000000..99fdf464a9 --- /dev/null +++ b/pkg/ingester/ingester_v2.go @@ -0,0 +1,308 @@ +package ingester + +import ( + "fmt" + "path/filepath" + "time" + + "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/validation" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/tsdb" + lbls "github.com/prometheus/prometheus/tsdb/labels" + "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/objstore/s3" + "github.com/thanos-io/thanos/pkg/runutil" + "github.com/thanos-io/thanos/pkg/shipper" + "github.com/weaveworks/common/user" + "golang.org/x/net/context" + old_ctx "golang.org/x/net/context" +) + +// TSDBState holds data structures used by the TSDB storage engine +type TSDBState struct { + dbs map[string]*tsdb.DB // tsdb sharded by userID + bucket objstore.Bucket +} + +// NewV2 returns a new Ingester that uses prometheus block storage instead of chunk storage +func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, chunkStore ChunkStore, registerer prometheus.Registerer) (*Ingester, error) { + var bkt *s3.Bucket + s3Cfg := s3.Config{ + Bucket: cfg.TSDBConfig.S3.BucketName, + Endpoint: cfg.TSDBConfig.S3.Endpoint, + AccessKey: cfg.TSDBConfig.S3.AccessKeyID, + SecretKey: cfg.TSDBConfig.S3.SecretAccessKey, + Insecure: cfg.TSDBConfig.S3.Insecure, + } + var err error + bkt, err = s3.NewBucketWithConfig(util.Logger, s3Cfg, "cortex") + if err != nil { + return nil, err + } + + i := &Ingester{ + cfg: cfg, + clientConfig: clientConfig, + + metrics: newIngesterMetrics(registerer), + + limits: limits, + chunkStore: chunkStore, + userStates: newUserStates(limits, cfg), + quit: make(chan struct{}), + + TSDBState: TSDBState{ + dbs: make(map[string]*tsdb.DB), + bucket: bkt, + }, + } + + i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester") + if err != nil { + return nil, err + } + + return i, nil +} + +// v2Push adds metrics to a block +func (i *Ingester) v2Push(ctx old_ctx.Context, req *client.WriteRequest) (*client.WriteResponse, error) { + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, fmt.Errorf("no user id") + } + + db, err := i.getOrCreateTSDB(userID) + if err != nil { + return nil, err + } + + // Walk the samples, appending them to the users database + app := db.Appender() + for _, ts := range req.Timeseries { + for _, s := range ts.Samples { + if i.stopped { + return nil, fmt.Errorf("ingester 
stopping") + } + lset := make(lbls.Labels, len(ts.Labels)) + for i := range ts.Labels { + lset[i] = lbls.Label{ + Name: ts.Labels[i].Name, + Value: ts.Labels[i].Value, + } + } + if _, err := app.Add(lset, s.TimestampMs, s.Value); err != nil { + if err := app.Rollback(); err != nil { + level.Warn(util.Logger).Log("failed to rollback on error", "userID", userID, "err", err) + } + return nil, err + } + } + } + if err := app.Commit(); err != nil { + return nil, err + } + + client.ReuseSlice(req.Timeseries) + + return &client.WriteResponse{}, nil +} + +func (i *Ingester) v2Query(ctx old_ctx.Context, req *client.QueryRequest) (*client.QueryResponse, error) { + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + + from, through, matchers, err := client.FromQueryRequest(req) + if err != nil { + return nil, err + } + + i.metrics.queries.Inc() + + db, err := i.getOrCreateTSDB(userID) + if err != nil { + return nil, fmt.Errorf("failed to find/create user db: %v", err) + } + + q, err := db.Querier(int64(from), int64(through)) + if err != nil { + return nil, err + } + defer q.Close() + + // two different versions of the labels package are being used, converting matchers must be done + var converted []lbls.Matcher + for _, m := range matchers { + switch m.Type { + case labels.MatchEqual: + converted = append(converted, lbls.NewEqualMatcher(m.Name, m.Value)) + case labels.MatchNotEqual: + converted = append(converted, lbls.Not(lbls.NewEqualMatcher(m.Name, m.Value))) + case labels.MatchRegexp: + rm, err := lbls.NewRegexpMatcher(m.Name, "^(?:"+m.Value+")$") + if err != nil { + return nil, err + } + converted = append(converted, rm) + case labels.MatchNotRegexp: + rm, err := lbls.NewRegexpMatcher(m.Name, "^(?:"+m.Value+")$") + if err != nil { + return nil, err + } + converted = append(converted, lbls.Not(rm)) + } + } + ss, err := q.Select(converted...) 
+ if err != nil { + return nil, err + } + + result := &client.QueryResponse{} + for ss.Next() { + series := ss.At() + + // convert labels to LabelAdapter + var adapters []client.LabelAdapter + for _, l := range series.Labels() { + adapters = append(adapters, client.LabelAdapter(l)) + } + ts := client.TimeSeries{ + Labels: adapters, + } + + it := series.Iterator() + for it.Next() { + t, v := it.At() + ts.Samples = append(ts.Samples, client.Sample{Value: v, TimestampMs: t}) + } + + result.Timeseries = append(result.Timeseries, ts) + } + + return result, ss.Err() +} + +func (i *Ingester) v2LabelValues(ctx old_ctx.Context, req *client.LabelValuesRequest) (*client.LabelValuesResponse, error) { + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + + db, err := i.getOrCreateTSDB(userID) + if err != nil { + return nil, fmt.Errorf("failed to find/create user db: %v", err) + } + + through := time.Now() + from := through.Add(-i.cfg.TSDBConfig.Retention) + q, err := db.Querier(from.Unix()*1000, through.Unix()*1000) + if err != nil { + return nil, err + } + defer q.Close() + + vals, err := q.LabelValues(req.LabelName) + if err != nil { + return nil, err + } + + return &client.LabelValuesResponse{ + LabelValues: vals, + }, nil +} + +func (i *Ingester) v2LabelNames(ctx old_ctx.Context, req *client.LabelNamesRequest) (*client.LabelNamesResponse, error) { + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + + db, err := i.getOrCreateTSDB(userID) + if err != nil { + return nil, fmt.Errorf("failed to find/create user db: %v", err) + } + + through := time.Now() + from := through.Add(-i.cfg.TSDBConfig.Retention) + q, err := db.Querier(from.Unix()*1000, through.Unix()*1000) + if err != nil { + return nil, err + } + defer q.Close() + + names, err := q.LabelNames() + if err != nil { + return nil, err + } + + return &client.LabelNamesResponse{ + LabelNames: names, + }, nil +} + +func (i *Ingester) getTSDB(userID string) *tsdb.DB { + i.userStatesMtx.RLock() + defer i.userStatesMtx.RUnlock() + db, _ := i.TSDBState.dbs[userID] + return db +} + +func (i *Ingester) getOrCreateTSDB(userID string) (*tsdb.DB, error) { + db := i.getTSDB(userID) + if db == nil { + i.userStatesMtx.Lock() + defer i.userStatesMtx.Unlock() + + // Check again for DB in the event it was created in-between locks + var ok bool + db, ok = i.TSDBState.dbs[userID] + if !ok { + + udir := i.userDir(userID) + + // Create a new user database + var err error + db, err = tsdb.Open(udir, util.Logger, nil, &tsdb.Options{ + RetentionDuration: uint64(i.cfg.TSDBConfig.Retention / time.Millisecond), + BlockRanges: []int64{int64(i.cfg.TSDBConfig.BlockRanges / time.Millisecond)}, + NoLockfile: true, + }) + if err != nil { + return nil, err + } + + // Create a new shipper for this database + l := lbls.Labels{ + { + Name: "user", + Value: userID, + }, + } + s := shipper.New(util.Logger, nil, udir, &Bucket{userID, i.TSDBState.bucket}, func() lbls.Labels { return l }, metadata.ReceiveSource) + i.done.Add(1) + go func() { + defer i.done.Done() + runutil.Repeat(i.cfg.TSDBConfig.ShipInterval, i.quit, func() error { + if uploaded, err := s.Sync(context.Background()); err != nil { + level.Warn(util.Logger).Log("err", err, "uploaded", uploaded) + } + return nil + }) + }() + + i.TSDBState.dbs[userID] = db + } + } + + return db, nil +} + +func (i *Ingester) userDir(userID string) string { return filepath.Join(i.cfg.TSDBConfig.Dir, userID) } diff --git a/pkg/ingester/lifecycle_test.go b/pkg/ingester/lifecycle_test.go 
index 6cabb4e0b6..1c1ca391d5 100644 --- a/pkg/ingester/lifecycle_test.go +++ b/pkg/ingester/lifecycle_test.go @@ -2,6 +2,7 @@ package ingester import ( "io" + "io/ioutil" "math" "testing" "time" @@ -20,6 +21,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/consul" "github.com/cortexproject/cortex/pkg/ring/testutils" + "github.com/cortexproject/cortex/pkg/storage/tsdb/backend/s3" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/test" "github.com/cortexproject/cortex/pkg/util/validation" @@ -32,6 +34,7 @@ func defaultIngesterTestConfig() Config { consul := consul.NewInMemoryClient(ring.GetCodec()) cfg := Config{} flagext.DefaultValues(&cfg) + flagext.DefaultValues(&cfg.TSDBConfig) cfg.FlushCheckPeriod = 99999 * time.Hour cfg.MaxChunkIdle = 99999 * time.Hour cfg.ConcurrentFlushes = 1 @@ -277,6 +280,76 @@ func (i ingesterClientAdapater) TransferChunks(ctx context.Context, _ ...grpc.Ca return stream, nil } +type ingesterTransferTSDBStreamMock struct { + ctx context.Context + reqs chan *client.TimeSeriesFile + resp chan *client.TransferTSDBResponse + err chan error + + grpc.ServerStream + grpc.ClientStream +} + +func (s *ingesterTransferTSDBStreamMock) Send(tsc *client.TimeSeriesFile) error { + s.reqs <- tsc + return nil +} + +func (s *ingesterTransferTSDBStreamMock) CloseAndRecv() (*client.TransferTSDBResponse, error) { + close(s.reqs) + select { + case resp := <-s.resp: + return resp, nil + case err := <-s.err: + return nil, err + } +} + +func (s *ingesterTransferTSDBStreamMock) SendAndClose(resp *client.TransferTSDBResponse) error { + s.resp <- resp + return nil +} + +func (s *ingesterTransferTSDBStreamMock) ErrorAndClose(err error) { + s.err <- err +} + +func (s *ingesterTransferTSDBStreamMock) Recv() (*client.TimeSeriesFile, error) { + req, ok := <-s.reqs + if !ok { + return nil, io.EOF + } + return req, nil +} + +func (s *ingesterTransferTSDBStreamMock) Context() context.Context { + return s.ctx +} + +func (*ingesterTransferTSDBStreamMock) SendMsg(m interface{}) error { + return nil +} + +func (*ingesterTransferTSDBStreamMock) RecvMsg(m interface{}) error { + return nil +} + +func (i ingesterClientAdapater) TransferTSDB(ctx context.Context, _ ...grpc.CallOption) (client.Ingester_TransferTSDBClient, error) { + stream := &ingesterTransferTSDBStreamMock{ + ctx: ctx, + reqs: make(chan *client.TimeSeriesFile), + resp: make(chan *client.TransferTSDBResponse), + err: make(chan error), + } + go func() { + err := i.ingester.TransferTSDB(stream) + if err != nil { + stream.ErrorAndClose(err) + } + }() + return stream, nil +} + func (i ingesterClientAdapater) Close() error { return nil } @@ -341,3 +414,111 @@ func TestIngesterFlush(t *testing.T) { }, }, res) } + +func TestV2IngesterTransfer(t *testing.T) { + limits, err := validation.NewOverrides(defaultLimitsTestConfig()) + require.NoError(t, err) + + dir1, err := ioutil.TempDir("", "tsdb") + require.NoError(t, err) + dir2, err := ioutil.TempDir("", "tsdb") + require.NoError(t, err) + + // Start the first ingester, and get it into ACTIVE state. 
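+ // Lifecycler states, roughly: an ingester starts PENDING, moves to
+ // JOINING while it claims tokens or receives a transfer, and only serves
+ // traffic once ACTIVE. The Poll below waits for ing1 to reach that state.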
+ cfg1 := defaultIngesterTestConfig() + cfg1.TSDBEnabled = true + cfg1.TSDBConfig.Dir = dir1 + cfg1.TSDBConfig.S3 = s3.Config{ + Endpoint: "dummy", + BucketName: "dummy", + SecretAccessKey: "dummy", + AccessKeyID: "dummy", + } + cfg1.LifecyclerConfig.ID = "ingester1" + cfg1.LifecyclerConfig.Addr = "ingester1" + cfg1.LifecyclerConfig.JoinAfter = 0 * time.Second + cfg1.MaxTransferRetries = 10 + ing1, err := New(cfg1, defaultClientTestConfig(), limits, nil, nil) + require.NoError(t, err) + + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + return ing1.lifecycler.GetState() + }) + + // Now write a sample to this ingester + const ts = 123000 + const val = 456 + var ( + l = labels.Labels{{Name: labels.MetricName, Value: "foo"}} + sampleData = []client.Sample{ + { + TimestampMs: ts, + Value: val, + }, + } + expectedResponse = &client.QueryResponse{ + Timeseries: []client.TimeSeries{ + { + Labels: client.FromLabelsToLabelAdapters(l), + Samples: []client.Sample{ + { + Value: val, + TimestampMs: ts, + }, + }, + }, + }, + } + ) + ctx := user.InjectOrgID(context.Background(), userID) + _, err = ing1.Push(ctx, client.ToWriteRequest([]labels.Labels{l}, sampleData, client.API)) + require.NoError(t, err) + + // Start a second ingester, but let it go into PENDING + cfg2 := defaultIngesterTestConfig() + cfg2.TSDBEnabled = true + cfg2.TSDBConfig.Dir = dir2 + cfg2.TSDBConfig.S3 = s3.Config{ + Endpoint: "dummy", + BucketName: "dummy", + SecretAccessKey: "dummy", + AccessKeyID: "dummy", + } + cfg2.LifecyclerConfig.RingConfig.KVStore.Mock = cfg1.LifecyclerConfig.RingConfig.KVStore.Mock + cfg2.LifecyclerConfig.ID = "ingester2" + cfg2.LifecyclerConfig.Addr = "ingester2" + cfg2.LifecyclerConfig.JoinAfter = 100 * time.Second + ing2, err := New(cfg2, defaultClientTestConfig(), limits, nil, nil) + require.NoError(t, err) + + // Let ing2 send blocks/wal to ing1 + ing1.cfg.ingesterClientFactory = func(addr string, _ client.Config) (client.HealthAndIngesterClient, error) { + return ingesterClientAdapater{ + ingester: ing2, + }, nil + } + + // Now stop the first ingester, and wait for the second ingester to become ACTIVE. 
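+ // Expected flow from here, as a sketch: ing1.Shutdown() triggers
+ // TransferOut -> v2TransferOut, which closes every per-user TSDB, finds
+ // the PENDING ingester (ing2) in the ring, and streams each unshipped
+ // block plus the WAL over the new TransferTSDB RPC in 1MiB batches.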
+ ing1.Shutdown() + test.Poll(t, 10*time.Second, ring.ACTIVE, func() interface{} { + return ing2.lifecycler.GetState() + }) + + // And check the second ingester has the sample + matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "foo") + require.NoError(t, err) + + request, err := client.ToQueryRequest(model.TimeFromUnix(0), model.TimeFromUnix(200), []*labels.Matcher{matcher}) + require.NoError(t, err) + + response, err := ing2.Query(ctx, request) + require.NoError(t, err) + assert.Equal(t, expectedResponse, response) + + // Check we can send the same sample again to the new ingester and get the same result + _, err = ing2.Push(ctx, client.ToWriteRequest([]labels.Labels{l}, sampleData, client.API)) + require.NoError(t, err) + response, err = ing2.Query(ctx, request) + require.NoError(t, err) + assert.Equal(t, expectedResponse, response) +} diff --git a/pkg/ingester/transfer.go b/pkg/ingester/transfer.go index f57e2e7b90..478749742d 100644 --- a/pkg/ingester/transfer.go +++ b/pkg/ingester/transfer.go @@ -5,13 +5,18 @@ import ( "context" "fmt" "io" + "io/ioutil" "os" + "path/filepath" + "sync" "time" "github.com/go-kit/kit/log/level" + "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/thanos-io/thanos/pkg/shipper" "github.com/cortexproject/cortex/pkg/chunk/encoding" "github.com/cortexproject/cortex/pkg/ingester/client" @@ -29,18 +34,112 @@ var ( Name: "cortex_ingester_received_chunks", Help: "The total number of chunks received by this ingester whilst joining", }) + sentFiles = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_sent_files", + Help: "The total number of files sent by this ingester whilst leaving.", + }) + receivedFiles = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_received_files", + Help: "The total number of files received by this ingester whilst joining", + }) + + once *sync.Once ) func init() { + once = &sync.Once{} prometheus.MustRegister(sentChunks) prometheus.MustRegister(receivedChunks) + prometheus.MustRegister(sentFiles) + prometheus.MustRegister(receivedFiles) } // TransferChunks receives all the chunks from another ingester. func (i *Ingester) TransferChunks(stream client.Ingester_TransferChunksServer) error { + fromIngesterID := "" + seriesReceived := 0 + xfer := func() error { + userStates := newUserStates(i.limits, i.cfg) + + for { + wireSeries, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return errors.Wrap(err, "TransferChunks: Recv") + } + + // We can't send "extra" fields with a streaming call, so we repeat + // wireSeries.FromIngesterId and assume it is the same every time + // round this loop. 
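+ // The same convention applies to the new TransferTSDB stream below: every
+ // TimeSeriesFile repeats FromIngesterId, only the first value seen is
+ // recorded, and later messages are assumed (not verified) to agree.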
+ if fromIngesterID == "" { + fromIngesterID = wireSeries.FromIngesterId + level.Info(util.Logger).Log("msg", "processing TransferChunks request", "from_ingester", fromIngesterID) + } + descs, err := fromWireChunks(wireSeries.Chunks) + if err != nil { + return errors.Wrap(err, "TransferChunks: fromWireChunks") + } + + state, fp, series, err := userStates.getOrCreateSeries(stream.Context(), wireSeries.UserId, wireSeries.Labels) + if err != nil { + return errors.Wrapf(err, "TransferChunks: getOrCreateSeries: user %s series %s", wireSeries.UserId, wireSeries.Labels) + } + prevNumChunks := len(series.chunkDescs) + + err = series.setChunks(descs) + state.fpLocker.Unlock(fp) // acquired in getOrCreateSeries + if err != nil { + return errors.Wrapf(err, "TransferChunks: setChunks: user %s series %s", wireSeries.UserId, wireSeries.Labels) + } + + seriesReceived++ + memoryChunks.Add(float64(len(series.chunkDescs) - prevNumChunks)) + receivedChunks.Add(float64(len(descs))) + } + + if seriesReceived == 0 { + level.Error(util.Logger).Log("msg", "received TransferChunks request with no series", "from_ingester", fromIngesterID) + return fmt.Errorf("TransferChunks: no series") + } + + if fromIngesterID == "" { + level.Error(util.Logger).Log("msg", "received TransferChunks request with no ID from ingester") + return fmt.Errorf("no ingester id") + } + + if err := i.lifecycler.ClaimTokensFor(stream.Context(), fromIngesterID); err != nil { + return errors.Wrap(err, "TransferChunks: ClaimTokensFor") + } + + i.userStatesMtx.Lock() + defer i.userStatesMtx.Unlock() + + i.userStates = userStates + + return nil + } + + if err := i.transfer(stream.Context(), xfer); err != nil { + return err + } + + // Close the stream last, as this is what tells the "from" ingester that + // it's OK to shut down. 
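+ // Handshake ordering, roughly: the leaving ingester blocks in
+ // CloseAndRecv() until this SendAndClose fires, so tokens are claimed and
+ // the new userStates swapped in before the sender is released to exit.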
+ if err := stream.SendAndClose(&client.TransferChunksResponse{}); err != nil { + level.Error(util.Logger).Log("msg", "Error closing TransferChunks stream", "from_ingester", fromIngesterID, "err", err) + return err + } + level.Info(util.Logger).Log("msg", "Successfully transferred chunks", "from_ingester", fromIngesterID, "series_received", seriesReceived) + + return nil +} + +func (i *Ingester) transfer(ctx context.Context, xfer func() error) error { // Enter JOINING state (only valid from PENDING) - if err := i.lifecycler.ChangeState(stream.Context(), ring.JOINING); err != nil { - return errors.Wrap(err, "TransferChunks: ChangeState") + if err := i.lifecycler.ChangeState(ctx, ring.JOINING); err != nil { + return err } // The ingesters state effectively works as a giant mutex around this whole @@ -55,84 +154,108 @@ func (i *Ingester) TransferChunks(stream client.Ingester_TransferChunksServer) e // Enter PENDING state (only valid from JOINING) if i.lifecycler.GetState() == ring.JOINING { - if err := i.lifecycler.ChangeState(stream.Context(), ring.PENDING); err != nil { + if err := i.lifecycler.ChangeState(ctx, ring.PENDING); err != nil { level.Error(util.Logger).Log("msg", "error rolling back failed TransferChunks", "err", err) os.Exit(1) } } }() - userStates := newUserStates(i.limits, i.cfg) + if err := xfer(); err != nil { + return err + } + + if err := i.lifecycler.ChangeState(ctx, ring.ACTIVE); err != nil { + return errors.Wrap(err, "Transfer: ChangeState") + } + + return nil +} + +// TransferTSDB receives all the file chunks from another ingester, and writes them to tsdb directories +func (i *Ingester) TransferTSDB(stream client.Ingester_TransferTSDBServer) error { fromIngesterID := "" - seriesReceived := 0 - for { - wireSeries, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return errors.Wrap(err, "TransferChunks: Recv") - } + xfer := func() error { + filesXfer := 0 - // We can't send "extra" fields with a streaming call, so we repeat - // wireSeries.FromIngesterId and assume it is the same every time - // round this loop. 
- if fromIngesterID == "" { - fromIngesterID = wireSeries.FromIngesterId - level.Info(util.Logger).Log("msg", "processing TransferChunks request", "from_ingester", fromIngesterID) - } - descs, err := fromWireChunks(wireSeries.Chunks) - if err != nil { - return errors.Wrap(err, "TransferChunks: fromWireChunks") - } + files := make(map[string]*os.File) + defer func() { + for _, f := range files { + if err := f.Close(); err != nil { + level.Warn(util.Logger).Log("msg", "failed to close xfer file", "err", err) + } + } + }() + for { + f, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return errors.Wrap(err, "TransferTSDB: Recv") + } + if fromIngesterID == "" { + fromIngesterID = f.FromIngesterId + level.Info(util.Logger).Log("msg", "processing TransferTSDB request", "from_ingester", fromIngesterID) + } + filesXfer++ + + // TODO(thor) To avoid corruption from errors, it's probably best to write to a temp dir, and then move that to the final location + createfile := func(f *client.TimeSeriesFile) (*os.File, error) { + dir := filepath.Join(i.cfg.TSDBConfig.Dir, filepath.Dir(f.Filename)) + if err := os.MkdirAll(dir, 0777); err != nil { + return nil, errors.Wrap(err, "TransferTSDB: MkdirAll") + } + file, err := os.Create(filepath.Join(i.cfg.TSDBConfig.Dir, f.Filename)) + if err != nil { + return nil, errors.Wrap(err, "TransferTSDB: Create") + } + + _, err = file.Write(f.Data) + return file, errors.Wrap(err, "TransferTSDB: Write") + } - state, fp, series, err := userStates.getOrCreateSeries(stream.Context(), wireSeries.UserId, wireSeries.Labels) - if err != nil { - return errors.Wrapf(err, "TransferChunks: getOrCreateSeries: user %s series %s", wireSeries.UserId, wireSeries.Labels) + // Create or get existing open file + file, ok := files[f.Filename] + if !ok { + file, err = createfile(f) + if err != nil { + return err + } + + files[f.Filename] = file + } else { + + // Write to existing file + if _, err := file.Write(f.Data); err != nil { + return errors.Wrap(err, "TransferTSDB: Write") + } + } } - prevNumChunks := len(series.chunkDescs) - err = series.setChunks(descs) - state.fpLocker.Unlock(fp) // acquired in getOrCreateSeries - if err != nil { - return errors.Wrapf(err, "TransferChunks: setChunks: user %s series %s", wireSeries.UserId, wireSeries.Labels) + if err := i.lifecycler.ClaimTokensFor(stream.Context(), fromIngesterID); err != nil { + return errors.Wrap(err, "TransferTSDB: ClaimTokensFor") } - seriesReceived++ - memoryChunks.Add(float64(len(series.chunkDescs) - prevNumChunks)) - receivedChunks.Add(float64(len(descs))) - } - - if seriesReceived == 0 { - level.Error(util.Logger).Log("msg", "received TransferChunks request with no series", "from_ingester", fromIngesterID) - return fmt.Errorf("TransferChunks: no series") - } - - if fromIngesterID == "" { - level.Error(util.Logger).Log("msg", "received TransferChunks request with no ID from ingester") - return fmt.Errorf("no ingester id") - } + receivedFiles.Add(float64(filesXfer)) + level.Info(util.Logger).Log("msg", "total files transferred", "from_ingester", fromIngesterID, "num", filesXfer) - if err := i.lifecycler.ClaimTokensFor(stream.Context(), fromIngesterID); err != nil { - return errors.Wrap(err, "TransferChunks: ClaimTokensFor") + return nil } - i.userStatesMtx.Lock() - defer i.userStatesMtx.Unlock() - - if err := i.lifecycler.ChangeState(stream.Context(), ring.ACTIVE); err != nil { - return errors.Wrap(err, "TransferChunks: ChangeState") + if err := i.transfer(stream.Context(), xfer); err != nil { + return err }
- i.userStates = userStates // Close the stream last, as this is what tells the "from" ingester that // it's OK to shut down. - if err := stream.SendAndClose(&client.TransferChunksResponse{}); err != nil { - level.Error(util.Logger).Log("msg", "Error closing TransferChunks stream", "from_ingester", fromIngesterID, "err", err) + if err := stream.SendAndClose(&client.TransferTSDBResponse{}); err != nil { + level.Error(util.Logger).Log("msg", "Error closing TransferTSDB stream", "from_ingester", fromIngesterID, "err", err) return err } - level.Info(util.Logger).Log("msg", "Successfully transferred chunks", "from_ingester", fromIngesterID, "series_received", seriesReceived) + level.Info(util.Logger).Log("msg", "Successfully transferred tsdbs", "from_ingester", fromIngesterID) + return nil } @@ -206,6 +329,10 @@ func (i *Ingester) TransferOut(ctx context.Context) error { } func (i *Ingester) transferOut(ctx context.Context) error { + if i.cfg.TSDBEnabled { + return i.v2TransferOut(ctx) + } + userStatesCopy := i.userStates.cp() if len(userStatesCopy) == 0 { level.Info(util.Logger).Log("msg", "nothing to transfer") @@ -275,6 +402,67 @@ func (i *Ingester) transferOut(ctx context.Context) error { return nil } +func (i *Ingester) v2TransferOut(ctx context.Context) error { + if len(i.TSDBState.dbs) == 0 { + level.Info(util.Logger).Log("msg", "nothing to transfer") + return nil + } + + // Close all user databases + wg := &sync.WaitGroup{} + // Only perform a shutdown once + once.Do(func() { + wg.Add(len(i.TSDBState.dbs)) + for _, db := range i.TSDBState.dbs { + go func(closer io.Closer) { + defer wg.Done() + if err := closer.Close(); err != nil { + level.Warn(util.Logger).Log("msg", "failed to close db", "err", err) + } + }(db) + } + }) + + targetIngester, err := i.findTargetIngester(ctx) + if err != nil { + return fmt.Errorf("cannot find ingester to transfer blocks to: %v", err) + } + + level.Info(util.Logger).Log("msg", "sending blocks", "to_ingester", targetIngester.Addr) + c, err := i.cfg.ingesterClientFactory(targetIngester.Addr, i.clientConfig) + if err != nil { + return err + } + defer c.Close() + + ctx = user.InjectOrgID(ctx, "-1") + stream, err := c.TransferTSDB(ctx) + if err != nil { + return errors.Wrap(err, "TransferTSDB") + } + + wg.Wait() // wait for all databases to have closed + + // Grab a list of all blocks that need to be shipped + blocks, err := unshippedBlocks(i.cfg.TSDBConfig.Dir) + if err != nil { + return err + } + + for user, blockIDs := range blocks { + // Transfer the users TSDB + // TODO(thor) transferring users can be done concurrently + transferUser(ctx, stream, i.cfg.TSDBConfig.Dir, i.lifecycler.ID, user, blockIDs) + } + + _, err = stream.CloseAndRecv() + if err != nil { + return errors.Wrap(err, "CloseAndRecv") + } + + return nil +} + // findTargetIngester finds an ingester in PENDING state. 
func (i *Ingester) findTargetIngester(ctx context.Context) (*ring.IngesterDesc, error) { ringDesc, err := i.lifecycler.KVStore.Get(ctx, ring.ConsulKey) @@ -289,3 +477,146 @@ func (i *Ingester) findTargetIngester(ctx context.Context) (*ring.IngesterDesc, return &ingesters[0], nil } + +// unshippedBlocks returns a ulid list of blocks that haven't been shipped +func unshippedBlocks(dir string) (map[string][]string, error) { + userIDs, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + + blocks := make(map[string][]string, len(userIDs)) + for _, user := range userIDs { + userID := user.Name() + blocks[userID] = []string{} // seed the map with the userID to ensure we xfer the WAL, even if all blocks are shipped + + blockIDs, err := ioutil.ReadDir(filepath.Join(dir, userID)) + if err != nil { + return nil, err + } + + m, err := shipper.ReadMetaFile(filepath.Join(dir, userID)) + if err != nil { + return nil, err + } + + shipped := make(map[string]bool) + for _, u := range m.Uploaded { + shipped[u.String()] = true + } + + for _, blockID := range blockIDs { + _, err := ulid.Parse(blockID.Name()) + if err != nil { + continue + } + + if _, ok := shipped[blockID.Name()]; !ok { + blocks[userID] = append(blocks[userID], blockID.Name()) + } + } + } + + return blocks, nil +} + +func transferUser(ctx context.Context, stream client.Ingester_TransferTSDBClient, dir, ingesterID, userID string, blocks []string) { + level.Info(util.Logger).Log("msg", "xfer user", "user", userID) + // Transfer all blocks + for _, blk := range blocks { + err := filepath.Walk(filepath.Join(dir, userID, blk), func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil + } + + if info.IsDir() { + return nil + } + + b, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + p, err := filepath.Rel(dir, path) + if err != nil { + return err + } + + if err := batchSend(1024*1024, b, stream, &client.TimeSeriesFile{ + FromIngesterId: ingesterID, + UserId: userID, + Filename: p, + }); err != nil { + return err + } + + sentFiles.Add(1) + return nil + }) + if err != nil { + level.Warn(util.Logger).Log("msg", "failed to transfer all user blocks", "err", err) + } + } + + // Transfer WAL + err := filepath.Walk(filepath.Join(dir, userID, "wal"), func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil + } + + if info.IsDir() { + return nil + } + + b, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + p, err := filepath.Rel(dir, path) + if err != nil { + return err + } + + if err := batchSend(1024*1024, b, stream, &client.TimeSeriesFile{ + FromIngesterId: ingesterID, + UserId: userID, + Filename: p, + }); err != nil { + return err + } + + sentFiles.Add(1) + return nil + }) + + if err != nil { + level.Warn(util.Logger).Log("msg", "failed to transfer user wal", "err", err) + } + + level.Info(util.Logger).Log("msg", "xfer user complete", "user", userID) +} + +func batchSend(batch int, b []byte, stream client.Ingester_TransferTSDBClient, tsfile *client.TimeSeriesFile) error { + // Split file into smaller blocks for xfer + i := 0 + for ; i+batch < len(b); i += batch { + tsfile.Data = b[i : i+batch] + err := stream.Send(tsfile) + if err != nil { + return err + } + } + + // Send final data + if i < len(b) { + tsfile.Data = b[i:] + err := stream.Send(tsfile) + if err != nil { + return err + } + } + + return nil +} diff --git a/pkg/ingester/transfer_test.go b/pkg/ingester/transfer_test.go new file mode 100644 index 0000000000..35929aabd0 --- /dev/null +++ 
b/pkg/ingester/transfer_test.go @@ -0,0 +1,199 @@ +package ingester + +import ( + "crypto/rand" + "fmt" + "io/ioutil" + rnd "math/rand" + "os" + "path/filepath" + "testing" + "time" + + "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/oklog/ulid" + "github.com/stretchr/testify/require" + "github.com/thanos-io/thanos/pkg/shipper" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type userTSDB struct { + userID string + shipPercent int + numBlocks int + meta *shipper.Meta + unshipped []string +} + +func createTSDB(t *testing.T, dir string, users []*userTSDB) { + for _, user := range users { + + os.MkdirAll(filepath.Join(dir, user.userID), 0777) + + for i := 0; i < user.numBlocks; i++ { + u, err := ulid.New(uint64(time.Now().Unix()*1000), rand.Reader) + require.NoError(t, err) + + userdir := filepath.Join(dir, user.userID) + blockDir := filepath.Join(userdir, u.String()) + require.NoError(t, os.MkdirAll(filepath.Join(blockDir, "chunks"), 0777)) + + createAndWrite := func(t *testing.T, path string) { + f, err := os.Create(path) + require.NoError(t, err) + defer f.Close() + _, err = f.Write([]byte("a man a plan a canal panama")) + require.NoError(t, err) + } + + for i := 0; i < 2; i++ { + createAndWrite(t, filepath.Join(blockDir, "chunks", fmt.Sprintf("00000%v", i))) + } + + meta := []string{"index", "meta.json", "tombstones"} + for _, name := range meta { + createAndWrite(t, filepath.Join(blockDir, name)) + } + + require.NoError(t, os.MkdirAll(filepath.Join(userdir, "wal", "checkpoint.000419"), 0777)) + createAndWrite(t, filepath.Join(userdir, "wal", "000001")) + createAndWrite(t, filepath.Join(userdir, "wal", "checkpoint.000419", "000000")) + + // Record if this block is to be "shipped" + if rnd.Intn(100) < user.shipPercent { + user.meta.Uploaded = append(user.meta.Uploaded, u) + } else { + user.unshipped = append(user.unshipped, u.String()) + } + } + + require.NoError(t, shipper.WriteMetaFile(nil, filepath.Join(dir, user.userID), user.meta)) + } +} + +func TestUnshippedBlocks(t *testing.T) { + dir, err := ioutil.TempDir("", "tsdb") + require.NoError(t, err) + + // Validate empty dir + blks, err := unshippedBlocks(dir) + require.NoError(t, err) + require.Empty(t, blks) + + /* + Create three user dirs + One of them has some blocks shipped, + One of them has all blocks shipped, + One of them has no blocks shipped, + */ + users := []*userTSDB{ + { + userID: "0", + shipPercent: 70, + numBlocks: 10, + meta: &shipper.Meta{ + Version: shipper.MetaVersion1, + }, + unshipped: []string{}, + }, + { + userID: "1", + shipPercent: 100, + numBlocks: 10, + meta: &shipper.Meta{ + Version: shipper.MetaVersion1, + }, + unshipped: []string{}, + }, + { + userID: "2", + shipPercent: 0, + numBlocks: 10, + meta: &shipper.Meta{ + Version: shipper.MetaVersion1, + }, + unshipped: []string{}, + }, + } + + createTSDB(t, dir, users) + + blks, err = unshippedBlocks(dir) + require.NoError(t, err) + for _, u := range users { + _, ok := blks[u.userID] + require.True(t, ok) + } + + // Validate the unshipped blocks against the returned list + for _, user := range users { + require.ElementsMatch(t, user.unshipped, blks[user.userID]) + } +} + +type MockTransferTSDBClient struct { + Dir string + + grpc.ClientStream +} + +func (m *MockTransferTSDBClient) Send(f *client.TimeSeriesFile) error { + dir, _ := filepath.Split(f.Filename) + if err := os.MkdirAll(filepath.Join(m.Dir, dir), 0777); err != nil { + return err + } + if _, err := os.Create(filepath.Join(m.Dir, f.Filename)); err != nil { + return err 
+ } + return nil +} + +func (m *MockTransferTSDBClient) CloseAndRecv() (*client.TransferTSDBResponse, error) { + return &client.TransferTSDBResponse{}, nil +} + +func TestTransferUser(t *testing.T) { + dir, err := ioutil.TempDir("", "tsdb") + require.NoError(t, err) + + createTSDB(t, dir, []*userTSDB{ + { + userID: "0", + shipPercent: 0, + numBlocks: 3, + meta: &shipper.Meta{ + Version: shipper.MetaVersion1, + }, + }, + }) + + blks, err := unshippedBlocks(dir) + require.NoError(t, err) + + xfer, err := ioutil.TempDir("", "xfer") + require.NoError(t, err) + m := &MockTransferTSDBClient{ + Dir: xfer, + } + transferUser(context.Background(), m, dir, "test", "0", blks["0"]) + + var original []string + var xferfiles []string + filepath.Walk(xfer, func(path string, info os.FileInfo, err error) error { + p, _ := filepath.Rel(xfer, path) + xferfiles = append(xferfiles, p) + return nil + }) + + filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if info.Name() == "thanos.shipper.json" { + return nil + } + p, _ := filepath.Rel(dir, path) + original = append(original, p) + return nil + }) + + require.Equal(t, original, xferfiles) +} diff --git a/pkg/querier/block.go b/pkg/querier/block.go new file mode 100644 index 0000000000..202ffae2b1 --- /dev/null +++ b/pkg/querier/block.go @@ -0,0 +1,160 @@ +package querier + +import ( + "context" + "io" + "time" + + "github.com/cortexproject/cortex/pkg/chunk" + "github.com/cortexproject/cortex/pkg/chunk/encoding" + "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/util" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/thanos-io/thanos/pkg/objstore/s3" + "github.com/thanos-io/thanos/pkg/runutil" + "github.com/thanos-io/thanos/pkg/store/storepb" + "google.golang.org/grpc/metadata" +) + +// BlockQuerier is a querier of thanos blocks +type BlockQuerier struct { + syncTimes prometheus.Histogram + us *UserStore +} + +// NewBlockQuerier returns a client to query a s3 block store +func NewBlockQuerier(s3cfg s3.Config, baseDir string, r prometheus.Registerer) (*BlockQuerier, error) { + + b := &BlockQuerier{ + syncTimes: prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_querier_sync_seconds", + Help: "The total time it takes to perform a sync stores", + Buckets: prometheus.DefBuckets, + }), + } + + r.MustRegister(b.syncTimes) + + us, err := NewUserStore(util.Logger, s3cfg, baseDir) + if err != nil { + return nil, err + } + b.us = us + + if err := us.InitialSync(context.Background()); err != nil { + level.Warn(util.Logger).Log("msg", "InitialSync failed", "err", err) + } + + stopc := make(chan struct{}) + go runutil.Repeat(30*time.Second, stopc, func() error { + ts := time.Now() + if err := us.SyncStores(context.Background()); err != nil && err != io.EOF { + level.Warn(util.Logger).Log("msg", "sync stores failed", "err", err) + } + b.syncTimes.Observe(time.Since(ts).Seconds()) + return nil + }) + + return b, nil +} + +// Get implements the ChunkStore interface. 
It makes a block query and converts the response into chunks.
+func (b *BlockQuerier) Get(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]chunk.Chunk, error) {
+	client := b.us.client
+
+	// Convert matchers to LabelMatcher
+	var converted []storepb.LabelMatcher
+	for _, m := range matchers {
+		var t storepb.LabelMatcher_Type
+		switch m.Type {
+		case labels.MatchEqual:
+			t = storepb.LabelMatcher_EQ
+		case labels.MatchNotEqual:
+			t = storepb.LabelMatcher_NEQ
+		case labels.MatchRegexp:
+			t = storepb.LabelMatcher_RE
+		case labels.MatchNotRegexp:
+			t = storepb.LabelMatcher_NRE
+		}
+
+		converted = append(converted, storepb.LabelMatcher{
+			Type:  t,
+			Name:  m.Name,
+			Value: m.Value,
+		})
+	}
+
+	// Propagate the tenant ID to the store via gRPC metadata.
+	ctx = metadata.AppendToOutgoingContext(ctx, "user", userID)
+	seriesClient, err := client.Series(ctx, &storepb.SeriesRequest{
+		MinTime:  int64(from),
+		MaxTime:  int64(through),
+		Matchers: converted,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	var chunks []chunk.Chunk
+	for {
+		resp, err := seriesClient.Recv()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return nil, err
+		}
+
+		chunks = append(chunks, seriesToChunks(userID, resp.GetSeries())...)
+	}
+
+	return chunks, nil
+}
+
+func seriesToChunks(userID string, series *storepb.Series) []chunk.Chunk {
+	var lbls labels.Labels
+	for i := range series.Labels {
+		lbls = append(lbls, labels.Label{
+			Name:  series.Labels[i].Name,
+			Value: series.Labels[i].Value,
+		})
+	}
+
+	var chunks []chunk.Chunk
+	for _, c := range series.Chunks {
+		// Re-encode each TSDB XOR chunk into a Cortex chunk, starting a new
+		// Cortex chunk whenever the current one overflows.
+		ch := encoding.New()
+
+		enc, err := chunkenc.FromData(chunkenc.EncXOR, c.Raw.Data)
+		if err != nil {
+			level.Warn(util.Logger).Log("msg", "failed to convert raw encoding to chunk", "err", err)
+			continue
+		}
+
+		it := enc.Iterator(nil)
+		for it.Next() {
+			ts, v := it.At()
+			overflow, err := ch.Add(model.SamplePair{
+				Timestamp: model.Time(ts),
+				Value:     model.SampleValue(v),
+			})
+			if err != nil {
+				level.Warn(util.Logger).Log("msg", "failed adding sample to chunk", "err", err)
+				continue
+			}
+
+			if overflow != nil {
+				chunks = append(chunks, chunk.NewChunk(userID, client.Fingerprint(lbls), lbls, ch, model.Time(c.MinTime), model.Time(c.MaxTime)))
+				ch = overflow
+			}
+		}
+		if err := it.Err(); err != nil {
+			level.Warn(util.Logger).Log("msg", "failed iterating over chunk samples", "err", err)
+			continue
+		}
+
+		if ch.Len() > 0 {
+			chunks = append(chunks, chunk.NewChunk(userID, client.Fingerprint(lbls), lbls, ch, model.Time(c.MinTime), model.Time(c.MaxTime)))
+		}
+	}
+	return chunks
+}
diff --git a/pkg/querier/block_store.go b/pkg/querier/block_store.go
new file mode 100644
index 0000000000..408bf19700
--- /dev/null
+++ b/pkg/querier/block_store.go
@@ -0,0 +1,240 @@
+package querier
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"github.com/alecthomas/units"
+	"github.com/cortexproject/cortex/pkg/ingester"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/thanos-io/thanos/pkg/model"
+	"github.com/thanos-io/thanos/pkg/objstore"
+	"github.com/thanos-io/thanos/pkg/objstore/s3"
+	"github.com/thanos-io/thanos/pkg/store"
+	storecache "github.com/thanos-io/thanos/pkg/store/cache"
+	"github.com/thanos-io/thanos/pkg/store/storepb"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+)
+
+// UserStore is a multi-tenant version of Thanos BucketStore
+type UserStore struct {
+	logger  log.Logger
+	cfg     s3.Config
+	bucket  objstore.BucketReader
+	stores  map[string]*store.BucketStore
+	client  storepb.StoreClient
+	baseDir string
+}
+
+// NewUserStore returns a new UserStore
+func NewUserStore(logger log.Logger, s3cfg s3.Config, baseDir string) (*UserStore, error) {
+	bkt, err := s3.NewBucketWithConfig(logger, s3cfg, "cortex-userstore")
+	if err != nil {
+		return nil, err
+	}
+
+	u := &UserStore{
+		logger:  logger,
+		cfg:     s3cfg,
+		bucket:  bkt,
+		stores:  make(map[string]*store.BucketStore),
+		baseDir: baseDir,
+	}
+
+	// Serve the StoreAPI on a random local port and dial it back, so the
+	// querier reaches all per-user stores through a single StoreClient.
+	serv := grpc.NewServer()
+	storepb.RegisterStoreServer(serv, u)
+	l, err := net.Listen("tcp", "")
+	if err != nil {
+		return nil, err
+	}
+	go serv.Serve(l)
+
+	cc, err := grpc.Dial(l.Addr().String(), grpc.WithInsecure())
+	if err != nil {
+		return nil, err
+	}
+
+	u.client = storepb.NewStoreClient(cc)
+
+	return u, nil
+}
+
+// InitialSync iterates over the S3 bucket creating user bucket stores, and calls InitialSync on each of them
+func (u *UserStore) InitialSync(ctx context.Context) error {
+	return u.syncUserStores(ctx, func(ctx context.Context, s *store.BucketStore) error {
+		return s.InitialSync(ctx)
+	})
+}
+
+// SyncStores iterates over the S3 bucket creating user bucket stores, and calls SyncBlocks on each of them
+func (u *UserStore) SyncStores(ctx context.Context) error {
+	return u.syncUserStores(ctx, func(ctx context.Context, s *store.BucketStore) error {
+		return s.SyncBlocks(ctx)
+	})
+}
+
+func (u *UserStore) syncUserStores(ctx context.Context, f func(context.Context, *store.BucketStore) error) error {
+	// Filter blocks over the widest possible time range.
+	mint, maxt := &model.TimeOrDurationValue{}, &model.TimeOrDurationValue{}
+	mint.Set("0000-01-01T00:00:00Z")
+	maxt.Set("9999-12-31T23:59:59Z")
+
+	wg := &sync.WaitGroup{}
+	err := u.bucket.Iter(ctx, "", func(s string) error {
+		user := strings.TrimSuffix(s, "/")
+
+		var bs *store.BucketStore
+		var ok bool
+		if bs, ok = u.stores[user]; !ok {
+			level.Info(u.logger).Log("msg", "creating user bucket store", "user", user)
+			bkt, err := s3.NewBucketWithConfig(u.logger, u.cfg, fmt.Sprintf("cortex-%s", user))
+			if err != nil {
+				return err
+			}
+
+			// Bucket with the user wrapper
+			userBkt := &ingester.Bucket{
+				UserID: user,
+				Bucket: bkt,
+			}
+
+			indexCacheSizeBytes := uint64(250 * units.Mebibyte)
+			maxItemSizeBytes := indexCacheSizeBytes / 2
+			indexCache, err := storecache.NewIndexCache(u.logger, nil, storecache.Opts{
+				MaxSizeBytes:     indexCacheSizeBytes,
+				MaxItemSizeBytes: maxItemSizeBytes,
+			})
+			if err != nil {
+				return err
+			}
+			bs, err = store.NewBucketStore(u.logger,
+				nil,
+				userBkt,
+				filepath.Join(u.baseDir, user),
+				indexCache,
+				uint64(2*units.Gibibyte),
+				0,
+				20,
+				false,
+				20,
+				&store.FilterConfig{
+					MinTime: *mint,
+					MaxTime: *maxt,
+				},
+			)
+			if err != nil {
+				return err
+			}
+
+			u.stores[user] = bs
+		}
+
+		// Sync each user store concurrently.
+		wg.Add(1)
+		go func(userID string, s *store.BucketStore) {
+			defer wg.Done()
+			if err := f(ctx, s); err != nil {
+				level.Warn(u.logger).Log("msg", "user sync failed", "user", userID, "err", err)
+			}
+		}(user, bs)
+
+		return nil
+	})
+
+	wg.Wait()
+
+	return err
+}
+
+// Info makes an info request to the underlying user store
+func (u *UserStore) Info(ctx context.Context, req *storepb.InfoRequest) (*storepb.InfoResponse, error) {
+	md, ok := metadata.FromIncomingContext(ctx)
+	if !ok {
+		return nil, fmt.Errorf("no metadata")
+	}
+
+	v := md.Get("user")
+	if len(v) == 0 {
+		return nil, fmt.Errorf("no userID")
+	}
+
+	bs, ok := u.stores[v[0]]
+	if !ok {
+		return nil, nil
+	}
+
+	return bs.Info(ctx, req)
+}
+
+// Series makes a series request to the underlying user store
+func (u *UserStore) Series(req *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error {
+	md, ok := metadata.FromIncomingContext(srv.Context())
+	if !ok {
+		return fmt.Errorf("no metadata")
+	}
+
+	v := md.Get("user")
+	if len(v) == 0 {
+		return fmt.Errorf("no userID")
+	}
+
+	bs, ok := u.stores[v[0]]
+	if !ok {
+		// Unknown user: respond with an empty stream.
+		return nil
+	}
+
+	return bs.Series(req, srv)
+}
+
+// LabelNames makes a LabelNames request to the underlying user store
+func (u *UserStore) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) {
+	md, ok := metadata.FromIncomingContext(ctx)
+	if !ok {
+		return nil, fmt.Errorf("no metadata")
+	}
+
+	v := md.Get("user")
+	if len(v) == 0 {
+		return nil, fmt.Errorf("no userID")
+	}
+
+	bs, ok := u.stores[v[0]]
+	if !ok {
+		return nil, nil
+	}
+
+	return bs.LabelNames(ctx, req)
+}
+
+// LabelValues makes a LabelValues request to the underlying user store
+func (u *UserStore) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) {
+	md, ok := metadata.FromIncomingContext(ctx)
+	if !ok {
+		return nil, fmt.Errorf("no metadata")
+	}
+
+	v := md.Get("user")
+	if len(v) == 0 {
+		return nil, fmt.Errorf("no userID")
+	}
+
+	bs, ok := u.stores[v[0]]
+	if !ok {
+		return nil, nil
+	}
+
+	return bs.LabelValues(ctx, req)
+}
diff --git a/pkg/storage/tsdb/backend/s3/config.go b/pkg/storage/tsdb/backend/s3/config.go
new file mode 100644
index 0000000000..a5b97a758c
--- /dev/null
+++ b/pkg/storage/tsdb/backend/s3/config.go
@@ -0,0 +1,21 @@
+package s3
+
+import "flag"
+
+// Config holds the config options for an S3 backend
+type Config struct {
+	Endpoint        string `yaml:"endpoint"`
+	BucketName      string `yaml:"bucket_name"`
+	SecretAccessKey string `yaml:"secret_access_key"`
+	AccessKeyID     string `yaml:"access_key_id"`
+	Insecure        bool   `yaml:"insecure"`
+}
+
+// RegisterFlags registers the flags for TSDB S3 storage
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+	f.StringVar(&cfg.AccessKeyID, "experimental.tsdb.s3.access-key-id", "", "S3 access key ID")
+	f.StringVar(&cfg.SecretAccessKey, "experimental.tsdb.s3.secret-access-key", "", "S3 secret access key")
+	f.StringVar(&cfg.BucketName, "experimental.tsdb.s3.bucket-name", "", "S3 bucket name")
+	f.StringVar(&cfg.Endpoint, "experimental.tsdb.s3.endpoint", "", "S3 endpoint without scheme")
+	f.BoolVar(&cfg.Insecure, "experimental.tsdb.s3.insecure", false, "If enabled, use http:// for the S3 endpoint instead of https://")
+}
diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go
new file mode 100644
index 0000000000..9415c5554d
--- /dev/null
+++ b/pkg/storage/tsdb/config.go
@@ -0,0 +1,48 @@
+package tsdb
+
+import (
+	"errors"
+	"flag"
+	"time"
+
+	"github.com/cortexproject/cortex/pkg/storage/tsdb/backend/s3"
+)
+
+const (
+	// BackendS3 is the const for the s3 backend for tsdb long-term retention
+	BackendS3 = "s3"
+)
+
+// Config holds the config information for TSDB storage
+type Config struct {
+	Dir          string        `yaml:"dir"`
+	SyncDir      string        `yaml:"sync_dir"`
+	BlockRanges  time.Duration `yaml:"block_ranges_period"`
+	Retention    time.Duration `yaml:"retention_period"`
+	ShipInterval time.Duration `yaml:"ship_interval"`
+	Backend      string        `yaml:"backend"`
+
+	// Backends
+	S3 s3.Config `yaml:"s3"`
+}
+
+// RegisterFlags registers the TSDB flags
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+	cfg.S3.RegisterFlags(f)
+
+	f.StringVar(&cfg.Dir, "experimental.tsdb.dir", "tsdb", "directory to place all TSDBs into")
+	f.StringVar(&cfg.SyncDir, "experimental.tsdb.sync-dir", "tsdb-sync", "directory to place synced TSDB indices")
+	f.DurationVar(&cfg.BlockRanges, "experimental.tsdb.block-ranges-period", 1*time.Hour, "TSDB block ranges")
+	f.DurationVar(&cfg.Retention, "experimental.tsdb.retention-period", 6*time.Hour, "TSDB block retention")
+	f.DurationVar(&cfg.ShipInterval, "experimental.tsdb.ship-interval", 30*time.Second, "the frequency at which TSDB blocks are scanned for shipping")
+	f.StringVar(&cfg.Backend, "experimental.tsdb.backend", "s3", "TSDB storage backend to use")
+}
+
+// Validate the config
+func (cfg *Config) Validate() error {
+	if cfg.Backend != BackendS3 {
+		return errors.New("unsupported TSDB storage backend")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/alecthomas/template/LICENSE b/vendor/github.com/alecthomas/template/LICENSE
new file mode 100644
index 0000000000..7448756763
--- /dev/null
+++ b/vendor/github.com/alecthomas/template/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/alecthomas/template/README.md b/vendor/github.com/alecthomas/template/README.md
new file mode 100644
index 0000000000..ef6a8ee303
--- /dev/null
+++ b/vendor/github.com/alecthomas/template/README.md
@@ -0,0 +1,25 @@
+# Go's `text/template` package with newline elision
+
+This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline.
+
+e.g.
+
+```
+{{if true}}\
+hello
+{{end}}\
+```
+
+Will result in:
+
+```
+hello\n
+```
+
+Rather than:
+
+```
+\n
+hello\n
+\n
+```
diff --git a/vendor/github.com/alecthomas/template/doc.go b/vendor/github.com/alecthomas/template/doc.go
new file mode 100644
index 0000000000..223c595c25
--- /dev/null
+++ b/vendor/github.com/alecthomas/template/doc.go
@@ -0,0 +1,406 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package template implements data-driven templates for generating textual output.
+ +To generate HTML output, see package html/template, which has the same interface +as this package but automatically secures HTML output against certain attacks. + +Templates are executed by applying them to a data structure. Annotations in the +template refer to elements of the data structure (typically a field of a struct +or a key in a map) to control execution and derive values to be displayed. +Execution of the template walks the structure and sets the cursor, represented +by a period '.' and called "dot", to the value at the current location in the +structure as execution proceeds. + +The input text for a template is UTF-8-encoded text in any format. +"Actions"--data evaluations or control structures--are delimited by +"{{" and "}}"; all text outside actions is copied to the output unchanged. +Actions may not span newlines, although comments can. + +Once parsed, a template may be executed safely in parallel. + +Here is a trivial example that prints "17 items are made of wool". + + type Inventory struct { + Material string + Count uint + } + sweaters := Inventory{"wool", 17} + tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}") + if err != nil { panic(err) } + err = tmpl.Execute(os.Stdout, sweaters) + if err != nil { panic(err) } + +More intricate examples appear below. + +Actions + +Here is the list of actions. "Arguments" and "pipelines" are evaluations of +data, defined in detail below. + +*/ +// {{/* a comment */}} +// A comment; discarded. May contain newlines. +// Comments do not nest and must start and end at the +// delimiters, as shown here. +/* + + {{pipeline}} + The default textual representation of the value of the pipeline + is copied to the output. + + {{if pipeline}} T1 {{end}} + If the value of the pipeline is empty, no output is generated; + otherwise, T1 is executed. The empty values are false, 0, any + nil pointer or interface value, and any array, slice, map, or + string of length zero. + Dot is unaffected. + + {{if pipeline}} T1 {{else}} T0 {{end}} + If the value of the pipeline is empty, T0 is executed; + otherwise, T1 is executed. Dot is unaffected. + + {{if pipeline}} T1 {{else if pipeline}} T0 {{end}} + To simplify the appearance of if-else chains, the else action + of an if may include another if directly; the effect is exactly + the same as writing + {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}} + + {{range pipeline}} T1 {{end}} + The value of the pipeline must be an array, slice, map, or channel. + If the value of the pipeline has length zero, nothing is output; + otherwise, dot is set to the successive elements of the array, + slice, or map and T1 is executed. If the value is a map and the + keys are of basic type with a defined order ("comparable"), the + elements will be visited in sorted key order. + + {{range pipeline}} T1 {{else}} T0 {{end}} + The value of the pipeline must be an array, slice, map, or channel. + If the value of the pipeline has length zero, dot is unaffected and + T0 is executed; otherwise, dot is set to the successive elements + of the array, slice, or map and T1 is executed. + + {{template "name"}} + The template with the specified name is executed with nil data. + + {{template "name" pipeline}} + The template with the specified name is executed with dot set + to the value of the pipeline. + + {{with pipeline}} T1 {{end}} + If the value of the pipeline is empty, no output is generated; + otherwise, dot is set to the value of the pipeline and T1 is + executed. 
+ + {{with pipeline}} T1 {{else}} T0 {{end}} + If the value of the pipeline is empty, dot is unaffected and T0 + is executed; otherwise, dot is set to the value of the pipeline + and T1 is executed. + +Arguments + +An argument is a simple value, denoted by one of the following. + + - A boolean, string, character, integer, floating-point, imaginary + or complex constant in Go syntax. These behave like Go's untyped + constants, although raw strings may not span newlines. + - The keyword nil, representing an untyped Go nil. + - The character '.' (period): + . + The result is the value of dot. + - A variable name, which is a (possibly empty) alphanumeric string + preceded by a dollar sign, such as + $piOver2 + or + $ + The result is the value of the variable. + Variables are described below. + - The name of a field of the data, which must be a struct, preceded + by a period, such as + .Field + The result is the value of the field. Field invocations may be + chained: + .Field1.Field2 + Fields can also be evaluated on variables, including chaining: + $x.Field1.Field2 + - The name of a key of the data, which must be a map, preceded + by a period, such as + .Key + The result is the map element value indexed by the key. + Key invocations may be chained and combined with fields to any + depth: + .Field1.Key1.Field2.Key2 + Although the key must be an alphanumeric identifier, unlike with + field names they do not need to start with an upper case letter. + Keys can also be evaluated on variables, including chaining: + $x.key1.key2 + - The name of a niladic method of the data, preceded by a period, + such as + .Method + The result is the value of invoking the method with dot as the + receiver, dot.Method(). Such a method must have one return value (of + any type) or two return values, the second of which is an error. + If it has two and the returned error is non-nil, execution terminates + and an error is returned to the caller as the value of Execute. + Method invocations may be chained and combined with fields and keys + to any depth: + .Field1.Key1.Method1.Field2.Key2.Method2 + Methods can also be evaluated on variables, including chaining: + $x.Method1.Field + - The name of a niladic function, such as + fun + The result is the value of invoking the function, fun(). The return + types and values behave as in methods. Functions and function + names are described below. + - A parenthesized instance of one the above, for grouping. The result + may be accessed by a field or map key invocation. + print (.F1 arg1) (.F2 arg2) + (.StructValuedMethod "arg").Field + +Arguments may evaluate to any type; if they are pointers the implementation +automatically indirects to the base type when required. +If an evaluation yields a function value, such as a function-valued +field of a struct, the function is not invoked automatically, but it +can be used as a truth value for an if action and the like. To invoke +it, use the call function, defined below. + +A pipeline is a possibly chained sequence of "commands". A command is a simple +value (argument) or a function or method call, possibly with multiple arguments: + + Argument + The result is the value of evaluating the argument. + .Method [Argument...] + The method can be alone or the last element of a chain but, + unlike methods in the middle of a chain, it can take arguments. + The result is the value of calling the method with the + arguments: + dot.Method(Argument1, etc.) + functionName [Argument...] 
+ The result is the value of calling the function associated + with the name: + function(Argument1, etc.) + Functions and function names are described below. + +Pipelines + +A pipeline may be "chained" by separating a sequence of commands with pipeline +characters '|'. In a chained pipeline, the result of the each command is +passed as the last argument of the following command. The output of the final +command in the pipeline is the value of the pipeline. + +The output of a command will be either one value or two values, the second of +which has type error. If that second value is present and evaluates to +non-nil, execution terminates and the error is returned to the caller of +Execute. + +Variables + +A pipeline inside an action may initialize a variable to capture the result. +The initialization has syntax + + $variable := pipeline + +where $variable is the name of the variable. An action that declares a +variable produces no output. + +If a "range" action initializes a variable, the variable is set to the +successive elements of the iteration. Also, a "range" may declare two +variables, separated by a comma: + + range $index, $element := pipeline + +in which case $index and $element are set to the successive values of the +array/slice index or map key and element, respectively. Note that if there is +only one variable, it is assigned the element; this is opposite to the +convention in Go range clauses. + +A variable's scope extends to the "end" action of the control structure ("if", +"with", or "range") in which it is declared, or to the end of the template if +there is no such control structure. A template invocation does not inherit +variables from the point of its invocation. + +When execution begins, $ is set to the data argument passed to Execute, that is, +to the starting value of dot. + +Examples + +Here are some example one-line templates demonstrating pipelines and variables. +All produce the quoted word "output": + + {{"\"output\""}} + A string constant. + {{`"output"`}} + A raw string constant. + {{printf "%q" "output"}} + A function call. + {{"output" | printf "%q"}} + A function call whose final argument comes from the previous + command. + {{printf "%q" (print "out" "put")}} + A parenthesized argument. + {{"put" | printf "%s%s" "out" | printf "%q"}} + A more elaborate call. + {{"output" | printf "%s" | printf "%q"}} + A longer chain. + {{with "output"}}{{printf "%q" .}}{{end}} + A with action using dot. + {{with $x := "output" | printf "%q"}}{{$x}}{{end}} + A with action that creates and uses a variable. + {{with $x := "output"}}{{printf "%q" $x}}{{end}} + A with action that uses the variable in another action. + {{with $x := "output"}}{{$x | printf "%q"}}{{end}} + The same, but pipelined. + +Functions + +During execution functions are found in two function maps: first in the +template, then in the global function map. By default, no functions are defined +in the template but the Funcs method can be used to add them. + +Predefined global functions are named as follows. + + and + Returns the boolean AND of its arguments by returning the + first empty argument or the last argument, that is, + "and x y" behaves as "if x then y else x". All the + arguments are evaluated. + call + Returns the result of calling the first argument, which + must be a function, with the remaining arguments as parameters. + Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where + Y is a func-valued field, map entry, or the like. 
+ The first argument must be the result of an evaluation + that yields a value of function type (as distinct from + a predefined function such as print). The function must + return either one or two result values, the second of which + is of type error. If the arguments don't match the function + or the returned error value is non-nil, execution stops. + html + Returns the escaped HTML equivalent of the textual + representation of its arguments. + index + Returns the result of indexing its first argument by the + following arguments. Thus "index x 1 2 3" is, in Go syntax, + x[1][2][3]. Each indexed item must be a map, slice, or array. + js + Returns the escaped JavaScript equivalent of the textual + representation of its arguments. + len + Returns the integer length of its argument. + not + Returns the boolean negation of its single argument. + or + Returns the boolean OR of its arguments by returning the + first non-empty argument or the last argument, that is, + "or x y" behaves as "if x then x else y". All the + arguments are evaluated. + print + An alias for fmt.Sprint + printf + An alias for fmt.Sprintf + println + An alias for fmt.Sprintln + urlquery + Returns the escaped value of the textual representation of + its arguments in a form suitable for embedding in a URL query. + +The boolean functions take any zero value to be false and a non-zero +value to be true. + +There is also a set of binary comparison operators defined as +functions: + + eq + Returns the boolean truth of arg1 == arg2 + ne + Returns the boolean truth of arg1 != arg2 + lt + Returns the boolean truth of arg1 < arg2 + le + Returns the boolean truth of arg1 <= arg2 + gt + Returns the boolean truth of arg1 > arg2 + ge + Returns the boolean truth of arg1 >= arg2 + +For simpler multi-way equality tests, eq (only) accepts two or more +arguments and compares the second and subsequent to the first, +returning in effect + + arg1==arg2 || arg1==arg3 || arg1==arg4 ... + +(Unlike with || in Go, however, eq is a function call and all the +arguments will be evaluated.) + +The comparison functions work on basic types only (or named basic +types, such as "type Celsius float32"). They implement the Go rules +for comparison of values, except that size and exact type are +ignored, so any integer value, signed or unsigned, may be compared +with any other integer value. (The arithmetic value is compared, +not the bit pattern, so all negative integers are less than all +unsigned integers.) However, as usual, one may not compare an int +with a float32 and so on. + +Associated templates + +Each template is named by a string specified when it is created. Also, each +template is associated with zero or more other templates that it may invoke by +name; such associations are transitive and form a name space of templates. + +A template may use a template invocation to instantiate another associated +template; see the explanation of the "template" action above. The name must be +that of a template associated with the template that contains the invocation. + +Nested template definitions + +When parsing a template, another template may be defined and associated with the +template being parsed. Template definitions must appear at the top level of the +template, much like global variables in a Go program. + +The syntax of such definitions is to surround each template declaration with a +"define" and "end" action. + +The define action names the template being created by providing a string +constant. 
Here is a simple example: + + `{{define "T1"}}ONE{{end}} + {{define "T2"}}TWO{{end}} + {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}} + {{template "T3"}}` + +This defines two templates, T1 and T2, and a third T3 that invokes the other two +when it is executed. Finally it invokes T3. If executed this template will +produce the text + + ONE TWO + +By construction, a template may reside in only one association. If it's +necessary to have a template addressable from multiple associations, the +template definition must be parsed multiple times to create distinct *Template +values, or must be copied with the Clone or AddParseTree method. + +Parse may be called multiple times to assemble the various associated templates; +see the ParseFiles and ParseGlob functions and methods for simple ways to parse +related templates stored in files. + +A template may be executed directly or through ExecuteTemplate, which executes +an associated template identified by name. To invoke our example above, we +might write, + + err := tmpl.Execute(os.Stdout, "no data needed") + if err != nil { + log.Fatalf("execution failed: %s", err) + } + +or to invoke a particular template explicitly by name, + + err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed") + if err != nil { + log.Fatalf("execution failed: %s", err) + } + +*/ +package template diff --git a/vendor/github.com/alecthomas/template/exec.go b/vendor/github.com/alecthomas/template/exec.go new file mode 100644 index 0000000000..c3078e5d0c --- /dev/null +++ b/vendor/github.com/alecthomas/template/exec.go @@ -0,0 +1,845 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "bytes" + "fmt" + "io" + "reflect" + "runtime" + "sort" + "strings" + + "github.com/alecthomas/template/parse" +) + +// state represents the state of an execution. It's not part of the +// template so that multiple executions of the same template +// can execute in parallel. +type state struct { + tmpl *Template + wr io.Writer + node parse.Node // current node, for errors + vars []variable // push-down stack of variable values. +} + +// variable holds the dynamic value of a variable such as $, $x etc. +type variable struct { + name string + value reflect.Value +} + +// push pushes a new variable on the stack. +func (s *state) push(name string, value reflect.Value) { + s.vars = append(s.vars, variable{name, value}) +} + +// mark returns the length of the variable stack. +func (s *state) mark() int { + return len(s.vars) +} + +// pop pops the variable stack up to the mark. +func (s *state) pop(mark int) { + s.vars = s.vars[0:mark] +} + +// setVar overwrites the top-nth variable on the stack. Used by range iterations. +func (s *state) setVar(n int, value reflect.Value) { + s.vars[len(s.vars)-n].value = value +} + +// varValue returns the value of the named variable. +func (s *state) varValue(name string) reflect.Value { + for i := s.mark() - 1; i >= 0; i-- { + if s.vars[i].name == name { + return s.vars[i].value + } + } + s.errorf("undefined variable: %s", name) + return zero +} + +var zero reflect.Value + +// at marks the state to be on node n, for error reporting. +func (s *state) at(node parse.Node) { + s.node = node +} + +// doublePercent returns the string with %'s replaced by %%, if necessary, +// so it can be used safely inside a Printf format string. 
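+// For example, doublePercent("100%") returns "100%%".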
+func doublePercent(str string) string { + if strings.Contains(str, "%") { + str = strings.Replace(str, "%", "%%", -1) + } + return str +} + +// errorf formats the error and terminates processing. +func (s *state) errorf(format string, args ...interface{}) { + name := doublePercent(s.tmpl.Name()) + if s.node == nil { + format = fmt.Sprintf("template: %s: %s", name, format) + } else { + location, context := s.tmpl.ErrorContext(s.node) + format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format) + } + panic(fmt.Errorf(format, args...)) +} + +// errRecover is the handler that turns panics into returns from the top +// level of Parse. +func errRecover(errp *error) { + e := recover() + if e != nil { + switch err := e.(type) { + case runtime.Error: + panic(e) + case error: + *errp = err + default: + panic(e) + } + } +} + +// ExecuteTemplate applies the template associated with t that has the given name +// to the specified data object and writes the output to wr. +// If an error occurs executing the template or writing its output, +// execution stops, but partial results may already have been written to +// the output writer. +// A template may be executed safely in parallel. +func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error { + tmpl := t.tmpl[name] + if tmpl == nil { + return fmt.Errorf("template: no template %q associated with template %q", name, t.name) + } + return tmpl.Execute(wr, data) +} + +// Execute applies a parsed template to the specified data object, +// and writes the output to wr. +// If an error occurs executing the template or writing its output, +// execution stops, but partial results may already have been written to +// the output writer. +// A template may be executed safely in parallel. +func (t *Template) Execute(wr io.Writer, data interface{}) (err error) { + defer errRecover(&err) + value := reflect.ValueOf(data) + state := &state{ + tmpl: t, + wr: wr, + vars: []variable{{"$", value}}, + } + t.init() + if t.Tree == nil || t.Root == nil { + var b bytes.Buffer + for name, tmpl := range t.tmpl { + if tmpl.Tree == nil || tmpl.Root == nil { + continue + } + if b.Len() > 0 { + b.WriteString(", ") + } + fmt.Fprintf(&b, "%q", name) + } + var s string + if b.Len() > 0 { + s = "; defined templates are: " + b.String() + } + state.errorf("%q is an incomplete or empty template%s", t.Name(), s) + } + state.walk(value, t.Root) + return +} + +// Walk functions step through the major pieces of the template structure, +// generating output as they go. +func (s *state) walk(dot reflect.Value, node parse.Node) { + s.at(node) + switch node := node.(type) { + case *parse.ActionNode: + // Do not pop variables so they persist until next end. + // Also, if the action declares variables, don't print the result. + val := s.evalPipeline(dot, node.Pipe) + if len(node.Pipe.Decl) == 0 { + s.printValue(node, val) + } + case *parse.IfNode: + s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList) + case *parse.ListNode: + for _, node := range node.Nodes { + s.walk(dot, node) + } + case *parse.RangeNode: + s.walkRange(dot, node) + case *parse.TemplateNode: + s.walkTemplate(dot, node) + case *parse.TextNode: + if _, err := s.wr.Write(node.Text); err != nil { + s.errorf("%s", err) + } + case *parse.WithNode: + s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList) + default: + s.errorf("unknown node: %s", node) + } +} + +// walkIfOrWith walks an 'if' or 'with' node. 
The two control structures +// are identical in behavior except that 'with' sets dot. +func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) { + defer s.pop(s.mark()) + val := s.evalPipeline(dot, pipe) + truth, ok := isTrue(val) + if !ok { + s.errorf("if/with can't use %v", val) + } + if truth { + if typ == parse.NodeWith { + s.walk(val, list) + } else { + s.walk(dot, list) + } + } else if elseList != nil { + s.walk(dot, elseList) + } +} + +// isTrue reports whether the value is 'true', in the sense of not the zero of its type, +// and whether the value has a meaningful truth value. +func isTrue(val reflect.Value) (truth, ok bool) { + if !val.IsValid() { + // Something like var x interface{}, never set. It's a form of nil. + return false, true + } + switch val.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + truth = val.Len() > 0 + case reflect.Bool: + truth = val.Bool() + case reflect.Complex64, reflect.Complex128: + truth = val.Complex() != 0 + case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: + truth = !val.IsNil() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + truth = val.Int() != 0 + case reflect.Float32, reflect.Float64: + truth = val.Float() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + truth = val.Uint() != 0 + case reflect.Struct: + truth = true // Struct values are always true. + default: + return + } + return truth, true +} + +func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) { + s.at(r) + defer s.pop(s.mark()) + val, _ := indirect(s.evalPipeline(dot, r.Pipe)) + // mark top of stack before any variables in the body are pushed. + mark := s.mark() + oneIteration := func(index, elem reflect.Value) { + // Set top var (lexically the second if there are two) to the element. + if len(r.Pipe.Decl) > 0 { + s.setVar(1, elem) + } + // Set next var (lexically the first if there are two) to the index. + if len(r.Pipe.Decl) > 1 { + s.setVar(2, index) + } + s.walk(elem, r.List) + s.pop(mark) + } + switch val.Kind() { + case reflect.Array, reflect.Slice: + if val.Len() == 0 { + break + } + for i := 0; i < val.Len(); i++ { + oneIteration(reflect.ValueOf(i), val.Index(i)) + } + return + case reflect.Map: + if val.Len() == 0 { + break + } + for _, key := range sortKeys(val.MapKeys()) { + oneIteration(key, val.MapIndex(key)) + } + return + case reflect.Chan: + if val.IsNil() { + break + } + i := 0 + for ; ; i++ { + elem, ok := val.Recv() + if !ok { + break + } + oneIteration(reflect.ValueOf(i), elem) + } + if i == 0 { + break + } + return + case reflect.Invalid: + break // An invalid value is likely a nil map, etc. and acts like an empty map. + default: + s.errorf("range can't iterate over %v", val) + } + if r.ElseList != nil { + s.walk(dot, r.ElseList) + } +} + +func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) { + s.at(t) + tmpl := s.tmpl.tmpl[t.Name] + if tmpl == nil { + s.errorf("template %q not defined", t.Name) + } + // Variables declared by the pipeline persist. + dot = s.evalPipeline(dot, t.Pipe) + newState := *s + newState.tmpl = tmpl + // No dynamic scoping: template invocations inherit no variables. + newState.vars = []variable{{"$", dot}} + newState.walk(dot, tmpl.Root) +} + +// Eval functions evaluate pipelines, commands, and their elements and extract +// values from the data structure by examining fields, calling methods, and so on. 
+// The printing of those values happens only through walk functions. + +// evalPipeline returns the value acquired by evaluating a pipeline. If the +// pipeline has a variable declaration, the variable will be pushed on the +// stack. Callers should therefore pop the stack after they are finished +// executing commands depending on the pipeline value. +func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) { + if pipe == nil { + return + } + s.at(pipe) + for _, cmd := range pipe.Cmds { + value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg. + // If the object has type interface{}, dig down one level to the thing inside. + if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 { + value = reflect.ValueOf(value.Interface()) // lovely! + } + } + for _, variable := range pipe.Decl { + s.push(variable.Ident[0], value) + } + return value +} + +func (s *state) notAFunction(args []parse.Node, final reflect.Value) { + if len(args) > 1 || final.IsValid() { + s.errorf("can't give argument to non-function %s", args[0]) + } +} + +func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value { + firstWord := cmd.Args[0] + switch n := firstWord.(type) { + case *parse.FieldNode: + return s.evalFieldNode(dot, n, cmd.Args, final) + case *parse.ChainNode: + return s.evalChainNode(dot, n, cmd.Args, final) + case *parse.IdentifierNode: + // Must be a function. + return s.evalFunction(dot, n, cmd, cmd.Args, final) + case *parse.PipeNode: + // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored. + return s.evalPipeline(dot, n) + case *parse.VariableNode: + return s.evalVariableNode(dot, n, cmd.Args, final) + } + s.at(firstWord) + s.notAFunction(cmd.Args, final) + switch word := firstWord.(type) { + case *parse.BoolNode: + return reflect.ValueOf(word.True) + case *parse.DotNode: + return dot + case *parse.NilNode: + s.errorf("nil is not a command") + case *parse.NumberNode: + return s.idealConstant(word) + case *parse.StringNode: + return reflect.ValueOf(word.Text) + } + s.errorf("can't evaluate command %q", firstWord) + panic("not reached") +} + +// idealConstant is called to return the value of a number in a context where +// we don't know the type. In that case, the syntax of the number tells us +// its type, and we use Go rules to resolve. Note there is no such thing as +// a uint ideal constant in this situation - the value must be of int type. +func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value { + // These are ideal constants but we don't know the type + // and we have no context. (If it was a method argument, + // we'd know what we need.) The syntax guides us to some extent. + s.at(constant) + switch { + case constant.IsComplex: + return reflect.ValueOf(constant.Complex128) // incontrovertible. 
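+ // A constant counts as a float only when it is not a hex constant and its text visibly looks like a float (contains '.', 'e', or 'E'); everything else falls through to the integer cases.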
+ case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0: + return reflect.ValueOf(constant.Float64) + case constant.IsInt: + n := int(constant.Int64) + if int64(n) != constant.Int64 { + s.errorf("%s overflows int", constant.Text) + } + return reflect.ValueOf(n) + case constant.IsUint: + s.errorf("%s overflows int", constant.Text) + } + return zero +} + +func isHexConstant(s string) bool { + return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') +} + +func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value { + s.at(field) + return s.evalFieldChain(dot, dot, field, field.Ident, args, final) +} + +func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value { + s.at(chain) + // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields. + pipe := s.evalArg(dot, nil, chain.Node) + if len(chain.Field) == 0 { + s.errorf("internal error: no fields in evalChainNode") + } + return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final) +} + +func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value { + // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields. + s.at(variable) + value := s.varValue(variable.Ident[0]) + if len(variable.Ident) == 1 { + s.notAFunction(args, final) + return value + } + return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final) +} + +// evalFieldChain evaluates .X.Y.Z possibly followed by arguments. +// dot is the environment in which to evaluate arguments, while +// receiver is the value being walked along the chain. +func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value { + n := len(ident) + for i := 0; i < n-1; i++ { + receiver = s.evalField(dot, ident[i], node, nil, zero, receiver) + } + // Now if it's a method, it gets the arguments. + return s.evalField(dot, ident[n-1], node, args, final, receiver) +} + +func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value { + s.at(node) + name := node.Ident + function, ok := findFunction(name, s.tmpl) + if !ok { + s.errorf("%q is not a defined function", name) + } + return s.evalCall(dot, function, cmd, name, args, final) +} + +// evalField evaluates an expression like (.Field) or (.Field arg1 arg2). +// The 'final' argument represents the return value from the preceding +// value of the pipeline, if any. +func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value { + if !receiver.IsValid() { + return zero + } + typ := receiver.Type() + receiver, _ = indirect(receiver) + // Unless it's an interface, need to get to a value of type *T to guarantee + // we see all methods of T and *T. + ptr := receiver + if ptr.Kind() != reflect.Interface && ptr.CanAddr() { + ptr = ptr.Addr() + } + if method := ptr.MethodByName(fieldName); method.IsValid() { + return s.evalCall(dot, method, node, fieldName, args, final) + } + hasArgs := len(args) > 1 || final.IsValid() + // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil. 
+ receiver, isNil := indirect(receiver) + if isNil { + s.errorf("nil pointer evaluating %s.%s", typ, fieldName) + } + switch receiver.Kind() { + case reflect.Struct: + tField, ok := receiver.Type().FieldByName(fieldName) + if ok { + field := receiver.FieldByIndex(tField.Index) + if tField.PkgPath != "" { // field is unexported + s.errorf("%s is an unexported field of struct type %s", fieldName, typ) + } + // If it's a function, we must call it. + if hasArgs { + s.errorf("%s has arguments but cannot be invoked as function", fieldName) + } + return field + } + s.errorf("%s is not a field of struct type %s", fieldName, typ) + case reflect.Map: + // If it's a map, attempt to use the field name as a key. + nameVal := reflect.ValueOf(fieldName) + if nameVal.Type().AssignableTo(receiver.Type().Key()) { + if hasArgs { + s.errorf("%s is not a method but has arguments", fieldName) + } + return receiver.MapIndex(nameVal) + } + } + s.errorf("can't evaluate field %s in type %s", fieldName, typ) + panic("not reached") +} + +var ( + errorType = reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() +) + +// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so +// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0] +// as the function itself. +func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value { + if args != nil { + args = args[1:] // Zeroth arg is function name/node; not passed to function. + } + typ := fun.Type() + numIn := len(args) + if final.IsValid() { + numIn++ + } + numFixed := len(args) + if typ.IsVariadic() { + numFixed = typ.NumIn() - 1 // last arg is the variadic one. + if numIn < numFixed { + s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args)) + } + } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() { + s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args)) + } + if !goodFunc(typ) { + // TODO: This could still be a confusing error; maybe goodFunc should provide info. + s.errorf("can't call method/function %q with %d results", name, typ.NumOut()) + } + // Build the arg list. + argv := make([]reflect.Value, numIn) + // Args must be evaluated. Fixed args first. + i := 0 + for ; i < numFixed && i < len(args); i++ { + argv[i] = s.evalArg(dot, typ.In(i), args[i]) + } + // Now the ... args. + if typ.IsVariadic() { + argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice. + for ; i < len(args); i++ { + argv[i] = s.evalArg(dot, argType, args[i]) + } + } + // Add final value if necessary. + if final.IsValid() { + t := typ.In(typ.NumIn() - 1) + if typ.IsVariadic() { + t = t.Elem() + } + argv[i] = s.validateType(final, t) + } + result := fun.Call(argv) + // If we have an error that is not nil, stop execution and return that error to the caller. + if len(result) == 2 && !result[1].IsNil() { + s.at(node) + s.errorf("error calling %s: %s", name, result[1].Interface().(error)) + } + return result[0] +} + +// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. +func canBeNil(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +// validateType guarantees that the value is valid and assignable to the type. 
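+// An invalid (untyped nil) value becomes the zero value when the target type can be nil; otherwise a single pointer dereference or address-of is attempted before reporting a type mismatch.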
+func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value { + if !value.IsValid() { + if typ == nil || canBeNil(typ) { + // An untyped nil interface{}. Accept as a proper nil value. + return reflect.Zero(typ) + } + s.errorf("invalid value; expected %s", typ) + } + if typ != nil && !value.Type().AssignableTo(typ) { + if value.Kind() == reflect.Interface && !value.IsNil() { + value = value.Elem() + if value.Type().AssignableTo(typ) { + return value + } + // fallthrough + } + // Does one dereference or indirection work? We could do more, as we + // do with method receivers, but that gets messy and method receivers + // are much more constrained, so it makes more sense there than here. + // Besides, one is almost always all you need. + switch { + case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ): + value = value.Elem() + if !value.IsValid() { + s.errorf("dereference of nil pointer of type %s", typ) + } + case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr(): + value = value.Addr() + default: + s.errorf("wrong type for value; expected %s; got %s", typ, value.Type()) + } + } + return value +} + +func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + switch arg := n.(type) { + case *parse.DotNode: + return s.validateType(dot, typ) + case *parse.NilNode: + if canBeNil(typ) { + return reflect.Zero(typ) + } + s.errorf("cannot assign nil to %s", typ) + case *parse.FieldNode: + return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ) + case *parse.VariableNode: + return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ) + case *parse.PipeNode: + return s.validateType(s.evalPipeline(dot, arg), typ) + case *parse.IdentifierNode: + return s.evalFunction(dot, arg, arg, nil, zero) + case *parse.ChainNode: + return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ) + } + switch typ.Kind() { + case reflect.Bool: + return s.evalBool(typ, n) + case reflect.Complex64, reflect.Complex128: + return s.evalComplex(typ, n) + case reflect.Float32, reflect.Float64: + return s.evalFloat(typ, n) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return s.evalInteger(typ, n) + case reflect.Interface: + if typ.NumMethod() == 0 { + return s.evalEmptyInterface(dot, n) + } + case reflect.String: + return s.evalString(typ, n) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return s.evalUnsignedInteger(typ, n) + } + s.errorf("can't handle %s for arg of type %s", n, typ) + panic("not reached") +} + +func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.BoolNode); ok { + value := reflect.New(typ).Elem() + value.SetBool(n.True) + return value + } + s.errorf("expected bool; found %s", n) + panic("not reached") +} + +func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.StringNode); ok { + value := reflect.New(typ).Elem() + value.SetString(n.Text) + return value + } + s.errorf("expected string; found %s", n) + panic("not reached") +} + +func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsInt { + value := reflect.New(typ).Elem() + value.SetInt(n.Int64) + return value + } + s.errorf("expected integer; found %s", n) + panic("not reached") +} + +func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) 
reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsUint { + value := reflect.New(typ).Elem() + value.SetUint(n.Uint64) + return value + } + s.errorf("expected unsigned integer; found %s", n) + panic("not reached") +} + +func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value { + s.at(n) + if n, ok := n.(*parse.NumberNode); ok && n.IsFloat { + value := reflect.New(typ).Elem() + value.SetFloat(n.Float64) + return value + } + s.errorf("expected float; found %s", n) + panic("not reached") +} + +func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value { + if n, ok := n.(*parse.NumberNode); ok && n.IsComplex { + value := reflect.New(typ).Elem() + value.SetComplex(n.Complex128) + return value + } + s.errorf("expected complex; found %s", n) + panic("not reached") +} + +func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value { + s.at(n) + switch n := n.(type) { + case *parse.BoolNode: + return reflect.ValueOf(n.True) + case *parse.DotNode: + return dot + case *parse.FieldNode: + return s.evalFieldNode(dot, n, nil, zero) + case *parse.IdentifierNode: + return s.evalFunction(dot, n, n, nil, zero) + case *parse.NilNode: + // NilNode is handled in evalArg, the only place that calls here. + s.errorf("evalEmptyInterface: nil (can't happen)") + case *parse.NumberNode: + return s.idealConstant(n) + case *parse.StringNode: + return reflect.ValueOf(n.Text) + case *parse.VariableNode: + return s.evalVariableNode(dot, n, nil, zero) + case *parse.PipeNode: + return s.evalPipeline(dot, n) + } + s.errorf("can't handle assignment of %s to empty interface argument", n) + panic("not reached") +} + +// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. +// We indirect through pointers and empty interfaces (only) because +// non-empty interfaces have methods we might need. +func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { + for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { + if v.IsNil() { + return v, true + } + if v.Kind() == reflect.Interface && v.NumMethod() > 0 { + break + } + } + return v, false +} + +// printValue writes the textual representation of the value to the output of +// the template. +func (s *state) printValue(n parse.Node, v reflect.Value) { + s.at(n) + iface, ok := printableValue(v) + if !ok { + s.errorf("can't print %s of type %s", n, v.Type()) + } + fmt.Fprint(s.wr, iface) +} + +// printableValue returns the, possibly indirected, interface value inside v that +// is best for a call to formatted printer. +func printableValue(v reflect.Value) (interface{}, bool) { + if v.Kind() == reflect.Ptr { + v, _ = indirect(v) // fmt.Fprint handles nil. + } + if !v.IsValid() { + return "", true + } + + if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { + if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { + v = v.Addr() + } else { + switch v.Kind() { + case reflect.Chan, reflect.Func: + return nil, false + } + } + } + return v.Interface(), true +} + +// Types to help sort the keys in a map for reproducible output. 
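+// Each rv* wrapper below embeds rvs for Len and Swap and adds a kind-specific Less.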
+ +type rvs []reflect.Value + +func (x rvs) Len() int { return len(x) } +func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +type rvInts struct{ rvs } + +func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() } + +type rvUints struct{ rvs } + +func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() } + +type rvFloats struct{ rvs } + +func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() } + +type rvStrings struct{ rvs } + +func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() } + +// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys. +func sortKeys(v []reflect.Value) []reflect.Value { + if len(v) <= 1 { + return v + } + switch v[0].Kind() { + case reflect.Float32, reflect.Float64: + sort.Sort(rvFloats{v}) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + sort.Sort(rvInts{v}) + case reflect.String: + sort.Sort(rvStrings{v}) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + sort.Sort(rvUints{v}) + } + return v +} diff --git a/vendor/github.com/alecthomas/template/funcs.go b/vendor/github.com/alecthomas/template/funcs.go new file mode 100644 index 0000000000..39ee5ed68f --- /dev/null +++ b/vendor/github.com/alecthomas/template/funcs.go @@ -0,0 +1,598 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/url" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// FuncMap is the type of the map defining the mapping from names to functions. +// Each function must have either a single return value, or two return values of +// which the second has type error. In that case, if the second (error) +// return value evaluates to non-nil during execution, execution terminates and +// Execute returns that error. +type FuncMap map[string]interface{} + +var builtins = FuncMap{ + "and": and, + "call": call, + "html": HTMLEscaper, + "index": index, + "js": JSEscaper, + "len": length, + "not": not, + "or": or, + "print": fmt.Sprint, + "printf": fmt.Sprintf, + "println": fmt.Sprintln, + "urlquery": URLQueryEscaper, + + // Comparisons + "eq": eq, // == + "ge": ge, // >= + "gt": gt, // > + "le": le, // <= + "lt": lt, // < + "ne": ne, // != +} + +var builtinFuncs = createValueFuncs(builtins) + +// createValueFuncs turns a FuncMap into a map[string]reflect.Value +func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { + m := make(map[string]reflect.Value) + addValueFuncs(m, funcMap) + return m +} + +// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. +func addValueFuncs(out map[string]reflect.Value, in FuncMap) { + for name, fn := range in { + v := reflect.ValueOf(fn) + if v.Kind() != reflect.Func { + panic("value for " + name + " not a function") + } + if !goodFunc(v.Type()) { + panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) + } + out[name] = v + } +} + +// addFuncs adds to values the functions in funcs. It does no checking of the input - +// call addValueFuncs first. +func addFuncs(out, in FuncMap) { + for name, fn := range in { + out[name] = fn + } +} + +// goodFunc checks that the function or method has the right result signature. 
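+// That is, exactly one result, or two results where the second is of type error.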
+func goodFunc(typ reflect.Type) bool { + // We allow functions with 1 result or 2 results where the second is an error. + switch { + case typ.NumOut() == 1: + return true + case typ.NumOut() == 2 && typ.Out(1) == errorType: + return true + } + return false +} + +// findFunction looks for a function in the template, and global map. +func findFunction(name string, tmpl *Template) (reflect.Value, bool) { + if tmpl != nil && tmpl.common != nil { + if fn := tmpl.execFuncs[name]; fn.IsValid() { + return fn, true + } + } + if fn := builtinFuncs[name]; fn.IsValid() { + return fn, true + } + return reflect.Value{}, false +} + +// Indexing. + +// index returns the result of indexing its first argument by the following +// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each +// indexed item must be a map, slice, or array. +func index(item interface{}, indices ...interface{}) (interface{}, error) { + v := reflect.ValueOf(item) + for _, i := range indices { + index := reflect.ValueOf(i) + var isNil bool + if v, isNil = indirect(v); isNil { + return nil, fmt.Errorf("index of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.String: + var x int64 + switch index.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x = index.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x = int64(index.Uint()) + default: + return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) + } + if x < 0 || x >= int64(v.Len()) { + return nil, fmt.Errorf("index out of range: %d", x) + } + v = v.Index(int(x)) + case reflect.Map: + if !index.IsValid() { + index = reflect.Zero(v.Type().Key()) + } + if !index.Type().AssignableTo(v.Type().Key()) { + return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) + } + if x := v.MapIndex(index); x.IsValid() { + v = x + } else { + v = reflect.Zero(v.Type().Elem()) + } + default: + return nil, fmt.Errorf("can't index item of type %s", v.Type()) + } + } + return v.Interface(), nil +} + +// Length + +// length returns the length of the item, with an error if it has no defined length. +func length(item interface{}) (int, error) { + v, isNil := indirect(reflect.ValueOf(item)) + if isNil { + return 0, fmt.Errorf("len of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return v.Len(), nil + } + return 0, fmt.Errorf("len of type %s", v.Type()) +} + +// Function invocation + +// call returns the result of evaluating the first argument as a function. +// The function must return 1 result, or 2 results, the second of which is an error. +func call(fn interface{}, args ...interface{}) (interface{}, error) { + v := reflect.ValueOf(fn) + typ := v.Type() + if typ.Kind() != reflect.Func { + return nil, fmt.Errorf("non-function of type %s", typ) + } + if !goodFunc(typ) { + return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) + } + numIn := typ.NumIn() + var dddType reflect.Type + if typ.IsVariadic() { + if len(args) < numIn-1 { + return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) + } + dddType = typ.In(numIn - 1).Elem() + } else { + if len(args) != numIn { + return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) + } + } + argv := make([]reflect.Value, len(args)) + for i, arg := range args { + value := reflect.ValueOf(arg) + // Compute the expected type. 
Clumsy because of variadics. + var argType reflect.Type + if !typ.IsVariadic() || i < numIn-1 { + argType = typ.In(i) + } else { + argType = dddType + } + if !value.IsValid() && canBeNil(argType) { + value = reflect.Zero(argType) + } + if !value.Type().AssignableTo(argType) { + return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) + } + argv[i] = value + } + result := v.Call(argv) + if len(result) == 2 && !result[1].IsNil() { + return result[0].Interface(), result[1].Interface().(error) + } + return result[0].Interface(), nil +} + +// Boolean logic. + +func truth(a interface{}) bool { + t, _ := isTrue(reflect.ValueOf(a)) + return t +} + +// and computes the Boolean AND of its arguments, returning +// the first false argument it encounters, or the last argument. +func and(arg0 interface{}, args ...interface{}) interface{} { + if !truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if !truth(arg0) { + break + } + } + return arg0 +} + +// or computes the Boolean OR of its arguments, returning +// the first true argument it encounters, or the last argument. +func or(arg0 interface{}, args ...interface{}) interface{} { + if truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if truth(arg0) { + break + } + } + return arg0 +} + +// not returns the Boolean negation of its argument. +func not(arg interface{}) (truth bool) { + truth, _ = isTrue(reflect.ValueOf(arg)) + return !truth +} + +// Comparison. + +// TODO: Perhaps allow comparison between signed and unsigned integers. + +var ( + errBadComparisonType = errors.New("invalid type for comparison") + errBadComparison = errors.New("incompatible types for comparison") + errNoComparison = errors.New("missing argument for comparison") +) + +type kind int + +const ( + invalidKind kind = iota + boolKind + complexKind + intKind + floatKind + integerKind + stringKind + uintKind +) + +func basicKind(v reflect.Value) (kind, error) { + switch v.Kind() { + case reflect.Bool: + return boolKind, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intKind, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintKind, nil + case reflect.Float32, reflect.Float64: + return floatKind, nil + case reflect.Complex64, reflect.Complex128: + return complexKind, nil + case reflect.String: + return stringKind, nil + } + return invalidKind, errBadComparisonType +} + +// eq evaluates the comparison a == b || a == c || ... +func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { + v1 := reflect.ValueOf(arg1) + k1, err := basicKind(v1) + if err != nil { + return false, err + } + if len(arg2) == 0 { + return false, errNoComparison + } + for _, arg := range arg2 { + v2 := reflect.ValueOf(arg) + k2, err := basicKind(v2) + if err != nil { + return false, err + } + truth := false + if k1 != k2 { + // Special case: Can compare integer values regardless of type's sign. 
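+ // A negative signed value can never equal an unsigned value, so the sign is checked before converting.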
+ switch {
+ case k1 == intKind && k2 == uintKind:
+ truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
+ case k1 == uintKind && k2 == intKind:
+ truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
+ default:
+ return false, errBadComparison
+ }
+ } else {
+ switch k1 {
+ case boolKind:
+ truth = v1.Bool() == v2.Bool()
+ case complexKind:
+ truth = v1.Complex() == v2.Complex()
+ case floatKind:
+ truth = v1.Float() == v2.Float()
+ case intKind:
+ truth = v1.Int() == v2.Int()
+ case stringKind:
+ truth = v1.String() == v2.String()
+ case uintKind:
+ truth = v1.Uint() == v2.Uint()
+ default:
+ panic("invalid kind")
+ }
+ }
+ if truth {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// ne evaluates the comparison a != b.
+func ne(arg1, arg2 interface{}) (bool, error) {
+ // != is the inverse of ==.
+ equal, err := eq(arg1, arg2)
+ return !equal, err
+}
+
+// lt evaluates the comparison a < b.
+func lt(arg1, arg2 interface{}) (bool, error) {
+ v1 := reflect.ValueOf(arg1)
+ k1, err := basicKind(v1)
+ if err != nil {
+ return false, err
+ }
+ v2 := reflect.ValueOf(arg2)
+ k2, err := basicKind(v2)
+ if err != nil {
+ return false, err
+ }
+ truth := false
+ if k1 != k2 {
+ // Special case: Can compare integer values regardless of type's sign.
+ switch {
+ case k1 == intKind && k2 == uintKind:
+ truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
+ case k1 == uintKind && k2 == intKind:
+ truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
+ default:
+ return false, errBadComparison
+ }
+ } else {
+ switch k1 {
+ case boolKind, complexKind:
+ return false, errBadComparisonType
+ case floatKind:
+ truth = v1.Float() < v2.Float()
+ case intKind:
+ truth = v1.Int() < v2.Int()
+ case stringKind:
+ truth = v1.String() < v2.String()
+ case uintKind:
+ truth = v1.Uint() < v2.Uint()
+ default:
+ panic("invalid kind")
+ }
+ }
+ return truth, nil
+}
+
+// le evaluates the comparison a <= b.
+func le(arg1, arg2 interface{}) (bool, error) {
+ // <= is < or ==.
+ lessThan, err := lt(arg1, arg2)
+ if lessThan || err != nil {
+ return lessThan, err
+ }
+ return eq(arg1, arg2)
+}
+
+// gt evaluates the comparison a > b.
+func gt(arg1, arg2 interface{}) (bool, error) {
+ // > is the inverse of <=.
+ lessOrEqual, err := le(arg1, arg2)
+ if err != nil {
+ return false, err
+ }
+ return !lessOrEqual, nil
+}
+
+// ge evaluates the comparison a >= b.
+func ge(arg1, arg2 interface{}) (bool, error) {
+ // >= is the inverse of <.
+ lessThan, err := lt(arg1, arg2)
+ if err != nil {
+ return false, err
+ }
+ return !lessThan, nil
+}
+
+// HTML escaping.
+
+var (
+ htmlQuot = []byte("&#34;") // shorter than "&quot;"
+ htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
+ htmlAmp = []byte("&amp;")
+ htmlLt = []byte("&lt;")
+ htmlGt = []byte("&gt;")
+)
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+ last := 0
+ for i, c := range b {
+ var html []byte
+ switch c {
+ case '"':
+ html = htmlQuot
+ case '\'':
+ html = htmlApos
+ case '&':
+ html = htmlAmp
+ case '<':
+ html = htmlLt
+ case '>':
+ html = htmlGt
+ default:
+ continue
+ }
+ w.Write(b[last:i])
+ w.Write(html)
+ last = i + 1
+ }
+ w.Write(b[last:])
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+ // Avoid allocation if we can.
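+ // Editorial example (not part of the upstream source): strings containing
+ // none of the five special characters come back untouched, with no
+ // allocation, e.g.
+ // HTMLEscapeString("plain text") == "plain text"
+ // HTMLEscapeString("x < y && y > z") == "x &lt; y &amp;&amp; y &gt; z"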
+ if strings.IndexAny(s, `'"&<>`) < 0 {
+ return s
+ }
+ var b bytes.Buffer
+ HTMLEscape(&b, []byte(s))
+ return b.String()
+}
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...interface{}) string {
+ return HTMLEscapeString(evalArgs(args))
+}
+
+// JavaScript escaping.
+
+var (
+ jsLowUni = []byte(`\u00`)
+ hex = []byte("0123456789ABCDEF")
+
+ jsBackslash = []byte(`\\`)
+ jsApos = []byte(`\'`)
+ jsQuot = []byte(`\"`)
+ jsLt = []byte(`\x3C`)
+ jsGt = []byte(`\x3E`)
+)
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+ last := 0
+ for i := 0; i < len(b); i++ {
+ c := b[i]
+
+ if !jsIsSpecial(rune(c)) {
+ // fast path: nothing to do
+ continue
+ }
+ w.Write(b[last:i])
+
+ if c < utf8.RuneSelf {
+ // Quotes, slashes and angle brackets get quoted.
+ // Control characters get written as \u00XX.
+ switch c {
+ case '\\':
+ w.Write(jsBackslash)
+ case '\'':
+ w.Write(jsApos)
+ case '"':
+ w.Write(jsQuot)
+ case '<':
+ w.Write(jsLt)
+ case '>':
+ w.Write(jsGt)
+ default:
+ w.Write(jsLowUni)
+ t, b := c>>4, c&0x0f
+ w.Write(hex[t : t+1])
+ w.Write(hex[b : b+1])
+ }
+ } else {
+ // Unicode rune.
+ r, size := utf8.DecodeRune(b[i:])
+ if unicode.IsPrint(r) {
+ w.Write(b[i : i+size])
+ } else {
+ fmt.Fprintf(w, "\\u%04X", r)
+ }
+ i += size - 1
+ }
+ last = i + 1
+ }
+ w.Write(b[last:])
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+ // Avoid allocation if we can.
+ if strings.IndexFunc(s, jsIsSpecial) < 0 {
+ return s
+ }
+ var b bytes.Buffer
+ JSEscape(&b, []byte(s))
+ return b.String()
+}
+
+func jsIsSpecial(r rune) bool {
+ switch r {
+ case '\\', '\'', '"', '<', '>':
+ return true
+ }
+ return r < ' ' || utf8.RuneSelf <= r
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+ return JSEscapeString(evalArgs(args))
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+ return url.QueryEscape(evalArgs(args))
+}
+
+// evalArgs formats the list of arguments into a string. It is therefore equivalent to
+// fmt.Sprint(args...)
+// except that each argument is indirected (if a pointer), as required,
+// using the same rules as the default string evaluation during template
+// execution.
+func evalArgs(args []interface{}) string {
+ ok := false
+ var s string
+ // Fast path for simple common case.
+ if len(args) == 1 {
+ s, ok = args[0].(string)
+ }
+ if !ok {
+ for i, arg := range args {
+ a, ok := printableValue(reflect.ValueOf(arg))
+ if ok {
+ args[i] = a
+ } // else let fmt do its thing
+ }
+ s = fmt.Sprint(args...)
+ }
+ return s
+}
diff --git a/vendor/github.com/alecthomas/template/go.mod b/vendor/github.com/alecthomas/template/go.mod
new file mode 100644
index 0000000000..a70670ae21
--- /dev/null
+++ b/vendor/github.com/alecthomas/template/go.mod
@@ -0,0 +1 @@
+module github.com/alecthomas/template
diff --git a/vendor/github.com/alecthomas/template/helper.go b/vendor/github.com/alecthomas/template/helper.go
new file mode 100644
index 0000000000..3636fb54d6
--- /dev/null
+++ b/vendor/github.com/alecthomas/template/helper.go
@@ -0,0 +1,108 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Helper functions to make constructing templates easier. + +package template + +import ( + "fmt" + "io/ioutil" + "path/filepath" +) + +// Functions and methods to parse templates. + +// Must is a helper that wraps a call to a function returning (*Template, error) +// and panics if the error is non-nil. It is intended for use in variable +// initializations such as +// var t = template.Must(template.New("name").Parse("text")) +func Must(t *Template, err error) *Template { + if err != nil { + panic(err) + } + return t +} + +// ParseFiles creates a new Template and parses the template definitions from +// the named files. The returned template's name will have the (base) name and +// (parsed) contents of the first file. There must be at least one file. +// If an error occurs, parsing stops and the returned *Template is nil. +func ParseFiles(filenames ...string) (*Template, error) { + return parseFiles(nil, filenames...) +} + +// ParseFiles parses the named files and associates the resulting templates with +// t. If an error occurs, parsing stops and the returned template is nil; +// otherwise it is t. There must be at least one file. +func (t *Template) ParseFiles(filenames ...string) (*Template, error) { + return parseFiles(t, filenames...) +} + +// parseFiles is the helper for the method and function. If the argument +// template is nil, it is created from the first file. +func parseFiles(t *Template, filenames ...string) (*Template, error) { + if len(filenames) == 0 { + // Not really a problem, but be consistent. + return nil, fmt.Errorf("template: no files named in call to ParseFiles") + } + for _, filename := range filenames { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + s := string(b) + name := filepath.Base(filename) + // First template becomes return value if not already defined, + // and we use that one for subsequent New calls to associate + // all the templates together. Also, if this file has the same name + // as t, this file becomes the contents of t, so + // t, err := New(name).Funcs(xxx).ParseFiles(name) + // works. Otherwise we create a new template associated with t. + var tmpl *Template + if t == nil { + t = New(name) + } + if name == t.Name() { + tmpl = t + } else { + tmpl = t.New(name) + } + _, err = tmpl.Parse(s) + if err != nil { + return nil, err + } + } + return t, nil +} + +// ParseGlob creates a new Template and parses the template definitions from the +// files identified by the pattern, which must match at least one file. The +// returned template will have the (base) name and (parsed) contents of the +// first file matched by the pattern. ParseGlob is equivalent to calling +// ParseFiles with the list of files matched by the pattern. +func ParseGlob(pattern string) (*Template, error) { + return parseGlob(nil, pattern) +} + +// ParseGlob parses the template definitions in the files identified by the +// pattern and associates the resulting templates with t. The pattern is +// processed by filepath.Glob and must match at least one file. ParseGlob is +// equivalent to calling t.ParseFiles with the list of files matched by the +// pattern. +func (t *Template) ParseGlob(pattern string) (*Template, error) { + return parseGlob(t, pattern) +} + +// parseGlob is the implementation of the function and method ParseGlob. 
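+//
+// An illustrative call chain (editorial sketch, not part of the upstream
+// source; the file names and data value are hypothetical, and the API mirrors
+// the standard text/template package this code forks):
+//
+// t := template.Must(template.ParseGlob("templates/*.tmpl"))
+// err := t.ExecuteTemplate(os.Stdout, "index.tmpl", data)
+//
+// This loads every matching file into one associated template set, then
+// executes the member named after its base file name.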
+func parseGlob(t *Template, pattern string) (*Template, error) { + filenames, err := filepath.Glob(pattern) + if err != nil { + return nil, err + } + if len(filenames) == 0 { + return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) + } + return parseFiles(t, filenames...) +} diff --git a/vendor/github.com/alecthomas/template/parse/lex.go b/vendor/github.com/alecthomas/template/parse/lex.go new file mode 100644 index 0000000000..55f1c051e8 --- /dev/null +++ b/vendor/github.com/alecthomas/template/parse/lex.go @@ -0,0 +1,556 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package parse + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +// item represents a token or text string returned from the scanner. +type item struct { + typ itemType // The type of this item. + pos Pos // The starting position, in bytes, of this item in the input string. + val string // The value of this item. +} + +func (i item) String() string { + switch { + case i.typ == itemEOF: + return "EOF" + case i.typ == itemError: + return i.val + case i.typ > itemKeyword: + return fmt.Sprintf("<%s>", i.val) + case len(i.val) > 10: + return fmt.Sprintf("%.10q...", i.val) + } + return fmt.Sprintf("%q", i.val) +} + +// itemType identifies the type of lex items. +type itemType int + +const ( + itemError itemType = iota // error occurred; value is text of error + itemBool // boolean constant + itemChar // printable ASCII character; grab bag for comma etc. + itemCharConstant // character constant + itemComplex // complex constant (1+2i); imaginary is just a number + itemColonEquals // colon-equals (':=') introducing a declaration + itemEOF + itemField // alphanumeric identifier starting with '.' + itemIdentifier // alphanumeric identifier not starting with '.' + itemLeftDelim // left action delimiter + itemLeftParen // '(' inside action + itemNumber // simple number, including imaginary + itemPipe // pipe symbol + itemRawString // raw quoted string (includes quotes) + itemRightDelim // right action delimiter + itemElideNewline // elide newline after right delim + itemRightParen // ')' inside action + itemSpace // run of spaces separating arguments + itemString // quoted string (includes quotes) + itemText // plain text + itemVariable // variable starting with '$', such as '$' or '$1' or '$hello' + // Keywords appear after all the rest. + itemKeyword // used only to delimit the keywords + itemDot // the cursor, spelled '.' + itemDefine // define keyword + itemElse // else keyword + itemEnd // end keyword + itemIf // if keyword + itemNil // the untyped nil constant, easiest to treat as a keyword + itemRange // range keyword + itemTemplate // template keyword + itemWith // with keyword +) + +var key = map[string]itemType{ + ".": itemDot, + "define": itemDefine, + "else": itemElse, + "end": itemEnd, + "if": itemIf, + "range": itemRange, + "nil": itemNil, + "template": itemTemplate, + "with": itemWith, +} + +const eof = -1 + +// stateFn represents the state of the scanner as a function that returns the next state. +type stateFn func(*lexer) stateFn + +// lexer holds the state of the scanner. 
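+// The lexer runs concurrently: lex (below) starts run in its own goroutine,
+// and the parser pulls tokens out of the items channel through nextItem, so
+// scanning and parsing proceed in lockstep without buffering the whole token
+// stream. (Editorial note, inferred from lex, run, and nextItem in this file;
+// not part of the upstream source.)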
+type lexer struct { + name string // the name of the input; used only for error reports + input string // the string being scanned + leftDelim string // start of action + rightDelim string // end of action + state stateFn // the next lexing function to enter + pos Pos // current position in the input + start Pos // start position of this item + width Pos // width of last rune read from input + lastPos Pos // position of most recent item returned by nextItem + items chan item // channel of scanned items + parenDepth int // nesting depth of ( ) exprs +} + +// next returns the next rune in the input. +func (l *lexer) next() rune { + if int(l.pos) >= len(l.input) { + l.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(l.input[l.pos:]) + l.width = Pos(w) + l.pos += l.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (l *lexer) peek() rune { + r := l.next() + l.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (l *lexer) backup() { + l.pos -= l.width +} + +// emit passes an item back to the client. +func (l *lexer) emit(t itemType) { + l.items <- item{t, l.start, l.input[l.start:l.pos]} + l.start = l.pos +} + +// ignore skips over the pending input before this point. +func (l *lexer) ignore() { + l.start = l.pos +} + +// accept consumes the next rune if it's from the valid set. +func (l *lexer) accept(valid string) bool { + if strings.IndexRune(valid, l.next()) >= 0 { + return true + } + l.backup() + return false +} + +// acceptRun consumes a run of runes from the valid set. +func (l *lexer) acceptRun(valid string) { + for strings.IndexRune(valid, l.next()) >= 0 { + } + l.backup() +} + +// lineNumber reports which line we're on, based on the position of +// the previous item returned by nextItem. Doing it this way +// means we don't have to worry about peek double counting. +func (l *lexer) lineNumber() int { + return 1 + strings.Count(l.input[:l.lastPos], "\n") +} + +// errorf returns an error token and terminates the scan by passing +// back a nil pointer that will be the next state, terminating l.nextItem. +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} + return nil +} + +// nextItem returns the next item from the input. +func (l *lexer) nextItem() item { + item := <-l.items + l.lastPos = item.pos + return item +} + +// lex creates a new scanner for the input string. +func lex(name, input, left, right string) *lexer { + if left == "" { + left = leftDelim + } + if right == "" { + right = rightDelim + } + l := &lexer{ + name: name, + input: input, + leftDelim: left, + rightDelim: right, + items: make(chan item), + } + go l.run() + return l +} + +// run runs the state machine for the lexer. +func (l *lexer) run() { + for l.state = lexText; l.state != nil; { + l.state = l.state(l) + } +} + +// state functions + +const ( + leftDelim = "{{" + rightDelim = "}}" + leftComment = "/*" + rightComment = "*/" +) + +// lexText scans until an opening action delimiter, "{{". +func lexText(l *lexer) stateFn { + for { + if strings.HasPrefix(l.input[l.pos:], l.leftDelim) { + if l.pos > l.start { + l.emit(itemText) + } + return lexLeftDelim + } + if l.next() == eof { + break + } + } + // Correctly reached EOF. + if l.pos > l.start { + l.emit(itemText) + } + l.emit(itemEOF) + return nil +} + +// lexLeftDelim scans the left delimiter, which is known to be present. 
+func lexLeftDelim(l *lexer) stateFn { + l.pos += Pos(len(l.leftDelim)) + if strings.HasPrefix(l.input[l.pos:], leftComment) { + return lexComment + } + l.emit(itemLeftDelim) + l.parenDepth = 0 + return lexInsideAction +} + +// lexComment scans a comment. The left comment marker is known to be present. +func lexComment(l *lexer) stateFn { + l.pos += Pos(len(leftComment)) + i := strings.Index(l.input[l.pos:], rightComment) + if i < 0 { + return l.errorf("unclosed comment") + } + l.pos += Pos(i + len(rightComment)) + if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) { + return l.errorf("comment ends before closing delimiter") + + } + l.pos += Pos(len(l.rightDelim)) + l.ignore() + return lexText +} + +// lexRightDelim scans the right delimiter, which is known to be present. +func lexRightDelim(l *lexer) stateFn { + l.pos += Pos(len(l.rightDelim)) + l.emit(itemRightDelim) + if l.peek() == '\\' { + l.pos++ + l.emit(itemElideNewline) + } + return lexText +} + +// lexInsideAction scans the elements inside action delimiters. +func lexInsideAction(l *lexer) stateFn { + // Either number, quoted string, or identifier. + // Spaces separate arguments; runs of spaces turn into itemSpace. + // Pipe symbols separate and are emitted. + if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) { + if l.parenDepth == 0 { + return lexRightDelim + } + return l.errorf("unclosed left paren") + } + switch r := l.next(); { + case r == eof || isEndOfLine(r): + return l.errorf("unclosed action") + case isSpace(r): + return lexSpace + case r == ':': + if l.next() != '=' { + return l.errorf("expected :=") + } + l.emit(itemColonEquals) + case r == '|': + l.emit(itemPipe) + case r == '"': + return lexQuote + case r == '`': + return lexRawQuote + case r == '$': + return lexVariable + case r == '\'': + return lexChar + case r == '.': + // special look-ahead for ".field" so we don't break l.backup(). + if l.pos < Pos(len(l.input)) { + r := l.input[l.pos] + if r < '0' || '9' < r { + return lexField + } + } + fallthrough // '.' can start a number. + case r == '+' || r == '-' || ('0' <= r && r <= '9'): + l.backup() + return lexNumber + case isAlphaNumeric(r): + l.backup() + return lexIdentifier + case r == '(': + l.emit(itemLeftParen) + l.parenDepth++ + return lexInsideAction + case r == ')': + l.emit(itemRightParen) + l.parenDepth-- + if l.parenDepth < 0 { + return l.errorf("unexpected right paren %#U", r) + } + return lexInsideAction + case r <= unicode.MaxASCII && unicode.IsPrint(r): + l.emit(itemChar) + return lexInsideAction + default: + return l.errorf("unrecognized character in action: %#U", r) + } + return lexInsideAction +} + +// lexSpace scans a run of space characters. +// One space has already been seen. +func lexSpace(l *lexer) stateFn { + for isSpace(l.peek()) { + l.next() + } + l.emit(itemSpace) + return lexInsideAction +} + +// lexIdentifier scans an alphanumeric. +func lexIdentifier(l *lexer) stateFn { +Loop: + for { + switch r := l.next(); { + case isAlphaNumeric(r): + // absorb. + default: + l.backup() + word := l.input[l.start:l.pos] + if !l.atTerminator() { + return l.errorf("bad character %#U", r) + } + switch { + case key[word] > itemKeyword: + l.emit(key[word]) + case word[0] == '.': + l.emit(itemField) + case word == "true", word == "false": + l.emit(itemBool) + default: + l.emit(itemIdentifier) + } + break Loop + } + } + return lexInsideAction +} + +// lexField scans a field: .Alphanumeric. +// The . has been scanned. 
+func lexField(l *lexer) stateFn { + return lexFieldOrVariable(l, itemField) +} + +// lexVariable scans a Variable: $Alphanumeric. +// The $ has been scanned. +func lexVariable(l *lexer) stateFn { + if l.atTerminator() { // Nothing interesting follows -> "$". + l.emit(itemVariable) + return lexInsideAction + } + return lexFieldOrVariable(l, itemVariable) +} + +// lexVariable scans a field or variable: [.$]Alphanumeric. +// The . or $ has been scanned. +func lexFieldOrVariable(l *lexer, typ itemType) stateFn { + if l.atTerminator() { // Nothing interesting follows -> "." or "$". + if typ == itemVariable { + l.emit(itemVariable) + } else { + l.emit(itemDot) + } + return lexInsideAction + } + var r rune + for { + r = l.next() + if !isAlphaNumeric(r) { + l.backup() + break + } + } + if !l.atTerminator() { + return l.errorf("bad character %#U", r) + } + l.emit(typ) + return lexInsideAction +} + +// atTerminator reports whether the input is at valid termination character to +// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases +// like "$x+2" not being acceptable without a space, in case we decide one +// day to implement arithmetic. +func (l *lexer) atTerminator() bool { + r := l.peek() + if isSpace(r) || isEndOfLine(r) { + return true + } + switch r { + case eof, '.', ',', '|', ':', ')', '(': + return true + } + // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will + // succeed but should fail) but only in extremely rare cases caused by willfully + // bad choice of delimiter. + if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r { + return true + } + return false +} + +// lexChar scans a character constant. The initial quote is already +// scanned. Syntax checking is done by the parser. +func lexChar(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != eof && r != '\n' { + break + } + fallthrough + case eof, '\n': + return l.errorf("unterminated character constant") + case '\'': + break Loop + } + } + l.emit(itemCharConstant) + return lexInsideAction +} + +// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This +// isn't a perfect number scanner - for instance it accepts "." and "0x0.2" +// and "089" - but when it's wrong the input is invalid and the parser (via +// strconv) will notice. +func lexNumber(l *lexer) stateFn { + if !l.scanNumber() { + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + if sign := l.peek(); sign == '+' || sign == '-' { + // Complex: 1+2i. No spaces, must end in 'i'. + if !l.scanNumber() || l.input[l.pos-1] != 'i' { + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + l.emit(itemComplex) + } else { + l.emit(itemNumber) + } + return lexInsideAction +} + +func (l *lexer) scanNumber() bool { + // Optional leading sign. + l.accept("+-") + // Is it hex? + digits := "0123456789" + if l.accept("0") && l.accept("xX") { + digits = "0123456789abcdefABCDEF" + } + l.acceptRun(digits) + if l.accept(".") { + l.acceptRun(digits) + } + if l.accept("eE") { + l.accept("+-") + l.acceptRun("0123456789") + } + // Is it imaginary? + l.accept("i") + // Next thing mustn't be alphanumeric. + if isAlphaNumeric(l.peek()) { + l.next() + return false + } + return true +} + +// lexQuote scans a quoted string. 
+func lexQuote(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != eof && r != '\n' { + break + } + fallthrough + case eof, '\n': + return l.errorf("unterminated quoted string") + case '"': + break Loop + } + } + l.emit(itemString) + return lexInsideAction +} + +// lexRawQuote scans a raw quoted string. +func lexRawQuote(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case eof, '\n': + return l.errorf("unterminated raw quoted string") + case '`': + break Loop + } + } + l.emit(itemRawString) + return lexInsideAction +} + +// isSpace reports whether r is a space character. +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +// isEndOfLine reports whether r is an end-of-line character. +func isEndOfLine(r rune) bool { + return r == '\r' || r == '\n' +} + +// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. +func isAlphaNumeric(r rune) bool { + return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) +} diff --git a/vendor/github.com/alecthomas/template/parse/node.go b/vendor/github.com/alecthomas/template/parse/node.go new file mode 100644 index 0000000000..55c37f6dba --- /dev/null +++ b/vendor/github.com/alecthomas/template/parse/node.go @@ -0,0 +1,834 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Parse nodes. + +package parse + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +var textFormat = "%s" // Changed to "%q" in tests for better error messages. + +// A Node is an element in the parse tree. The interface is trivial. +// The interface contains an unexported method so that only +// types local to this package can satisfy it. +type Node interface { + Type() NodeType + String() string + // Copy does a deep copy of the Node and all its components. + // To avoid type assertions, some XxxNodes also have specialized + // CopyXxx methods that return *XxxNode. + Copy() Node + Position() Pos // byte position of start of node in full original input string + // tree returns the containing *Tree. + // It is unexported so all implementations of Node are in this package. + tree() *Tree +} + +// NodeType identifies the type of a parse tree node. +type NodeType int + +// Pos represents a byte position in the original input text from which +// this template was parsed. +type Pos int + +func (p Pos) Position() Pos { + return p +} + +// Type returns itself and provides an easy default implementation +// for embedding in a Node. Embedded in all non-trivial Nodes. +func (t NodeType) Type() NodeType { + return t +} + +const ( + NodeText NodeType = iota // Plain text. + NodeAction // A non-control action such as a field evaluation. + NodeBool // A boolean constant. + NodeChain // A sequence of field accesses. + NodeCommand // An element of a pipeline. + NodeDot // The cursor, dot. + nodeElse // An else action. Not added to tree. + nodeEnd // An end action. Not added to tree. + NodeField // A field or method name. + NodeIdentifier // An identifier; always a function name. + NodeIf // An if action. + NodeList // A list of Nodes. + NodeNil // An untyped nil constant. + NodeNumber // A numerical constant. + NodePipe // A pipeline of commands. + NodeRange // A range action. + NodeString // A string constant. + NodeTemplate // A template invocation action. + NodeVariable // A $ variable. + NodeWith // A with action. +) + +// Nodes. + +// ListNode holds a sequence of nodes. 
+type ListNode struct { + NodeType + Pos + tr *Tree + Nodes []Node // The element nodes in lexical order. +} + +func (t *Tree) newList(pos Pos) *ListNode { + return &ListNode{tr: t, NodeType: NodeList, Pos: pos} +} + +func (l *ListNode) append(n Node) { + l.Nodes = append(l.Nodes, n) +} + +func (l *ListNode) tree() *Tree { + return l.tr +} + +func (l *ListNode) String() string { + b := new(bytes.Buffer) + for _, n := range l.Nodes { + fmt.Fprint(b, n) + } + return b.String() +} + +func (l *ListNode) CopyList() *ListNode { + if l == nil { + return l + } + n := l.tr.newList(l.Pos) + for _, elem := range l.Nodes { + n.append(elem.Copy()) + } + return n +} + +func (l *ListNode) Copy() Node { + return l.CopyList() +} + +// TextNode holds plain text. +type TextNode struct { + NodeType + Pos + tr *Tree + Text []byte // The text; may span newlines. +} + +func (t *Tree) newText(pos Pos, text string) *TextNode { + return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)} +} + +func (t *TextNode) String() string { + return fmt.Sprintf(textFormat, t.Text) +} + +func (t *TextNode) tree() *Tree { + return t.tr +} + +func (t *TextNode) Copy() Node { + return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)} +} + +// PipeNode holds a pipeline with optional declaration +type PipeNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Decl []*VariableNode // Variable declarations in lexical order. + Cmds []*CommandNode // The commands in lexical order. +} + +func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode { + return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl} +} + +func (p *PipeNode) append(command *CommandNode) { + p.Cmds = append(p.Cmds, command) +} + +func (p *PipeNode) String() string { + s := "" + if len(p.Decl) > 0 { + for i, v := range p.Decl { + if i > 0 { + s += ", " + } + s += v.String() + } + s += " := " + } + for i, c := range p.Cmds { + if i > 0 { + s += " | " + } + s += c.String() + } + return s +} + +func (p *PipeNode) tree() *Tree { + return p.tr +} + +func (p *PipeNode) CopyPipe() *PipeNode { + if p == nil { + return p + } + var decl []*VariableNode + for _, d := range p.Decl { + decl = append(decl, d.Copy().(*VariableNode)) + } + n := p.tr.newPipeline(p.Pos, p.Line, decl) + for _, c := range p.Cmds { + n.append(c.Copy().(*CommandNode)) + } + return n +} + +func (p *PipeNode) Copy() Node { + return p.CopyPipe() +} + +// ActionNode holds an action (something bounded by delimiters). +// Control actions have their own nodes; ActionNode represents simple +// ones such as field evaluations and parenthesized pipelines. +type ActionNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Pipe *PipeNode // The pipeline in the action. +} + +func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode { + return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe} +} + +func (a *ActionNode) String() string { + return fmt.Sprintf("{{%s}}", a.Pipe) + +} + +func (a *ActionNode) tree() *Tree { + return a.tr +} + +func (a *ActionNode) Copy() Node { + return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe()) + +} + +// CommandNode holds a command (a pipeline inside an evaluating action). +type CommandNode struct { + NodeType + Pos + tr *Tree + Args []Node // Arguments in lexical order: Identifier, field, or constant. 
+} + +func (t *Tree) newCommand(pos Pos) *CommandNode { + return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos} +} + +func (c *CommandNode) append(arg Node) { + c.Args = append(c.Args, arg) +} + +func (c *CommandNode) String() string { + s := "" + for i, arg := range c.Args { + if i > 0 { + s += " " + } + if arg, ok := arg.(*PipeNode); ok { + s += "(" + arg.String() + ")" + continue + } + s += arg.String() + } + return s +} + +func (c *CommandNode) tree() *Tree { + return c.tr +} + +func (c *CommandNode) Copy() Node { + if c == nil { + return c + } + n := c.tr.newCommand(c.Pos) + for _, c := range c.Args { + n.append(c.Copy()) + } + return n +} + +// IdentifierNode holds an identifier. +type IdentifierNode struct { + NodeType + Pos + tr *Tree + Ident string // The identifier's name. +} + +// NewIdentifier returns a new IdentifierNode with the given identifier name. +func NewIdentifier(ident string) *IdentifierNode { + return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident} +} + +// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature. +// Chained for convenience. +// TODO: fix one day? +func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode { + i.Pos = pos + return i +} + +// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature. +// Chained for convenience. +// TODO: fix one day? +func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode { + i.tr = t + return i +} + +func (i *IdentifierNode) String() string { + return i.Ident +} + +func (i *IdentifierNode) tree() *Tree { + return i.tr +} + +func (i *IdentifierNode) Copy() Node { + return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos) +} + +// VariableNode holds a list of variable names, possibly with chained field +// accesses. The dollar sign is part of the (first) name. +type VariableNode struct { + NodeType + Pos + tr *Tree + Ident []string // Variable name and fields in lexical order. +} + +func (t *Tree) newVariable(pos Pos, ident string) *VariableNode { + return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")} +} + +func (v *VariableNode) String() string { + s := "" + for i, id := range v.Ident { + if i > 0 { + s += "." + } + s += id + } + return s +} + +func (v *VariableNode) tree() *Tree { + return v.tr +} + +func (v *VariableNode) Copy() Node { + return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)} +} + +// DotNode holds the special identifier '.'. +type DotNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newDot(pos Pos) *DotNode { + return &DotNode{tr: t, NodeType: NodeDot, Pos: pos} +} + +func (d *DotNode) Type() NodeType { + // Override method on embedded NodeType for API compatibility. + // TODO: Not really a problem; could change API without effect but + // api tool complains. + return NodeDot +} + +func (d *DotNode) String() string { + return "." +} + +func (d *DotNode) tree() *Tree { + return d.tr +} + +func (d *DotNode) Copy() Node { + return d.tr.newDot(d.Pos) +} + +// NilNode holds the special identifier 'nil' representing an untyped nil constant. +type NilNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newNil(pos Pos) *NilNode { + return &NilNode{tr: t, NodeType: NodeNil, Pos: pos} +} + +func (n *NilNode) Type() NodeType { + // Override method on embedded NodeType for API compatibility. + // TODO: Not really a problem; could change API without effect but + // api tool complains. 
+ return NodeNil +} + +func (n *NilNode) String() string { + return "nil" +} + +func (n *NilNode) tree() *Tree { + return n.tr +} + +func (n *NilNode) Copy() Node { + return n.tr.newNil(n.Pos) +} + +// FieldNode holds a field (identifier starting with '.'). +// The names may be chained ('.x.y'). +// The period is dropped from each ident. +type FieldNode struct { + NodeType + Pos + tr *Tree + Ident []string // The identifiers in lexical order. +} + +func (t *Tree) newField(pos Pos, ident string) *FieldNode { + return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period +} + +func (f *FieldNode) String() string { + s := "" + for _, id := range f.Ident { + s += "." + id + } + return s +} + +func (f *FieldNode) tree() *Tree { + return f.tr +} + +func (f *FieldNode) Copy() Node { + return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)} +} + +// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.'). +// The names may be chained ('.x.y'). +// The periods are dropped from each ident. +type ChainNode struct { + NodeType + Pos + tr *Tree + Node Node + Field []string // The identifiers in lexical order. +} + +func (t *Tree) newChain(pos Pos, node Node) *ChainNode { + return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node} +} + +// Add adds the named field (which should start with a period) to the end of the chain. +func (c *ChainNode) Add(field string) { + if len(field) == 0 || field[0] != '.' { + panic("no dot in field") + } + field = field[1:] // Remove leading dot. + if field == "" { + panic("empty field") + } + c.Field = append(c.Field, field) +} + +func (c *ChainNode) String() string { + s := c.Node.String() + if _, ok := c.Node.(*PipeNode); ok { + s = "(" + s + ")" + } + for _, field := range c.Field { + s += "." + field + } + return s +} + +func (c *ChainNode) tree() *Tree { + return c.tr +} + +func (c *ChainNode) Copy() Node { + return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)} +} + +// BoolNode holds a boolean constant. +type BoolNode struct { + NodeType + Pos + tr *Tree + True bool // The value of the boolean constant. +} + +func (t *Tree) newBool(pos Pos, true bool) *BoolNode { + return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true} +} + +func (b *BoolNode) String() string { + if b.True { + return "true" + } + return "false" +} + +func (b *BoolNode) tree() *Tree { + return b.tr +} + +func (b *BoolNode) Copy() Node { + return b.tr.newBool(b.Pos, b.True) +} + +// NumberNode holds a number: signed or unsigned integer, float, or complex. +// The value is parsed and stored under all the types that can represent the value. +// This simulates in a small amount of code the behavior of Go's ideal constants. +type NumberNode struct { + NodeType + Pos + tr *Tree + IsInt bool // Number has an integral value. + IsUint bool // Number has an unsigned integral value. + IsFloat bool // Number has a floating-point value. + IsComplex bool // Number is complex. + Int64 int64 // The signed integer value. + Uint64 uint64 // The unsigned integer value. + Float64 float64 // The floating-point value. + Complex128 complex128 // The complex value. + Text string // The original textual representation from the input. 
+} + +func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) { + n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text} + switch typ { + case itemCharConstant: + rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0]) + if err != nil { + return nil, err + } + if tail != "'" { + return nil, fmt.Errorf("malformed character constant: %s", text) + } + n.Int64 = int64(rune) + n.IsInt = true + n.Uint64 = uint64(rune) + n.IsUint = true + n.Float64 = float64(rune) // odd but those are the rules. + n.IsFloat = true + return n, nil + case itemComplex: + // fmt.Sscan can parse the pair, so let it do the work. + if _, err := fmt.Sscan(text, &n.Complex128); err != nil { + return nil, err + } + n.IsComplex = true + n.simplifyComplex() + return n, nil + } + // Imaginary constants can only be complex unless they are zero. + if len(text) > 0 && text[len(text)-1] == 'i' { + f, err := strconv.ParseFloat(text[:len(text)-1], 64) + if err == nil { + n.IsComplex = true + n.Complex128 = complex(0, f) + n.simplifyComplex() + return n, nil + } + } + // Do integer test first so we get 0x123 etc. + u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below. + if err == nil { + n.IsUint = true + n.Uint64 = u + } + i, err := strconv.ParseInt(text, 0, 64) + if err == nil { + n.IsInt = true + n.Int64 = i + if i == 0 { + n.IsUint = true // in case of -0. + n.Uint64 = u + } + } + // If an integer extraction succeeded, promote the float. + if n.IsInt { + n.IsFloat = true + n.Float64 = float64(n.Int64) + } else if n.IsUint { + n.IsFloat = true + n.Float64 = float64(n.Uint64) + } else { + f, err := strconv.ParseFloat(text, 64) + if err == nil { + n.IsFloat = true + n.Float64 = f + // If a floating-point extraction succeeded, extract the int if needed. + if !n.IsInt && float64(int64(f)) == f { + n.IsInt = true + n.Int64 = int64(f) + } + if !n.IsUint && float64(uint64(f)) == f { + n.IsUint = true + n.Uint64 = uint64(f) + } + } + } + if !n.IsInt && !n.IsUint && !n.IsFloat { + return nil, fmt.Errorf("illegal number syntax: %q", text) + } + return n, nil +} + +// simplifyComplex pulls out any other types that are represented by the complex number. +// These all require that the imaginary part be zero. +func (n *NumberNode) simplifyComplex() { + n.IsFloat = imag(n.Complex128) == 0 + if n.IsFloat { + n.Float64 = real(n.Complex128) + n.IsInt = float64(int64(n.Float64)) == n.Float64 + if n.IsInt { + n.Int64 = int64(n.Float64) + } + n.IsUint = float64(uint64(n.Float64)) == n.Float64 + if n.IsUint { + n.Uint64 = uint64(n.Float64) + } + } +} + +func (n *NumberNode) String() string { + return n.Text +} + +func (n *NumberNode) tree() *Tree { + return n.tr +} + +func (n *NumberNode) Copy() Node { + nn := new(NumberNode) + *nn = *n // Easy, fast, correct. + return nn +} + +// StringNode holds a string constant. The value has been "unquoted". +type StringNode struct { + NodeType + Pos + tr *Tree + Quoted string // The original text of the string, with quotes. + Text string // The string, after quote processing. +} + +func (t *Tree) newString(pos Pos, orig, text string) *StringNode { + return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text} +} + +func (s *StringNode) String() string { + return s.Quoted +} + +func (s *StringNode) tree() *Tree { + return s.tr +} + +func (s *StringNode) Copy() Node { + return s.tr.newString(s.Pos, s.Quoted, s.Text) +} + +// endNode represents an {{end}} action. +// It does not appear in the final parse tree. 
+type endNode struct { + NodeType + Pos + tr *Tree +} + +func (t *Tree) newEnd(pos Pos) *endNode { + return &endNode{tr: t, NodeType: nodeEnd, Pos: pos} +} + +func (e *endNode) String() string { + return "{{end}}" +} + +func (e *endNode) tree() *Tree { + return e.tr +} + +func (e *endNode) Copy() Node { + return e.tr.newEnd(e.Pos) +} + +// elseNode represents an {{else}} action. Does not appear in the final tree. +type elseNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) +} + +func (t *Tree) newElse(pos Pos, line int) *elseNode { + return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line} +} + +func (e *elseNode) Type() NodeType { + return nodeElse +} + +func (e *elseNode) String() string { + return "{{else}}" +} + +func (e *elseNode) tree() *Tree { + return e.tr +} + +func (e *elseNode) Copy() Node { + return e.tr.newElse(e.Pos, e.Line) +} + +// BranchNode is the common representation of if, range, and with. +type BranchNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Pipe *PipeNode // The pipeline to be evaluated. + List *ListNode // What to execute if the value is non-empty. + ElseList *ListNode // What to execute if the value is empty (nil if absent). +} + +func (b *BranchNode) String() string { + name := "" + switch b.NodeType { + case NodeIf: + name = "if" + case NodeRange: + name = "range" + case NodeWith: + name = "with" + default: + panic("unknown branch type") + } + if b.ElseList != nil { + return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList) + } + return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List) +} + +func (b *BranchNode) tree() *Tree { + return b.tr +} + +func (b *BranchNode) Copy() Node { + switch b.NodeType { + case NodeIf: + return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + case NodeRange: + return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + case NodeWith: + return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) + default: + panic("unknown branch type") + } +} + +// IfNode represents an {{if}} action and its commands. +type IfNode struct { + BranchNode +} + +func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode { + return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (i *IfNode) Copy() Node { + return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList()) +} + +// RangeNode represents a {{range}} action and its commands. +type RangeNode struct { + BranchNode +} + +func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode { + return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (r *RangeNode) Copy() Node { + return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList()) +} + +// WithNode represents a {{with}} action and its commands. 
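+// For example (editorial illustration, not part of the upstream source;
+// .Author and .Name are hypothetical fields):
+//
+// {{with .Author}}by {{.Name}}{{else}}anonymous{{end}}
+//
+// binds dot to .Author for the first block when it is non-empty, and falls
+// back to the else block otherwise.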
+type WithNode struct { + BranchNode +} + +func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode { + return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} +} + +func (w *WithNode) Copy() Node { + return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList()) +} + +// TemplateNode represents a {{template}} action. +type TemplateNode struct { + NodeType + Pos + tr *Tree + Line int // The line number in the input (deprecated; kept for compatibility) + Name string // The name of the template (unquoted). + Pipe *PipeNode // The command to evaluate as dot for the template. +} + +func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode { + return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe} +} + +func (t *TemplateNode) String() string { + if t.Pipe == nil { + return fmt.Sprintf("{{template %q}}", t.Name) + } + return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe) +} + +func (t *TemplateNode) tree() *Tree { + return t.tr +} + +func (t *TemplateNode) Copy() Node { + return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe()) +} diff --git a/vendor/github.com/alecthomas/template/parse/parse.go b/vendor/github.com/alecthomas/template/parse/parse.go new file mode 100644 index 0000000000..0d77ade871 --- /dev/null +++ b/vendor/github.com/alecthomas/template/parse/parse.go @@ -0,0 +1,700 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package parse builds parse trees for templates as defined by text/template +// and html/template. Clients should use those packages to construct templates +// rather than this one, which provides shared internal data structures not +// intended for general use. +package parse + +import ( + "bytes" + "fmt" + "runtime" + "strconv" + "strings" +) + +// Tree is the representation of a single parsed template. +type Tree struct { + Name string // name of the template represented by the tree. + ParseName string // name of the top-level template during parsing, for error messages. + Root *ListNode // top-level root of the tree. + text string // text parsed to create the template (or its parent) + // Parsing only; cleared after parse. + funcs []map[string]interface{} + lex *lexer + token [3]item // three-token lookahead for parser. + peekCount int + vars []string // variables defined at the moment. +} + +// Copy returns a copy of the Tree. Any parsing state is discarded. +func (t *Tree) Copy() *Tree { + if t == nil { + return nil + } + return &Tree{ + Name: t.Name, + ParseName: t.ParseName, + Root: t.Root.CopyList(), + text: t.text, + } +} + +// Parse returns a map from template name to parse.Tree, created by parsing the +// templates described in the argument string. The top-level template will be +// given the specified name. If an error is encountered, parsing stops and an +// empty map is returned with the error. +func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) { + treeSet = make(map[string]*Tree) + t := New(name) + t.text = text + _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...) + return +} + +// next returns the next token. 
+func (t *Tree) next() item { + if t.peekCount > 0 { + t.peekCount-- + } else { + t.token[0] = t.lex.nextItem() + } + return t.token[t.peekCount] +} + +// backup backs the input stream up one token. +func (t *Tree) backup() { + t.peekCount++ +} + +// backup2 backs the input stream up two tokens. +// The zeroth token is already there. +func (t *Tree) backup2(t1 item) { + t.token[1] = t1 + t.peekCount = 2 +} + +// backup3 backs the input stream up three tokens +// The zeroth token is already there. +func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back. + t.token[1] = t1 + t.token[2] = t2 + t.peekCount = 3 +} + +// peek returns but does not consume the next token. +func (t *Tree) peek() item { + if t.peekCount > 0 { + return t.token[t.peekCount-1] + } + t.peekCount = 1 + t.token[0] = t.lex.nextItem() + return t.token[0] +} + +// nextNonSpace returns the next non-space token. +func (t *Tree) nextNonSpace() (token item) { + for { + token = t.next() + if token.typ != itemSpace { + break + } + } + return token +} + +// peekNonSpace returns but does not consume the next non-space token. +func (t *Tree) peekNonSpace() (token item) { + for { + token = t.next() + if token.typ != itemSpace { + break + } + } + t.backup() + return token +} + +// Parsing. + +// New allocates a new parse tree with the given name. +func New(name string, funcs ...map[string]interface{}) *Tree { + return &Tree{ + Name: name, + funcs: funcs, + } +} + +// ErrorContext returns a textual representation of the location of the node in the input text. +// The receiver is only used when the node does not have a pointer to the tree inside, +// which can occur in old code. +func (t *Tree) ErrorContext(n Node) (location, context string) { + pos := int(n.Position()) + tree := n.tree() + if tree == nil { + tree = t + } + text := tree.text[:pos] + byteNum := strings.LastIndex(text, "\n") + if byteNum == -1 { + byteNum = pos // On first line. + } else { + byteNum++ // After the newline. + byteNum = pos - byteNum + } + lineNum := 1 + strings.Count(text, "\n") + context = n.String() + if len(context) > 20 { + context = fmt.Sprintf("%.20s...", context) + } + return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context +} + +// errorf formats the error and terminates processing. +func (t *Tree) errorf(format string, args ...interface{}) { + t.Root = nil + format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format) + panic(fmt.Errorf(format, args...)) +} + +// error terminates processing. +func (t *Tree) error(err error) { + t.errorf("%s", err) +} + +// expect consumes the next token and guarantees it has the required type. +func (t *Tree) expect(expected itemType, context string) item { + token := t.nextNonSpace() + if token.typ != expected { + t.unexpected(token, context) + } + return token +} + +// expectOneOf consumes the next token and guarantees it has one of the required types. +func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item { + token := t.nextNonSpace() + if token.typ != expected1 && token.typ != expected2 { + t.unexpected(token, context) + } + return token +} + +// unexpected complains about the token and terminates processing. +func (t *Tree) unexpected(token item, context string) { + t.errorf("unexpected %s in %s", token, context) +} + +// recover is the handler that turns panics into returns from the top level of Parse. 
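+// The parser reports errors by panicking with an error value (see errorf) and
+// relies on this deferred handler to turn the panic back into an ordinary
+// error return from Parse; genuine runtime.Errors are re-panicked so real
+// bugs still crash. (Editorial note summarizing the code below; not part of
+// the upstream source.)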
+func (t *Tree) recover(errp *error) { + e := recover() + if e != nil { + if _, ok := e.(runtime.Error); ok { + panic(e) + } + if t != nil { + t.stopParse() + } + *errp = e.(error) + } + return +} + +// startParse initializes the parser, using the lexer. +func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) { + t.Root = nil + t.lex = lex + t.vars = []string{"$"} + t.funcs = funcs +} + +// stopParse terminates parsing. +func (t *Tree) stopParse() { + t.lex = nil + t.vars = nil + t.funcs = nil +} + +// Parse parses the template definition string to construct a representation of +// the template for execution. If either action delimiter string is empty, the +// default ("{{" or "}}") is used. Embedded template definitions are added to +// the treeSet map. +func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) { + defer t.recover(&err) + t.ParseName = t.Name + t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim)) + t.text = text + t.parse(treeSet) + t.add(treeSet) + t.stopParse() + return t, nil +} + +// add adds tree to the treeSet. +func (t *Tree) add(treeSet map[string]*Tree) { + tree := treeSet[t.Name] + if tree == nil || IsEmptyTree(tree.Root) { + treeSet[t.Name] = t + return + } + if !IsEmptyTree(t.Root) { + t.errorf("template: multiple definition of template %q", t.Name) + } +} + +// IsEmptyTree reports whether this tree (node) is empty of everything but space. +func IsEmptyTree(n Node) bool { + switch n := n.(type) { + case nil: + return true + case *ActionNode: + case *IfNode: + case *ListNode: + for _, node := range n.Nodes { + if !IsEmptyTree(node) { + return false + } + } + return true + case *RangeNode: + case *TemplateNode: + case *TextNode: + return len(bytes.TrimSpace(n.Text)) == 0 + case *WithNode: + default: + panic("unknown node: " + n.String()) + } + return false +} + +// parse is the top-level parser for a template, essentially the same +// as itemList except it also parses {{define}} actions. +// It runs to EOF. +func (t *Tree) parse(treeSet map[string]*Tree) (next Node) { + t.Root = t.newList(t.peek().pos) + for t.peek().typ != itemEOF { + if t.peek().typ == itemLeftDelim { + delim := t.next() + if t.nextNonSpace().typ == itemDefine { + newT := New("definition") // name will be updated once we know it. + newT.text = t.text + newT.ParseName = t.ParseName + newT.startParse(t.funcs, t.lex) + newT.parseDefinition(treeSet) + continue + } + t.backup2(delim) + } + n := t.textOrAction() + if n.Type() == nodeEnd { + t.errorf("unexpected %s", n) + } + t.Root.append(n) + } + return nil +} + +// parseDefinition parses a {{define}} ... {{end}} template definition and +// installs the definition in the treeSet map. The "define" keyword has already +// been scanned. +func (t *Tree) parseDefinition(treeSet map[string]*Tree) { + const context = "define clause" + name := t.expectOneOf(itemString, itemRawString, context) + var err error + t.Name, err = strconv.Unquote(name.val) + if err != nil { + t.error(err) + } + t.expect(itemRightDelim, context) + var end Node + t.Root, end = t.itemList() + if end.Type() != nodeEnd { + t.errorf("unexpected %s in %s", end, context) + } + t.add(treeSet) + t.stopParse() +} + +// itemList: +// textOrAction* +// Terminates at {{end}} or {{else}}, returned separately. 
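+// For example (editorial illustration, not part of the upstream source), in
+//
+// {{if .Ok}}yes{{else}}no{{end}}
+//
+// one itemList call collects "yes" and stops at {{else}}, and a second
+// collects "no" and stops at {{end}}.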
+func (t *Tree) itemList() (list *ListNode, next Node) { + list = t.newList(t.peekNonSpace().pos) + for t.peekNonSpace().typ != itemEOF { + n := t.textOrAction() + switch n.Type() { + case nodeEnd, nodeElse: + return list, n + } + list.append(n) + } + t.errorf("unexpected EOF") + return +} + +// textOrAction: +// text | action +func (t *Tree) textOrAction() Node { + switch token := t.nextNonSpace(); token.typ { + case itemElideNewline: + return t.elideNewline() + case itemText: + return t.newText(token.pos, token.val) + case itemLeftDelim: + return t.action() + default: + t.unexpected(token, "input") + } + return nil +} + +// elideNewline: +// Remove newlines trailing rightDelim if \\ is present. +func (t *Tree) elideNewline() Node { + token := t.peek() + if token.typ != itemText { + t.unexpected(token, "input") + return nil + } + + t.next() + stripped := strings.TrimLeft(token.val, "\n\r") + diff := len(token.val) - len(stripped) + if diff > 0 { + // This is a bit nasty. We mutate the token in-place to remove + // preceding newlines. + token.pos += Pos(diff) + token.val = stripped + } + return t.newText(token.pos, token.val) +} + +// Action: +// control +// command ("|" command)* +// Left delim is past. Now get actions. +// First word could be a keyword such as range. +func (t *Tree) action() (n Node) { + switch token := t.nextNonSpace(); token.typ { + case itemElse: + return t.elseControl() + case itemEnd: + return t.endControl() + case itemIf: + return t.ifControl() + case itemRange: + return t.rangeControl() + case itemTemplate: + return t.templateControl() + case itemWith: + return t.withControl() + } + t.backup() + // Do not pop variables; they persist until "end". + return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command")) +} + +// Pipeline: +// declarations? command ('|' command)* +func (t *Tree) pipeline(context string) (pipe *PipeNode) { + var decl []*VariableNode + pos := t.peekNonSpace().pos + // Are there declarations? + for { + if v := t.peekNonSpace(); v.typ == itemVariable { + t.next() + // Since space is a token, we need 3-token look-ahead here in the worst case: + // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an + // argument variable rather than a declaration. So remember the token + // adjacent to the variable so we can push it back if necessary. 
+			tokenAfterVariable := t.peek()
+			if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
+				t.nextNonSpace()
+				variable := t.newVariable(v.pos, v.val)
+				decl = append(decl, variable)
+				t.vars = append(t.vars, v.val)
+				if next.typ == itemChar && next.val == "," {
+					if context == "range" && len(decl) < 2 {
+						continue
+					}
+					t.errorf("too many declarations in %s", context)
+				}
+			} else if tokenAfterVariable.typ == itemSpace {
+				t.backup3(v, tokenAfterVariable)
+			} else {
+				t.backup2(v)
+			}
+		}
+		break
+	}
+	pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
+	for {
+		switch token := t.nextNonSpace(); token.typ {
+		case itemRightDelim, itemRightParen:
+			if len(pipe.Cmds) == 0 {
+				t.errorf("missing value for %s", context)
+			}
+			if token.typ == itemRightParen {
+				t.backup()
+			}
+			return
+		case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
+			itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
+			t.backup()
+			pipe.append(t.command())
+		default:
+			t.unexpected(token, context)
+		}
+	}
+}
+
+func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
+	defer t.popVars(len(t.vars))
+	line = t.lex.lineNumber()
+	pipe = t.pipeline(context)
+	var next Node
+	list, next = t.itemList()
+	switch next.Type() {
+	case nodeEnd: // done
+	case nodeElse:
+		if allowElseIf {
+			// Special case for "else if". If the "else" is followed immediately by an "if",
+			// the elseControl will have left the "if" token pending. Treat
+			//	{{if a}}_{{else if b}}_{{end}}
+			// as
+			//	{{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
+			// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
+			// is assumed. This technique works even for long if-else-if chains.
+			// TODO: Should we allow else-if in with and range?
+			if t.peek().typ == itemIf {
+				t.next() // Consume the "if" token.
+				elseList = t.newList(next.Position())
+				elseList.append(t.ifControl())
+				// Do not consume the next item - only one {{end}} required.
+				break
+			}
+		}
+		elseList, next = t.itemList()
+		if next.Type() != nodeEnd {
+			t.errorf("expected end; found %s", next)
+		}
+	}
+	return pipe.Position(), line, pipe, list, elseList
+}
+
+// If:
+//	{{if pipeline}} itemList {{end}}
+//	{{if pipeline}} itemList {{else}} itemList {{end}}
+// If keyword is past.
+func (t *Tree) ifControl() Node {
+	return t.newIf(t.parseControl(true, "if"))
+}
+
+// Range:
+//	{{range pipeline}} itemList {{end}}
+//	{{range pipeline}} itemList {{else}} itemList {{end}}
+// Range keyword is past.
+func (t *Tree) rangeControl() Node {
+	return t.newRange(t.parseControl(false, "range"))
+}
+
+// With:
+//	{{with pipeline}} itemList {{end}}
+//	{{with pipeline}} itemList {{else}} itemList {{end}}
+// With keyword is past.
+func (t *Tree) withControl() Node {
+	return t.newWith(t.parseControl(false, "with"))
+}
+
+// End:
+//	{{end}}
+// End keyword is past.
+func (t *Tree) endControl() Node {
+	return t.newEnd(t.expect(itemRightDelim, "end").pos)
+}
+
+// Else:
+//	{{else}}
+// Else keyword is past.
+func (t *Tree) elseControl() Node {
+	// Special case for "else if".
+	peek := t.peekNonSpace()
+	if peek.typ == itemIf {
+		// We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
+		return t.newElse(peek.pos, t.lex.lineNumber())
+	}
+	return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
+}
+
+// Template:
+//	{{template stringValue pipeline}}
+// Template keyword is past.
The name must be something that can evaluate +// to a string. +func (t *Tree) templateControl() Node { + var name string + token := t.nextNonSpace() + switch token.typ { + case itemString, itemRawString: + s, err := strconv.Unquote(token.val) + if err != nil { + t.error(err) + } + name = s + default: + t.unexpected(token, "template invocation") + } + var pipe *PipeNode + if t.nextNonSpace().typ != itemRightDelim { + t.backup() + // Do not pop variables; they persist until "end". + pipe = t.pipeline("template") + } + return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe) +} + +// command: +// operand (space operand)* +// space-separated arguments up to a pipeline character or right delimiter. +// we consume the pipe character but leave the right delim to terminate the action. +func (t *Tree) command() *CommandNode { + cmd := t.newCommand(t.peekNonSpace().pos) + for { + t.peekNonSpace() // skip leading spaces. + operand := t.operand() + if operand != nil { + cmd.append(operand) + } + switch token := t.next(); token.typ { + case itemSpace: + continue + case itemError: + t.errorf("%s", token.val) + case itemRightDelim, itemRightParen: + t.backup() + case itemPipe: + default: + t.errorf("unexpected %s in operand; missing space?", token) + } + break + } + if len(cmd.Args) == 0 { + t.errorf("empty command") + } + return cmd +} + +// operand: +// term .Field* +// An operand is a space-separated component of a command, +// a term possibly followed by field accesses. +// A nil return means the next item is not an operand. +func (t *Tree) operand() Node { + node := t.term() + if node == nil { + return nil + } + if t.peek().typ == itemField { + chain := t.newChain(t.peek().pos, node) + for t.peek().typ == itemField { + chain.Add(t.next().val) + } + // Compatibility with original API: If the term is of type NodeField + // or NodeVariable, just put more fields on the original. + // Otherwise, keep the Chain node. + // TODO: Switch to Chains always when we can. + switch node.Type() { + case NodeField: + node = t.newField(chain.Position(), chain.String()) + case NodeVariable: + node = t.newVariable(chain.Position(), chain.String()) + default: + node = chain + } + } + return node +} + +// term: +// literal (number, string, nil, boolean) +// function (identifier) +// . +// .Field +// $ +// '(' pipeline ')' +// A term is a simple "expression". +// A nil return means the next item is not a term. 
+func (t *Tree) term() Node { + switch token := t.nextNonSpace(); token.typ { + case itemError: + t.errorf("%s", token.val) + case itemIdentifier: + if !t.hasFunction(token.val) { + t.errorf("function %q not defined", token.val) + } + return NewIdentifier(token.val).SetTree(t).SetPos(token.pos) + case itemDot: + return t.newDot(token.pos) + case itemNil: + return t.newNil(token.pos) + case itemVariable: + return t.useVar(token.pos, token.val) + case itemField: + return t.newField(token.pos, token.val) + case itemBool: + return t.newBool(token.pos, token.val == "true") + case itemCharConstant, itemComplex, itemNumber: + number, err := t.newNumber(token.pos, token.val, token.typ) + if err != nil { + t.error(err) + } + return number + case itemLeftParen: + pipe := t.pipeline("parenthesized pipeline") + if token := t.next(); token.typ != itemRightParen { + t.errorf("unclosed right paren: unexpected %s", token) + } + return pipe + case itemString, itemRawString: + s, err := strconv.Unquote(token.val) + if err != nil { + t.error(err) + } + return t.newString(token.pos, token.val, s) + } + t.backup() + return nil +} + +// hasFunction reports if a function name exists in the Tree's maps. +func (t *Tree) hasFunction(name string) bool { + for _, funcMap := range t.funcs { + if funcMap == nil { + continue + } + if funcMap[name] != nil { + return true + } + } + return false +} + +// popVars trims the variable list to the specified length +func (t *Tree) popVars(n int) { + t.vars = t.vars[:n] +} + +// useVar returns a node for a variable reference. It errors if the +// variable is not defined. +func (t *Tree) useVar(pos Pos, name string) Node { + v := t.newVariable(pos, name) + for _, varName := range t.vars { + if varName == v.Ident[0] { + return v + } + } + t.errorf("undefined variable %q", v.Ident[0]) + return nil +} diff --git a/vendor/github.com/alecthomas/template/template.go b/vendor/github.com/alecthomas/template/template.go new file mode 100644 index 0000000000..447ed2abae --- /dev/null +++ b/vendor/github.com/alecthomas/template/template.go @@ -0,0 +1,218 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package template + +import ( + "fmt" + "reflect" + + "github.com/alecthomas/template/parse" +) + +// common holds the information shared by related templates. +type common struct { + tmpl map[string]*Template + // We use two maps, one for parsing and one for execution. + // This separation makes the API cleaner since it doesn't + // expose reflection to the client. + parseFuncs FuncMap + execFuncs map[string]reflect.Value +} + +// Template is the representation of a parsed template. The *parse.Tree +// field is exported only for use by html/template and should be treated +// as unexported by all other clients. +type Template struct { + name string + *parse.Tree + *common + leftDelim string + rightDelim string +} + +// New allocates a new template with the given name. +func New(name string) *Template { + return &Template{ + name: name, + } +} + +// Name returns the name of the template. +func (t *Template) Name() string { + return t.name +} + +// New allocates a new template associated with the given one and with the same +// delimiters. The association, which is transitive, allows one template to +// invoke another with a {{template}} action. 
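+//
+// An illustrative sketch of the association (names are examples only):
+//
+//	base := template.New("base")
+//	page := base.New("page") // shares base's function maps and delimiters;
+//	                         // once parsed, invocable via {{template "page"}}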
+func (t *Template) New(name string) *Template { + t.init() + return &Template{ + name: name, + common: t.common, + leftDelim: t.leftDelim, + rightDelim: t.rightDelim, + } +} + +func (t *Template) init() { + if t.common == nil { + t.common = new(common) + t.tmpl = make(map[string]*Template) + t.parseFuncs = make(FuncMap) + t.execFuncs = make(map[string]reflect.Value) + } +} + +// Clone returns a duplicate of the template, including all associated +// templates. The actual representation is not copied, but the name space of +// associated templates is, so further calls to Parse in the copy will add +// templates to the copy but not to the original. Clone can be used to prepare +// common templates and use them with variant definitions for other templates +// by adding the variants after the clone is made. +func (t *Template) Clone() (*Template, error) { + nt := t.copy(nil) + nt.init() + nt.tmpl[t.name] = nt + for k, v := range t.tmpl { + if k == t.name { // Already installed. + continue + } + // The associated templates share nt's common structure. + tmpl := v.copy(nt.common) + nt.tmpl[k] = tmpl + } + for k, v := range t.parseFuncs { + nt.parseFuncs[k] = v + } + for k, v := range t.execFuncs { + nt.execFuncs[k] = v + } + return nt, nil +} + +// copy returns a shallow copy of t, with common set to the argument. +func (t *Template) copy(c *common) *Template { + nt := New(t.name) + nt.Tree = t.Tree + nt.common = c + nt.leftDelim = t.leftDelim + nt.rightDelim = t.rightDelim + return nt +} + +// AddParseTree creates a new template with the name and parse tree +// and associates it with t. +func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) { + if t.common != nil && t.tmpl[name] != nil { + return nil, fmt.Errorf("template: redefinition of template %q", name) + } + nt := t.New(name) + nt.Tree = tree + t.tmpl[name] = nt + return nt, nil +} + +// Templates returns a slice of the templates associated with t, including t +// itself. +func (t *Template) Templates() []*Template { + if t.common == nil { + return nil + } + // Return a slice so we don't expose the map. + m := make([]*Template, 0, len(t.tmpl)) + for _, v := range t.tmpl { + m = append(m, v) + } + return m +} + +// Delims sets the action delimiters to the specified strings, to be used in +// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template +// definitions will inherit the settings. An empty delimiter stands for the +// corresponding default: {{ or }}. +// The return value is the template, so calls can be chained. +func (t *Template) Delims(left, right string) *Template { + t.leftDelim = left + t.rightDelim = right + return t +} + +// Funcs adds the elements of the argument map to the template's function map. +// It panics if a value in the map is not a function with appropriate return +// type. However, it is legal to overwrite elements of the map. The return +// value is the template, so calls can be chained. +func (t *Template) Funcs(funcMap FuncMap) *Template { + t.init() + addValueFuncs(t.execFuncs, funcMap) + addFuncs(t.parseFuncs, funcMap) + return t +} + +// Lookup returns the template with the given name that is associated with t, +// or nil if there is no such template. +func (t *Template) Lookup(name string) *Template { + if t.common == nil { + return nil + } + return t.tmpl[name] +} + +// Parse parses a string into a template. Nested template definitions will be +// associated with the top-level template t. 
Parse may be called multiple times
+// to parse definitions of templates to associate with t. It is an error if a
+// resulting template is non-empty (contains content other than template
+// definitions) and would replace a non-empty template with the same name.
+// (In multiple calls to Parse with the same receiver template, only one call
+// can contain text other than space, comments, and template definitions.)
+func (t *Template) Parse(text string) (*Template, error) {
+	t.init()
+	trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
+	if err != nil {
+		return nil, err
+	}
+	// Add the newly parsed trees, including the one for t, into our common structure.
+	for name, tree := range trees {
+		// If the name we parsed is the name of this template, overwrite this template.
+		// The associate method checks it's not a redefinition.
+		tmpl := t
+		if name != t.name {
+			tmpl = t.New(name)
+		}
+		// Even if t == tmpl, we need to install it in the common.tmpl map.
+		if replace, err := t.associate(tmpl, tree); err != nil {
+			return nil, err
+		} else if replace {
+			tmpl.Tree = tree
+		}
+		tmpl.leftDelim = t.leftDelim
+		tmpl.rightDelim = t.rightDelim
+	}
+	return t, nil
+}
+
+// associate installs the new template into the group of templates associated
+// with t. It is an error to reuse a name except to overwrite an empty
+// template. The two are already known to share the common structure.
+// The boolean return value reports whether to store this tree as t.Tree.
+func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
+	if new.common != t.common {
+		panic("internal error: associate not common")
+	}
+	name := new.name
+	if old := t.tmpl[name]; old != nil {
+		oldIsEmpty := parse.IsEmptyTree(old.Root)
+		newIsEmpty := parse.IsEmptyTree(tree.Root)
+		if newIsEmpty {
+			// Whether old is empty or not, new is empty; no reason to replace old.
+			return false, nil
+		}
+		if !oldIsEmpty {
+			return false, fmt.Errorf("template: redefinition of template %q", name)
+		}
+	}
+	t.tmpl[name] = new
+	return true, nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go
new file mode 100644
index 0000000000..583025ce53
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go
@@ -0,0 +1,78 @@
+package grpc_ctxtags
+
+import (
+	"context"
+)
+
+type ctxMarker struct{}
+
+var (
+	// ctxMarkerKey is the Context value marker used by *all* logging middleware.
+	// All middleware that stores or retrieves Tags must use this key.
+	ctxMarkerKey = &ctxMarker{}
+	// NoopTags is a trivial, minimum overhead implementation of Tags for which all operations are no-ops.
+	NoopTags = &noopTags{}
+)
+
+// Tags is the interface used for storing request tags between Context calls.
+// The default implementation is *not* thread safe, and should be handled only in the context of the request.
+type Tags interface {
+	// Set sets the given key in the metadata tags.
+	Set(key string, value interface{}) Tags
+	// Has checks if the given key exists.
+	Has(key string) bool
+	// Values returns a map of key to values.
+	// Do not modify the underlying map, please use Set instead.
+	Values() map[string]interface{}
+}
+
+type mapTags struct {
+	values map[string]interface{}
+}
+
+func (t *mapTags) Set(key string, value interface{}) Tags {
+	t.values[key] = value
+	return t
+}
+
+func (t *mapTags) Has(key string) bool {
+	_, ok := t.values[key]
+	return ok
+}
+
+func (t *mapTags) Values() map[string]interface{} {
+	return t.values
+}
+
+type noopTags struct{}
+
+func (t *noopTags) Set(key string, value interface{}) Tags {
+	return t
+}
+
+func (t *noopTags) Has(key string) bool {
+	return false
+}
+
+func (t *noopTags) Values() map[string]interface{} {
+	return nil
+}
+
+// Extract returns a pre-existing Tags object in the Context.
+// If the context wasn't set up by a tags interceptor, a no-op Tag storage is returned that will *not* be propagated in context.
func Extract(ctx context.Context) Tags {
+	t, ok := ctx.Value(ctxMarkerKey).(Tags)
+	if !ok {
+		return NoopTags
+	}
+
+	return t
+}
+
+func setInContext(ctx context.Context, tags Tags) context.Context {
+	return context.WithValue(ctx, ctxMarkerKey, tags)
+}
+
+func newTags() Tags {
+	return &mapTags{values: make(map[string]interface{})}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go
new file mode 100644
index 0000000000..960638d0fa
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go
@@ -0,0 +1,22 @@
+/*
+`grpc_ctxtags` adds a Tag object to the context that can be used by other middleware to add context about a request.
+
+Request Context Tags
+
+Tags describe information about the request, and can be set and used by other middleware, or handlers. Tags are used
+for logging and tracing of requests. Tags are populated both upwards, *and* downwards in the interceptor-handler stack.
+
+You can automatically extract tags (prefixed with `grpc.request.`) from request payloads.
+
+For unary and server-streaming methods, pass in the `WithFieldExtractor` option. For client-streams and bidirectional-streams, you can
+use `WithFieldExtractorForInitialReq` which will extract the tags from the first message passed from client to server.
+Note the tags will not be modified for subsequent requests, so this option only makes sense when the initial message
+establishes the meta-data for the stream.
+
+If a user doesn't use the interceptors that initialize the `Tags` object, all operations following from an `Extract(ctx)`
+will be no-ops. This is to ensure that code doesn't panic if the interceptors weren't used.
+
+Tag fields are typed and shallow, and should follow the OpenTracing semantic conventions:
+https://github.com/opentracing/specification/blob/master/semantic_conventions.md
+*/
+package grpc_ctxtags
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go
new file mode 100644
index 0000000000..549ff48c81
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go
@@ -0,0 +1,85 @@
+// Copyright 2017 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_ctxtags
+
+import (
+	"reflect"
+)
+
+// RequestFieldExtractorFunc is a user-provided function that extracts field information from a gRPC request.
+// It is called from the tags middleware on arrival of a unary request or a server-streaming request.
+// Keys and values will be added to the context tags of the request. If there are no fields, return nil.
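+//
+// An illustrative (hypothetical) extractor that tags every request with its
+// full method name might look like:
+//
+//	func methodNameExtractor(fullMethod string, req interface{}) map[string]interface{} {
+//		return map[string]interface{}{"full_method": fullMethod}
+//	}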
+type RequestFieldExtractorFunc func(fullMethod string, req interface{}) map[string]interface{}
+
+type requestFieldsExtractor interface {
+	// ExtractRequestFields is a method declared on a Protobuf message that extracts fields from the message.
+	// The values from the extracted fields should be set in the appendToMap, in order to avoid allocations.
+	ExtractRequestFields(appendToMap map[string]interface{})
+}
+
+// CodeGenRequestFieldExtractor is a function that relies on code-generated functions that export log fields from requests.
+// These are usually coming from a protoc-plugin that generates additional information based on custom field options.
+func CodeGenRequestFieldExtractor(fullMethod string, req interface{}) map[string]interface{} {
+	if ext, ok := req.(requestFieldsExtractor); ok {
+		retMap := make(map[string]interface{})
+		ext.ExtractRequestFields(retMap)
+		if len(retMap) == 0 {
+			return nil
+		}
+		return retMap
+	}
+	return nil
+}
+
+// TagBasedRequestFieldExtractor is a function that relies on Go struct tags to export log fields from requests.
+// These are usually coming from a protoc-plugin, such as Gogo protobuf.
+//
+//	message Metadata {
+//		repeated string tags = 1 [ (gogoproto.moretags) = "log_field:\"meta_tags\"" ];
+//	}
+//
+// The tag name is configurable via the tagName argument. Here it would be "log_field".
+func TagBasedRequestFieldExtractor(tagName string) RequestFieldExtractorFunc {
+	return func(fullMethod string, req interface{}) map[string]interface{} {
+		retMap := make(map[string]interface{})
+		reflectMessageTags(req, retMap, tagName)
+		if len(retMap) == 0 {
+			return nil
+		}
+		return retMap
+	}
+}
+
+func reflectMessageTags(msg interface{}, existingMap map[string]interface{}, tagName string) {
+	v := reflect.ValueOf(msg)
+	// Only deal with pointers to structs.
+	if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
+		return
+	}
+	// Deref the pointer to get to the struct.
+	v = v.Elem()
+	t := v.Type()
+	for i := 0; i < v.NumField(); i++ {
+		field := v.Field(i)
+		kind := field.Kind()
+		// Only recurse down direct pointers, which should only be to nested structs.
+		if kind == reflect.Ptr {
+			reflectMessageTags(field.Interface(), existingMap, tagName)
+		}
+		// In case of arrays/slices (repeated fields) go down to the concrete type.
+		if kind == reflect.Array || kind == reflect.Slice {
+			if field.Len() == 0 {
+				continue
+			}
+			kind = field.Index(0).Kind()
+		}
+		// Only scalar kinds (bool through float64) and strings are picked up as tags.
+		if (kind >= reflect.Bool && kind <= reflect.Float64) || kind == reflect.String {
+			if tag := t.Field(i).Tag.Get(tagName); tag != "" {
+				existingMap[tag] = field.Interface()
+			}
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go
new file mode 100644
index 0000000000..038afd26bd
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go
@@ -0,0 +1,83 @@
+// Copyright 2017 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_ctxtags
+
+import (
+	"github.com/grpc-ecosystem/go-grpc-middleware"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/peer"
+)
+
+// UnaryServerInterceptor returns a new unary server interceptor that sets the values for request tags.
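+//
+// A sketch of how a handler downstream of this interceptor might consume the
+// tags (the service and message types are illustrative, not part of this package):
+//
+//	func (s *pingService) Ping(ctx context.Context, req *PingRequest) (*PingResponse, error) {
+//		grpc_ctxtags.Extract(ctx).Set("custom.tag", "example")
+//		return &PingResponse{}, nil
+//	}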
+func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
+	o := evaluateOptions(opts)
+	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+		newCtx := newTagsForCtx(ctx)
+		if o.requestFieldsFunc != nil {
+			setRequestFieldTags(newCtx, o.requestFieldsFunc, info.FullMethod, req)
+		}
+		return handler(newCtx, req)
+	}
+}
+
+// StreamServerInterceptor returns a new streaming server interceptor that sets the values for request tags.
+func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
+	o := evaluateOptions(opts)
+	return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		newCtx := newTagsForCtx(stream.Context())
+		if o.requestFieldsFunc == nil {
+			// Short-circuit: without a field extractor there is no need for the
+			// RecvMsg-intercepting wrappedStream below; a plain context wrapper suffices.
+			wrappedStream := grpc_middleware.WrapServerStream(stream)
+			wrappedStream.WrappedContext = newCtx
+			return handler(srv, wrappedStream)
+		}
+		wrapped := &wrappedStream{stream, info, o, newCtx, true}
+		err := handler(srv, wrapped)
+		return err
+	}
+}
+
+// wrappedStream is a thin wrapper around grpc.ServerStream that allows modifying the context and extracts log fields from the initial message.
+type wrappedStream struct {
+	grpc.ServerStream
+	info *grpc.StreamServerInfo
+	opts *options
+	// WrappedContext is the wrapper's own Context. You can assign it.
+	WrappedContext context.Context
+	initial        bool
+}
+
+// Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context()
+func (w *wrappedStream) Context() context.Context {
+	return w.WrappedContext
+}
+
+func (w *wrappedStream) RecvMsg(m interface{}) error {
+	err := w.ServerStream.RecvMsg(m)
+	// We only do log fields extraction on the single request of a server-side stream,
+	// or on the initial message of a client-side stream when configured to do so.
+	if !w.info.IsClientStream || w.opts.requestFieldsFromInitial && w.initial {
+		w.initial = false
+
+		setRequestFieldTags(w.Context(), w.opts.requestFieldsFunc, w.info.FullMethod, m)
+	}
+	return err
+}
+
+func newTagsForCtx(ctx context.Context) context.Context {
+	t := newTags()
+	if peer, ok := peer.FromContext(ctx); ok {
+		t.Set("peer.address", peer.Addr.String())
+	}
+	return setInContext(ctx, t)
+}
+
+func setRequestFieldTags(ctx context.Context, f RequestFieldExtractorFunc, fullMethodName string, req interface{}) {
+	if valMap := f(fullMethodName, req); valMap != nil {
+		t := Extract(ctx)
+		for k, v := range valMap {
+			t.Set("grpc.request."+k, v)
+		}
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go
new file mode 100644
index 0000000000..952775f88d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go
@@ -0,0 +1,44 @@
+// Copyright 2017 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_ctxtags
+
+var (
+	defaultOptions = &options{
+		requestFieldsFunc: nil,
+	}
+)
+
+type options struct {
+	requestFieldsFunc        RequestFieldExtractorFunc
+	requestFieldsFromInitial bool
+}
+
+func evaluateOptions(opts []Option) *options {
+	optCopy := &options{}
+	*optCopy = *defaultOptions
+	for _, o := range opts {
+		o(optCopy)
+	}
+	return optCopy
+}
+
+// Option is a functional option that mutates the interceptor's configuration.
+type Option func(*options)
+
+// WithFieldExtractor customizes the function for extracting log fields from protobuf messages, for
+// unary and server-streamed methods only.
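+//
+// A minimal wiring sketch (the server construction is illustrative; only the
+// option and interceptor names come from this package):
+//
+//	_ = grpc.NewServer(
+//		grpc.UnaryInterceptor(grpc_ctxtags.UnaryServerInterceptor(
+//			grpc_ctxtags.WithFieldExtractor(grpc_ctxtags.CodeGenRequestFieldExtractor))),
+//	)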
+func WithFieldExtractor(f RequestFieldExtractorFunc) Option {
+	return func(o *options) {
+		o.requestFieldsFunc = f
+	}
+}
+
+// WithFieldExtractorForInitialReq customizes the function for extracting log fields from protobuf messages,
+// for all unary and streaming methods. For client-streams and bidirectional-streams, the tags will be
+// extracted from the first message from the client.
+func WithFieldExtractorForInitialReq(f RequestFieldExtractorFunc) Option {
+	return func(o *options) {
+		o.requestFieldsFunc = f
+		o.requestFieldsFromInitial = true
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/client_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/client_interceptors.go
new file mode 100644
index 0000000000..f8fdecf5ab
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/client_interceptors.go
@@ -0,0 +1,142 @@
+// Copyright 2017 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_opentracing
+
+import (
+	"io"
+	"sync"
+
+	"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
+	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+	"github.com/opentracing/opentracing-go/log"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+)
+
+// UnaryClientInterceptor returns a new unary client interceptor for OpenTracing.
+func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
+	o := evaluateOptions(opts)
+	return func(parentCtx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+		if o.filterOutFunc != nil && !o.filterOutFunc(parentCtx, method) {
+			return invoker(parentCtx, method, req, reply, cc, opts...)
+		}
+		newCtx, clientSpan := newClientSpanFromContext(parentCtx, o.tracer, method)
+		err := invoker(newCtx, method, req, reply, cc, opts...)
+		finishClientSpan(clientSpan, err)
+		return err
+	}
+}
+
+// StreamClientInterceptor returns a new streaming client interceptor for OpenTracing.
+func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
+	o := evaluateOptions(opts)
+	return func(parentCtx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+		if o.filterOutFunc != nil && !o.filterOutFunc(parentCtx, method) {
+			return streamer(parentCtx, desc, cc, method, opts...)
+		}
+		newCtx, clientSpan := newClientSpanFromContext(parentCtx, o.tracer, method)
+		clientStream, err := streamer(newCtx, desc, cc, method, opts...)
+		if err != nil {
+			finishClientSpan(clientSpan, err)
+			return nil, err
+		}
+		return &tracedClientStream{ClientStream: clientStream, clientSpan: clientSpan}, nil
+	}
+}
+
+// tracedClientStream is a thin wrapper around grpc.ClientStream that finishes
+// the attached client span once the stream errors or is closed.
+type tracedClientStream struct {
+	grpc.ClientStream
+	mu              sync.Mutex
+	alreadyFinished bool
+	clientSpan      opentracing.Span
+}
+
+func (s *tracedClientStream) Header() (metadata.MD, error) {
+	h, err := s.ClientStream.Header()
+	if err != nil {
+		s.finishClientSpan(err)
+	}
+	return h, err
+}
+
+func (s *tracedClientStream) SendMsg(m interface{}) error {
+	err := s.ClientStream.SendMsg(m)
+	if err != nil {
+		s.finishClientSpan(err)
+	}
+	return err
+}
+
+func (s *tracedClientStream) CloseSend() error {
+	err := s.ClientStream.CloseSend()
+	if err != nil {
+		s.finishClientSpan(err)
+	}
+	return err
+}
+
+func (s *tracedClientStream) RecvMsg(m interface{}) error {
+	err := s.ClientStream.RecvMsg(m)
+	if err != nil {
+		s.finishClientSpan(err)
+	}
+	return err
+}
+
+func (s *tracedClientStream) finishClientSpan(err error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.alreadyFinished {
+		finishClientSpan(s.clientSpan, err)
+		s.alreadyFinished = true
+	}
+}
+
+// ClientAddContextTags returns a context with specified opentracing tags, which
+// are used by UnaryClientInterceptor/StreamClientInterceptor when creating a
+// new span.
+func ClientAddContextTags(ctx context.Context, tags opentracing.Tags) context.Context {
+	return context.WithValue(ctx, clientSpanTagKey{}, tags)
+}
+
+type clientSpanTagKey struct{}
+
+func newClientSpanFromContext(ctx context.Context, tracer opentracing.Tracer, fullMethodName string) (context.Context, opentracing.Span) {
+	var parentSpanCtx opentracing.SpanContext
+	if parent := opentracing.SpanFromContext(ctx); parent != nil {
+		parentSpanCtx = parent.Context()
+	}
+	opts := []opentracing.StartSpanOption{
+		opentracing.ChildOf(parentSpanCtx),
+		ext.SpanKindRPCClient,
+		grpcTag,
+	}
+	if tagx := ctx.Value(clientSpanTagKey{}); tagx != nil {
+		if opt, ok := tagx.(opentracing.StartSpanOption); ok {
+			opts = append(opts, opt)
+		}
+	}
+	clientSpan := tracer.StartSpan(fullMethodName, opts...)
+	// Make sure we add this to the metadata of the call, so it gets propagated:
+	md := metautils.ExtractOutgoing(ctx).Clone()
+	if err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, metadataTextMap(md)); err != nil {
+		grpclog.Printf("grpc_opentracing: failed serializing trace information: %v", err)
+	}
+	ctxWithMetadata := md.ToOutgoing(ctx)
+	return opentracing.ContextWithSpan(ctxWithMetadata, clientSpan), clientSpan
+}
+
+func finishClientSpan(clientSpan opentracing.Span, err error) {
+	if err != nil && err != io.EOF {
+		ext.Error.Set(clientSpan, true)
+		clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
+	}
+	clientSpan.Finish()
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/doc.go
new file mode 100644
index 0000000000..7a58efc22c
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/doc.go
@@ -0,0 +1,22 @@
+// Copyright 2017 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+/*
+`grpc_opentracing` adds OpenTracing instrumentation to gRPC servers and clients.
+
+OpenTracing Interceptors
+
+These are both client-side and server-side interceptors for OpenTracing. They are provider-agnostic, with backends
+such as Zipkin, or Google Stackdriver Trace.
+
+For a service that both sends and receives requests, you *need* to use both, otherwise downstream requests will
+not have the appropriate tracing context propagated.
+
+All server-side spans are tagged with grpc_ctxtags information.
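+
+A minimal sketch of a traced server (illustrative; ChainUnaryServer is this
+repository's helper for combining interceptors):
+
+	myServer := grpc.NewServer(
+		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
+			grpc_ctxtags.UnaryServerInterceptor(),
+			grpc_opentracing.UnaryServerInterceptor(),
+		)),
+	)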
+
+For more information see:
+http://opentracing.io/documentation/
+https://github.com/opentracing/specification/blob/master/semantic_conventions.md
+
+*/
+package grpc_opentracing
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/id_extract.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/id_extract.go
new file mode 100644
index 0000000000..d19f3c8e37
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/id_extract.go
@@ -0,0 +1,67 @@
+package grpc_opentracing
+
+import (
+	"strings"
+
+	grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
+	opentracing "github.com/opentracing/opentracing-go"
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	TagTraceId           = "trace.traceid"
+	TagSpanId            = "trace.spanid"
+	TagSampled           = "trace.sampled"
+	jaegerNotSampledFlag = "0"
+)
+
+// injectOpentracingIdsToTags writes trace data to ctxtags.
+// This is done in an incredibly hacky way, because the public-facing interface of opentracing doesn't give access to
+// the TraceId and SpanId of the SpanContext. Only the Tracer's Inject/Extract methods know what these are.
+// Most tracers have them encoded as keys with 'traceid' and 'spanid':
+// https://github.com/openzipkin/zipkin-go-opentracing/blob/594640b9ef7e5c994e8d9499359d693c032d738c/propagation_ot.go#L29
+// https://github.com/opentracing/basictracer-go/blob/1b32af207119a14b1b231d451df3ed04a72efebf/propagation_ot.go#L26
+// Jaeger from Uber uses a one-key schema with the format '{trace-id}:{span-id}:{parent-span-id}:{flags}':
+// https://www.jaegertracing.io/docs/client-libraries/#trace-span-identity
+func injectOpentracingIdsToTags(span opentracing.Span, tags grpc_ctxtags.Tags) {
+	if err := span.Tracer().Inject(span.Context(), opentracing.HTTPHeaders, &tagsCarrier{tags}); err != nil {
+		grpclog.Infof("grpc_opentracing: failed extracting trace info into ctx %v", err)
+	}
+}
+
+// tagsCarrier is a really hacky way of piggybacking on Tracer.Inject: it acts as
+// the injection carrier so the trace and span IDs can be captured into the Tags
+// as the tracer writes them out.
+type tagsCarrier struct {
+	grpc_ctxtags.Tags
+}
+
+func (t *tagsCarrier) Set(key, val string) {
+	key = strings.ToLower(key)
+	if strings.Contains(key, "traceid") {
+		t.Tags.Set(TagTraceId, val) // this will most likely be base-16 (hex) encoded
+	}
+
+	if strings.Contains(key, "spanid") && !strings.Contains(strings.ToLower(key), "parent") {
+		t.Tags.Set(TagSpanId, val) // this will most likely be base-16 (hex) encoded
+	}
+
+	if strings.Contains(key, "sampled") {
+		switch val {
+		case "true", "false":
+			t.Tags.Set(TagSampled, val)
+		}
+	}
+
+	if key == "uber-trace-id" {
+		parts := strings.Split(val, ":")
+		if len(parts) == 4 {
+			t.Tags.Set(TagTraceId, parts[0])
+			t.Tags.Set(TagSpanId, parts[1])
+
+			if parts[3] != jaegerNotSampledFlag {
+				t.Tags.Set(TagSampled, "true")
+			} else {
+				t.Tags.Set(TagSampled, "false")
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/metadata.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/metadata.go
new file mode 100644
index 0000000000..38f251d31a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/metadata.go
@@ -0,0 +1,56 @@
+// Copyright 2017 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_opentracing
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strings"
+
+	"google.golang.org/grpc/metadata"
+)
+
+const (
+	binHdrSuffix = "-bin"
+)
+
+// metadataTextMap extends a metadata.MD to be an opentracing textmap
+type metadataTextMap metadata.MD
+
+// Set implements the opentracing.TextMapWriter interface, writing a value into the metadata.
+func (m metadataTextMap) Set(key, val string) {
+	// gRPC allows for complex binary values to be written.
+	encodedKey, encodedVal := encodeKeyValue(key, val)
+	// The metadata object is a multimap, and previous values may exist, but for opentracing headers, we do not append
+	// we just override.
+	m[encodedKey] = []string{encodedVal}
+}
+
+// ForeachKey implements the opentracing.TextMapReader interface, iterating over the metadata values.
+func (m metadataTextMap) ForeachKey(callback func(key, val string) error) error {
+	for k, vv := range m {
+		for _, v := range vv {
+			if decodedKey, decodedVal, err := metadata.DecodeKeyValue(k, v); err == nil {
+				if err = callback(decodedKey, decodedVal); err != nil {
+					return err
+				}
+			} else {
+				return fmt.Errorf("failed decoding opentracing from gRPC metadata: %v", err)
+			}
+		}
+	}
+	return nil
+}
+
+// encodeKeyValue encodes key and value qualified for transmission via gRPC.
+// note: copy pasted from private values of grpc.metadata
+func encodeKeyValue(k, v string) (string, string) {
+	k = strings.ToLower(k)
+	if strings.HasSuffix(k, binHdrSuffix) {
+		val := base64.StdEncoding.EncodeToString([]byte(v))
+		v = string(val)
+	}
+	return k, v
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/options.go
new file mode 100644
index 0000000000..e75102b250
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/options.go
@@ -0,0 +1,55 @@
+// Copyright 2017 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_opentracing
+
+import (
+	"context"
+
+	"github.com/opentracing/opentracing-go"
+)
+
+var (
+	defaultOptions = &options{
+		filterOutFunc: nil,
+		tracer:        nil,
+	}
+)
+
+// FilterFunc allows users to provide a function that filters out certain methods from being traced.
+//
+// If it returns false, the given request will not be traced.
+type FilterFunc func(ctx context.Context, fullMethodName string) bool
+
+type options struct {
+	filterOutFunc FilterFunc
+	tracer        opentracing.Tracer
+}
+
+func evaluateOptions(opts []Option) *options {
+	optCopy := &options{}
+	*optCopy = *defaultOptions
+	for _, o := range opts {
+		o(optCopy)
+	}
+	if optCopy.tracer == nil {
+		optCopy.tracer = opentracing.GlobalTracer()
+	}
+	return optCopy
+}
+
+// Option is a functional option that mutates the interceptor's configuration.
+type Option func(*options)
+
+// WithFilterFunc customizes the function used for deciding whether a given call is traced or not.
+func WithFilterFunc(f FilterFunc) Option {
+	return func(o *options) {
+		o.filterOutFunc = f
+	}
+}
+
+// WithTracer sets a custom tracer to be used for this middleware, otherwise the opentracing.GlobalTracer is used.
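+//
+// For example (illustrative; any opentracing-compatible tracer can be passed):
+//
+//	tracer := opentracing.GlobalTracer()
+//	_ = grpc_opentracing.UnaryServerInterceptor(grpc_opentracing.WithTracer(tracer))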
+func WithTracer(tracer opentracing.Tracer) Option { + return func(o *options) { + o.tracer = tracer + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/server_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/server_interceptors.go new file mode 100644 index 0000000000..53764a083c --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing/server_interceptors.go @@ -0,0 +1,87 @@ +// Copyright 2017 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_opentracing + +import ( + "github.com/grpc-ecosystem/go-grpc-middleware" + "github.com/grpc-ecosystem/go-grpc-middleware/tags" + "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" +) + +var ( + grpcTag = opentracing.Tag{Key: string(ext.Component), Value: "gRPC"} +) + +// UnaryServerInterceptor returns a new unary server interceptor for OpenTracing. +func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { + o := evaluateOptions(opts) + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + if o.filterOutFunc != nil && !o.filterOutFunc(ctx, info.FullMethod) { + return handler(ctx, req) + } + newCtx, serverSpan := newServerSpanFromInbound(ctx, o.tracer, info.FullMethod) + resp, err := handler(newCtx, req) + finishServerSpan(ctx, serverSpan, err) + return resp, err + } +} + +// StreamServerInterceptor returns a new streaming server interceptor for OpenTracing. +func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + o := evaluateOptions(opts) + return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if o.filterOutFunc != nil && !o.filterOutFunc(stream.Context(), info.FullMethod) { + return handler(srv, stream) + } + newCtx, serverSpan := newServerSpanFromInbound(stream.Context(), o.tracer, info.FullMethod) + wrappedStream := grpc_middleware.WrapServerStream(stream) + wrappedStream.WrappedContext = newCtx + err := handler(srv, wrappedStream) + finishServerSpan(newCtx, serverSpan, err) + return err + } +} + +func newServerSpanFromInbound(ctx context.Context, tracer opentracing.Tracer, fullMethodName string) (context.Context, opentracing.Span) { + md := metautils.ExtractIncoming(ctx) + parentSpanContext, err := tracer.Extract(opentracing.HTTPHeaders, metadataTextMap(md)) + if err != nil && err != opentracing.ErrSpanContextNotFound { + grpclog.Printf("grpc_opentracing: failed parsing trace information: %v", err) + } + + serverSpan := tracer.StartSpan( + fullMethodName, + // this is magical, it attaches the new span to the parent parentSpanContext, and creates an unparented one if empty. + ext.RPCServerOption(parentSpanContext), + grpcTag, + ) + + injectOpentracingIdsToTags(serverSpan, grpc_ctxtags.Extract(ctx)) + return opentracing.ContextWithSpan(ctx, serverSpan), serverSpan +} + +func finishServerSpan(ctx context.Context, serverSpan opentracing.Span, err error) { + // Log context information + tags := grpc_ctxtags.Extract(ctx) + for k, v := range tags.Values() { + // Don't tag errors, log them instead. 
+		if vErr, ok := v.(error); ok {
+			serverSpan.LogKV(k, vErr.Error())
+		} else {
+			serverSpan.SetTag(k, v)
+		}
+	}
+	if err != nil {
+		ext.Error.Set(serverSpan, true)
+		serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
+	}
+	serverSpan.Finish()
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go
new file mode 100644
index 0000000000..1ed9bb499b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go
@@ -0,0 +1,19 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+/*
+Package `metautils` provides convenience functions for dealing with gRPC metadata.MD objects inside
+Context handlers.
+
+While the upstream grpc-go package contains decent functionality (see https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md)
+its helpers are hard to use.
+
+The majority of functions center around the NiceMD, which is a convenience wrapper around metadata.MD. For example
+the following code allows you to easily extract incoming metadata (server handler) and put it into a new client context
+metadata.
+
+	nmd := metautils.ExtractIncoming(serverCtx).Clone(":authorization", ":custom")
+	clientCtx := nmd.Set("x-client-header", "2").Set("x-another", "3").ToOutgoing(ctx)
+*/
+package metautils
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
new file mode 100644
index 0000000000..a277bee3f7
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
@@ -0,0 +1,126 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package metautils
+
+import (
+	"strings"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/metadata"
+)
+
+// NiceMD is a convenience wrapper defining extra functions on the metadata.
+type NiceMD metadata.MD
+
+// ExtractIncoming extracts inbound metadata from the server-side context.
+//
+// This function always returns a NiceMD wrapper of the metadata.MD; in case the context doesn't have metadata it returns
+// a new empty NiceMD.
+func ExtractIncoming(ctx context.Context) NiceMD {
+	md, ok := metadata.FromIncomingContext(ctx)
+	if !ok {
+		return NiceMD(metadata.Pairs())
+	}
+	return NiceMD(md)
+}
+
+// ExtractOutgoing extracts outbound metadata from the client-side context.
+//
+// This function always returns a NiceMD wrapper of the metadata.MD; in case the context doesn't have metadata it returns
+// a new empty NiceMD.
+func ExtractOutgoing(ctx context.Context) NiceMD {
+	md, ok := metadata.FromOutgoingContext(ctx)
+	if !ok {
+		return NiceMD(metadata.Pairs())
+	}
+	return NiceMD(md)
+}
+
+// Clone performs a *deep* copy of the metadata.MD.
+//
+// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted
+// all keys get copied.
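+//
+// Illustrative use, keeping only the authorization metadata when copying:
+//
+//	authOnly := metautils.ExtractIncoming(ctx).Clone("authorization")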
+func (m NiceMD) Clone(copiedKeys ...string) NiceMD {
+	newMd := NiceMD(metadata.Pairs())
+	for k, vv := range m {
+		found := false
+		if len(copiedKeys) == 0 {
+			found = true
+		} else {
+			for _, allowedKey := range copiedKeys {
+				if strings.ToLower(allowedKey) == strings.ToLower(k) {
+					found = true
+					break
+				}
+			}
+		}
+		if !found {
+			continue
+		}
+		newMd[k] = make([]string, len(vv))
+		copy(newMd[k], vv)
+	}
+	return NiceMD(newMd)
+}
+
+// ToOutgoing sets the given NiceMD as a client-side context for dispatching.
+func (m NiceMD) ToOutgoing(ctx context.Context) context.Context {
+	return metadata.NewOutgoingContext(ctx, metadata.MD(m))
+}
+
+// ToIncoming sets the given NiceMD as a server-side context for dispatching.
+//
+// This is mostly useful in ServerInterceptors.
+func (m NiceMD) ToIncoming(ctx context.Context) context.Context {
+	return metadata.NewIncomingContext(ctx, metadata.MD(m))
+}
+
+// Get retrieves a single value from the metadata.
+//
+// It works analogously to http.Header.Get, returning the first value if there are many set. If the value is not set,
+// an empty string is returned.
+//
+// The function is binary-key safe.
+func (m NiceMD) Get(key string) string {
+	k, _ := encodeKeyValue(key, "")
+	vv, ok := m[k]
+	if !ok {
+		return ""
+	}
+	return vv[0]
+}
+
+// Del removes the given key from the metadata.
+//
+// It works analogously to http.Header.Del, deleting all values if they exist.
+//
+// The function is binary-key safe.
+func (m NiceMD) Del(key string) NiceMD {
+	k, _ := encodeKeyValue(key, "")
+	delete(m, k)
+	return m
+}
+
+// Set sets the given value in a metadata.
+//
+// It works analogously to http.Header.Set, overwriting all previous metadata values.
+//
+// The function is binary-key safe.
+func (m NiceMD) Set(key string, value string) NiceMD {
+	k, v := encodeKeyValue(key, value)
+	m[k] = []string{v}
+	return m
+}
+
+// Add appends a value to the metadata.
+//
+// It works analogously to http.Header.Add, as it appends to any existing values associated with key.
+//
+// The function is binary-key safe.
+func (m NiceMD) Add(key string, value string) NiceMD {
+	k, v := encodeKeyValue(key, value)
+	m[k] = append(m[k], v)
+	return m
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go
new file mode 100644
index 0000000000..8a53871662
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go
@@ -0,0 +1,22 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package metautils
+
+import (
+	"encoding/base64"
+	"strings"
+)
+
+const (
+	binHdrSuffix = "-bin"
+)
+
+func encodeKeyValue(k, v string) (string, string) {
+	k = strings.ToLower(k)
+	if strings.HasSuffix(k, binHdrSuffix) {
+		val := base64.StdEncoding.EncodeToString([]byte(v))
+		v = string(val)
+	}
+	return k, v
+}
diff --git a/vendor/github.com/minio/minio-go/v6/.gitignore b/vendor/github.com/minio/minio-go/v6/.gitignore
new file mode 100644
index 0000000000..fa967abd77
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/.gitignore
@@ -0,0 +1,3 @@
+*~
+*.test
+validator
diff --git a/vendor/github.com/minio/minio-go/v6/.travis.yml b/vendor/github.com/minio/minio-go/v6/.travis.yml
new file mode 100644
index 0000000000..bc891f3fb8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/.travis.yml
@@ -0,0 +1,37 @@
+sudo: false
+language: go
+
+os:
+- linux
+
+env:
+- ARCH=x86_64
+
+go:
+- 1.12.x
+- tip
+
+matrix:
+  fast_finish: true
+  allow_failures:
+  - go: tip
+
+addons:
+  apt:
+    packages:
+    - devscripts
+  artifacts:
+    paths:
+    - minio.log
+
+before_install:
+- curl -O https://dl.minio.io/server/minio/release/linux-amd64/minio && chmod +x ./minio
+- sudo cp testcerts/public.crt /usr/local/share/ca-certificates/
+- sudo update-ca-certificates
+- MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 ./minio server --compat --quiet --certs-dir testcerts data 2>&1 > minio.log &
+
+script:
+- diff -au <(gofmt -d .) <(printf "")
+- diff -au <(licensecheck --check '.go$' --recursive --lines 0 * | grep -v -w 'Apache (v2.0)') <(printf "")
+- make
diff --git a/vendor/github.com/minio/minio-go/v6/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/v6/CONTRIBUTING.md
new file mode 100644
index 0000000000..8b1ee86c6d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+
+### Developer Guidelines
+
+``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
+
+* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
+    - Fork it
+    - Create your feature branch (git checkout -b my-new-feature)
+    - Commit your changes (git commit -am 'Add some feature')
+    - Push to the branch (git push origin my-new-feature)
+    - Create new Pull Request
+
+* When you're ready to create a pull request, be sure to:
+    - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
+    - Run `go fmt`
+    - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
+    - Make sure `go test -race ./...` and `go build` complete.
+      NOTE: go test runs functional tests and requires you to have an AWS S3 account. Set them as environment variables
+      ``ACCESS_KEY`` and ``SECRET_KEY``. To run a shorter version of the tests please use ``go test -short -race ./...``
+
+* Read the [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from the Golang project
+    - the `minio-go` project is strictly conformant with Golang style
+    - if you happen to observe offending code, please feel free to send a pull request
diff --git a/vendor/github.com/minio/minio-go/v6/LICENSE b/vendor/github.com/minio/minio-go/v6/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/minio/minio-go/v6/MAINTAINERS.md b/vendor/github.com/minio/minio-go/v6/MAINTAINERS.md
new file mode 100644
index 0000000000..f640dfb9f8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/MAINTAINERS.md
@@ -0,0 +1,35 @@
+# For maintainers only
+
+## Responsibilities
+
+Please go through the [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) guide.
+
+### Making new releases
+Tag and sign your release commit. Note that this step requires access to MinIO's trusted private key.
+```sh
+$ export GNUPGHOME=/media/${USER}/minio/trusted
+$ git tag -s 4.0.0
+$ git push
+$ git push --tags
+```
+
+### Update version
+Once the release has been made, update the `libraryVersion` constant in `api.go` to the next release version.
+
+```sh
+$ grep libraryVersion api.go
+   libraryVersion = "4.0.1"
+```
+
+Commit your changes:
+```
+$ git commit -a -m "Update version for next release" --author "MinIO Trusted <trusted@min.io>"
+```
+
+### Announce
+Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `trusted@min.io` account. Release notes require two sections, `highlights` and `changelog`: Highlights is a bulleted list of salient features in the release, and Changelog is the list of all commits since the last release.
+
+To generate the `changelog`:
+```sh
+$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag>
+```
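+
+For example, with a hypothetical pair of release tags `4.0.0` and `4.0.1`, the invocation would be:
+```sh
+$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' 4.0.0..4.0.1
+```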
diff --git a/vendor/github.com/minio/minio-go/v6/Makefile b/vendor/github.com/minio/minio-go/v6/Makefile
new file mode 100644
index 0000000000..892f8cf507
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/Makefile
@@ -0,0 +1,20 @@
+all: checks
+
+.PHONY: examples docs
+
+checks: vet test examples docs functional-test
+
+vet:
+	@GO111MODULE=on go vet ./...
+
+test:
+	@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
+
+examples:
+	@mkdir -p /tmp/examples && for i in examples/s3/*.go; do go build -o /tmp/examples/$$(basename $${i%.go}) $${i}; done
+
+docs:
+	@(cd docs; GO111MODULE=on go build validator.go && ./validator -m ../docs/API.md -t checker.go.tpl)
+
+functional-test:
+	@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go
diff --git a/vendor/github.com/minio/minio-go/v6/NOTICE b/vendor/github.com/minio/minio-go/v6/NOTICE
new file mode 100644
index 0000000000..fc6c8808e9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/NOTICE
@@ -0,0 +1,2 @@
+minio-go
+Copyright 2015-2017 MinIO, Inc.
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v6/README.md b/vendor/github.com/minio/minio-go/v6/README.md
new file mode 100644
index 0000000000..8a66bf5c99
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/README.md
@@ -0,0 +1,238 @@
+# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
+
+The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
+
+This quickstart guide shows you how to install the MinIO client SDK, connect to MinIO, and build a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference).
+
+This document assumes that you have a working [Go development environment](https://docs.min.io/docs/how-to-install-golang).
+
+## Download from GitHub
+```sh
+go get -u github.com/minio/minio-go/v6
+```
+
+## Initialize MinIO Client
+The MinIO client requires the following four parameters to connect to an Amazon S3 compatible object storage service.
+
+| Parameter | Description |
+| :--- | :--- |
+| endpoint | URL to the object storage service. |
+| accessKeyID | Access key is the user ID that uniquely identifies your account. |
+| secretAccessKey | Secret key is the password to your account. |
+| secure | Set this value to 'true' to enable secure (HTTPS) access. |
+
+
+```go
+package main
+
+import (
+	"github.com/minio/minio-go/v6"
+	"log"
+)
+
+func main() {
+	endpoint := "play.min.io:9000"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize minio client object.
+	minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("%#v\n", minioClient) // minioClient is now set up
+}
+```
+
+## Quick Start Example - File Uploader
+This example program connects to an object storage server, creates a bucket, and uploads a file to the bucket.
+
+We will use the MinIO server running at [https://play.min.io:9000](https://play.min.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
+
+### FileUploader.go
+```go
+package main
+
+import (
+	"github.com/minio/minio-go/v6"
+	"log"
+)
+
+func main() {
+	endpoint := "play.min.io:9000"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize minio client object.
+	minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	// Make a new bucket called mymusic.
+	bucketName := "mymusic"
+	location := "us-east-1"
+
+	err = minioClient.MakeBucket(bucketName, location)
+	if err != nil {
+		// Check to see if we already own this bucket (which happens if you run this twice)
+		exists, err := minioClient.BucketExists(bucketName)
+		if err == nil && exists {
+			log.Printf("We already own %s\n", bucketName)
+		} else {
+			log.Fatalln(err)
+		}
+	} else {
+		log.Printf("Successfully created %s\n", bucketName)
+	}
+
+	// Upload the zip file
+	objectName := "golden-oldies.zip"
+	filePath := "/tmp/golden-oldies.zip"
+	contentType := "application/zip"
+
+	// Upload the zip file with FPutObject
+	n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
+}
+```
+
+### Run FileUploader
+```sh
+go run file-uploader.go
+2016/08/13 17:03:28 Successfully created mymusic
+2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
+
+mc ls play/mymusic/
+[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
+```
+
+## API Reference
+The full API Reference is available here; a short, hedged presigned-URL sketch follows before the full list.
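+
+A minimal sketch using `PresignedGetObject` from the list below, reusing the quickstart's public play.min.io credentials and the `mymusic` bucket (adjust both to your own deployment):
+```go
+package main
+
+import (
+	"log"
+	"time"
+
+	"github.com/minio/minio-go/v6"
+)
+
+func main() {
+	// Public play.min.io credentials, as in the quickstart above.
+	minioClient, err := minio.New("play.min.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	// Generate a URL that permits downloading the object for 24 hours
+	// without credentials. A nil url.Values keeps default response headers.
+	presignedURL, err := minioClient.PresignedGetObject("mymusic", "golden-oldies.zip", 24*time.Hour, nil)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	log.Println("Presigned URL:", presignedURL)
+}
+```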
+ +* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference) + +### API Reference : Bucket Operations +* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) +* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) +* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) +* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) +* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) +* [`ListObjectsV2`](https://docs.min.io/docs/golang-client-api-reference#ListObjectsV2) +* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) + +### API Reference : Bucket policy Operations +* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) +* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) + +### API Reference : Bucket notification Operations +* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) +* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension) + +### API Reference : File Object Operations +* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) +* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject) +* [`FPutObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#FPutObjectWithContext) +* [`FGetObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#FGetObjectWithContext) + +### API Reference : Object Operations +* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) +* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) +* [`GetObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#GetObjectWithContext) +* [`PutObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#PutObjectWithContext) +* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) +* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) +* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) +* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) +* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) +* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) +* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) + + +### API Reference : Presigned Operations +* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) +* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) +* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) +* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) + +### API Reference : Client custom settings +* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo) +* 
[`SetCustomTransport`](http://docs.min.io/docs/golang-client-api-reference#SetCustomTransport) +* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn) +* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff) + +## Full Examples + +### Full Examples : Bucket Operations +* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) +* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) +* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) +* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) +* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) +* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) +* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) + +### Full Examples : Bucket policy Operations +* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) +* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) +* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) + +### Full Examples : Bucket lifecycle Operations +* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go) +* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go) + +### Full Examples : Bucket notification Operations +* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) +* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) +* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) +* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension) + +### Full Examples : File Object Operations +* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) +* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) +* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go) +* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go) + +### Full Examples : Object Operations +* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) +* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) +* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go) +* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go) +* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) +* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) +* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) +* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) +* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go) + +### Full 
Examples : Encrypted Object Operations
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fputencrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
+
+### Full Examples : Presigned Operations
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+* [Complete Documentation](https://docs.min.io)
+* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference)
+
+## Contribute
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
+
+[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
+[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)
+
+## License
+This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](./LICENSE) and [NOTICE](./NOTICE) for more information.
diff --git a/vendor/github.com/minio/minio-go/v6/README_zh_CN.md b/vendor/github.com/minio/minio-go/v6/README_zh_CN.md
new file mode 100644
index 0000000000..c9c5e3133d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/README_zh_CN.md
@@ -0,0 +1,244 @@
+# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
+
+The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage service.
+
+**Supported cloud storage:**
+
+- AWS Signature Version 4
+   - Amazon S3
+   - MinIO
+
+- AWS Signature Version 2
+   - Google Cloud Storage (compatibility mode)
+   - Openstack Swift + Swift3 middleware
+   - Ceph Object Gateway
+   - Riak CS
+
+In this document we will learn how to install the MinIO client SDK, connect to MinIO, and walk through a file upload example. For the complete set of APIs and examples, please refer to the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference).
+
+This document assumes that you already have a working [Go development environment](https://docs.min.io/docs/how-to-install-golang).
+
+## Download from GitHub
+```sh
+go get -u github.com/minio/minio-go/v6
+```
+
+## Initialize MinIO Client
+The MinIO client requires the following four parameters to connect to an Amazon S3 compatible object storage service.
+
+| Parameter | Description |
+| :--- | :--- |
+| endpoint | URL of the object storage service. |
+| accessKeyID | Access key is the user ID that uniquely identifies your account. |
+| secretAccessKey | Secret key is the password to your account. |
+| secure | Set to true to use HTTPS. |
+
+
+```go
+package main
+
+import (
+	"github.com/minio/minio-go/v6"
+	"log"
+)
+
+func main() {
+	endpoint := "play.min.io:9000"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize the minio client object.
+	minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("%#v\n", minioClient) // minioClient is now initialized
+}
+```
+
+## Quick Start Example - File Upload
+This example connects to an object storage service, creates a bucket, and uploads a file to the bucket.
+
+We use the MinIO server running at [https://play.min.io:9000](https://play.min.io:9000) in this example; feel free to use this service for development and testing. The access credentials shown in this example are public.
+
+### FileUploader.go
+```go
+package main
+
+import (
+	"github.com/minio/minio-go/v6"
+	"log"
+)
"github.com/minio/minio-go/v6" + "log" +) + +func main() { + endpoint := "play.min.io:9000" + accessKeyID := "Q3AM3UQ867SPQQA43P2F" + secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" + useSSL := true + + // 初使化minio client对象。 + minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) + if err != nil { + log.Fatalln(err) + } + + // 创建一个叫mymusic的存储桶。 + bucketName := "mymusic" + location := "us-east-1" + + err = minioClient.MakeBucket(bucketName, location) + if err != nil { + // 检查存储桶是否已经存在。 + exists, err := minioClient.BucketExists(bucketName) + if err == nil && exists { + log.Printf("We already own %s\n", bucketName) + } else { + log.Fatalln(err) + } + } + log.Printf("Successfully created %s\n", bucketName) + + // 上传一个zip文件。 + objectName := "golden-oldies.zip" + filePath := "/tmp/golden-oldies.zip" + contentType := "application/zip" + + // 使用FPutObject上传一个zip文件。 + n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType}) + if err != nil { + log.Fatalln(err) + } + + log.Printf("Successfully uploaded %s of size %d\n", objectName, n) +} +``` + +### 运行FileUploader +```sh +go run file-uploader.go +2016/08/13 17:03:28 Successfully created mymusic +2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413 + +mc ls play/mymusic/ +[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip +``` + +## API文档 +完整的API文档在这里。 +* [完整API文档](https://docs.min.io/docs/golang-client-api-reference) + +### API文档 : 操作存储桶 +* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) +* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) +* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) +* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) +* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) +* [`ListObjectsV2`](https://docs.min.io/docs/golang-client-api-reference#ListObjectsV2) +* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) + +### API文档 : 存储桶策略 +* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) +* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) + +### API文档 : 存储桶通知 +* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) +* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension) + +### API文档 : 操作文件对象 +* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) +* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) +* [`FPutObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#FPutObjectWithContext) +* [`FGetObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#FGetObjectWithContext) + +### API文档 : 操作对象 +* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) +* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) +* [`GetObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#GetObjectWithContext) +* 
+* [`PutObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#PutObjectWithContext)
+* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming)
+* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject)
+* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject)
+* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject)
+* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
+
+### API Reference : Encrypted Object Operations
+* [`GetEncryptedObject`](https://docs.min.io/docs/golang-client-api-reference#GetEncryptedObject)
+* [`PutEncryptedObject`](https://docs.min.io/docs/golang-client-api-reference#PutEncryptedObject)
+
+### API Reference : Presigned Operations
+* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject)
+* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject)
+* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject)
+* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy)
+
+### API Reference : Client custom settings
+* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo)
+* [`SetCustomTransport`](http://docs.min.io/docs/golang-client-api-reference#SetCustomTransport)
+* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn)
+* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff)
+
+## Full Examples
+
+### Full Examples : Bucket Operations
+* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
+* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
+* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
+* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
+* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
+
+### Full Examples : Bucket policy Operations
+* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
+* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
+* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
+
+### Full Examples : Bucket notification Operations
+* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
+* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
+* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
+* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension)
+
+### Full Examples : File Object Operations
+* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
+* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
+* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
+
+### Full Examples : Object Operations
+* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
+* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
+* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
+* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+### Full Examples : Encrypted Object Operations
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fputencrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
+
+### Full Examples : Presigned Operations
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+* [Complete Documentation](https://docs.min.io)
+* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference)
+
+## Contribute
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md)
+
+[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
+[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)
+
diff --git a/vendor/github.com/minio/minio-go/v6/api-compose-object.go b/vendor/github.com/minio/minio-go/v6/api-compose-object.go
new file mode 100644
index 0000000000..748b558ce3
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/api-compose-object.go
@@ -0,0 +1,565 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017, 2018 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/minio/minio-go/v6/pkg/encrypt"
+	"github.com/minio/minio-go/v6/pkg/s3utils"
+)
+
+// DestinationInfo - type with information about the object to be
+// created via server-side copy requests, using the Compose API.
+type DestinationInfo struct {
+	bucket, object string
+	encryption     encrypt.ServerSide
+
+	// if no user-metadata is provided, it is copied from source
+	// (when there is only one source object in the compose
+	// request)
+	userMetadata map[string]string
+}
+
+// NewDestinationInfo - creates a compose-object/copy-source
+// destination info object.
+//
+// `sse` is the server-side encryption to apply on the destination
+// (for example, a customer-provided key). If it is nil, no encryption
+// is performed.
+//
+// `userMeta` is the user-metadata key-value pairs to be set on the
+// destination. The keys are automatically prefixed with `x-amz-meta-`
+// if needed. If nil is passed, and if only a single source (of any
+// size) is provided in the ComposeObject call, then metadata from the
+// source is copied to the destination.
+func NewDestinationInfo(bucket, object string, sse encrypt.ServerSide, userMeta map[string]string) (d DestinationInfo, err error) {
+	// Input validation.
+	if err = s3utils.CheckValidBucketName(bucket); err != nil {
+		return d, err
+	}
+	if err = s3utils.CheckValidObjectName(object); err != nil {
+		return d, err
+	}
+
+	// Process custom-metadata to remove a `x-amz-meta-` prefix if
+	// present and validate that keys are distinct (after this
+	// prefix removal).
+	m := make(map[string]string)
+	for k, v := range userMeta {
+		if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+			k = k[len("x-amz-meta-"):]
+		}
+		if _, ok := m[k]; ok {
+			return d, ErrInvalidArgument(fmt.Sprintf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k))
+		}
+		m[k] = v
+	}
+
+	return DestinationInfo{
+		bucket:       bucket,
+		object:       object,
+		encryption:   sse,
+		userMetadata: m,
+	}, nil
+}
+
+// getUserMetaHeadersMap - construct appropriate key-value pairs to send
+// as headers from metadata map to pass into copy-object request. For
+// single part copy-object (i.e. non-multipart object), enable the
+// withCopyDirectiveHeader to set the `x-amz-metadata-directive` to
+// `REPLACE`, so that metadata headers from the source are not copied
+// over.
+func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) map[string]string {
+	if len(d.userMetadata) == 0 {
+		return nil
+	}
+	r := make(map[string]string)
+	if withCopyDirectiveHeader {
+		r["x-amz-metadata-directive"] = "REPLACE"
+	}
+	for k, v := range d.userMetadata {
+		if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
+			r[k] = v
+		} else {
+			r["x-amz-meta-"+k] = v
+		}
+	}
+	return r
+}
+
+// SourceInfo - represents a source object to be copied, using
+// server-side copying APIs.
+type SourceInfo struct {
+	bucket, object string
+	start, end     int64
+	encryption     encrypt.ServerSide
+	// Headers to send with the upload-part-copy request involving
+	// this source object.
+	Headers http.Header
+}
+
+// NewSourceInfo - create a compose-object/copy-object source info
+// object.
+//
+// `sse` is the server-side encryption needed to read the source (for
+// example, SSE-C decryption key material). It may be nil if the source
+// is not encrypted.
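+//
+// A hedged usage sketch (the bucket and object names are illustrative
+// only, not part of this package):
+//
+//	src := minio.NewSourceInfo("source-bucket", "part-one", nil)
+//	_ = src.SetRange(0, 1024*1024-1) // optionally restrict the copy to the first MiB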
+func NewSourceInfo(bucket, object string, sse encrypt.ServerSide) SourceInfo {
+	r := SourceInfo{
+		bucket:     bucket,
+		object:     object,
+		start:      -1, // range is unspecified by default
+		encryption: sse,
+		Headers:    make(http.Header),
+	}
+
+	// Set the source header
+	r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object))
+	return r
+}
+
+// SetRange - Set the start and end offset of the source object to be
+// copied. If this method is not called, the whole source object is
+// copied.
+func (s *SourceInfo) SetRange(start, end int64) error {
+	if start > end || start < 0 {
+		return ErrInvalidArgument("start must be non-negative, and start must be at most end.")
+	}
+	// Note that 0 <= start <= end
+	s.start, s.end = start, end
+	return nil
+}
+
+// SetMatchETagCond - Set ETag match condition. The object is copied
+// only if the etag of the source matches the value given here.
+func (s *SourceInfo) SetMatchETagCond(etag string) error {
+	if etag == "" {
+		return ErrInvalidArgument("ETag cannot be empty.")
+	}
+	s.Headers.Set("x-amz-copy-source-if-match", etag)
+	return nil
+}
+
+// SetMatchETagExceptCond - Set the ETag match exception
+// condition. The object is copied only if the etag of the source is
+// not the value given here.
+func (s *SourceInfo) SetMatchETagExceptCond(etag string) error {
+	if etag == "" {
+		return ErrInvalidArgument("ETag cannot be empty.")
+	}
+	s.Headers.Set("x-amz-copy-source-if-none-match", etag)
+	return nil
+}
+
+// SetModifiedSinceCond - Set the modified since condition.
+func (s *SourceInfo) SetModifiedSinceCond(modTime time.Time) error {
+	if modTime.IsZero() {
+		return ErrInvalidArgument("Input time cannot be 0.")
+	}
+	s.Headers.Set("x-amz-copy-source-if-modified-since", modTime.Format(http.TimeFormat))
+	return nil
+}
+
+// SetUnmodifiedSinceCond - Set the unmodified since condition.
+func (s *SourceInfo) SetUnmodifiedSinceCond(modTime time.Time) error {
+	if modTime.IsZero() {
+		return ErrInvalidArgument("Input time cannot be 0.")
+	}
+	s.Headers.Set("x-amz-copy-source-if-unmodified-since", modTime.Format(http.TimeFormat))
+	return nil
+}
+
+// Helper to fetch size and etag of an object using a StatObject call.
+func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[string]string, err error) {
+	// Get object info - need size and etag here. Also, decryption
+	// headers are added to the stat request if given.
+	var objInfo ObjectInfo
+	opts := StatObjectOptions{GetObjectOptions{ServerSideEncryption: encrypt.SSE(s.encryption)}}
+	objInfo, err = c.statObject(context.Background(), s.bucket, s.object, opts)
+	if err != nil {
+		err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err))
+	} else {
+		size = objInfo.Size
+		etag = objInfo.ETag
+		userMeta = make(map[string]string)
+		for k, v := range objInfo.Metadata {
+			if strings.HasPrefix(k, "x-amz-meta-") {
+				if len(v) > 0 {
+					userMeta[k] = v[0]
+				}
+			}
+		}
+	}
+	return
+}
+
+// Low level implementation of CopyObject API, supports only up to 5GiB worth of copy.
+func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
+	metadata map[string]string) (ObjectInfo, error) {
+
+	// Build headers.
+	headers := make(http.Header)
+
+	// Set all the metadata headers.
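+	// (For S3 copy requests these are typically x-amz-meta-* entries,
+	// plus the x-amz-metadata-directive header when metadata is being
+	// replaced rather than copied.)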
+	for k, v := range metadata {
+		headers.Set(k, v)
+	}
+
+	// Set the source header
+	headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
+
+	// Send copy-object request
+	resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
+		bucketName:   destBucket,
+		objectName:   destObject,
+		customHeader: headers,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return ObjectInfo{}, err
+	}
+
+	// Check if we got an error response.
+	if resp.StatusCode != http.StatusOK {
+		return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject)
+	}
+
+	cpObjRes := copyObjectResult{}
+	err = xmlDecoder(resp.Body, &cpObjRes)
+	if err != nil {
+		return ObjectInfo{}, err
+	}
+
+	objInfo := ObjectInfo{
+		Key:          destObject,
+		ETag:         strings.Trim(cpObjRes.ETag, "\""),
+		LastModified: cpObjRes.LastModified,
+	}
+	return objInfo, nil
+}
+
+func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
+	partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) {
+
+	headers := make(http.Header)
+
+	// Set source
+	headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
+
+	if startOffset < 0 {
+		return p, ErrInvalidArgument("startOffset must be non-negative")
+	}
+
+	if length >= 0 {
+		headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
+	}
+
+	for k, v := range metadata {
+		headers.Set(k, v)
+	}
+
+	queryValues := make(url.Values)
+	queryValues.Set("partNumber", strconv.Itoa(partID))
+	queryValues.Set("uploadId", uploadID)
+
+	resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
+		bucketName:   destBucket,
+		objectName:   destObject,
+		customHeader: headers,
+		queryValues:  queryValues,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return
+	}
+
+	// Check if we got an error response.
+	if resp.StatusCode != http.StatusOK {
+		return p, httpRespToErrorResponse(resp, destBucket, destObject)
+	}
+
+	// Decode copy-part response on success.
+	cpObjRes := copyObjectResult{}
+	err = xmlDecoder(resp.Body, &cpObjRes)
+	if err != nil {
+		return p, err
+	}
+	p.PartNumber, p.ETag = partID, cpObjRes.ETag
+	return p, nil
+}
+
+// uploadPartCopy - helper function to create a part in a multipart
+// upload via an upload-part-copy request
+// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
+func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
+	headers http.Header) (p CompletePart, err error) {
+
+	// Build query parameters
+	urlValues := make(url.Values)
+	urlValues.Set("partNumber", strconv.Itoa(partNumber))
+	urlValues.Set("uploadId", uploadID)
+
+	// Send upload-part-copy request
+	resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
+		bucketName:   bucket,
+		objectName:   object,
+		customHeader: headers,
+		queryValues:  urlValues,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return p, err
+	}
+
+	// Check if we got an error response.
+	if resp.StatusCode != http.StatusOK {
+		return p, httpRespToErrorResponse(resp, bucket, object)
+	}
+
+	// Decode copy-part response on success.
+	cpObjRes := copyObjectResult{}
+	err = xmlDecoder(resp.Body, &cpObjRes)
+	if err != nil {
+		return p, err
+	}
+	p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
+	return p, nil
+}
+
+// ComposeObjectWithProgress - creates an object using server-side copying of
+// existing objects.
+// It takes a list of source objects (with optional offsets) and
+// concatenates them into a new object using only server-side copying
+// operations. Optionally takes a progress reader hook for applications
+// to observe current progress.
+func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo, progress io.Reader) error {
+	if len(srcs) < 1 || len(srcs) > maxPartsCount {
+		return ErrInvalidArgument("There must be at least one and up to 10000 source objects.")
+	}
+	ctx := context.Background()
+	srcSizes := make([]int64, len(srcs))
+	var totalSize, size, totalParts int64
+	var srcUserMeta map[string]string
+	etags := make([]string, len(srcs))
+	var err error
+	for i, src := range srcs {
+		size, etags[i], srcUserMeta, err = src.getProps(c)
+		if err != nil {
+			return err
+		}
+
+		// Error out if client side encryption is used in this source object when
+		// more than one source object is given.
+		if len(srcs) > 1 && src.Headers.Get("x-amz-meta-x-amz-key") != "" {
+			return ErrInvalidArgument(
+				fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object))
+		}
+
+		// Check if a segment is specified, and if so, is the
+		// segment within object bounds?
+		if src.start != -1 {
+			// Since range is specified,
+			//    0 <= src.start <= src.end
+			// so only invalid case to check is:
+			if src.end >= size {
+				return ErrInvalidArgument(
+					fmt.Sprintf("SourceInfo %d has invalid segment-to-copy [%d, %d] (size is %d)",
+						i, src.start, src.end, size))
+			}
+			size = src.end - src.start + 1
+		}
+
+		// Only the last source may be less than `absMinPartSize`
+		if size < absMinPartSize && i < len(srcs)-1 {
+			return ErrInvalidArgument(
+				fmt.Sprintf("SourceInfo %d is too small (%d) and it is not the last part", i, size))
+		}
+
+		// Is data to copy too large?
+		totalSize += size
+		if totalSize > maxMultipartPutObjectSize {
+			return ErrInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
+		}
+
+		// record source size
+		srcSizes[i] = size
+
+		// calculate parts needed for current source
+		totalParts += partsRequired(size)
+		// Do we need more parts than we are allowed?
+		if totalParts > maxPartsCount {
+			return ErrInvalidArgument(fmt.Sprintf(
+				"Your proposed compose object requires more than %d parts", maxPartsCount))
+		}
+	}
+
+	// Single source object case (i.e. when only one source is
+	// involved, it is being copied wholly and is at most 5GiB in
+	// size; empty files are also supported).
+	if (totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
+		return c.CopyObjectWithProgress(dst, srcs[0], progress)
+	}
+
+	// Now, handle multipart-copy cases.
+
+	// 1. Ensure that the object has not been changed while
+	//    we are copying data.
+	for i, src := range srcs {
+		if src.Headers.Get("x-amz-copy-source-if-match") == "" {
+			src.SetMatchETagCond(etags[i])
+		}
+	}
+
+	// 2. Initiate a new multipart upload.
+
+	// Set user-metadata on the destination object. If no
+	// user-metadata is specified, and there is only one source,
+	// (only) then metadata from source is copied.
+	userMeta := dst.getUserMetaHeadersMap(false)
+	metaMap := userMeta
+	if len(userMeta) == 0 && len(srcs) == 1 {
+		metaMap = srcUserMeta
+	}
+	metaHeaders := make(map[string]string)
+	for k, v := range metaMap {
+		metaHeaders[k] = v
+	}
+
+	uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{ServerSideEncryption: dst.encryption, UserMetadata: metaHeaders})
+	if err != nil {
+		return err
+	}
+	// 3. Perform copy part uploads
+	objParts := []CompletePart{}
+	partIndex := 1
+	for i, src := range srcs {
+		h := src.Headers
+		if src.encryption != nil {
+			encrypt.SSECopy(src.encryption).Marshal(h)
+		}
+		// Add destination encryption headers
+		if dst.encryption != nil {
+			dst.encryption.Marshal(h)
+		}
+
+		// calculate start/end indices of parts after
+		// splitting.
+		startIdx, endIdx := calculateEvenSplits(srcSizes[i], src)
+		for j, start := range startIdx {
+			end := endIdx[j]
+
+			// Add (or reset) source range header for
+			// upload part copy request.
+			h.Set("x-amz-copy-source-range",
+				fmt.Sprintf("bytes=%d-%d", start, end))
+
+			// make upload-part-copy request
+			complPart, err := c.uploadPartCopy(ctx, dst.bucket,
+				dst.object, uploadID, partIndex, h)
+			if err != nil {
+				return err
+			}
+			if progress != nil {
+				io.CopyN(ioutil.Discard, progress, end-start+1)
+			}
+			objParts = append(objParts, complPart)
+			partIndex++
+		}
+	}
+
+	// 4. Make final complete-multipart request.
+	_, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID,
+		completeMultipartUpload{Parts: objParts})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// ComposeObject - creates an object using server-side copying of
+// existing objects. It takes a list of source objects (with optional
+// offsets) and concatenates them into a new object using only
+// server-side copying operations.
+func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
+	return c.ComposeObjectWithProgress(dst, srcs, nil)
+}
+
+// partsRequired - computes the number of parts needed to copy `size`
+// bytes, assuming a part size of
+// maxMultipartPutObjectSize / (maxPartsCount - 1).
+func partsRequired(size int64) int64 {
+	maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
+	r := size / int64(maxPartSize)
+	if size%int64(maxPartSize) > 0 {
+		r++
+	}
+	return r
+}
+
+// calculateEvenSplits - computes splits for a source and returns
+// start and end index slices. Splits happen evenly to be sure that no
+// part is less than 5MiB, as that could fail the multipart request if
+// it is not the last part.
+func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int64) {
+	if size == 0 {
+		return
+	}
+
+	reqParts := partsRequired(size)
+	startIndex = make([]int64, reqParts)
+	endIndex = make([]int64, reqParts)
+	// Compute number of required parts `k`, as:
+	//
+	// k = ceiling(size / part size)
+	//
+	// Now, distribute the `size` bytes in the source into
+	// k parts as evenly as possible:
+	//
+	// r parts sized (q+1) bytes, and
+	// (k - r) parts sized q bytes, where
+	//
+	// size = q * k + r (by simple division of size by k,
+	// so that 0 <= r < k)
+	//
+	start := src.start
+	if start == -1 {
+		start = 0
+	}
+	quot, rem := size/reqParts, size%reqParts
+	nextStart := start
+	for j := int64(0); j < reqParts; j++ {
+		curPartSize := quot
+		if j < rem {
+			curPartSize++
+		}
+
+		cStart := nextStart
+		cEnd := cStart + curPartSize - 1
+		nextStart = cEnd + 1
+
+		startIndex[j], endIndex[j] = cStart, cEnd
+	}
+	return
+}
diff --git a/vendor/github.com/minio/minio-go/v6/api-datatypes.go b/vendor/github.com/minio/minio-go/v6/api-datatypes.go
new file mode 100644
index 0000000000..3fb47fc107
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/api-datatypes.go
@@ -0,0 +1,85 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"net/http"
+	"time"
+)
+
+// BucketInfo container for bucket metadata.
+type BucketInfo struct {
+	// The name of the bucket.
+	Name string `json:"name"`
+	// Date the bucket was created.
+	CreationDate time.Time `json:"creationDate"`
+}
+
+// ObjectInfo container for object metadata.
+type ObjectInfo struct {
+	// An ETag is optionally set to md5sum of an object. In case of multipart objects,
+	// ETag is of the form MD5SUM-N where MD5SUM is the md5sum of all the individual
+	// md5sums of each part concatenated into one string.
+	ETag string `json:"etag"`
+
+	Key          string    `json:"name"`         // Name of the object
+	LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
+	Size         int64     `json:"size"`         // Size in bytes of the object.
+	ContentType  string    `json:"contentType"`  // A standard MIME type describing the format of the object data.
+	Expires      time.Time `json:"expires"`      // The date and time at which the object is no longer able to be cached.
+
+	// Collection of additional metadata on the object.
+	// e.g. x-amz-meta-*, content-encoding etc.
+	Metadata http.Header `json:"metadata" xml:"-"`
+
+	// Owner name.
+	Owner struct {
+		DisplayName string `json:"name"`
+		ID          string `json:"id"`
+	} `json:"owner"`
+
+	// The class of storage used to store the object.
+	StorageClass string `json:"storageClass"`
+
+	// Error
+	Err error `json:"-"`
+}
+
+// ObjectMultipartInfo container for multipart object metadata.
+type ObjectMultipartInfo struct {
+	// Date and time at which the multipart upload was initiated.
+	Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	Initiator initiator
+	Owner     owner
+
+	// The type of storage to use for the object. Defaults to 'STANDARD'.
+	StorageClass string
+
+	// Key of the object for which the multipart upload was initiated.
+	Key string
+
+	// Size in bytes of the object.
+	Size int64
+
+	// Upload ID that identifies the multipart upload.
+	UploadID string `xml:"UploadId"`
+
+	// Error
+	Err error
+}
diff --git a/vendor/github.com/minio/minio-go/v6/api-error-response.go b/vendor/github.com/minio/minio-go/v6/api-error-response.go
new file mode 100644
index 0000000000..726fdd274a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/api-error-response.go
@@ -0,0 +1,282 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"encoding/xml"
+	"fmt"
+	"net/http"
+)
+
+/* **** SAMPLE ERROR RESPONSE ****
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+   <Code>AccessDenied</Code>
+   <Message>Access Denied</Message>
+   <BucketName>bucketName</BucketName>
+   <Key>objectName</Key>
+   <RequestId>F19772218238A85A</RequestId>
+   <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
+</Error>
+*/
+
+// ErrorResponse - Is the typed error returned by all API operations.
+// ErrorResponse struct should be comparable since it is compared inside
+// golang http API (https://github.com/golang/go/issues/29768)
+type ErrorResponse struct {
+	XMLName    xml.Name `xml:"Error" json:"-"`
+	Code       string
+	Message    string
+	BucketName string
+	Key        string
+	RequestID  string `xml:"RequestId"`
+	HostID     string `xml:"HostId"`
+
+	// Region where the bucket is located. This header is returned
+	// only in HEAD bucket and ListObjects response.
+	Region string
+
+	// Underlying HTTP status code for the returned error
+	StatusCode int `xml:"-" json:"-"`
+}
+
+// ToErrorResponse - Returns parsed ErrorResponse struct from body and
+// http headers.
+//
+// For example:
+//
+//	import s3 "github.com/minio/minio-go/v6"
+//	...
+//	...
+//	reader, stat, err := s3.GetObject(...)
+//	if err != nil {
+//	   resp := s3.ToErrorResponse(err)
+//	}
+//	...
+func ToErrorResponse(err error) ErrorResponse {
+	switch err := err.(type) {
+	case ErrorResponse:
+		return err
+	default:
+		return ErrorResponse{}
+	}
+}
+
+// Error - Returns S3 error string.
+func (e ErrorResponse) Error() string {
+	if e.Message == "" {
+		msg, ok := s3ErrorResponseMap[e.Code]
+		if !ok {
+			msg = fmt.Sprintf("Error response code %s.", e.Code)
+		}
+		return msg
+	}
+	return e.Message
+}
+
+// Common string for errors to report issue location in unexpected
+// cases.
+const (
+	reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
+)
+
+// httpRespToErrorResponse returns a new encoded ErrorResponse
+// structure as error.
+func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
+	if resp == nil {
+		msg := "Response is empty. " + reportIssue
+		return ErrInvalidArgument(msg)
+	}
+
+	errResp := ErrorResponse{
+		StatusCode: resp.StatusCode,
+	}
+
+	err := xmlDecoder(resp.Body, &errResp)
+	// XML decoding failed with no body, fall back to HTTP headers.
+	if err != nil {
+		switch resp.StatusCode {
+		case http.StatusNotFound:
+			if objectName == "" {
+				errResp = ErrorResponse{
+					StatusCode: resp.StatusCode,
+					Code:       "NoSuchBucket",
+					Message:    "The specified bucket does not exist.",
+					BucketName: bucketName,
+				}
+			} else {
+				errResp = ErrorResponse{
+					StatusCode: resp.StatusCode,
+					Code:       "NoSuchKey",
+					Message:    "The specified key does not exist.",
+					BucketName: bucketName,
+					Key:        objectName,
+				}
+			}
+		case http.StatusForbidden:
+			errResp = ErrorResponse{
+				StatusCode: resp.StatusCode,
+				Code:       "AccessDenied",
+				Message:    "Access Denied.",
+				BucketName: bucketName,
+				Key:        objectName,
+			}
+		case http.StatusConflict:
+			errResp = ErrorResponse{
+				StatusCode: resp.StatusCode,
+				Code:       "Conflict",
+				Message:    "Bucket not empty.",
+				BucketName: bucketName,
+			}
+		case http.StatusPreconditionFailed:
+			errResp = ErrorResponse{
+				StatusCode: resp.StatusCode,
+				Code:       "PreconditionFailed",
+				Message:    s3ErrorResponseMap["PreconditionFailed"],
+				BucketName: bucketName,
+				Key:        objectName,
+			}
+		default:
+			errResp = ErrorResponse{
+				StatusCode: resp.StatusCode,
+				Code:       resp.Status,
+				Message:    resp.Status,
+				BucketName: bucketName,
+			}
+		}
+	}
+
+	// Save hostID, requestID and region information
+	// from headers if not available through error XML.
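+	// (The relevant S3 headers are x-amz-request-id, x-amz-id-2 and
+	// x-amz-bucket-region, matching the lookups below.)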
+ if errResp.RequestID == "" { + errResp.RequestID = resp.Header.Get("x-amz-request-id") + } + if errResp.HostID == "" { + errResp.HostID = resp.Header.Get("x-amz-id-2") + } + if errResp.Region == "" { + errResp.Region = resp.Header.Get("x-amz-bucket-region") + } + if errResp.Code == "InvalidRegion" && errResp.Region != "" { + errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) + } + + return errResp +} + +// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. +func ErrTransferAccelerationBucket(bucketName string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.", + BucketName: bucketName, + } +} + +// ErrEntityTooLarge - Input size is larger than supported maximum. +func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "EntityTooLarge", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// ErrEntityTooSmall - Input size is smaller than supported minimum. +func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "EntityTooSmall", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// ErrUnexpectedEOF - Unexpected end of file reached. +func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "UnexpectedEOF", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// ErrInvalidBucketName - Invalid bucket name response. +func ErrInvalidBucketName(message string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidBucketName", + Message: message, + RequestID: "minio", + } +} + +// ErrInvalidObjectName - Invalid object name response. +func ErrInvalidObjectName(message string) error { + return ErrorResponse{ + StatusCode: http.StatusNotFound, + Code: "NoSuchKey", + Message: message, + RequestID: "minio", + } +} + +// ErrInvalidObjectPrefix - Invalid object prefix response is +// similar to object name response. +var ErrInvalidObjectPrefix = ErrInvalidObjectName + +// ErrInvalidArgument - Invalid argument response. +func ErrInvalidArgument(message string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: message, + RequestID: "minio", + } +} + +// ErrNoSuchBucketPolicy - No Such Bucket Policy response +// The specified bucket does not have a bucket policy. 
+func ErrNoSuchBucketPolicy(message string) error { + return ErrorResponse{ + StatusCode: http.StatusNotFound, + Code: "NoSuchBucketPolicy", + Message: message, + RequestID: "minio", + } +} + +// ErrAPINotSupported - API not supported response +// The specified API call is not supported +func ErrAPINotSupported(message string) error { + return ErrorResponse{ + StatusCode: http.StatusNotImplemented, + Code: "APINotSupported", + Message: message, + RequestID: "minio", + } +} diff --git a/vendor/github.com/minio/minio-go/v6/api-get-lifecycle.go b/vendor/github.com/minio/minio-go/v6/api-get-lifecycle.go new file mode 100644 index 0000000000..a24d03e370 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/api-get-lifecycle.go @@ -0,0 +1,77 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io/ioutil" + "net/http" + "net/url" + + "github.com/minio/minio-go/v6/pkg/s3utils" +) + +// GetBucketLifecycle - get bucket lifecycle. +func (c Client) GetBucketLifecycle(bucketName string) (string, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + bucketLifecycle, err := c.getBucketLifecycle(bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "NoSuchLifecycleConfiguration" { + return "", nil + } + return "", err + } + return bucketLifecycle, nil +} + +// Request server for current bucket lifecycle. +func (c Client) getBucketLifecycle(bucketName string) (string, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Execute GET on bucket to get lifecycle. + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return "", err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", httpRespToErrorResponse(resp, bucketName, "") + } + } + + bucketLifecycleBuf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + + lifecycle := string(bucketLifecycleBuf) + return lifecycle, err +} diff --git a/vendor/github.com/minio/minio-go/v6/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v6/api-get-object-acl.go new file mode 100644 index 0000000000..ea809492aa --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/api-get-object-acl.go @@ -0,0 +1,136 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
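+// A minimal, illustrative sketch of reading a bucket's lifecycle configuration
+// with GetBucketLifecycle above (the client variable is an assumption; the
+// returned string is the raw lifecycle XML, and "" with a nil error means no
+// lifecycle is configured):
+//
+//	lifecycle, err := client.GetBucketLifecycle("mybucket")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	if lifecycle == "" {
+//		fmt.Println("no lifecycle configured")
+//	} else {
+//		fmt.Println(lifecycle)
+//	}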
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "net/http" + "net/url" +) + +type accessControlPolicy struct { + Owner struct { + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + } `xml:"Owner"` + AccessControlList struct { + Grant []struct { + Grantee struct { + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + URI string `xml:"URI"` + } `xml:"Grantee"` + Permission string `xml:"Permission"` + } `xml:"Grant"` + } `xml:"AccessControlList"` +} + +//GetObjectACL get object ACLs +func (c Client) GetObjectACL(bucketName, objectName string) (*ObjectInfo, error) { + + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: url.Values{ + "acl": []string{""}, + }, + }) + if err != nil { + return nil, err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + + res := &accessControlPolicy{} + + if err := xmlDecoder(resp.Body, res); err != nil { + return nil, err + } + + objInfo, err := c.statObject(context.Background(), bucketName, objectName, StatObjectOptions{}) + if err != nil { + return nil, err + } + + cannedACL := getCannedACL(res) + if cannedACL != "" { + objInfo.Metadata.Add("X-Amz-Acl", cannedACL) + return &objInfo, nil + } + + grantACL := getAmzGrantACL(res) + for k, v := range grantACL { + objInfo.Metadata[k] = v + } + + return &objInfo, nil +} + +func getCannedACL(aCPolicy *accessControlPolicy) string { + grants := aCPolicy.AccessControlList.Grant + + switch { + case len(grants) == 1: + if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" { + return "private" + } + case len(grants) == 2: + for _, g := range grants { + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { + return "authenticated-read" + } + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { + return "public-read" + } + if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID { + return "bucket-owner-read" + } + } + case len(grants) == 3: + for _, g := range grants { + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { + return "public-read-write" + } + } + } + return "" +} + +func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string { + grants := aCPolicy.AccessControlList.Grant + res := map[string][]string{} + + for _, g := range grants { + switch { + case g.Permission == "READ": + res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID) + case g.Permission == "WRITE": + res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID) + case g.Permission == "READ_ACP": + res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID) + case g.Permission == "WRITE_ACP": + res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID) + case g.Permission == "FULL_CONTROL": + res["X-Amz-Grant-Full-Control"] = 
append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID) + } + } + return res +} diff --git a/vendor/github.com/minio/minio-go/v6/api-get-object-context.go b/vendor/github.com/minio/minio-go/v6/api-get-object-context.go new file mode 100644 index 0000000000..3c3afd0590 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/api-get-object-context.go @@ -0,0 +1,26 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import "context" + +// GetObjectWithContext - returns an seekable, readable object. +// The options can be used to specify the GET request further. +func (c Client) GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { + return c.getObjectWithContext(ctx, bucketName, objectName, opts) +} diff --git a/vendor/github.com/minio/minio-go/v6/api-get-object-file.go b/vendor/github.com/minio/minio-go/v6/api-get-object-file.go new file mode 100644 index 0000000000..9c82a7c4f9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/api-get-object-file.go @@ -0,0 +1,125 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "os" + "path/filepath" + + "github.com/minio/minio-go/v6/pkg/s3utils" +) + +// FGetObjectWithContext - download contents of an object to a local file. +// The options can be used to specify the GET request further. +func (c Client) FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { + return c.fGetObjectWithContext(ctx, bucketName, objectName, filePath, opts) +} + +// FGetObject - download contents of an object to a local file. +func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error { + return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) +} + +// fGetObjectWithContext - fgetObject wrapper function with context +func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Verify if destination already exists. 
+	// Verify if destination already exists.
+	st, err := os.Stat(filePath)
+	if err == nil {
+		// If the destination exists and is a directory.
+		if st.IsDir() {
+			return ErrInvalidArgument("fileName is a directory.")
+		}
+	}
+
+	// Proceed if the file does not exist; return for all other errors.
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+	}
+
+	// Extract top level directory.
+	objectDir, _ := filepath.Split(filePath)
+	if objectDir != "" {
+		// Create any missing top level directories.
+		if err := os.MkdirAll(objectDir, 0700); err != nil {
+			return err
+		}
+	}
+
+	// Stat the object; its ETag is used below to name the temporary part file.
+	objectStat, err := c.StatObject(bucketName, objectName, StatObjectOptions{opts})
+	if err != nil {
+		return err
+	}
+
+	// Write to a temporary file "<filePath><ETag>.part.minio" before saving.
+	filePartPath := filePath + objectStat.ETag + ".part.minio"
+
+	// If exists, open in append mode. If not create it as a part file.
+	filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
+	if err != nil {
+		return err
+	}
+
+	// Issue Stat to get the current offset.
+	st, err = filePart.Stat()
+	if err != nil {
+		return err
+	}
+
+	// Initialize get object request headers to set the
+	// appropriate range offsets to read from.
+	if st.Size() > 0 {
+		opts.SetRange(st.Size(), 0)
+	}
+
+	// Seek to current position for incoming reader.
+	objectReader, objectStat, err := c.getObject(ctx, bucketName, objectName, opts)
+	if err != nil {
+		return err
+	}
+
+	// Write to the part file.
+	if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
+		return err
+	}
+
+	// Close the file before rename, this is specifically needed for Windows users.
+	if err = filePart.Close(); err != nil {
+		return err
+	}
+
+	// Safely completed. Now commit by renaming to actual filename.
+	if err = os.Rename(filePartPath, filePath); err != nil {
+		return err
+	}
+
+	// Return.
+	return nil
+}
diff --git a/vendor/github.com/minio/minio-go/v6/api-get-object.go b/vendor/github.com/minio/minio-go/v6/api-get-object.go
new file mode 100644
index 0000000000..e4cd851f43
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/api-get-object.go
@@ -0,0 +1,659 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/minio/minio-go/v6/pkg/s3utils"
+)
+
+// GetObject - returns a seekable, readable object.
+func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
+	return c.getObjectWithContext(context.Background(), bucketName, objectName, opts)
+}
+
+// GetObject wrapper function that accepts a request context
+func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
+	// Input validation.
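+	// A minimal, illustrative sketch of the public entry point above (client
+	// and names are assumptions; the returned Object supports Read, ReadAt,
+	// Seek and Close over the request channel built below):
+	//
+	//	obj, err := client.GetObject("mybucket", "myobject", minio.GetObjectOptions{})
+	//	if err != nil {
+	//		log.Fatalln(err)
+	//	}
+	//	defer obj.Close()
+	//	if _, err = io.Copy(os.Stdout, obj); err != nil {
+	//		log.Fatalln(err)
+	//	}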
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + + var httpReader io.ReadCloser + var objectInfo ObjectInfo + var err error + + // Create request channel. + reqCh := make(chan getRequest) + // Create response channel. + resCh := make(chan getResponse) + // Create done channel. + doneCh := make(chan struct{}) + + // This routine feeds partial object data as and when the caller reads. + go func() { + defer close(reqCh) + defer close(resCh) + + // Used to verify if etag of object has changed since last read. + var etag string + + // Loop through the incoming control messages and read data. + for { + select { + // When the done channel is closed exit our routine. + case <-doneCh: + // Close the http response body before returning. + // This ends the connection with the server. + if httpReader != nil { + httpReader.Close() + } + return + + // Gather incoming request. + case req := <-reqCh: + // If this is the first request we may not need to do a getObject request yet. + if req.isFirstReq { + // First request is a Read/ReadAt. + if req.isReadOp { + // Differentiate between wanting the whole object and just a range. + if req.isReadAt { + // If this is a ReadAt request only get the specified range. + // Range is set with respect to the offset and length of the buffer requested. + // Do not set objectInfo from the first readAt request because it will not get + // the whole object. + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { + opts.SetRange(req.Offset, 0) + } + httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts) + if err != nil { + resCh <- getResponse{Error: err} + return + } + etag = objectInfo.ETag + // Read at least firstReq.Buffer bytes, if not we have + // reached our EOF. + size, err := io.ReadFull(httpReader, req.Buffer) + if size > 0 && err == io.ErrUnexpectedEOF { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + // Send back the first response. + resCh <- getResponse{ + objectInfo: objectInfo, + Size: int(size), + Error: err, + didRead: true, + } + } else { + // First request is a Stat or Seek call. + // Only need to run a StatObject until an actual Read or ReadAt request comes through. + + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") + objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts}) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the go-routine. + return + } + etag = objectInfo.ETag + // Send back the first response. + resCh <- getResponse{ + objectInfo: objectInfo, + } + } + } else if req.settingObjectInfo { // Request is just to get objectInfo. + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") + if etag != "" { + opts.SetMatchETag(etag) + } + objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts}) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the goroutine. + return + } + // Send back the objectInfo. + resCh <- getResponse{ + objectInfo: objectInfo, + } + } else { + // Offset changes fetch the new object at an Offset. 
+					// The httpReader may not have been initialized by the
+					// first request, since a Stat or Seek does not open one;
+					// only open a new reader when none has been read from yet
+					// or when the offset has changed. All readAt requests
+					// open a new reader.
+					if req.DidOffsetChange || !req.beenRead {
+						if etag != "" {
+							opts.SetMatchETag(etag)
+						}
+						if httpReader != nil {
+							// Close previously opened http reader.
+							httpReader.Close()
+						}
+						// If this request is a readAt only get the specified range.
+						if req.isReadAt {
+							// Range is set with respect to the offset and length of the buffer requested.
+							opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
+						} else if req.Offset > 0 { // Range is set with respect to the offset.
+							opts.SetRange(req.Offset, 0)
+						}
+						httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts)
+						if err != nil {
+							resCh <- getResponse{
+								Error: err,
+							}
+							return
+						}
+					}
+
+					// Read at least req.Buffer bytes, if not we have
+					// reached our EOF.
+					size, err := io.ReadFull(httpReader, req.Buffer)
+					if err == io.ErrUnexpectedEOF {
+						// If an EOF happens after reading some but not
+						// all the bytes ReadFull returns ErrUnexpectedEOF
+						err = io.EOF
+					}
+					// Reply back how much was read.
+					resCh <- getResponse{
+						Size:       int(size),
+						Error:      err,
+						didRead:    true,
+						objectInfo: objectInfo,
+					}
+				}
+			}
+		}
+	}()
+
+	// Create a newObject through the information sent back by reqCh.
+	return newObject(reqCh, resCh, doneCh), nil
+}
+
+// get request message container to communicate with internal
+// go-routine.
+type getRequest struct {
+	Buffer            []byte
+	Offset            int64 // readAt offset.
+	DidOffsetChange   bool  // Tracks the offset changes for Seek requests.
+	beenRead          bool  // Determines if this is the first time an object is being read.
+	isReadAt          bool  // Determines if this request is a request to a specific range
+	isReadOp          bool  // Determines if this request is a Read or Read/At request.
+	isFirstReq        bool  // Determines if this request is the first time an object is being accessed.
+	settingObjectInfo bool  // Determines if this request is to set the objectInfo of an object.
+}
+
+// get response message container to reply back for the request.
+type getResponse struct {
+	Size       int
+	Error      error
+	didRead    bool       // Lets subsequent calls know whether or not httpReader has been initiated.
+	objectInfo ObjectInfo // Used for the first request.
+}
+
+// Object represents an open object. It implements
+// Reader, ReaderAt, Seeker, Closer for an HTTP stream.
+type Object struct {
+	// Mutex.
+	mutex *sync.Mutex
+
+	// User allocated and defined.
+	reqCh      chan<- getRequest
+	resCh      <-chan getResponse
+	doneCh     chan<- struct{}
+	currOffset int64
+	objectInfo ObjectInfo
+
+	// Ask lower level to initiate data fetching based on currOffset
+	seekData bool
+
+	// Keeps track of closed call.
+	isClosed bool
+
+	// Keeps track of if this is the first call.
+	isStarted bool
+
+	// Previous error saved for future calls.
+	prevErr error
+
+	// Keeps track of if this object has been read yet.
+	beenRead bool
+
+	// Keeps track of if objectInfo has been set yet.
+	objectInfoSet bool
+}
+
+// doGetRequest - sends and blocks on the reqCh of an object.
+// Returns back the size of the buffer read, if anything was read, as well
+// as any error encountered. For all first requests sent on the object
+// it is also responsible for sending back the objectInfo.
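+//
+// A minimal, illustrative sketch of the traffic this protocol generates
+// (client and names are assumptions): a Seek followed by a Read issues a
+// stat-style first request and then a ranged read:
+//
+//	obj, err := client.GetObject("mybucket", "myobject", minio.GetObjectOptions{})
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	defer obj.Close()
+//	if _, err = obj.Seek(128, 0); err != nil {
+//		log.Fatalln(err)
+//	}
+//	buf := make([]byte, 64)
+//	n, err := obj.Read(buf) // n is at most 64; bytes 128-191 of the object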
+func (o *Object) doGetRequest(request getRequest) (getResponse, error) { + o.reqCh <- request + response := <-o.resCh + + // Return any error to the top level. + if response.Error != nil { + return response, response.Error + } + + // This was the first request. + if !o.isStarted { + // The object has been operated on. + o.isStarted = true + } + // Set the objectInfo if the request was not readAt + // and it hasn't been set before. + if !o.objectInfoSet && !request.isReadAt { + o.objectInfo = response.objectInfo + o.objectInfoSet = true + } + // Set beenRead only if it has not been set before. + if !o.beenRead { + o.beenRead = response.didRead + } + // Data are ready on the wire, no need to reinitiate connection in lower level + o.seekData = false + + return response, nil +} + +// setOffset - handles the setting of offsets for +// Read/ReadAt/Seek requests. +func (o *Object) setOffset(bytesRead int64) error { + // Update the currentOffset. + o.currOffset += bytesRead + + if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size { + return io.EOF + } + return nil +} + +// Read reads up to len(b) bytes into b. It returns the number of +// bytes read (0 <= n <= len(b)) and any error encountered. Returns +// io.EOF upon end of file. +func (o *Object) Read(b []byte) (n int, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // prevErr is previous error saved from previous operation. + if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + // Create a new request. + readReq := getRequest{ + isReadOp: true, + beenRead: o.beenRead, + Buffer: b, + } + + // Alert that this is the first request. + if !o.isStarted { + readReq.isFirstReq = true + } + + // Ask to establish a new data fetch routine based on seekData flag + readReq.DidOffsetChange = o.seekData + readReq.Offset = o.currOffset + + // Send and receive from the first request. + response, err := o.doGetRequest(readReq) + if err != nil && err != io.EOF { + // Save the error for future calls. + o.prevErr = err + return response.Size, err + } + + // Bytes read. + bytesRead := int64(response.Size) + + // Set the new offset. + oerr := o.setOffset(bytesRead) + if oerr != nil { + // Save the error for future calls. + o.prevErr = oerr + return response.Size, oerr + } + + // Return the response. + return response.Size, err +} + +// Stat returns the ObjectInfo structure describing Object. +func (o *Object) Stat() (ObjectInfo, error) { + if o == nil { + return ObjectInfo{}, ErrInvalidArgument("Object is nil") + } + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { + return ObjectInfo{}, o.prevErr + } + + // This is the first request. + if !o.isStarted || !o.objectInfoSet { + // Send the request and get the response. + _, err := o.doGetRequest(getRequest{ + isFirstReq: !o.isStarted, + settingObjectInfo: !o.objectInfoSet, + }) + if err != nil { + o.prevErr = err + return ObjectInfo{}, err + } + } + + return o.objectInfo, nil +} + +// ReadAt reads len(b) bytes from the File starting at byte offset +// off. It returns the number of bytes read and the error, if any. +// ReadAt always returns a non-nil error when n < len(b). At end of +// file, that error is io.EOF. +func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. 
+	o.mutex.Lock()
+	defer o.mutex.Unlock()
+
+	// prevErr is error which was saved in previous operation.
+	if o.prevErr != nil || o.isClosed {
+		return 0, o.prevErr
+	}
+
+	// Can only compare offsets to size when size has been set.
+	if o.objectInfoSet {
+		// If offset is negative then we return io.EOF.
+		// If offset is greater than or equal to object size we return io.EOF.
+		if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 {
+			return 0, io.EOF
+		}
+	}
+
+	// Create the new readAt request.
+	readAtReq := getRequest{
+		isReadOp:        true,
+		isReadAt:        true,
+		DidOffsetChange: true,       // Offset always changes.
+		beenRead:        o.beenRead, // Set if this is the first request to try and read.
+		Offset:          offset,     // Set the offset.
+		Buffer:          b,
+	}
+
+	// Alert that this is the first request.
+	if !o.isStarted {
+		readAtReq.isFirstReq = true
+	}
+
+	// Send and receive from the first request.
+	response, err := o.doGetRequest(readAtReq)
+	if err != nil && err != io.EOF {
+		// Save the error.
+		o.prevErr = err
+		return response.Size, err
+	}
+	// Bytes read.
+	bytesRead := int64(response.Size)
+	// There is no valid objectInfo yet
+	// to compare against for EOF.
+	if !o.objectInfoSet {
+		// Update the currentOffset.
+		o.currOffset += bytesRead
+	} else {
+		// If this was not the first request update
+		// the offsets and compare against objectInfo
+		// for EOF.
+		oerr := o.setOffset(bytesRead)
+		if oerr != nil {
+			o.prevErr = oerr
+			return response.Size, oerr
+		}
+	}
+	return response.Size, err
+}
+
+// Seek sets the offset for the next Read or Write to offset,
+// interpreted according to whence: 0 means relative to the
+// origin of the file, 1 means relative to the current offset,
+// and 2 means relative to the end.
+// Seek returns the new offset and an error, if any.
+//
+// Seeking to a negative offset is an error. Seeking to any positive
+// offset is legal; subsequent io operations succeed as long as the
+// underlying object is not closed.
+func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
+	if o == nil {
+		return 0, ErrInvalidArgument("Object is nil")
+	}
+
+	// Locking.
+	o.mutex.Lock()
+	defer o.mutex.Unlock()
+
+	if o.prevErr != nil {
+		// Seeking at EOF is legal; allow only io.EOF and return any other error.
+		if o.prevErr != io.EOF {
+			return 0, o.prevErr
+		}
+	}
+
+	// Negative offset is valid for whence of '2'.
+	if offset < 0 && whence != 2 {
+		return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence))
+	}
+
+	// This is the first request. So before anything else
+	// get the ObjectInfo.
+	if !o.isStarted || !o.objectInfoSet {
+		// Create the new Seek request.
+		seekReq := getRequest{
+			isReadOp:   false,
+			Offset:     offset,
+			isFirstReq: true,
+		}
+		// Send and receive from the seek request.
+		_, err := o.doGetRequest(seekReq)
+		if err != nil {
+			// Save the error.
+			o.prevErr = err
+			return 0, err
+		}
+	}
+
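+	// Illustrative whence semantics for the switch below (obj stands for any
+	// open *minio.Object with a known size; a sketch, not part of the
+	// upstream file):
+	//
+	//	obj.Seek(100, 0) // 100 bytes from the start of the object
+	//	obj.Seek(50, 1)  // 50 bytes past the current offset
+	//	obj.Seek(-10, 2) // 10 bytes before the end
+	//
+	// With whence 2 a positive offset reports io.EOF, since a Reader backed
+	// by the object cannot read past its end.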
+	// Switch through whence.
+	switch whence {
+	default:
+		return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
+	case 0:
+		if o.objectInfo.Size > -1 && offset > o.objectInfo.Size {
+			return 0, io.EOF
+		}
+		o.currOffset = offset
+	case 1:
+		if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size {
+			return 0, io.EOF
+		}
+		o.currOffset += offset
+	case 2:
+		// If we don't know the object size, return an error for io.SeekEnd.
+		if o.objectInfo.Size < 0 {
+			return 0, ErrInvalidArgument("Whence END is not supported when the object size is unknown")
+		}
+		// Seeking to positive offset is valid for whence '2', but
+		// since we are backing a Reader we have reached 'EOF' if
+		// offset is positive.
+		if offset > 0 {
+			return 0, io.EOF
+		}
+		// Seeking to a negative position is not allowed for this whence.
+		if o.objectInfo.Size+offset < 0 {
+			return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
+		}
+		o.currOffset = o.objectInfo.Size + offset
+	}
+	// Reset the saved error since the seek succeeded; let Read
+	// and ReadAt decide.
+	if o.prevErr == io.EOF {
+		o.prevErr = nil
+	}
+
+	// Ask lower level to fetch again from source
+	o.seekData = true
+
+	// Return the effective offset.
+	return o.currOffset, nil
+}
+
+// Close - closes the object; the first call succeeds and any
+// subsequent Close() call returns an error.
+func (o *Object) Close() (err error) {
+	if o == nil {
+		return ErrInvalidArgument("Object is nil")
+	}
+	// Locking.
+	o.mutex.Lock()
+	defer o.mutex.Unlock()
+
+	// if already closed return an error.
+	if o.isClosed {
+		return o.prevErr
+	}
+
+	// Close successfully.
+	close(o.doneCh)
+
+	// Save for future operations.
+	errMsg := "Object is already closed. Bad file descriptor."
+	o.prevErr = errors.New(errMsg)
+	// Save here that we closed done channel successfully.
+	o.isClosed = true
+	return nil
+}
+
+// newObject instantiates a new minio.Object.
+// Its ObjectInfo is populated by the first request sent over reqCh.
+func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object {
+	return &Object{
+		mutex:  &sync.Mutex{},
+		reqCh:  reqCh,
+		resCh:  resCh,
+		doneCh: doneCh,
+	}
+}
+
+// getObject - retrieve object from Object Storage.
+//
+// Additionally this function also takes range arguments to download the specified
+// range bytes of an object. Setting offset and length = 0 will download the full object.
+//
+// For more information about the HTTP Range header,
+// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) {
+	// Validate input arguments.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return nil, ObjectInfo{}, err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, ObjectInfo{}, err
+	}
+
+	// Execute GET on objectName.
+	resp, err := c.executeMethod(ctx, "GET", requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		customHeader:     opts.Header(),
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	if err != nil {
+		return nil, ObjectInfo{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
+			return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+
+	// Trim off the odd double quotes from ETag in the beginning and end.
+ md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + md5sum = strings.TrimSuffix(md5sum, "\"") + + // Parse the date. + date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) + if err != nil { + msg := "Last-Modified time format not recognized. " + reportIssue + return nil, ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: msg, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + } + + // Get content-type. + contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) + if contentType == "" { + contentType = "application/octet-stream" + } + + objectStat := ObjectInfo{ + ETag: md5sum, + Key: objectName, + Size: resp.ContentLength, + LastModified: date, + ContentType: contentType, + // Extract only the relevant header keys describing the object. + // following function filters out a list of standard set of keys + // which are not part of object metadata. + Metadata: extractObjMetadata(resp.Header), + } + + // do not close body here, caller will close + return resp.Body, objectStat, nil +} diff --git a/vendor/github.com/minio/minio-go/v6/api-get-options.go b/vendor/github.com/minio/minio-go/v6/api-get-options.go new file mode 100644 index 0000000000..538fd1a052 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/api-get-options.go @@ -0,0 +1,128 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "fmt" + "net/http" + "time" + + "github.com/minio/minio-go/v6/pkg/encrypt" +) + +// GetObjectOptions are used to specify additional headers or options +// during GET requests. +type GetObjectOptions struct { + headers map[string]string + ServerSideEncryption encrypt.ServerSide +} + +// StatObjectOptions are used to specify additional headers or options +// during GET info/stat requests. +type StatObjectOptions struct { + GetObjectOptions +} + +// Header returns the http.Header representation of the GET options. +func (o GetObjectOptions) Header() http.Header { + headers := make(http.Header, len(o.headers)) + for k, v := range o.headers { + headers.Set(k, v) + } + if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { + o.ServerSideEncryption.Marshal(headers) + } + return headers +} + +// Set adds a key value pair to the options. The +// key-value pair will be part of the HTTP GET request +// headers. +func (o *GetObjectOptions) Set(key, value string) { + if o.headers == nil { + o.headers = make(map[string]string) + } + o.headers[http.CanonicalHeaderKey(key)] = value +} + +// SetMatchETag - set match etag. +func (o *GetObjectOptions) SetMatchETag(etag string) error { + if etag == "" { + return ErrInvalidArgument("ETag cannot be empty.") + } + o.Set("If-Match", "\""+etag+"\"") + return nil +} + +// SetMatchETagExcept - set match etag except. 
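+//
+// Taken together, these setters express conditional and partial GETs. A
+// minimal, illustrative sketch (client and the ETag value are assumptions;
+// the range helper is defined further below):
+//
+//	opts := minio.GetObjectOptions{}
+//	_ = opts.SetMatchETag("d41d8cd98f00b204e9800998ecf8427e")
+//	_ = opts.SetRange(0, 1023) // "bytes=0-1023", the first 1 KiB
+//	obj, err := client.GetObject("mybucket", "myobject", opts)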
+func (o *GetObjectOptions) SetMatchETagExcept(etag string) error {
+	if etag == "" {
+		return ErrInvalidArgument("ETag cannot be empty.")
+	}
+	o.Set("If-None-Match", "\""+etag+"\"")
+	return nil
+}
+
+// SetUnmodified - set unmodified time since.
+func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error {
+	if modTime.IsZero() {
+		return ErrInvalidArgument("Unmodified since cannot be empty.")
+	}
+	o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
+	return nil
+}
+
+// SetModified - set modified time since.
+func (o *GetObjectOptions) SetModified(modTime time.Time) error {
+	if modTime.IsZero() {
+		return ErrInvalidArgument("Modified since cannot be empty.")
+	}
+	o.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
+	return nil
+}
+
+// SetRange - set the start and end offset of the object to be read.
+// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
+func (o *GetObjectOptions) SetRange(start, end int64) error {
+	switch {
+	case start == 0 && end < 0:
+		// Read last '-end' bytes. `bytes=-N`.
+		o.Set("Range", fmt.Sprintf("bytes=%d", end))
+	case 0 < start && end == 0:
+		// Read everything starting from offset
+		// 'start'. `bytes=N-`.
+		o.Set("Range", fmt.Sprintf("bytes=%d-", start))
+	case 0 <= start && start <= end:
+		// Read everything starting at 'start' till the
+		// 'end'. `bytes=N-M`
+		o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
+	default:
+		// All other cases such as
+		// bytes=-3-
+		// bytes=5-3
+		// bytes=-2-4
+		// bytes=-3-0
+		// bytes=-3--2
+		// are invalid.
+		return ErrInvalidArgument(
+			fmt.Sprintf(
+				"Invalid range specified: start=%d end=%d",
+				start, end))
+	}
+	return nil
+}
diff --git a/vendor/github.com/minio/minio-go/v6/api-get-policy.go b/vendor/github.com/minio/minio-go/v6/api-get-policy.go
new file mode 100644
index 0000000000..bc1d10530a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/api-get-policy.go
@@ -0,0 +1,78 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+
+	"github.com/minio/minio-go/v6/pkg/s3utils"
+)
+
+// GetBucketPolicy - get bucket policy at a given path.
+func (c Client) GetBucketPolicy(bucketName string) (string, error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return "", err
+	}
+	bucketPolicy, err := c.getBucketPolicy(bucketName)
+	if err != nil {
+		errResponse := ToErrorResponse(err)
+		if errResponse.Code == "NoSuchBucketPolicy" {
+			return "", nil
+		}
+		return "", err
+	}
+	return bucketPolicy, nil
+}
+
+// Request server for current bucket policy.
+func (c Client) getBucketPolicy(bucketName string) (string, error) {
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("policy", "")
+
+	// Execute GET on bucket to get the policy.
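+	// A minimal, illustrative sketch of the public wrapper above (client is
+	// an assumption; the result is the raw policy document, "" when none is
+	// set):
+	//
+	//	policy, err := client.GetBucketPolicy("mybucket")
+	//	if err != nil {
+	//		log.Fatalln(err)
+	//	}
+	//	fmt.Println(policy)
+	//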
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + + defer closeResponse(resp) + if err != nil { + return "", err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", httpRespToErrorResponse(resp, bucketName, "") + } + } + + bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + + policy := string(bucketPolicyBuf) + return policy, err +} diff --git a/vendor/github.com/minio/minio-go/v6/api-list.go b/vendor/github.com/minio/minio-go/v6/api-list.go new file mode 100644 index 0000000000..b9c0f5d8d0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/api-list.go @@ -0,0 +1,715 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/v6/pkg/s3utils" +) + +// ListBuckets list all buckets owned by this authenticated user. +// +// This call requires explicit authentication, no anonymous requests are +// allowed for listing buckets. +// +// api := client.New(....) +// for message := range api.ListBuckets() { +// fmt.Println(message) +// } +// +func (c Client) ListBuckets() ([]BucketInfo, error) { + // Execute GET on service. + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Hex: emptySHA256Hex}) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, "", "") + } + } + listAllMyBucketsResult := listAllMyBucketsResult{} + err = xmlDecoder(resp.Body, &listAllMyBucketsResult) + if err != nil { + return nil, err + } + return listAllMyBucketsResult.Buckets.Bucket, nil +} + +/// Bucket Read Operations. + +// ListObjectsV2 lists all objects matching the objectPrefix from +// the specified bucket. If recursion is enabled it would list +// all subdirectories and all its contents. +// +// Your input parameters are just bucketName, objectPrefix, recursive +// and a done channel for pro-actively closing the internal go +// routine. If you enable recursive as 'true' this function will +// return back all the objects in a given bucket name and object +// prefix. +// +// api := client.New(....) +// // Create a done channel. +// doneCh := make(chan struct{}) +// defer close(doneCh) +// // Recursively list all objects in 'mytestbucket' +// recursive := true +// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) { +// fmt.Println(message) +// } +// +func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { + // Allocate new list objects channel. 
+ objectStatCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if recursive { + // If recursive we do not delimit. + delimiter = "" + } + + // Return object owner information by default + fetchOwner := true + + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectInfo) { + defer close(objectStatCh) + // Save continuationToken for next request. + var continuationToken string + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000, "") + if err != nil { + objectStatCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + select { + // Send object content. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-doneCh: + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + select { + // Send object prefixes. + case objectStatCh <- ObjectInfo{ + Key: obj.Prefix, + Size: 0, + }: + // If receives done from the caller, return here. + case <-doneCh: + return + } + } + + // If continuation token present, save it for next request. + if result.NextContinuationToken != "" { + continuationToken = result.NextContinuationToken + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectStatCh) + return objectStatCh +} + +// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?continuation-token - Used to continue iterating over a set of objects +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +// ?start-after - Specifies the key to start after when listing objects in a bucket. +func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) { + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ListBucketV2Result{}, err + } + // Validate object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + return ListBucketV2Result{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Always set list-type in ListObjects V2 + urlValues.Set("list-type", "2") + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", objectPrefix) + + // Set delimiter, delimiter value to be set to empty is okay. 
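+	// Once assembled below, the query yields a request of roughly this shape
+	// (illustrative; path-style addressing assumed, and continuation-token is
+	// appended on later pages):
+	//
+	//	GET /mybucket?delimiter=%2F&fetch-owner=true&list-type=2&max-keys=1000&prefix= HTTP/1.1
+	//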
+ urlValues.Set("delimiter", delimiter) + + // Set continuation token + if continuationToken != "" { + urlValues.Set("continuation-token", continuationToken) + } + + // Fetch owner when listing + if fetchOwner { + urlValues.Set("fetch-owner", "true") + } + + // maxkeys should default to 1000 or less. + if maxkeys == 0 || maxkeys > 1000 { + maxkeys = 1000 + } + // Set max keys. + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + + // Set start-after + if startAfter != "" { + urlValues.Set("start-after", startAfter) + } + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return ListBucketV2Result{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Decode listBuckets XML. + listBucketResult := ListBucketV2Result{} + if err = xmlDecoder(resp.Body, &listBucketResult); err != nil { + return listBucketResult, err + } + + // This is an additional verification check to make + // sure proper responses are received. + if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" { + return listBucketResult, errors.New("Truncated response should have continuation token set") + } + + // Success. + return listBucketResult, nil +} + +// ListObjects - (List Objects) - List some objects or all recursively. +// +// ListObjects lists all objects matching the objectPrefix from +// the specified bucket. If recursion is enabled it would list +// all subdirectories and all its contents. +// +// Your input parameters are just bucketName, objectPrefix, recursive +// and a done channel for pro-actively closing the internal go +// routine. If you enable recursive as 'true' this function will +// return back all the objects in a given bucket name and object +// prefix. +// +// api := client.New(....) +// // Create a done channel. +// doneCh := make(chan struct{}) +// defer close(doneCh) +// // Recurively list all objects in 'mytestbucket' +// recursive := true +// for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) { +// fmt.Println(message) +// } +// +func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { + // Allocate new list objects channel. + objectStatCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if recursive { + // If recursive we do not delimit. + delimiter = "" + } + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectInfo) { + defer close(objectStatCh) + // Save marker for next request. + var marker string + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000) + if err != nil { + objectStatCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. 
+			for _, object := range result.Contents {
+				// Save the marker.
+				marker = object.Key
+				select {
+				// Send object content.
+				case objectStatCh <- object:
+				// If receives done from the caller, return here.
+				case <-doneCh:
+					return
+				}
+			}
+
+			// Send all common prefixes if any.
+			// NOTE: prefixes are only present if the request is delimited.
+			for _, obj := range result.CommonPrefixes {
+				object := ObjectInfo{}
+				object.Key = obj.Prefix
+				object.Size = 0
+				select {
+				// Send object prefixes.
+				case objectStatCh <- object:
+				// If receives done from the caller, return here.
+				case <-doneCh:
+					return
+				}
+			}
+
+			// If next marker present, save it for next request.
+			if result.NextMarker != "" {
+				marker = result.NextMarker
+			}
+
+			// Listing ends when the result is not truncated; return right here.
+			if !result.IsTruncated {
+				return
+			}
+		}
+	}(objectStatCh)
+	return objectStatCh
+}
+
+// listObjectsQuery - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
+// request parameters :-
+// ---------
+// ?marker - Specifies the key to start with when listing objects in a bucket.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-keys - Sets the maximum number of keys returned in the response body.
+func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
+	// Validate bucket name.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return ListBucketResult{}, err
+	}
+	// Validate object prefix.
+	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+		return ListBucketResult{}, err
+	}
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+
+	// Set object prefix, prefix value to be set to empty is okay.
+	urlValues.Set("prefix", objectPrefix)
+
+	// Set delimiter, delimiter value to be set to empty is okay.
+	urlValues.Set("delimiter", delimiter)
+
+	// Set object marker.
+	if objectMarker != "" {
+		urlValues.Set("marker", objectMarker)
+	}
+
+	// maxkeys should default to 1000 or less.
+	if maxkeys == 0 || maxkeys > 1000 {
+		maxkeys = 1000
+	}
+	// Set max keys.
+	urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
+
+	// Execute GET on bucket to list objects.
+	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return ListBucketResult{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	// Decode ListBucketResult XML.
+	listBucketResult := ListBucketResult{}
+	err = xmlDecoder(resp.Body, &listBucketResult)
+	if err != nil {
+		return listBucketResult, err
+	}
+	return listBucketResult, nil
+}
+
+// ListIncompleteUploads - List incompletely uploaded multipart objects.
+//
+// ListIncompleteUploads lists all incomplete objects matching the
+// objectPrefix from the specified bucket. If recursion is enabled
+// it would list all subdirectories and all their contents.
+//
+// Your input parameters are just bucketName, objectPrefix, recursive
+// and a done channel to pro-actively close the internal go routine.
+// If you enable recursive as 'true' this function will return back all
+// the multipart objects in a given bucket name.
+//
+//   api := client.New(....)
+//   // Create a done channel.
+//   doneCh := make(chan struct{})
+//   defer close(doneCh)
+//   // Recursively list all incomplete uploads in 'mytestbucket'
+//   recursive := true
+//   for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive, doneCh) {
+//       fmt.Println(message)
+//   }
+//
+func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
+	// Turn on size aggregation of individual parts.
+	isAggregateSize := true
+	return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh)
+}
+
+// listIncompleteUploads lists all incomplete uploads.
+func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
+	// Allocate channel for multipart uploads.
+	objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
+	// Delimiter is set to "/" by default.
+	delimiter := "/"
+	if recursive {
+		// If recursive do not delimit.
+		delimiter = ""
+	}
+	// Validate bucket name.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		defer close(objectMultipartStatCh)
+		objectMultipartStatCh <- ObjectMultipartInfo{
+			Err: err,
+		}
+		return objectMultipartStatCh
+	}
+	// Validate incoming object prefix.
+	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+		defer close(objectMultipartStatCh)
+		objectMultipartStatCh <- ObjectMultipartInfo{
+			Err: err,
+		}
+		return objectMultipartStatCh
+	}
+	go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
+		defer close(objectMultipartStatCh)
+		// object and upload ID marker for future requests.
+		var objectMarker string
+		var uploadIDMarker string
+		for {
+			// list all multipart uploads.
+			result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000)
+			if err != nil {
+				objectMultipartStatCh <- ObjectMultipartInfo{
+					Err: err,
+				}
+				return
+			}
+			// Save objectMarker and uploadIDMarker for next request.
+			objectMarker = result.NextKeyMarker
+			uploadIDMarker = result.NextUploadIDMarker
+			// Send all multipart uploads.
+			for _, obj := range result.Uploads {
+				// Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
+				if aggregateSize {
+					// Get total multipart size.
+					obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID)
+					if err != nil {
+						objectMultipartStatCh <- ObjectMultipartInfo{
+							Err: err,
+						}
+						continue
+					}
+				}
+				select {
+				// Send individual uploads here.
+				case objectMultipartStatCh <- obj:
+				// If done channel return here.
+				case <-doneCh:
+					return
+				}
+			}
+			// Send all common prefixes if any.
+			// NOTE: prefixes are only present if the request is delimited.
+			for _, obj := range result.CommonPrefixes {
+				object := ObjectMultipartInfo{}
+				object.Key = obj.Prefix
+				object.Size = 0
+				select {
+				// Send delimited prefixes here.
+				case objectMultipartStatCh <- object:
+				// If done channel return here.
+				case <-doneCh:
+					return
+				}
+			}
+			// Listing ends if the result is not truncated; return right here.
+			if !result.IsTruncated {
+				return
+			}
+		}
+	}(objectMultipartStatCh)
+	// return.
+	return objectMultipartStatCh
+}
+
+// listMultipartUploadsQuery - (List Multipart Uploads).
+// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
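+//
+// One page of this listing corresponds roughly to the following request
+// (illustrative; path-style addressing assumed):
+//
+//	GET /mybucket?delimiter=%2F&max-uploads=1000&prefix=&uploads= HTTP/1.1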
+//
+// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
+// request parameters :-
+// ---------
+// ?key-marker - Specifies the multipart upload after which listing should begin.
+// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
+func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
+	// Get resources properly escaped and lined up before using them in http request.
+	urlValues := make(url.Values)
+	// Set uploads.
+	urlValues.Set("uploads", "")
+	// Set object key marker.
+	if keyMarker != "" {
+		urlValues.Set("key-marker", keyMarker)
+	}
+	// Set upload id marker.
+	if uploadIDMarker != "" {
+		urlValues.Set("upload-id-marker", uploadIDMarker)
+	}
+
+	// Set object prefix, prefix value to be set to empty is okay.
+	urlValues.Set("prefix", prefix)
+
+	// Set delimiter, delimiter value to be set to empty is okay.
+	urlValues.Set("delimiter", delimiter)
+
+	// maxUploads should be 1000 or less.
+	if maxUploads == 0 || maxUploads > 1000 {
+		maxUploads = 1000
+	}
+	// Set max-uploads.
+	urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
+
+	// Execute GET on bucketName to list multipart uploads.
+	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return ListMultipartUploadsResult{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	// Decode response body.
+	listMultipartUploadsResult := ListMultipartUploadsResult{}
+	err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
+	if err != nil {
+		return listMultipartUploadsResult, err
+	}
+	return listMultipartUploadsResult, nil
+}
+
+// listObjectParts - lists all object parts, fetching up to 1000 parts per request.
+func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
+	// Part number marker for the next batch of request.
+	var nextPartNumberMarker int
+	partsInfo = make(map[int]ObjectPart)
+	for {
+		// Get list of uploaded parts a maximum of 1000 per request.
+		listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
+		if err != nil {
+			return nil, err
+		}
+		// Append to parts info.
+		for _, part := range listObjPartsResult.ObjectParts {
+			// Trim off the odd double quotes from ETag in the beginning and end.
+			part.ETag = strings.TrimPrefix(part.ETag, "\"")
+			part.ETag = strings.TrimSuffix(part.ETag, "\"")
+			partsInfo[part.PartNumber] = part
+		}
+		// Keep part number marker for the next iteration.
+		nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
+		// Listing ends when the result is not truncated; return right here.
+		if !listObjPartsResult.IsTruncated {
+			break
+		}
+	}
+
+	// Return all the parts.
+	return partsInfo, nil
+}
+
+// findUploadIDs lists all incomplete uploads and finds the uploadIDs of the matching object name.
+func (c Client) findUploadIDs(bucketName, objectName string) ([]string, error) {
+	var uploadIDs []string
+	// Make list incomplete uploads recursive.
+	isRecursive := true
+	// Turn off size aggregation of individual parts, in this request.
+	isAggregateSize := false
+	// Create done channel to cleanup the routine.
+	doneCh := make(chan struct{})
+	defer close(doneCh)
+	// List all incomplete uploads.
+	for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) {
+		if mpUpload.Err != nil {
+			return nil, mpUpload.Err
+		}
+		if objectName == mpUpload.Key {
+			uploadIDs = append(uploadIDs, mpUpload.UploadID)
+		}
+	}
+	// Return all the matching upload ids.
+	return uploadIDs, nil
+}
+
+// getTotalMultipartSize - calculate total uploaded size for a given multipart object.
+func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) {
+	// Iterate over all parts and aggregate the size.
+	partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
+	if err != nil {
+		return 0, err
+	}
+	for _, partInfo := range partsInfo {
+		size += partInfo.Size
+	}
+	return size, nil
+}
+
+// listObjectPartsQuery (List Parts query)
+//     - lists some or all (up to 1000) parts that have been uploaded
+//       for a specific multipart upload
+//
+// You can use the request parameters as selection criteria to return
+// a subset of the parts for a multipart upload, request parameters :-
+// ---------
+// ?part-number-marker - Specifies the part after which listing should
+// begin.
+// ?max-parts - Maximum parts to be listed per request.
+func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
+	// Get resources properly escaped and lined up before using them in http request.
+	urlValues := make(url.Values)
+	// Set part number marker.
+	urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
+	// Set upload id.
+	urlValues.Set("uploadId", uploadID)
+
+	// maxParts should be 1000 or less.
+	if maxParts == 0 || maxParts > 1000 {
+		maxParts = 1000
+	}
+	// Set max parts.
+	urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
+
+	// Execute GET on objectName to get the list of parts.
+	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return ListObjectPartsResult{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+	// Decode list object parts XML.
+	listObjectPartsResult := ListObjectPartsResult{}
+	err = xmlDecoder(resp.Body, &listObjectPartsResult)
+	if err != nil {
+		return listObjectPartsResult, err
+	}
+	return listObjectPartsResult, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v6/api-notification.go b/vendor/github.com/minio/minio-go/v6/api-notification.go
new file mode 100644
index 0000000000..a8d9fc6139
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/api-notification.go
@@ -0,0 +1,221 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/minio/minio-go/v6/pkg/s3utils"
+)
+
+// GetBucketNotification - get bucket notification at a given path.
+func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return BucketNotification{}, err
+	}
+	notification, err := c.getBucketNotification(bucketName)
+	if err != nil {
+		return BucketNotification{}, err
+	}
+	return notification, nil
+}
+
+// Request server for notification rules.
+func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) {
+	urlValues := make(url.Values)
+	urlValues.Set("notification", "")
+
+	// Execute GET on bucket to fetch the notification configuration.
+	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+
+	defer closeResponse(resp)
+	if err != nil {
+		return BucketNotification{}, err
+	}
+	return processBucketNotificationResponse(bucketName, resp)
+}
+
+// processBucketNotificationResponse processes the GetBucketNotification http response from the server.
+func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) {
+	if resp.StatusCode != http.StatusOK {
+		errResponse := httpRespToErrorResponse(resp, bucketName, "")
+		return BucketNotification{}, errResponse
+	}
+	var bucketNotification BucketNotification
+	err := xmlDecoder(resp.Body, &bucketNotification)
+	if err != nil {
+		return BucketNotification{}, err
+	}
+	return bucketNotification, nil
+}
+
+// identity represents the user id; this is a compliance field.
+type identity struct {
+	PrincipalID string `json:"principalId"`
+}
+
+// Notification event bucket metadata.
+type bucketMeta struct {
+	Name          string   `json:"name"`
+	OwnerIdentity identity `json:"ownerIdentity"`
+	ARN           string   `json:"arn"`
+}
+
+// Notification event object metadata.
+type objectMeta struct {
+	Key       string `json:"key"`
+	Size      int64  `json:"size,omitempty"`
+	ETag      string `json:"eTag,omitempty"`
+	VersionID string `json:"versionId,omitempty"`
+	Sequencer string `json:"sequencer"`
+}
+
+// Notification event server specific metadata.
+type eventMeta struct {
+	SchemaVersion   string     `json:"s3SchemaVersion"`
+	ConfigurationID string     `json:"configurationId"`
+	Bucket          bucketMeta `json:"bucket"`
+	Object          objectMeta `json:"object"`
+}
+
+// sourceInfo represents information on the client that
+// triggered the event notification.
+type sourceInfo struct {
+	Host      string `json:"host"`
+	Port      string `json:"port"`
+	UserAgent string `json:"userAgent"`
+}
+
+// NotificationEvent represents an Amazon S3 bucket notification event.
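+//
+// An abridged, illustrative record matching the struct tags below (field
+// values are assumptions, not captured server output):
+//
+//   {
+//     "eventVersion": "2.0",
+//     "eventSource": "minio:s3",
+//     "awsRegion": "",
+//     "eventTime": "2019-01-01T00:00:00Z",
+//     "eventName": "s3:ObjectCreated:Put",
+//     "s3": {"bucket": {"name": "mybucket"}, "object": {"key": "photo.jpg", "size": 1024}}
+//   }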
+type NotificationEvent struct {
+	EventVersion      string            `json:"eventVersion"`
+	EventSource       string            `json:"eventSource"`
+	AwsRegion         string            `json:"awsRegion"`
+	EventTime         string            `json:"eventTime"`
+	EventName         string            `json:"eventName"`
+	UserIdentity      identity          `json:"userIdentity"`
+	RequestParameters map[string]string `json:"requestParameters"`
+	ResponseElements  map[string]string `json:"responseElements"`
+	S3                eventMeta         `json:"s3"`
+	Source            sourceInfo        `json:"source"`
+}
+
+// NotificationInfo - represents the collection of notification events, additionally
+// also reports errors if any while listening on bucket notifications.
+type NotificationInfo struct {
+	Records []NotificationEvent
+	Err     error
+}
+
+// ListenBucketNotification - listen on bucket notifications.
+func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo {
+	notificationInfoCh := make(chan NotificationInfo, 1)
+	// Start a goroutine to stream notifications, reading line by line.
+	go func(notificationInfoCh chan<- NotificationInfo) {
+		defer close(notificationInfoCh)
+
+		// Validate the bucket name.
+		if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+			notificationInfoCh <- NotificationInfo{
+				Err: err,
+			}
+			return
+		}
+
+		// Check the endpoint to verify that listening for bucket notifications is supported.
+		if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) {
+			notificationInfoCh <- NotificationInfo{
+				Err: ErrAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
+			}
+			return
+		}
+
+		// Continuously run and listen on bucket notifications.
+		// Create a done channel to control the retry go routine.
+		retryDoneCh := make(chan struct{}, 1)
+
+		// Indicate to our routine to exit cleanly upon return.
+		defer close(retryDoneCh)
+
+		// Wait on the jitter retry loop.
+		for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
+			urlValues := make(url.Values)
+			urlValues.Set("prefix", prefix)
+			urlValues.Set("suffix", suffix)
+			urlValues["events"] = events
+
+			// Execute GET on bucket to listen for notifications.
+			resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+				bucketName:       bucketName,
+				queryValues:      urlValues,
+				contentSHA256Hex: emptySHA256Hex,
+			})
+			if err != nil {
+				notificationInfoCh <- NotificationInfo{
+					Err: err,
+				}
+				return
+			}
+
+			// Validate http response, upon error return quickly.
+			if resp.StatusCode != http.StatusOK {
+				errResponse := httpRespToErrorResponse(resp, bucketName, "")
+				notificationInfoCh <- NotificationInfo{
+					Err: errResponse,
+				}
+				return
+			}
+
+			// Initialize a new bufio scanner, to read line by line.
+			bio := bufio.NewScanner(resp.Body)
+
+			// Unmarshal each line into a NotificationInfo.
+			for bio.Scan() {
+				var notificationInfo NotificationInfo
+				if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
+					closeResponse(resp)
+					continue
+				}
+				// Send notificationInfo.
+				select {
+				case notificationInfoCh <- notificationInfo:
+				case <-doneCh:
+					closeResponse(resp)
+					return
+				}
+			}
+
+			// Close current connection before looping further.
+			closeResponse(resp)
+		}
+	}(notificationInfoCh)
+
+	// Return the notification info channel, for the caller to start reading from.
+	return notificationInfoCh
+}
diff --git a/vendor/github.com/minio/minio-go/v6/api-presigned.go b/vendor/github.com/minio/minio-go/v6/api-presigned.go
new file mode 100644
index 0000000000..e2d68b0ece
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/api-presigned.go
@@ -0,0 +1,215 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"errors"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/minio/minio-go/v6/pkg/s3signer"
+	"github.com/minio/minio-go/v6/pkg/s3utils"
+)
+
+// presignURL - Returns a presigned URL for an input 'method'.
+// The maximum expiry is 7 days (i.e. 604800 seconds) and the minimum is 1 second.
+func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	// Input validation.
+	if method == "" {
+		return nil, ErrInvalidArgument("method cannot be empty.")
+	}
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return nil, err
+	}
+	if err = isValidExpiry(expires); err != nil {
+		return nil, err
+	}
+
+	// Convert expires into seconds.
+	expireSeconds := int64(expires / time.Second)
+	reqMetadata := requestMetadata{
+		presignURL:  true,
+		bucketName:  bucketName,
+		objectName:  objectName,
+		expires:     expireSeconds,
+		queryValues: reqParams,
+	}
+
+	// Instantiate a new request.
+	// Since expires is set, newRequest will presign the request.
+	var req *http.Request
+	if req, err = c.newRequest(method, reqMetadata); err != nil {
+		return nil, err
+	}
+	return req.URL, nil
+}
+
+// PresignedGetObject - Returns a presigned URL to access an object
+// data without credentials. URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second. Additionally you can override
+// a set of response headers using the query parameters.
+func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+	return c.presignURL("GET", bucketName, objectName, expires, reqParams)
+}
+
+// PresignedHeadObject - Returns a presigned URL to access object
+// metadata without credentials. URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second. Additionally you can override
+// a set of response headers using the query parameters.
+func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+	return c.presignURL("HEAD", bucketName, objectName, expires, reqParams)
+}
+
+// PresignedPutObject - Returns a presigned URL to upload an object
+// without credentials. URL can have a maximum expiry of up to 7 days
+// or a minimum of 1 second.
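+//
+// A minimal usage sketch (bucket, object and expiry here are assumptions):
+//
+//   presignedURL, err := c.PresignedPutObject("mybucket", "myobject", time.Hour)
+//   if err == nil {
+//       // Any HTTP client may now PUT the object data to presignedURL
+//       // without further credentials.
+//       fmt.Println(presignedURL)
+//   }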
+func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+	return c.presignURL("PUT", bucketName, objectName, expires, nil)
+}
+
+// Presign - returns a presigned URL for any http method of your choice
+// along with custom request params. URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second.
+func (c Client) Presign(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	return c.presignURL(method, bucketName, objectName, expires, reqParams)
+}
+
+// PresignedPostPolicy - Returns a POST URL and form data to upload an object.
+func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
+	// Validate input arguments.
+	if p.expiration.IsZero() {
+		return nil, nil, errors.New("Expiration time must be specified")
+	}
+	if _, ok := p.formData["key"]; !ok {
+		return nil, nil, errors.New("object key must be specified")
+	}
+	if _, ok := p.formData["bucket"]; !ok {
+		return nil, nil, errors.New("bucket name must be specified")
+	}
+
+	bucketName := p.formData["bucket"]
+	// Fetch the bucket location.
+	location, err := c.getBucketLocation(bucketName)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName)
+
+	u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get credentials from the configured credentials provider.
+	credValues, err := c.credsProvider.Get()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var (
+		signerType      = credValues.SignerType
+		sessionToken    = credValues.SessionToken
+		accessKeyID     = credValues.AccessKeyID
+		secretAccessKey = credValues.SecretAccessKey
+	)
+
+	if signerType.IsAnonymous() {
+		return nil, nil, ErrInvalidArgument("Presigned operations are not supported for anonymous credentials")
+	}
+
+	// Record the current time.
+	t := time.Now().UTC()
+	// Handle signature version '2' here.
+	if signerType.IsV2() {
+		policyBase64 := p.base64()
+		p.formData["policy"] = policyBase64
+		// For Google endpoint set this value to be 'GoogleAccessId'.
+		if s3utils.IsGoogleEndpoint(*c.endpointURL) {
+			p.formData["GoogleAccessId"] = accessKeyID
+		} else {
+			// For all other endpoints set this value to be 'AWSAccessKeyId'.
+			p.formData["AWSAccessKeyId"] = accessKeyID
+		}
+		// Sign the policy.
+		p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, secretAccessKey)
+		return u, p.formData, nil
+	}
+
+	// Add date policy.
+	if err = p.addNewPolicy(policyCondition{
+		matchType: "eq",
+		condition: "$x-amz-date",
+		value:     t.Format(iso8601DateFormat),
+	}); err != nil {
+		return nil, nil, err
+	}
+
+	// Add algorithm policy.
+	if err = p.addNewPolicy(policyCondition{
+		matchType: "eq",
+		condition: "$x-amz-algorithm",
+		value:     signV4Algorithm,
+	}); err != nil {
+		return nil, nil, err
+	}
+
+	// Add a credential policy.
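+	// The credential scope built below takes the standard SigV4 form
+	// <access-key>/<yyyymmdd>/<region>/s3/aws4_request (illustrative).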
+	credential := s3signer.GetCredential(accessKeyID, location, t)
+	if err = p.addNewPolicy(policyCondition{
+		matchType: "eq",
+		condition: "$x-amz-credential",
+		value:     credential,
+	}); err != nil {
+		return nil, nil, err
+	}
+
+	if sessionToken != "" {
+		if err = p.addNewPolicy(policyCondition{
+			matchType: "eq",
+			condition: "$x-amz-security-token",
+			value:     sessionToken,
+		}); err != nil {
+			return nil, nil, err
+		}
+	}
+
+	// Get base64 encoded policy.
+	policyBase64 := p.base64()
+
+	// Fill in the form data.
+	p.formData["policy"] = policyBase64
+	p.formData["x-amz-algorithm"] = signV4Algorithm
+	p.formData["x-amz-credential"] = credential
+	p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
+	if sessionToken != "" {
+		p.formData["x-amz-security-token"] = sessionToken
+	}
+	p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location)
+	return u, p.formData, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v6/api-put-bucket.go b/vendor/github.com/minio/minio-go/v6/api-put-bucket.go
new file mode 100644
index 0000000000..28613a2291
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/api-put-bucket.go
@@ -0,0 +1,306 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/minio/minio-go/v6/pkg/s3utils"
+)
+
+/// Bucket operations
+
+// MakeBucket creates a new bucket with bucketName.
+//
+// Location is an optional argument, by default all buckets are
+// created in US Standard Region.
+//
+// For Amazon S3, see supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
+// For Google Cloud Storage, see supported regions - https://cloud.google.com/storage/docs/bucket-locations
+func (c Client) MakeBucket(bucketName string, location string) (err error) {
+	defer func() {
+		// Save the location into cache on a successful makeBucket response.
+		if err == nil {
+			c.bucketLocCache.Set(bucketName, location)
+		}
+	}()
+
+	// Validate the input arguments.
+	if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
+		return err
+	}
+
+	// If location is empty, treat it as the default region 'us-east-1'.
+	if location == "" {
+		location = "us-east-1"
+		// For custom region clients, default to the
+		// custom region instead of 'us-east-1'.
+		if c.region != "" {
+			location = c.region
+		}
+	}
+	// PUT bucket request metadata.
+	reqMetadata := requestMetadata{
+		bucketName:     bucketName,
+		bucketLocation: location,
+	}
+
+	// If location is not 'us-east-1' create bucket location config.
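+	// The marshalled payload takes the shape (illustrative):
+	//   <CreateBucketConfiguration><LocationConstraint>eu-west-1</LocationConstraint></CreateBucketConfiguration>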
+	if location != "us-east-1" && location != "" {
+		createBucketConfig := createBucketConfiguration{}
+		createBucketConfig.Location = location
+		var createBucketConfigBytes []byte
+		createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
+		if err != nil {
+			return err
+		}
+		reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes)
+		reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes)
+		reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
+		reqMetadata.contentLength = int64(len(createBucketConfigBytes))
+	}
+
+	// Execute PUT to create a new bucket.
+	resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+
+	// Success.
+	return nil
+}
+
+// SetBucketPolicy sets the access permissions on an existing bucket.
+func (c Client) SetBucketPolicy(bucketName, policy string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// If policy is empty then delete the bucket policy.
+	if policy == "" {
+		return c.removeBucketPolicy(bucketName)
+	}
+
+	// Save the updated policies.
+	return c.putBucketPolicy(bucketName, policy)
+}
+
+// Saves a new bucket policy.
+func (c Client) putBucketPolicy(bucketName, policy string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("policy", "")
+
+	// Content-length is mandatory for put policy request
+	policyReader := strings.NewReader(policy)
+	b, err := ioutil.ReadAll(policyReader)
+	if err != nil {
+		return err
+	}
+
+	reqMetadata := requestMetadata{
+		bucketName:    bucketName,
+		queryValues:   urlValues,
+		contentBody:   policyReader,
+		contentLength: int64(len(b)),
+	}
+
+	// Execute PUT to upload a new bucket policy.
+	resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	return nil
+}
+
+// Removes all policies on a bucket.
+func (c Client) removeBucketPolicy(bucketName string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("policy", "")
+
+	// Execute DELETE on the bucket policy.
+	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// SetBucketLifecycle sets the lifecycle on an existing bucket.
+func (c Client) SetBucketLifecycle(bucketName, lifecycle string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// If lifecycle is empty then delete it.
+	if lifecycle == "" {
+		return c.removeBucketLifecycle(bucketName)
+	}
+
+	// Save the updated lifecycle.
+	return c.putBucketLifecycle(bucketName, lifecycle)
+}
+
+// Saves a new bucket lifecycle.
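+// The lifecycle argument is the raw XML lifecycle configuration document,
+// for example (illustrative):
+//
+//   <LifecycleConfiguration>
+//     <Rule>
+//       <ID>expire-tmp</ID>
+//       <Prefix>tmp/</Prefix>
+//       <Status>Enabled</Status>
+//       <Expiration><Days>7</Days></Expiration>
+//     </Rule>
+//   </LifecycleConfiguration>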
+func (c Client) putBucketLifecycle(bucketName, lifecycle string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("lifecycle", "")
+
+	// Content-length is mandatory for put lifecycle request
+	lifecycleReader := strings.NewReader(lifecycle)
+	b, err := ioutil.ReadAll(lifecycleReader)
+	if err != nil {
+		return err
+	}
+
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentBody:      lifecycleReader,
+		contentLength:    int64(len(b)),
+		contentMD5Base64: sumMD5Base64(b),
+	}
+
+	// Execute PUT to upload a new bucket lifecycle.
+	resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	return nil
+}
+
+// Remove lifecycle from a bucket.
+func (c Client) removeBucketLifecycle(bucketName string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("lifecycle", "")
+
+	// Execute DELETE on the bucket lifecycle.
+	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// SetBucketNotification saves a new bucket notification.
+func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("notification", "")
+
+	notifBytes, err := xml.Marshal(bucketNotification)
+	if err != nil {
+		return err
+	}
+
+	notifBuffer := bytes.NewReader(notifBytes)
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentBody:      notifBuffer,
+		contentLength:    int64(len(notifBytes)),
+		contentMD5Base64: sumMD5Base64(notifBytes),
+		contentSHA256Hex: sum256Hex(notifBytes),
+	}
+
+	// Execute PUT to upload a new bucket notification.
+	resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	return nil
+}
+
+// RemoveAllBucketNotification - Removes all bucket notifications, clearing any previously specified config.
+func (c Client) RemoveAllBucketNotification(bucketName string) error {
+	return c.SetBucketNotification(bucketName, BucketNotification{})
+}
diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object-common.go b/vendor/github.com/minio/minio-go/v6/api-put-object-common.go
new file mode 100644
index 0000000000..a786d2a8e0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/api-put-object-common.go
@@ -0,0 +1,138 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"io"
+	"math"
+	"os"
+
+	"github.com/minio/minio-go/v6/pkg/s3utils"
+)
+
+// Verify if reader is *minio.Object
+func isObject(reader io.Reader) (ok bool) {
+	_, ok = reader.(*Object)
+	return
+}
+
+// Verify if reader is a generic ReaderAt
+func isReadAt(reader io.Reader) (ok bool) {
+	var v *os.File
+	v, ok = reader.(*os.File)
+	if ok {
+		// Stdin, Stdout and Stderr all have *os.File type,
+		// which happens to also be io.ReaderAt compatible;
+		// we need special conditions for them to
+		// be ignored by this function.
+		for _, f := range []string{
+			"/dev/stdin",
+			"/dev/stdout",
+			"/dev/stderr",
+		} {
+			if f == v.Name() {
+				ok = false
+				break
+			}
+		}
+	} else {
+		_, ok = reader.(io.ReaderAt)
+	}
+	return
+}
+
+// optimalPartInfo - calculate the optimal part info for a given
+// object size.
+//
+// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
+// object storage it will have the following parameters as constants.
+//
+//	maxPartsCount - 10000
+//	minPartSize - 128MiB
+//	maxMultipartPutObjectSize - 5TiB
+//
+func optimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
+	// If object size is '-1', set it to the 5TiB maximum.
+	if objectSize == -1 {
+		objectSize = maxMultipartPutObjectSize
+	}
+
+	// Fail if object size is larger than the supported maximum.
+	if objectSize > maxMultipartPutObjectSize {
+		err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
+		return
+	}
+
+	var partSizeFlt float64
+	if configuredPartSize > 0 {
+		if int64(configuredPartSize) > objectSize {
+			err = ErrEntityTooLarge(int64(configuredPartSize), objectSize, "", "")
+			return
+		}
+
+		if objectSize > (int64(configuredPartSize) * maxPartsCount) {
+			err = ErrInvalidArgument("Part size * max_parts(10000) is lesser than input objectSize.")
+			return
+		}
+
+		if configuredPartSize < absMinPartSize {
+			err = ErrInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.")
+			return
+		}
+
+		if configuredPartSize > maxPartSize {
+			err = ErrInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.")
+			return
+		}
+		partSizeFlt = float64(configuredPartSize)
+	} else {
+		configuredPartSize = minPartSize
+		// Use floats for part size for all calculations to avoid
+		// overflows during float64 to int64 conversions.
+		partSizeFlt = float64(objectSize / maxPartsCount)
+		partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize)
+	}
+
+	// Total parts count.
+	totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
+	// Part size.
+	partSize = int64(partSizeFlt)
+	// Last part size.
+	lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
+	return totalPartsCount, partSize, lastPartSize, nil
+}
+
+// newUploadID - initiate a new multipart upload and fetch its upload id.
+func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return "", err + } + + // Initiate multipart upload for an object. + initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts) + if err != nil { + return "", err + } + return initMultipartUploadResult.UploadID, nil +} diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object-context.go b/vendor/github.com/minio/minio-go/v6/api-put-object-context.go new file mode 100644 index 0000000000..415a7878fd --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/api-put-object-context.go @@ -0,0 +1,33 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" +) + +// PutObjectWithContext - Identical to PutObject call, but accepts context to facilitate request cancellation. +func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, + opts PutObjectOptions) (n int64, err error) { + err = opts.validate() + if err != nil { + return 0, err + } + return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) +} diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object-copy.go b/vendor/github.com/minio/minio-go/v6/api-put-object-copy.go new file mode 100644 index 0000000000..19e58add89 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/api-put-object-copy.go @@ -0,0 +1,83 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017, 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "io/ioutil" + "net/http" + + "github.com/minio/minio-go/v6/pkg/encrypt" +) + +// CopyObject - copy a source object into a new object +func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error { + return c.CopyObjectWithProgress(dst, src, nil) +} + +// CopyObjectWithProgress - copy a source object into a new object, optionally takes +// progress bar input to notify current progress. 
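+//
+// A minimal usage sketch (bucket and object names here are assumptions;
+// passing a nil progress reader degenerates to a plain copy):
+//
+//   src := minio.NewSourceInfo("srcbucket", "srcobject", nil)
+//   dst, err := minio.NewDestinationInfo("dstbucket", "dstobject", nil, nil)
+//   if err == nil {
+//       err = c.CopyObjectWithProgress(dst, src, nil)
+//   }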
+func (c Client) CopyObjectWithProgress(dst DestinationInfo, src SourceInfo, progress io.Reader) error {
+	header := make(http.Header)
+	for k, v := range src.Headers {
+		header[k] = v
+	}
+
+	var err error
+	var size int64
+	// If a progress bar is specified, the size is needed as well,
+	// so initiate a StatObject request.
+	if progress != nil {
+		size, _, _, err = src.getProps(c)
+		if err != nil {
+			return err
+		}
+	}
+
+	if src.encryption != nil {
+		encrypt.SSECopy(src.encryption).Marshal(header)
+	}
+
+	if dst.encryption != nil {
+		dst.encryption.Marshal(header)
+	}
+	for k, v := range dst.getUserMetaHeadersMap(true) {
+		header.Set(k, v)
+	}
+
+	resp, err := c.executeMethod(context.Background(), "PUT", requestMetadata{
+		bucketName:   dst.bucket,
+		objectName:   dst.object,
+		customHeader: header,
+	})
+	if err != nil {
+		return err
+	}
+	defer closeResponse(resp)
+
+	if resp.StatusCode != http.StatusOK {
+		return httpRespToErrorResponse(resp, dst.bucket, dst.object)
+	}
+
+	// Update the progress properly after a successful copy.
+	if progress != nil {
+		io.CopyN(ioutil.Discard, progress, size)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/v6/api-put-object-file-context.go
new file mode 100644
index 0000000000..fb22c0d64b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/api-put-object-file-context.go
@@ -0,0 +1,64 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"mime"
+	"os"
+	"path/filepath"
+
+	"github.com/minio/minio-go/v6/pkg/s3utils"
+)
+
+// FPutObjectWithContext - Create an object in a bucket, with contents from file at filePath. Allows request cancellation.
+func (c Client) FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return 0, err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return 0, err
+	}
+
+	// Open the referenced file.
+	fileReader, err := os.Open(filePath)
+	// If any error, fail quickly here.
+	if err != nil {
+		return 0, err
+	}
+	defer fileReader.Close()
+
+	// Save the file stat.
+	fileStat, err := fileReader.Stat()
+	if err != nil {
+		return 0, err
+	}
+
+	// Save the file size.
+	fileSize := fileStat.Size()
+
+	// Set contentType based on the filepath extension if not given, or default
+	// to "application/octet-stream" if the extension has no associated type.
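+	// For example, mime.TypeByExtension(".png") yields "image/png"; unknown
+	// extensions fall through to the "application/octet-stream" default below.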
+	if opts.ContentType == "" {
+		if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" {
+			opts.ContentType = "application/octet-stream"
+		}
+	}
+	return c.PutObjectWithContext(ctx, bucketName, objectName, fileReader, fileSize, opts)
+}
diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object-file.go b/vendor/github.com/minio/minio-go/v6/api-put-object-file.go
new file mode 100644
index 0000000000..23df6cd1c9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/api-put-object-file.go
@@ -0,0 +1,27 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+)
+
+// FPutObject - Create an object in a bucket, with contents from file at filePath.
+func (c Client) FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) {
+	return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, opts)
+}
diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v6/api-put-object-multipart.go
new file mode 100644
index 0000000000..ab284f9869
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/api-put-object-multipart.go
@@ -0,0 +1,372 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"runtime/debug"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/minio/minio-go/v6/pkg/encrypt"
+	"github.com/minio/minio-go/v6/pkg/s3utils"
+)
+
+func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
+	opts PutObjectOptions) (n int64, err error) {
+	n, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
+	if err != nil {
+		errResp := ToErrorResponse(err)
+		// If multipart functionality is not available, fall back
+		// to a single PutObject operation.
+		if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+			// Verify if size of reader is greater than '5GiB'.
+			if size > maxSinglePutObjectSize {
+				return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+			}
+			// Fall back to uploading as a single PutObject operation.
+			return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
+		}
+	}
+	return n, err
+}
+
+func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) {
+	// Input validation.
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return 0, err
+	}
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return 0, err
+	}
+
+	// Total data read and written to the server; should be equal to
+	// 'size' at the end of the call.
+	var totalUploadedSize int64
+
+	// Complete multipart upload.
+	var complMultipartUpload completeMultipartUpload
+
+	// Calculate the optimal parts info for a given size.
+	totalPartsCount, partSize, _, err := optimalPartInfo(-1, opts.PartSize)
+	if err != nil {
+		return 0, err
+	}
+
+	// Initiate a new multipart upload.
+	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+	if err != nil {
+		return 0, err
+	}
+
+	defer func() {
+		if err != nil {
+			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+		}
+	}()
+
+	// Part number always starts with '1'.
+	partNumber := 1
+
+	// Initialize parts uploaded map.
+	partsInfo := make(map[int]ObjectPart)
+
+	// Create a buffer.
+	buf := make([]byte, partSize)
+	defer debug.FreeOSMemory()
+
+	for partNumber <= totalPartsCount {
+		// Choose the hash algorithms to be calculated; sha256 is avoided
+		// with non-v4 signature requests or HTTPS connections.
+		hashAlgos, hashSums := c.hashMaterials()
+
+		length, rErr := io.ReadFull(reader, buf)
+		if rErr == io.EOF {
+			break
+		}
+		if rErr != nil && rErr != io.ErrUnexpectedEOF {
+			return 0, rErr
+		}
+
+		// Calculate hash sums of the part data.
+		for k, v := range hashAlgos {
+			v.Write(buf[:length])
+			hashSums[k] = v.Sum(nil)
+		}
+
+		// Update progress reader appropriately to the latest offset
+		// as we read from the source.
+		rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
+
+		// Checksums.
+		var (
+			md5Base64 string
+			sha256Hex string
+		)
+		if hashSums["md5"] != nil {
+			md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
+		}
+		if hashSums["sha256"] != nil {
+			sha256Hex = hex.EncodeToString(hashSums["sha256"])
+		}
+
+		// Proceed to upload the part.
+		var objPart ObjectPart
+		objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
+			md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption)
+		if err != nil {
+			return totalUploadedSize, err
+		}
+
+		// Save successfully uploaded part metadata.
+		partsInfo[partNumber] = objPart
+
+		// Save successfully uploaded size.
+		totalUploadedSize += int64(length)
+
+		// Increment part number.
+		partNumber++
+
+		// For unknown size, on read EOF we break away;
+		// we do not have to upload up to totalPartsCount.
+		if rErr == io.EOF {
+			break
+		}
+	}
+
+	// Loop over total uploaded parts to save them in
+	// Parts array before completing the multipart request.
+	for i := 1; i < partNumber; i++ {
+		part, ok := partsInfo[i]
+		if !ok {
+			return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+		}
+		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+			ETag:       part.ETag,
+			PartNumber: part.PartNumber,
+		})
+	}
+
+	// Sort all completed parts.
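+	// CompleteMultipartUpload requires the parts list in ascending
+	// part-number order, hence the sort before completing the upload.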
+ sort.Sort(completedParts(complMultipartUpload.Parts)) + if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} + +// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. +func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return initiateMultipartUploadResult{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return initiateMultipartUploadResult{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploads", "") + + // Set ContentType header. + customHeader := opts.Header() + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: customHeader, + } + + // Execute POST on an objectName to initiate multipart upload. + resp, err := c.executeMethod(ctx, "POST", reqMetadata) + defer closeResponse(resp) + if err != nil { + return initiateMultipartUploadResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode xml for new multipart upload. + initiateMultipartUploadResult := initiateMultipartUploadResult{} + err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) + if err != nil { + return initiateMultipartUploadResult, err + } + return initiateMultipartUploadResult, nil +} + +// uploadPart - Uploads a part in a multipart upload. +func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, + partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectPart{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectPart{}, err + } + if size > maxPartSize { + return ObjectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName) + } + if size <= -1 { + return ObjectPart{}, ErrEntityTooSmall(size, bucketName, objectName) + } + if partNumber <= 0 { + return ObjectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.") + } + if uploadID == "" { + return ObjectPart{}, ErrInvalidArgument("UploadID cannot be empty.") + } + + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number. + urlValues.Set("partNumber", strconv.Itoa(partNumber)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + + // Set encryption headers, if any. + customHeader := make(http.Header) + // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html + // Server-side encryption is supported by the S3 Multipart Upload actions. + // Unless you are using a customer-provided encryption key, you don't need + // to specify the encryption parameters in each UploadPart request. 
+	if sse != nil && sse.Type() == encrypt.SSEC {
+		sse.Marshal(customHeader)
+	}
+
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		customHeader:     customHeader,
+		contentBody:      reader,
+		contentLength:    size,
+		contentMD5Base64: md5Base64,
+		contentSHA256Hex: sha256Hex,
+	}
+
+	// Execute PUT on each part.
+	resp, err := c.executeMethod(ctx, "PUT", reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return ObjectPart{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+	// Once successfully uploaded, return the completed part.
+	objPart := ObjectPart{}
+	objPart.Size = size
+	objPart.PartNumber = partNumber
+	// Trim off the odd double quotes from ETag at the beginning and end.
+	objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+	objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"")
+	return objPart, nil
+}
+
+// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
+func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
+	complete completeMultipartUpload) (completeMultipartUploadResult, error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return completeMultipartUploadResult{}, err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return completeMultipartUploadResult{}, err
+	}
+
+	// Initialize url queries.
+	urlValues := make(url.Values)
+	urlValues.Set("uploadId", uploadID)
+	// Marshal complete multipart body.
+	completeMultipartUploadBytes, err := xml.Marshal(complete)
+	if err != nil {
+		return completeMultipartUploadResult{}, err
+	}
+
+	// Instantiate the complete multipart request body buffer.
+	completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentBody:      completeMultipartUploadBuffer,
+		contentLength:    int64(len(completeMultipartUploadBytes)),
+		contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
+	}
+
+	// Execute POST to complete multipart upload for an objectName.
+	resp, err := c.executeMethod(ctx, "POST", reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return completeMultipartUploadResult{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+
+	// Read resp.Body into a []byte to parse for an Error response inside the body.
+	var b []byte
+	b, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return completeMultipartUploadResult{}, err
+	}
+	// Decode completed multipart upload response on success.
+	completeMultipartUploadResult := completeMultipartUploadResult{}
+	err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
+	if err != nil {
+		// xml parsing failure due to the presence of an ill-formed xml fragment.
+		return completeMultipartUploadResult, err
+	} else if completeMultipartUploadResult.Bucket == "" {
+		// xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied.
+		// In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
+		// of the members.
+
+		// Decode completed multipart upload response on failure.
+		completeMultipartUploadErr := ErrorResponse{}
+		err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
+		if err != nil {
+			// xml parsing failure due to the presence of an ill-formed xml fragment.
+			return completeMultipartUploadResult, err
+		}
+		return completeMultipartUploadResult, completeMultipartUploadErr
+	}
+	return completeMultipartUploadResult, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v6/api-put-object-streaming.go
new file mode 100644
index 0000000000..0d3f2455a7
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/api-put-object-streaming.go
@@ -0,0 +1,417 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"sort"
+	"strings"
+
+	"github.com/minio/minio-go/v6/pkg/s3utils"
+)
+
+// putObjectMultipartStream - upload a large object using
+// multipart upload and streaming signature for signing payload.
+// Comprehensive put object operation involving multipart uploads.
+//
+// Following code handles these types of readers.
+//
+//   - *minio.Object
+//   - Any reader which has a method 'ReadAt()'
+//
+func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
+	reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
+
+	if !isObject(reader) && isReadAt(reader) {
+		// If the reader implements ReadAt and is not a *minio.Object, use the parallel uploader.
+		n, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
+	} else {
+		n, err = c.putObjectMultipartStreamNoChecksum(ctx, bucketName, objectName, reader, size, opts)
+	}
+	if err != nil {
+		errResp := ToErrorResponse(err)
+		// If multipart functionality is not available, fall back
+		// to a single PutObject operation.
+		if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+			// Verify if size of reader is greater than '5GiB'.
+			if size > maxSinglePutObjectSize {
+				return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+			}
+			// Fall back to uploading as a single PutObject operation.
+			return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
+		}
+	}
+	return n, err
+}
+
+// uploadedPartRes - the response received from a part upload.
+type uploadedPartRes struct {
+	Error   error // Any error encountered while uploading the part.
+	PartNum int   // Number of the part uploaded.
+	Size    int64 // Size of the part uploaded.
+	Part    *ObjectPart
+}
+
+type uploadPartReq struct {
+	PartNum int         // Number of the part to upload.
+	Part    *ObjectPart // Part info, filled in after a successful upload.
+}
+
+// putObjectMultipartStreamFromReadAt - Uploads files bigger than 128MiB.
+// Supports all readers that implement the io.ReaderAt interface
+// (ReadAt method).
+//
+// NOTE: This function is meant to be used for all readers which
+// implement io.ReaderAt, which allows us to resume multipart
+// uploads by reading at an offset, avoiding a re-read of data
+// that was already uploaded. Internally this function uses
+// temporary files for staging all the data; these temporary files are
+// cleaned up automatically when the caller, i.e. the http client, closes the
+// stream after uploading all the contents successfully.
+func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
+	reader io.ReaderAt, size int64, opts PutObjectOptions) (n int64, err error) {
+	// Input validation.
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return 0, err
+	}
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return 0, err
+	}
+
+	// Calculate the optimal parts info for a given size.
+	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size, opts.PartSize)
+	if err != nil {
+		return 0, err
+	}
+
+	// Initiate a new multipart upload.
+	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+	if err != nil {
+		return 0, err
+	}
+
+	// Abort the multipart upload in progress if the function
+	// returns any error; since we do not resume,
+	// we should purge the parts which have been uploaded
+	// to relinquish storage space.
+	defer func() {
+		if err != nil {
+			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+		}
+	}()
+
+	// Total data read and written to the server; should be equal to 'size' at the end of the call.
+	var totalUploadedSize int64
+
+	// Complete multipart upload.
+	var complMultipartUpload completeMultipartUpload
+
+	// Declare a channel that sends the next part number to be uploaded.
+	// Buffered to 10000 because that's the maximum number of parts allowed
+	// by S3.
+	uploadPartsCh := make(chan uploadPartReq, 10000)
+
+	// Declare a channel that sends back the response of a part upload.
+	// Buffered to 10000 because that's the maximum number of parts allowed
+	// by S3.
+	uploadedPartsCh := make(chan uploadedPartRes, 10000)
+
+	// Used for readability, lastPartNumber is always totalPartsCount.
+	lastPartNumber := totalPartsCount
+
+	// Send each part number to the channel to be processed.
+	for p := 1; p <= totalPartsCount; p++ {
+		uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
+	}
+	close(uploadPartsCh)
+	// Receive each part number from the channel, allowing opts.getNumThreads() parallel uploads.
+	for w := 1; w <= opts.getNumThreads(); w++ {
+		go func(partSize int64) {
+			// Each worker will draw from the part channel and upload in parallel.
+			for uploadReq := range uploadPartsCh {
+
+				// If partNumber was not uploaded we calculate the missing
+				// part offset and size. For all other part numbers we
+				// calculate the offset based on multiples of partSize.
+				readOffset := int64(uploadReq.PartNum-1) * partSize
+
+				// As a special case if partNumber is lastPartNumber, we
+				// calculate the offset based on the last part size.
+				if uploadReq.PartNum == lastPartNumber {
+					readOffset = (size - lastPartSize)
+					partSize = lastPartSize
+				}
+
+				// Get a section reader on a particular offset.
+				sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
+
+				// Proceed to upload the part.
+ var objPart ObjectPart + objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, + sectionReader, uploadReq.PartNum, + "", "", partSize, opts.ServerSideEncryption) + if err != nil { + uploadedPartsCh <- uploadedPartRes{ + Size: 0, + Error: err, + } + // Exit the goroutine. + return + } + + // Save successfully uploaded part metadata. + uploadReq.Part = &objPart + + // Send successful part info through the channel. + uploadedPartsCh <- uploadedPartRes{ + Size: objPart.Size, + PartNum: uploadReq.PartNum, + Part: uploadReq.Part, + Error: nil, + } + } + }(partSize) + } + + // Gather the responses as they occur and update any + // progress bar. + for u := 1; u <= totalPartsCount; u++ { + uploadRes := <-uploadedPartsCh + if uploadRes.Error != nil { + return totalUploadedSize, uploadRes.Error + } + // Retrieve each uploaded part and store it to be completed. + // part, ok := partsInfo[uploadRes.PartNum] + part := uploadRes.Part + if part == nil { + return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum)) + } + // Update the totalUploadedSize. + totalUploadedSize += uploadRes.Size + // Store the parts to be completed in order. + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + // Verify if we uploaded all the data. + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} + +func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketName, objectName string, + reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return 0, err + } + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return 0, err + } + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size, opts.PartSize) + if err != nil { + return 0, err + } + // Initiates a new multipart request + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return 0, err + } + + // Aborts the multipart upload if the function returns + // any error, since we do not resume we should purge + // the parts which have been uploaded to relinquish + // storage space. + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Part number always starts with '1'. + var partNumber int + for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { + // Update progress reader appropriately to the latest offset + // as we read from the source. + hookReader := newHook(reader, opts.Progress) + + // Proceed to upload the part. 
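+		// The final part may be smaller than the nominal part size, so
+		// shrink partSize before issuing the last upload.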
+ if partNumber == totalPartsCount { + partSize = lastPartSize + } + var objPart ObjectPart + objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, + io.LimitReader(hookReader, partSize), + partNumber, "", "", partSize, opts.ServerSideEncryption) + if err != nil { + return totalUploadedSize, err + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += partSize + } + + // Verify if we uploaded all the data. + if size > 0 { + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + } + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} + +// putObjectNoChecksum special function used Google Cloud Storage. This special function +// is used for Google Cloud Storage since Google's multipart API is not S3 compatible. +func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return 0, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return 0, err + } + + // Size -1 is only supported on Google Cloud Storage, we error + // out in all other situations. + if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) { + return 0, ErrEntityTooSmall(size, bucketName, objectName) + } + if size > 0 { + if isReadAt(reader) && !isObject(reader) { + seeker, _ := reader.(io.Seeker) + offset, err := seeker.Seek(0, io.SeekCurrent) + if err != nil { + return 0, ErrInvalidArgument(err.Error()) + } + reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) + } + } + + // Update progress reader appropriately to the latest offset as we + // read from the source. + readSeeker := newHook(reader, opts.Progress) + + // This function does not calculate sha256 and md5sum for payload. + // Execute put object. + st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, "", "", size, opts) + if err != nil { + return 0, err + } + if st.Size != size { + return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) + } + return size, nil +} + +// putObjectDo - executes the put object http operation. +// NOTE: You must have WRITE permissions on a bucket to add an object to it. +func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (ObjectInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectInfo{}, err + } + // Set headers. 
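+	// Header() materializes the PutObjectOptions into HTTP headers:
+	// Content-Type, Cache-Control, SSE headers, X-Amz-Meta-* user
+	// metadata, and so on.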
+ customHeader := opts.Header() + + // Populate request metadata. + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeader, + contentBody: reader, + contentLength: size, + contentMD5Base64: md5Base64, + contentSHA256Hex: sha256Hex, + } + + // Execute PUT an objectName. + resp, err := c.executeMethod(ctx, "PUT", reqMetadata) + defer closeResponse(resp) + if err != nil { + return ObjectInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + var objInfo ObjectInfo + // Trim off the odd double quotes from ETag in the beginning and end. + objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"") + // A success here means data was written to server successfully. + objInfo.Size = size + + // Return here. + return objInfo, nil +} diff --git a/vendor/github.com/minio/minio-go/v6/api-put-object.go b/vendor/github.com/minio/minio-go/v6/api-put-object.go new file mode 100644 index 0000000000..8f33fca6cb --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/api-put-object.go @@ -0,0 +1,274 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "runtime/debug" + "sort" + + "github.com/minio/minio-go/v6/pkg/encrypt" + "github.com/minio/minio-go/v6/pkg/s3utils" + "golang.org/x/net/http/httpguts" +) + +// PutObjectOptions represents options specified by user for PutObject call +type PutObjectOptions struct { + UserMetadata map[string]string + Progress io.Reader + ContentType string + ContentEncoding string + ContentDisposition string + ContentLanguage string + CacheControl string + ServerSideEncryption encrypt.ServerSide + NumThreads uint + StorageClass string + WebsiteRedirectLocation string + PartSize uint64 +} + +// getNumThreads - gets the number of threads to be used in the multipart +// put object operation +func (opts PutObjectOptions) getNumThreads() (numThreads int) { + if opts.NumThreads > 0 { + numThreads = int(opts.NumThreads) + } else { + numThreads = totalWorkers + } + return +} + +// Header - constructs the headers from metadata entered by user in +// PutObjectOptions struct +func (opts PutObjectOptions) Header() (header http.Header) { + header = make(http.Header) + + if opts.ContentType != "" { + header["Content-Type"] = []string{opts.ContentType} + } else { + header["Content-Type"] = []string{"application/octet-stream"} + } + if opts.ContentEncoding != "" { + header["Content-Encoding"] = []string{opts.ContentEncoding} + } + if opts.ContentDisposition != "" { + header["Content-Disposition"] = []string{opts.ContentDisposition} + } + if opts.ContentLanguage != "" { + header["Content-Language"] = []string{opts.ContentLanguage} + } + if opts.CacheControl != "" { + header["Cache-Control"] = []string{opts.CacheControl} + } + if opts.ServerSideEncryption != nil { + opts.ServerSideEncryption.Marshal(header) + } + if opts.StorageClass != "" { + header[amzStorageClass] = []string{opts.StorageClass} + } + if opts.WebsiteRedirectLocation != "" { + header[amzWebsiteRedirectLocation] = []string{opts.WebsiteRedirectLocation} + } + for k, v := range opts.UserMetadata { + if !isAmzHeader(k) && !isStandardHeader(k) && !isStorageClassHeader(k) { + header["X-Amz-Meta-"+k] = []string{v} + } else { + header[k] = []string{v} + } + } + return +} + +// validate() checks if the UserMetadata map has standard headers or and raises an error if so. +func (opts PutObjectOptions) validate() (err error) { + for k, v := range opts.UserMetadata { + if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) { + return ErrInvalidArgument(k + " unsupported user defined metadata name") + } + if !httpguts.ValidHeaderFieldValue(v) { + return ErrInvalidArgument(v + " unsupported user defined metadata value") + } + } + return nil +} + +// completedParts is a collection of parts sortable by their part numbers. +// used for sorting the uploaded parts before completing the multipart request. +type completedParts []CompletePart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } + +// PutObject creates an object in a bucket. +// +// You must have WRITE permissions on a bucket to create an object. +// +// - For size smaller than 128MiB PutObject automatically does a +// single atomic Put operation. +// - For size larger than 128MiB PutObject automatically does a +// multipart Put operation. 
+// - For size input as -1 PutObject does a multipart Put operation +// until input stream reaches EOF. Maximum object size that can +// be uploaded through this operation will be 5TiB. +func (c Client) PutObject(bucketName, objectName string, reader io.Reader, objectSize int64, + opts PutObjectOptions) (n int64, err error) { + return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, objectSize, opts) +} + +func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { + // Check for largest object size allowed. + if size > int64(maxMultipartPutObjectSize) { + return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) + } + + // NOTE: Streaming signature is not supported by GCS. + if s3utils.IsGoogleEndpoint(*c.endpointURL) { + // Do not compute MD5 for Google Cloud Storage. + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) + } + + partSize := opts.PartSize + if opts.PartSize == 0 { + partSize = minPartSize + } + + if c.overrideSignerType.IsV2() { + if size >= 0 && size < int64(partSize) { + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) + } + return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) + } + if size < 0 { + return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) + } + + if size < int64(partSize) { + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) + } + + // For all sizes greater than 128MiB do multipart. + return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) +} + +func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return 0, err + } + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return 0, err + } + + // Total data read and written to server. should be equal to + // 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, _, err := optimalPartInfo(-1, opts.PartSize) + if err != nil { + return 0, err + } + // Initiate a new multipart upload. + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return 0, err + } + + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Part number always starts with '1'. + partNumber := 1 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Create a buffer. + buf := make([]byte, partSize) + defer debug.FreeOSMemory() + + for partNumber <= totalPartsCount { + length, rErr := io.ReadFull(reader, buf) + if rErr == io.EOF && partNumber > 1 { + break + } + if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { + return 0, rErr + } + // Update progress reader appropriately to the latest offset + // as we read from the source. + rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) + + // Proceed to upload the part. 
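+		// Only the first 'length' bytes of the shared buffer are valid
+		// for this part; the buffer itself is reused on every iteration.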
+ var objPart ObjectPart + objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, + "", "", int64(length), opts.ServerSideEncryption) + if err != nil { + return totalUploadedSize, err + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += int64(length) + + // Increment part number. + partNumber++ + + // For unknown size, Read EOF we break away. + // We do not have to upload till totalPartsCount. + if rErr == io.EOF { + break + } + } + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} diff --git a/vendor/github.com/minio/minio-go/v6/api-remove.go b/vendor/github.com/minio/minio-go/v6/api-remove.go new file mode 100644 index 0000000000..e919be17d1 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/api-remove.go @@ -0,0 +1,303 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "io" + "net/http" + "net/url" + + "github.com/minio/minio-go/v6/pkg/s3utils" +) + +// RemoveBucket deletes the bucket name. +// +// All objects (including all object versions and delete markers). +// in the bucket must be deleted before successfully attempting this request. +func (c Client) RemoveBucket(bucketName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + // Execute DELETE on bucket. + resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Remove the location from cache on a successful delete. + c.bucketLocCache.Delete(bucketName) + + return nil +} + +// RemoveObject remove an object from a bucket. +func (c Client) RemoveObject(bucketName, objectName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + // Execute DELETE on objectName. 
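+	// This is a single-object delete; batched deletes go through
+	// RemoveObjects further below.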
+	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		// if some unexpected error happened and max retry is reached, we want to let client know
+		if resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+
+	// DeleteObject always responds with http '204' even for
+	// objects which do not exist. So no need to handle them
+	// specifically.
+	return nil
+}
+
+// RemoveObjectError - container of Multi Delete S3 API error
+type RemoveObjectError struct {
+	ObjectName string
+	Err        error
+}
+
+// generateRemoveMultiObjectsRequest - generate the XML request for a remove multi objects request
+func generateRemoveMultiObjectsRequest(objects []string) []byte {
+	rmObjects := []deleteObject{}
+	for _, obj := range objects {
+		rmObjects = append(rmObjects, deleteObject{Key: obj})
+	}
+	xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true})
+	return xmlBytes
+}
+
+// processRemoveMultiObjectsResponse - parse the remove multi objects web service
+// and return the success/failure result status for each object
+func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) {
+	// Parse multi delete XML response
+	rmResult := &deleteMultiObjectsResult{}
+	err := xmlDecoder(body, rmResult)
+	if err != nil {
+		errorCh <- RemoveObjectError{ObjectName: "", Err: err}
+		return
+	}
+
+	// Report each deletion that returned an error.
+	for _, obj := range rmResult.UnDeletedObjects {
+		errorCh <- RemoveObjectError{
+			ObjectName: obj.Key,
+			Err: ErrorResponse{
+				Code:    obj.Code,
+				Message: obj.Message,
+			},
+		}
+	}
+}
+
+// RemoveObjectsWithContext - Identical to RemoveObjects call, but accepts context to facilitate request cancellation.
+func (c Client) RemoveObjectsWithContext(ctx context.Context, bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
+	errorCh := make(chan RemoveObjectError, 1)
+
+	// Validate the bucket name.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		defer close(errorCh)
+		errorCh <- RemoveObjectError{
+			Err: err,
+		}
+		return errorCh
+	}
+	// Validate objects channel to be properly allocated.
+	if objectsCh == nil {
+		defer close(errorCh)
+		errorCh <- RemoveObjectError{
+			Err: ErrInvalidArgument("Objects channel cannot be nil"),
+		}
+		return errorCh
+	}
+
+	// Generate and call MultiDelete S3 requests based on entries received from objectsCh
+	go func(errorCh chan<- RemoveObjectError) {
+		maxEntries := 1000
+		finish := false
+		urlValues := make(url.Values)
+		urlValues.Set("delete", "")
+
+		// Close error channel when Multi delete finishes.
+		defer close(errorCh)
+
+		// Loop over entries by 1000 and call MultiDelete requests
+		for {
+			if finish {
+				break
+			}
+			count := 0
+			var batch []string
+
+			// Try to gather 1000 entries
+			for object := range objectsCh {
+				batch = append(batch, object)
+				if count++; count >= maxEntries {
+					break
+				}
+			}
+			if count == 0 {
+				// Multi Objects Delete API doesn't accept empty object list, quit immediately
+				break
+			}
+			if count < maxEntries {
+				// We didn't have 1000 entries, so this is the last batch
+				finish = true
+			}
+
+			// Generate remove multi objects XML request
+			removeBytes := generateRemoveMultiObjectsRequest(batch)
+			// Execute POST on bucket to delete objects.
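+			// The Multi-Object Delete API requires a Content-MD5 header
+			// for the request body, hence the sumMD5Base64 below.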
+ resp, err := c.executeMethod(ctx, "POST", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(removeBytes), + contentLength: int64(len(removeBytes)), + contentMD5Base64: sumMD5Base64(removeBytes), + contentSHA256Hex: sum256Hex(removeBytes), + }) + if resp != nil { + if resp.StatusCode != http.StatusOK { + e := httpRespToErrorResponse(resp, bucketName, "") + errorCh <- RemoveObjectError{ObjectName: "", Err: e} + } + } + if err != nil { + for _, b := range batch { + errorCh <- RemoveObjectError{ObjectName: b, Err: err} + } + continue + } + + // Process multiobjects remove xml response + processRemoveMultiObjectsResponse(resp.Body, batch, errorCh) + + closeResponse(resp) + } + }(errorCh) + return errorCh +} + +// RemoveObjects removes multiple objects from a bucket. +// The list of objects to remove are received from objectsCh. +// Remove failures are sent back via error channel. +func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError { + return c.RemoveObjectsWithContext(context.Background(), bucketName, objectsCh) +} + +// RemoveIncompleteUpload aborts an partially uploaded object. +func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + // Find multipart upload ids of the object to be aborted. + uploadIDs, err := c.findUploadIDs(bucketName, objectName) + if err != nil { + return err + } + + for _, uploadID := range uploadIDs { + // abort incomplete multipart upload, based on the upload id passed. + err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID) + if err != nil { + return err + } + } + + return nil +} + +// abortMultipartUpload aborts a multipart upload for the given +// uploadID, all previously uploaded parts are deleted. +func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + + // Execute DELETE on multipart upload. + resp, err := c.executeMethod(ctx, "DELETE", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + // Abort has no response body, handle it for any errors. + var errorResponse ErrorResponse + switch resp.StatusCode { + case http.StatusNotFound: + // This is needed specifically for abort and it cannot + // be converged into default case. 
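+				// S3 returns 404 with an empty body once the upload ID is
+				// gone, so synthesize a NoSuchUpload error response here.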
+ errorResponse = ErrorResponse{ + Code: "NoSuchUpload", + Message: "The specified multipart upload does not exist.", + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + default: + return httpRespToErrorResponse(resp, bucketName, objectName) + } + return errorResponse + } + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v6/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v6/api-s3-datatypes.go new file mode 100644 index 0000000000..a6b125522a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/api-s3-datatypes.go @@ -0,0 +1,245 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/xml" + "time" +) + +// listAllMyBucketsResult container for listBuckets response. +type listAllMyBucketsResult struct { + // Container for one or more buckets. + Buckets struct { + Bucket []BucketInfo + } + Owner owner +} + +// owner container for bucket owner information. +type owner struct { + DisplayName string + ID string +} + +// CommonPrefix container for prefix response. +type CommonPrefix struct { + Prefix string +} + +// ListBucketV2Result container for listObjects response version 2. +type ListBucketV2Result struct { + // A response can contain CommonPrefixes only if you have + // specified a delimiter. + CommonPrefixes []CommonPrefix + // Metadata about each object returned. + Contents []ObjectInfo + Delimiter string + + // Encoding type used to encode object keys in the response. + EncodingType string + + // A flag that indicates whether or not ListObjects returned all of the results + // that satisfied the search criteria. + IsTruncated bool + MaxKeys int64 + Name string + + // Hold the token that will be sent in the next request to fetch the next group of keys + NextContinuationToken string + + ContinuationToken string + Prefix string + + // FetchOwner and StartAfter are currently not used + FetchOwner string + StartAfter string +} + +// ListBucketResult container for listObjects response. +type ListBucketResult struct { + // A response can contain CommonPrefixes only if you have + // specified a delimiter. + CommonPrefixes []CommonPrefix + // Metadata about each object returned. + Contents []ObjectInfo + Delimiter string + + // Encoding type used to encode object keys in the response. + EncodingType string + + // A flag that indicates whether or not ListObjects returned all of the results + // that satisfied the search criteria. + IsTruncated bool + Marker string + MaxKeys int64 + Name string + + // When response is truncated (the IsTruncated element value in + // the response is true), you can use the key name in this field + // as marker in the subsequent request to get next set of objects. 
+ // Object storage lists objects in alphabetical order Note: This + // element is returned only if you have delimiter request + // parameter specified. If response does not include the NextMaker + // and it is truncated, you can use the value of the last Key in + // the response as the marker in the subsequent request to get the + // next set of object keys. + NextMarker string + Prefix string +} + +// ListMultipartUploadsResult container for ListMultipartUploads response +type ListMultipartUploadsResult struct { + Bucket string + KeyMarker string + UploadIDMarker string `xml:"UploadIdMarker"` + NextKeyMarker string + NextUploadIDMarker string `xml:"NextUploadIdMarker"` + EncodingType string + MaxUploads int64 + IsTruncated bool + Uploads []ObjectMultipartInfo `xml:"Upload"` + Prefix string + Delimiter string + // A response can contain CommonPrefixes only if you specify a delimiter. + CommonPrefixes []CommonPrefix +} + +// initiator container for who initiated multipart upload. +type initiator struct { + ID string + DisplayName string +} + +// copyObjectResult container for copy object response. +type copyObjectResult struct { + ETag string + LastModified time.Time // time string format "2006-01-02T15:04:05.000Z" +} + +// ObjectPart container for particular part of an object. +type ObjectPart struct { + // Part number identifies the part. + PartNumber int + + // Date and time the part was uploaded. + LastModified time.Time + + // Entity tag returned when the part was uploaded, usually md5sum + // of the part. + ETag string + + // Size of the uploaded part data. + Size int64 +} + +// ListObjectPartsResult container for ListObjectParts response. +type ListObjectPartsResult struct { + Bucket string + Key string + UploadID string `xml:"UploadId"` + + Initiator initiator + Owner owner + + StorageClass string + PartNumberMarker int + NextPartNumberMarker int + MaxParts int + + // Indicates whether the returned list of parts is truncated. + IsTruncated bool + ObjectParts []ObjectPart `xml:"Part"` + + EncodingType string +} + +// initiateMultipartUploadResult container for InitiateMultiPartUpload +// response. +type initiateMultipartUploadResult struct { + Bucket string + Key string + UploadID string `xml:"UploadId"` +} + +// completeMultipartUploadResult container for completed multipart +// upload response. +type completeMultipartUploadResult struct { + Location string + Bucket string + Key string + ETag string +} + +// CompletePart sub container lists individual part numbers and their +// md5sum, part of completeMultipartUpload. +type CompletePart struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"` + + // Part number identifies the part. + PartNumber int + ETag string +} + +// completeMultipartUpload container for completing multipart upload. +type completeMultipartUpload struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"` + Parts []CompletePart `xml:"Part"` +} + +// createBucketConfiguration container for bucket configuration. 
+type createBucketConfiguration struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"` + Location string `xml:"LocationConstraint"` +} + +// deleteObject container for Delete element in MultiObjects Delete XML request +type deleteObject struct { + Key string + VersionID string `xml:"VersionId,omitempty"` +} + +// deletedObject container for Deleted element in MultiObjects Delete XML response +type deletedObject struct { + Key string + VersionID string `xml:"VersionId,omitempty"` + // These fields are ignored. + DeleteMarker bool + DeleteMarkerVersionID string +} + +// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response +type nonDeletedObject struct { + Key string + Code string + Message string +} + +// deletedMultiObjects container for MultiObjects Delete XML request +type deleteMultiObjects struct { + XMLName xml.Name `xml:"Delete"` + Quiet bool + Objects []deleteObject `xml:"Object"` +} + +// deletedMultiObjectsResult container for MultiObjects Delete XML response +type deleteMultiObjectsResult struct { + XMLName xml.Name `xml:"DeleteResult"` + DeletedObjects []deletedObject `xml:"Deleted"` + UnDeletedObjects []nonDeletedObject `xml:"Error"` +} diff --git a/vendor/github.com/minio/minio-go/v6/api-select.go b/vendor/github.com/minio/minio-go/v6/api-select.go new file mode 100644 index 0000000000..b5ce131121 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/api-select.go @@ -0,0 +1,532 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/xml" + "errors" + "fmt" + "hash" + "hash/crc32" + "io" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/v6/pkg/encrypt" + "github.com/minio/minio-go/v6/pkg/s3utils" +) + +// CSVFileHeaderInfo - is the parameter for whether to utilize headers. +type CSVFileHeaderInfo string + +// Constants for file header info. +const ( + CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" + CSVFileHeaderInfoIgnore = "IGNORE" + CSVFileHeaderInfoUse = "USE" +) + +// SelectCompressionType - is the parameter for what type of compression is +// present +type SelectCompressionType string + +// Constants for compression types under select API. +const ( + SelectCompressionNONE SelectCompressionType = "NONE" + SelectCompressionGZIP = "GZIP" + SelectCompressionBZIP = "BZIP2" +) + +// CSVQuoteFields - is the parameter for how CSV fields are quoted. +type CSVQuoteFields string + +// Constants for csv quote styles. +const ( + CSVQuoteFieldsAlways CSVQuoteFields = "Always" + CSVQuoteFieldsAsNeeded = "AsNeeded" +) + +// QueryExpressionType - is of what syntax the expression is, this should only +// be SQL +type QueryExpressionType string + +// Constants for expression type. 
+const ( + QueryExpressionTypeSQL QueryExpressionType = "SQL" +) + +// JSONType determines json input serialization type. +type JSONType string + +// Constants for JSONTypes. +const ( + JSONDocumentType JSONType = "DOCUMENT" + JSONLinesType = "LINES" +) + +// ParquetInputOptions parquet input specific options +type ParquetInputOptions struct{} + +// CSVInputOptions csv input specific options +type CSVInputOptions struct { + FileHeaderInfo CSVFileHeaderInfo + RecordDelimiter string + FieldDelimiter string `xml:",omitempty"` + QuoteCharacter string `xml:",omitempty"` + QuoteEscapeCharacter string `xml:",omitempty"` + Comments string `xml:",omitempty"` +} + +// CSVOutputOptions csv output specific options +type CSVOutputOptions struct { + QuoteFields CSVQuoteFields `xml:",omitempty"` + RecordDelimiter string + FieldDelimiter string `xml:",omitempty"` + QuoteCharacter string `xml:",omitempty"` + QuoteEscapeCharacter string `xml:",omitempty"` +} + +// JSONInputOptions json input specific options +type JSONInputOptions struct { + Type JSONType +} + +// JSONOutputOptions - json output specific options +type JSONOutputOptions struct { + RecordDelimiter string +} + +// SelectObjectInputSerialization - input serialization parameters +type SelectObjectInputSerialization struct { + CompressionType SelectCompressionType + Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` + CSV *CSVInputOptions `xml:"CSV,omitempty"` + JSON *JSONInputOptions `xml:"JSON,omitempty"` +} + +// SelectObjectOutputSerialization - output serialization parameters. +type SelectObjectOutputSerialization struct { + CSV *CSVOutputOptions `xml:"CSV,omitempty"` + JSON *JSONOutputOptions `xml:"JSON,omitempty"` +} + +// SelectObjectOptions - represents the input select body +type SelectObjectOptions struct { + XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"` + ServerSideEncryption encrypt.ServerSide `xml:"-"` + Expression string + ExpressionType QueryExpressionType + InputSerialization SelectObjectInputSerialization + OutputSerialization SelectObjectOutputSerialization + RequestProgress struct { + Enabled bool + } +} + +// Header returns the http.Header representation of the SelectObject options. +func (o SelectObjectOptions) Header() http.Header { + headers := make(http.Header) + if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { + o.ServerSideEncryption.Marshal(headers) + } + return headers +} + +// SelectObjectType - is the parameter which defines what type of object the +// operation is being performed on. +type SelectObjectType string + +// Constants for input data types. +const ( + SelectObjectTypeCSV SelectObjectType = "CSV" + SelectObjectTypeJSON = "JSON" + SelectObjectTypeParquet = "Parquet" +) + +// preludeInfo is used for keeping track of necessary information from the +// prelude. +type preludeInfo struct { + totalLen uint32 + headerLen uint32 +} + +// SelectResults is used for the streaming responses from the server. +type SelectResults struct { + pipeReader *io.PipeReader + resp *http.Response + stats *StatsMessage + progress *ProgressMessage +} + +// ProgressMessage is a struct for progress xml message. +type ProgressMessage struct { + XMLName xml.Name `xml:"Progress" json:"-"` + StatsMessage +} + +// StatsMessage is a struct for stat xml message. +type StatsMessage struct { + XMLName xml.Name `xml:"Stats" json:"-"` + BytesScanned int64 + BytesProcessed int64 + BytesReturned int64 +} + +// messageType represents the type of message. 
+type messageType string + +const ( + errorMsg messageType = "error" + commonMsg = "event" +) + +// eventType represents the type of event. +type eventType string + +// list of event-types returned by Select API. +const ( + endEvent eventType = "End" + recordsEvent = "Records" + progressEvent = "Progress" + statsEvent = "Stats" +) + +// contentType represents content type of event. +type contentType string + +const ( + xmlContent contentType = "text/xml" +) + +// SelectObjectContent is a implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API. +func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + + selectReqBytes, err := xml.Marshal(opts) + if err != nil { + return nil, err + } + + urlValues := make(url.Values) + urlValues.Set("select", "") + urlValues.Set("select-type", "2") + + // Execute POST on bucket/object. + resp, err := c.executeMethod(ctx, "POST", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: opts.Header(), + contentMD5Base64: sumMD5Base64(selectReqBytes), + contentSHA256Hex: sum256Hex(selectReqBytes), + contentBody: bytes.NewReader(selectReqBytes), + contentLength: int64(len(selectReqBytes)), + }) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + + pipeReader, pipeWriter := io.Pipe() + streamer := &SelectResults{ + resp: resp, + stats: &StatsMessage{}, + progress: &ProgressMessage{}, + pipeReader: pipeReader, + } + streamer.start(pipeWriter) + return streamer, nil +} + +// Close - closes the underlying response body and the stream reader. +func (s *SelectResults) Close() error { + defer closeResponse(s.resp) + return s.pipeReader.Close() +} + +// Read - is a reader compatible implementation for SelectObjectContent records. +func (s *SelectResults) Read(b []byte) (n int, err error) { + return s.pipeReader.Read(b) +} + +// Stats - information about a request's stats when processing is complete. +func (s *SelectResults) Stats() *StatsMessage { + return s.stats +} + +// Progress - information about the progress of a request. +func (s *SelectResults) Progress() *ProgressMessage { + return s.progress +} + +// start is the main function that decodes the large byte array into +// several events that are sent through the eventstream. +func (s *SelectResults) start(pipeWriter *io.PipeWriter) { + go func() { + for { + var prelude preludeInfo + var headers = make(http.Header) + var err error + + // Create CRC code + crc := crc32.New(crc32.IEEETable) + crcReader := io.TeeReader(s.resp.Body, crc) + + // Extract the prelude(12 bytes) into a struct to extract relevant information. 
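+			// Event-stream framing: a 4-byte total length, a 4-byte header
+			// length and a 4-byte prelude CRC, followed by the headers, the
+			// payload and a trailing 4-byte message CRC.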
+ prelude, err = processPrelude(crcReader, crc) + if err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + + // Extract the headers(variable bytes) into a struct to extract relevant information + if prelude.headerLen > 0 { + if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + } + + // Get the actual payload length so that the appropriate amount of + // bytes can be read or parsed. + payloadLen := prelude.PayloadLen() + + m := messageType(headers.Get("message-type")) + + switch m { + case errorMsg: + pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\"")) + closeResponse(s.resp) + return + case commonMsg: + // Get content-type of the payload. + c := contentType(headers.Get("content-type")) + + // Get event type of the payload. + e := eventType(headers.Get("event-type")) + + // Handle all supported events. + switch e { + case endEvent: + pipeWriter.Close() + closeResponse(s.resp) + return + case recordsEvent: + if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + case progressEvent: + switch c { + case xmlContent: + if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + default: + pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent)) + closeResponse(s.resp) + return + } + case statsEvent: + switch c { + case xmlContent: + if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + default: + pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent)) + closeResponse(s.resp) + return + } + } + } + + // Ensures that the full message's CRC is correct and + // that the message is not corrupted + if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + + } + }() +} + +// PayloadLen is a function that calculates the length of the payload. +func (p preludeInfo) PayloadLen() int64 { + return int64(p.totalLen - p.headerLen - 16) +} + +// processPrelude is the function that reads the 12 bytes of the prelude and +// ensures the CRC is correct while also extracting relevant information into +// the struct, +func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) { + var err error + var pInfo = preludeInfo{} + + // reads total length of the message (first 4 bytes) + pInfo.totalLen, err = extractUint32(prelude) + if err != nil { + return pInfo, err + } + + // reads total header length of the message (2nd 4 bytes) + pInfo.headerLen, err = extractUint32(prelude) + if err != nil { + return pInfo, err + } + + // checks that the CRC is correct (3rd 4 bytes) + preCRC := crc.Sum32() + if err := checkCRC(prelude, preCRC); err != nil { + return pInfo, err + } + + return pInfo, nil +} + +// extracts the relevant information from the Headers. 
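+// Each header on the wire is encoded as: a 1-byte name length, the
+// name, a 1-byte value type (7, i.e. string, for all Select headers),
+// a 2-byte value length and the value itself.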
+func extractHeader(body io.Reader, myHeaders http.Header) error {
+	for {
+		// extract the first part of the header: the header name.
+		headerTypeName, err := extractHeaderType(body)
+		if err != nil {
+			// On end of file we have read all of the headers.
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		// read the 1-byte header value type (always 7, i.e. string, here) and ignore it.
+		extractUint8(body)
+
+		headerValueName, err := extractHeaderValue(body)
+		if err != nil {
+			return err
+		}
+
+		myHeaders.Set(headerTypeName, headerValueName)
+	}
+	return nil
+}
+
+// extractHeaderType extracts the first half of the header message, the header type.
+func extractHeaderType(body io.Reader) (string, error) {
+	// extracts the 1-byte header name length
+	headerNameLen, err := extractUint8(body)
+	if err != nil {
+		return "", err
+	}
+	// extracts the string with the appropriate number of bytes
+	headerName, err := extractString(body, int(headerNameLen))
+	if err != nil {
+		return "", err
+	}
+	return strings.TrimPrefix(headerName, ":"), nil
+}
+
+// extractHeaderValue extracts the second half of the header message, the
+// header value.
+func extractHeaderValue(body io.Reader) (string, error) {
+	bodyLen, err := extractUint16(body)
+	if err != nil {
+		return "", err
+	}
+	bodyName, err := extractString(body, int(bodyLen))
+	if err != nil {
+		return "", err
+	}
+	return bodyName, nil
+}
+
+// extracts a string of a particular number of bytes from the reader.
+func extractString(source io.Reader, lenBytes int) (string, error) {
+	myVal := make([]byte, lenBytes)
+	// Use io.ReadFull so a short read cannot silently truncate the string.
+	_, err := io.ReadFull(source, myVal)
+	if err != nil {
+		return "", err
+	}
+	return string(myVal), nil
+}
+
+// extractUint32 extracts a 4 byte integer from the byte array.
+func extractUint32(r io.Reader) (uint32, error) {
+	buf := make([]byte, 4)
+	_, err := io.ReadFull(r, buf)
+	if err != nil {
+		return 0, err
+	}
+	return binary.BigEndian.Uint32(buf), nil
+}
+
+// extractUint16 extracts a 2 byte integer from the byte array.
+func extractUint16(r io.Reader) (uint16, error) {
+	buf := make([]byte, 2)
+	_, err := io.ReadFull(r, buf)
+	if err != nil {
+		return 0, err
+	}
+	return binary.BigEndian.Uint16(buf), nil
+}
+
+// extractUint8 extracts a 1 byte integer from the byte array.
+func extractUint8(r io.Reader) (uint8, error) {
+	buf := make([]byte, 1)
+	_, err := io.ReadFull(r, buf)
+	if err != nil {
+		return 0, err
+	}
+	return buf[0], nil
+}
+
+// checkCRC ensures that the CRC matches with the one from the reader.
+func checkCRC(r io.Reader, expect uint32) error {
+	msgCRC, err := extractUint32(r)
+	if err != nil {
+		return err
+	}
+
+	if msgCRC != expect {
+		return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect)
+	}
+	return nil
+}
diff --git a/vendor/github.com/minio/minio-go/v6/api-stat.go b/vendor/github.com/minio/minio-go/v6/api-stat.go
new file mode 100644
index 0000000000..c6fb47d63e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/api-stat.go
@@ -0,0 +1,192 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/v6/pkg/s3utils" +) + +// BucketExists verify if bucket exists and you have permission to access it. +func (c Client) BucketExists(bucketName string) (bool, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return false, err + } + + // Execute HEAD on bucketName. + resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + if ToErrorResponse(err).Code == "NoSuchBucket" { + return false, nil + } + return false, err + } + if resp != nil { + resperr := httpRespToErrorResponse(resp, bucketName, "") + if ToErrorResponse(resperr).Code == "NoSuchBucket" { + return false, nil + } + if resp.StatusCode != http.StatusOK { + return false, httpRespToErrorResponse(resp, bucketName, "") + } + } + return true, nil +} + +// List of header keys to be filtered, usually +// from all S3 API http responses. +var defaultFilterKeys = []string{ + "Connection", + "Transfer-Encoding", + "Accept-Ranges", + "Date", + "Server", + "Vary", + "x-amz-bucket-region", + "x-amz-request-id", + "x-amz-id-2", + "Content-Security-Policy", + "X-Xss-Protection", + + // Add new headers to be ignored. +} + +// Extract only necessary metadata header key/values by +// filtering them out with a list of custom header keys. +func extractObjMetadata(header http.Header) http.Header { + filterKeys := append([]string{ + "ETag", + "Content-Length", + "Last-Modified", + "Content-Type", + "Expires", + }, defaultFilterKeys...) + return filterHeader(header, filterKeys) +} + +// StatObject verifies if object exists and you have permission to access. +func (c Client) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectInfo{}, err + } + return c.statObject(context.Background(), bucketName, objectName, opts) +} + +// Lower level API for statObject supporting pre-conditions and range headers. +func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectInfo{}, err + } + + // Execute HEAD on objectName. + resp, err := c.executeMethod(ctx, "HEAD", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + contentSHA256Hex: emptySHA256Hex, + customHeader: opts.Header(), + }) + defer closeResponse(resp) + if err != nil { + return ObjectInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // Trim off the odd double quotes from ETag in the beginning and end. 
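+	// Note that for multipart uploads the ETag is not an MD5 sum of the
+	// object data, even though it is kept in a variable named md5sum.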
+ md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + md5sum = strings.TrimSuffix(md5sum, "\"") + + // Parse content length is exists + var size int64 = -1 + contentLengthStr := resp.Header.Get("Content-Length") + if contentLengthStr != "" { + size, err = strconv.ParseInt(contentLengthStr, 10, 64) + if err != nil { + // Content-Length is not valid + return ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: "Content-Length is invalid. " + reportIssue, + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + } + } + + // Parse Last-Modified has http time format. + date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) + if err != nil { + return ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: "Last-Modified time format is invalid. " + reportIssue, + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + } + + // Fetch content type if any present. + contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) + if contentType == "" { + contentType = "application/octet-stream" + } + + expiryStr := resp.Header.Get("Expires") + var expTime time.Time + if t, err := time.Parse(http.TimeFormat, expiryStr); err == nil { + expTime = t.UTC() + } + // Save object metadata info. + return ObjectInfo{ + ETag: md5sum, + Key: objectName, + Size: size, + LastModified: date, + ContentType: contentType, + Expires: expTime, + // Extract only the relevant header keys describing the object. + // following function filters out a list of standard set of keys + // which are not part of object metadata. + Metadata: extractObjMetadata(resp.Header), + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v6/api.go b/vendor/github.com/minio/minio-go/v6/api.go new file mode 100644 index 0000000000..ece498f416 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/api.go @@ -0,0 +1,914 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "crypto/md5" + "crypto/sha256" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "math/rand" + "net" + "net/http" + "net/http/cookiejar" + "net/http/httputil" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/publicsuffix" + + "github.com/minio/minio-go/v6/pkg/credentials" + "github.com/minio/minio-go/v6/pkg/s3signer" + "github.com/minio/minio-go/v6/pkg/s3utils" +) + +// Client implements Amazon S3 compatible methods. +type Client struct { + /// Standard options. + + // Parsed endpoint url provided by the user. + endpointURL *url.URL + + // Holds various credential providers. 
+ credsProvider *credentials.Credentials + + // Custom signerType value overrides all credentials. + overrideSignerType credentials.SignatureType + + // User supplied. + appInfo struct { + appName string + appVersion string + } + + // Indicate whether we are using https or not + secure bool + + // Needs allocation. + httpClient *http.Client + bucketLocCache *bucketLocationCache + + // Advanced functionality. + isTraceEnabled bool + traceErrorsOnly bool + traceOutput io.Writer + + // S3 specific accelerated endpoint. + s3AccelerateEndpoint string + + // Region endpoint + region string + + // Random seed. + random *rand.Rand + + // lookup indicates type of url lookup supported by server. If not specified, + // default to Auto. + lookup BucketLookupType +} + +// Options for New method +type Options struct { + Creds *credentials.Credentials + Secure bool + Region string + BucketLookup BucketLookupType + // Add future fields here +} + +// Global constants. +const ( + libraryName = "minio-go" + libraryVersion = "v6.0.27" +) + +// User Agent should always following the below style. +// Please open an issue to discuss any new changes here. +// +// MinIO (OS; ARCH) LIB/VER APP/VER +const ( + libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") " + libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion +) + +// BucketLookupType is type of url lookup supported by server. +type BucketLookupType int + +// Different types of url lookup supported by the server.Initialized to BucketLookupAuto +const ( + BucketLookupAuto BucketLookupType = iota + BucketLookupDNS + BucketLookupPath +) + +// NewV2 - instantiate minio client with Amazon S3 signature version +// '2' compatibility. +func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { + creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "") + clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) + if err != nil { + return nil, err + } + clnt.overrideSignerType = credentials.SignatureV2 + return clnt, nil +} + +// NewV4 - instantiate minio client with Amazon S3 signature version +// '4' compatibility. +func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { + creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") + clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) + if err != nil { + return nil, err + } + clnt.overrideSignerType = credentials.SignatureV4 + return clnt, nil +} + +// New - instantiate minio client, adds automatic verification of signature. +func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { + creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") + clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) + if err != nil { + return nil, err + } + // Google cloud storage should be set to signature V2, force it if not. + if s3utils.IsGoogleEndpoint(*clnt.endpointURL) { + clnt.overrideSignerType = credentials.SignatureV2 + } + // If Amazon S3 set to signature v4. + if s3utils.IsAmazonEndpoint(*clnt.endpointURL) { + clnt.overrideSignerType = credentials.SignatureV4 + } + return clnt, nil +} + +// NewWithCredentials - instantiate minio client with credentials provider +// for retrieving credentials from various credentials provider such as +// IAM, File, Env etc. 
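+//
+// Illustrative usage (a sketch; the endpoint, keys and region are
+// placeholders, not defaults):
+//
+//	creds := credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", "")
+//	clnt, err := minio.NewWithCredentials("play.min.io:9000", creds, true, "us-east-1")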
+func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
+	return privateNew(endpoint, creds, secure, region, BucketLookupAuto)
+}
+
+// NewWithRegion - instantiate minio client, with region configured. Unlike New(),
+// NewWithRegion avoids bucket-location lookup operations and is slightly faster.
+// Use this function if your application deals with a single region.
+func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) {
+	creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
+	return privateNew(endpoint, creds, secure, region, BucketLookupAuto)
+}
+
+// NewWithOptions - instantiate minio client with options
+func NewWithOptions(endpoint string, opts *Options) (*Client, error) {
+	return privateNew(endpoint, opts.Creds, opts.Secure, opts.Region, opts.BucketLookup)
+}
+
+// lockedRandSource provides protected rand source, implements rand.Source interface.
+type lockedRandSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
+func (r *lockedRandSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+// Seed uses the provided seed value to initialize the generator to a
+// deterministic state.
+func (r *lockedRandSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
+
+// Redirect requests by re-signing the request.
+func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error {
+	if len(via) >= 5 {
+		return errors.New("stopped after 5 redirects")
+	}
+	if len(via) == 0 {
+		return nil
+	}
+	lastRequest := via[len(via)-1]
+	var reAuth bool
+	for attr, val := range lastRequest.Header {
+		// if hosts do not match do not copy Authorization header
+		if attr == "Authorization" && req.Host != lastRequest.Host {
+			reAuth = true
+			continue
+		}
+		if _, ok := req.Header[attr]; !ok {
+			req.Header[attr] = val
+		}
+	}
+
+	*c.endpointURL = *req.URL
+
+	value, err := c.credsProvider.Get()
+	if err != nil {
+		return err
+	}
+	var (
+		signerType      = value.SignerType
+		accessKeyID     = value.AccessKeyID
+		secretAccessKey = value.SecretAccessKey
+		sessionToken    = value.SessionToken
+		region          = c.region
+	)
+
+	// Custom signer set then override the behavior.
+	if c.overrideSignerType != credentials.SignatureDefault {
+		signerType = c.overrideSignerType
+	}
+
+	// If signerType returned by credentials helper is anonymous,
+	// then do not sign regardless of signerType override.
+	if value.SignerType == credentials.SignatureAnonymous {
+		signerType = credentials.SignatureAnonymous
+	}
+
+	if reAuth {
+		// Check if there is no region override, if not get it from the URL if possible.
+		if region == "" {
+			region = s3utils.GetRegionFromURL(*c.endpointURL)
+		}
+		switch {
+		case signerType.IsV2():
+			return errors.New("signature V2 cannot support redirection")
+		case signerType.IsV4():
+			s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region))
+		}
+	}
+	return nil
+}
+
+func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string, lookup BucketLookupType) (*Client, error) {
+	// construct endpoint.
+	endpointURL, err := getEndpointURL(endpoint, secure)
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize cookies to preserve server sent cookies if any and replay
+	// them upon each request.
+ jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) + if err != nil { + return nil, err + } + + // instantiate new Client. + clnt := new(Client) + + // Save the credentials. + clnt.credsProvider = creds + + // Remember whether we are using https or not + clnt.secure = secure + + // Save endpoint URL, user agent for future uses. + clnt.endpointURL = endpointURL + + transport, err := DefaultTransport(secure) + if err != nil { + return nil, err + } + + // Instantiate http client and bucket location cache. + clnt.httpClient = &http.Client{ + Jar: jar, + Transport: transport, + CheckRedirect: clnt.redirectHeaders, + } + + // Sets custom region, if region is empty bucket location cache is used automatically. + if region == "" { + region = s3utils.GetRegionFromURL(*clnt.endpointURL) + } + clnt.region = region + + // Instantiate bucket location cache. + clnt.bucketLocCache = newBucketLocationCache() + + // Introduce a new locked random seed. + clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) + + // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined + // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. + clnt.lookup = lookup + // Return. + return clnt, nil +} + +// SetAppInfo - add application details to user agent. +func (c *Client) SetAppInfo(appName string, appVersion string) { + // if app name and version not set, we do not set a new user agent. + if appName != "" && appVersion != "" { + c.appInfo.appName = appName + c.appInfo.appVersion = appVersion + } +} + +// SetCustomTransport - set new custom transport. +func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) { + // Set this to override default transport + // ``http.DefaultTransport``. + // + // This transport is usually needed for debugging OR to add your + // own custom TLS certificates on the client transport, for custom + // CA's and certs which are not part of standard certificate + // authority follow this example :- + // + // tr := &http.Transport{ + // TLSClientConfig: &tls.Config{RootCAs: pool}, + // DisableCompression: true, + // } + // api.SetCustomTransport(tr) + // + if c.httpClient != nil { + c.httpClient.Transport = customHTTPTransport + } +} + +// TraceOn - enable HTTP tracing. +func (c *Client) TraceOn(outputStream io.Writer) { + // if outputStream is nil then default to os.Stdout. + if outputStream == nil { + outputStream = os.Stdout + } + // Sets a new output stream. + c.traceOutput = outputStream + + // Enable tracing. + c.isTraceEnabled = true +} + +// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced. +func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) { + c.TraceOn(outputStream) + c.traceErrorsOnly = true +} + +// TraceErrorsOnlyOff - Turns off the errors only tracing and everything will be traced after this call. +// If all tracing needs to be turned off, call TraceOff(). +func (c *Client) TraceErrorsOnlyOff() { + c.traceErrorsOnly = false +} + +// TraceOff - disable HTTP tracing. +func (c *Client) TraceOff() { + // Disable tracing. + c.isTraceEnabled = false + c.traceErrorsOnly = false +} + +// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your +// requests. This feature is only specific to S3 for all other endpoints this +// function does nothing. 
To read further details on s3 transfer acceleration
+// please visit -
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
+	if s3utils.IsAmazonEndpoint(*c.endpointURL) {
+		c.s3AccelerateEndpoint = accelerateEndpoint
+	}
+}
+
+// Hash materials provides relevant initialized hash algo writers
+// based on the expected signature type.
+//
+// - For a signature v4 request, if the connection is insecure compute only sha256.
+// - For a signature v4 request, if the connection is secure compute only md5.
+// - For an anonymous request compute md5.
+func (c *Client) hashMaterials() (hashAlgos map[string]hash.Hash, hashSums map[string][]byte) {
+	hashSums = make(map[string][]byte)
+	hashAlgos = make(map[string]hash.Hash)
+	if c.overrideSignerType.IsV4() {
+		if c.secure {
+			hashAlgos["md5"] = md5.New()
+		} else {
+			hashAlgos["sha256"] = sha256.New()
+		}
+	} else {
+		if c.overrideSignerType.IsAnonymous() {
+			hashAlgos["md5"] = md5.New()
+		}
+	}
+	return hashAlgos, hashSums
+}
+
+// requestMetadata - is a container for all the values needed to make a request.
+type requestMetadata struct {
+	// If set newRequest presigns the URL.
+	presignURL bool
+
+	// User supplied.
+	bucketName   string
+	objectName   string
+	queryValues  url.Values
+	customHeader http.Header
+	expires      int64
+
+	// Generated by our internal code.
+	bucketLocation   string
+	contentBody      io.Reader
+	contentLength    int64
+	contentMD5Base64 string // carries base64 encoded md5sum
+	contentSHA256Hex string // carries hex encoded sha256sum
+}
+
+// dumpHTTP - dump HTTP request and response.
+func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
+	// Starts http dump.
+	_, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
+	if err != nil {
+		return err
+	}
+
+	// Filter out Signature field from Authorization header.
+	origAuth := req.Header.Get("Authorization")
+	if origAuth != "" {
+		req.Header.Set("Authorization", redactSignature(origAuth))
+	}
+
+	// Only display request header.
+	reqTrace, err := httputil.DumpRequestOut(req, false)
+	if err != nil {
+		return err
+	}
+
+	// Write request to trace output.
+	_, err = fmt.Fprint(c.traceOutput, string(reqTrace))
+	if err != nil {
+		return err
+	}
+
+	// Only display response header.
+	var respTrace []byte
+
+	// For errors we make sure to dump response body as well.
+	if resp.StatusCode != http.StatusOK &&
+		resp.StatusCode != http.StatusPartialContent &&
+		resp.StatusCode != http.StatusNoContent {
+		respTrace, err = httputil.DumpResponse(resp, true)
+		if err != nil {
+			return err
+		}
+	} else {
+		respTrace, err = httputil.DumpResponse(resp, false)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Write response to trace output.
+	_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
+	if err != nil {
+		return err
+	}
+
+	// Ends the http dump.
+	_, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
+	if err != nil {
+		return err
+	}
+
+	// Returns success.
+	return nil
+}
+
+// do - execute http request.
+func (c Client) do(req *http.Request) (*http.Response, error) {
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		// Handle this specifically for now until future Golang versions fix this issue properly.
+		if urlErr, ok := err.(*url.Error); ok {
+			if strings.Contains(urlErr.Err.Error(), "EOF") {
+				return nil, &url.Error{
+					Op:  urlErr.Op,
+					URL: urlErr.URL,
+					Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
+				}
+			}
+		}
+		return nil, err
+	}
+
+	// Response should never be nil; report an error if that is the case.
+	if resp == nil {
+		msg := "Response is empty. " + reportIssue
+		return nil, ErrInvalidArgument(msg)
+	}
+
+	// If trace is enabled, dump http request and response,
+	// except when traceErrorsOnly is enabled and the response's status code is ok.
+	if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) {
+		err = c.dumpHTTP(req, resp)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return resp, nil
+}
+
+// List of success status.
+var successStatus = []int{
+	http.StatusOK,
+	http.StatusNoContent,
+	http.StatusPartialContent,
+}
+
+// executeMethod - instantiates a given method, and retries the
+// request upon any error up to maxRetries attempts in a binomially
+// delayed manner using a standard back off algorithm.
+func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
+	var isRetryable bool     // Indicates if request can be retried.
+	var bodySeeker io.Seeker // Extracted seeker from io.Reader.
+	var reqRetry = MaxRetry  // Indicates how many times we can retry the request
+
+	if metadata.contentBody != nil {
+		// Check if body is seekable then it is retryable.
+		bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
+		switch bodySeeker {
+		case os.Stdin, os.Stdout, os.Stderr:
+			isRetryable = false
+		}
+		// Retry only when reader is seekable
+		if !isRetryable {
+			reqRetry = 1
+		}
+
+		// Figure out if the body can be closed - if yes
+		// we will definitely close it upon the function
+		// return.
+		bodyCloser, ok := metadata.contentBody.(io.Closer)
+		if ok {
+			defer bodyCloser.Close()
+		}
+	}
+
+	// Create a done channel to control 'newRetryTimer' go routine.
+	doneCh := make(chan struct{}, 1)
+
+	// Indicate to our routine to exit cleanly upon return.
+	defer close(doneCh)
+
+	// Blank identifier is kept here on purpose since 'range' without
+	// blank identifiers is only supported since go1.4
+	// https://golang.org/doc/go1.4#forrange.
+	for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
+		// Retry executes the following function body if the request has an
+		// error until maxRetries have been exhausted; retry attempts are
+		// performed after waiting for a given period of time in a
+		// binomial fashion.
+		if isRetryable {
+			// Seek back to beginning for each attempt.
+			if _, err = bodySeeker.Seek(0, 0); err != nil {
+				// If seek failed, no need to retry.
+				return nil, err
+			}
+		}
+
+		// Instantiate a new request.
+		var req *http.Request
+		req, err = c.newRequest(method, metadata)
+		if err != nil {
+			errResponse := ToErrorResponse(err)
+			if isS3CodeRetryable(errResponse.Code) {
+				continue // Retry.
+			}
+			return nil, err
+		}
+
+		// Add context to request
+		req = req.WithContext(ctx)
+
+		// Initiate the request.
+		res, err = c.do(req)
+		if err != nil {
+			// For supported http requests errors verify.
+			if isHTTPReqErrorRetryable(err) {
+				continue // Retry.
+			}
+			// For other errors, return here no need to retry.
+			return nil, err
+		}
+
+		// For any known successful http status, return quickly.
+		for _, httpStatus := range successStatus {
+			if httpStatus == res.StatusCode {
+				return res, nil
+			}
+		}
+
+		// Read the body to be saved later.
+		errBodyBytes, err := ioutil.ReadAll(res.Body)
+		// res.Body should be closed
+		closeResponse(res)
+		if err != nil {
+			return nil, err
+		}
+
+		// Save the body.
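+		// The body was buffered in full above so the error classifiers
+		// below can read it; it is then rewound and reattached so callers
+		// can still consume it.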
+		errBodySeeker := bytes.NewReader(errBodyBytes)
+		res.Body = ioutil.NopCloser(errBodySeeker)
+
+		// For errors, verify if it is retryable, otherwise fail quickly.
+		errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
+
+		// Save the body back again.
+		errBodySeeker.Seek(0, 0) // Seek back to starting point.
+		res.Body = ioutil.NopCloser(errBodySeeker)
+
+		// If the bucket region is set in the error response and the error
+		// code indicates an invalid region, we can retry the request
+		// with the new region.
+		//
+		// Additionally we should only retry if bucketLocation and custom
+		// region are empty.
+		if metadata.bucketLocation == "" && c.region == "" {
+			if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" {
+				if metadata.bucketName != "" && errResponse.Region != "" {
+					// Gather cached location only if bucketName is present.
+					if _, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk {
+						c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
+						continue // Retry.
+					}
+				}
+			}
+		}
+
+		// Verify if error response code is retryable.
+		if isS3CodeRetryable(errResponse.Code) {
+			continue // Retry.
+		}
+
+		// Verify if http status code is retryable.
+		if isHTTPStatusRetryable(res.StatusCode) {
+			continue // Retry.
+		}
+
+		// For all other cases break out of the retry loop.
+		break
+	}
+	return res, err
+}
+
+// newRequest - instantiate a new HTTP request for a given method.
+func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
+	// If no method is supplied default to 'POST'.
+	if method == "" {
+		method = "POST"
+	}
+
+	location := metadata.bucketLocation
+	if location == "" {
+		if metadata.bucketName != "" {
+			// Gather location only if bucketName is present.
+			location, err = c.getBucketLocation(metadata.bucketName)
+			if err != nil {
+				if ToErrorResponse(err).Code != "AccessDenied" {
+					return nil, err
+				}
+			}
+			// Upon AccessDenied error on fetching bucket location, default
+			// to possible locations based on endpoint URL. This can usually
+			// happen when GetBucketLocation() is disabled using IAM policies.
+		}
+		if location == "" {
+			location = getDefaultLocation(*c.endpointURL, c.region)
+		}
+	}
+
+	// Look if target url supports virtual host.
+	isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName)
+
+	// Construct a new target URL.
+	targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, isVirtualHost, metadata.queryValues)
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize a new HTTP request for the method.
+	req, err = http.NewRequest(method, targetURL.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get credentials from the configured credentials provider.
+	value, err := c.credsProvider.Get()
+	if err != nil {
+		return nil, err
+	}
+
+	var (
+		signerType      = value.SignerType
+		accessKeyID     = value.AccessKeyID
+		secretAccessKey = value.SecretAccessKey
+		sessionToken    = value.SessionToken
+	)
+
+	// Custom signer set then override the behavior.
+	if c.overrideSignerType != credentials.SignatureDefault {
+		signerType = c.overrideSignerType
+	}
+
+	// If signerType returned by credentials helper is anonymous,
+	// then do not sign regardless of signerType override.
+	if value.SignerType == credentials.SignatureAnonymous {
+		signerType = credentials.SignatureAnonymous
+	}
+
+	// Generate presign url if needed, return right here.
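+	// (Presigned URLs carry the signature in query parameters, so the
+	// header and payload signing performed further below is skipped.)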
+ if metadata.expires != 0 && metadata.presignURL { + if signerType.IsAnonymous() { + return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") + } + if signerType.IsV2() { + // Presign URL with signature v2. + req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) + } else if signerType.IsV4() { + // Presign URL with signature v4. + req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) + } + return req, nil + } + + // Set 'User-Agent' header for the request. + c.setUserAgent(req) + + // Set all headers. + for k, v := range metadata.customHeader { + req.Header.Set(k, v[0]) + } + + // Go net/http notoriously closes the request body. + // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors. + // This can cause underlying *os.File seekers to fail, avoid that + // by making sure to wrap the closer as a nop. + if metadata.contentLength == 0 { + req.Body = nil + } else { + req.Body = ioutil.NopCloser(metadata.contentBody) + } + + // Set incoming content-length. + req.ContentLength = metadata.contentLength + if req.ContentLength <= -1 { + // For unknown content length, we upload using transfer-encoding: chunked. + req.TransferEncoding = []string{"chunked"} + } + + // set md5Sum for content protection. + if len(metadata.contentMD5Base64) > 0 { + req.Header.Set("Content-Md5", metadata.contentMD5Base64) + } + + // For anonymous requests just return. + if signerType.IsAnonymous() { + return req, nil + } + + switch { + case signerType.IsV2(): + // Add signature version '2' authorization header. + req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) + case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure: + // Streaming signature is used by default for a PUT object request. Additionally we also + // look if the initialized client is secure, if yes then we don't need to perform + // streaming signature. + req = s3signer.StreamingSignV4(req, accessKeyID, + secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) + default: + // Set sha256 sum for signature calculation only with signature version '4'. + shaHeader := unsignedPayload + if metadata.contentSHA256Hex != "" { + shaHeader = metadata.contentSHA256Hex + } + req.Header.Set("X-Amz-Content-Sha256", shaHeader) + + // Add signature version '4' authorization header. + req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) + } + + // Return request. + return req, nil +} + +// set User agent. +func (c Client) setUserAgent(req *http.Request) { + req.Header.Set("User-Agent", libraryUserAgent) + if c.appInfo.appName != "" && c.appInfo.appVersion != "" { + req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) + } +} + +// makeTargetURL make a new target url. +func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { + host := c.endpointURL.Host + // For Amazon S3 endpoint, try to fetch location based endpoint. + if s3utils.IsAmazonEndpoint(*c.endpointURL) { + if c.s3AccelerateEndpoint != "" && bucketName != "" { + // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + // Disable transfer acceleration for non-compliant bucket names. 
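+			// (Bucket names containing dots are not supported by S3
+			// transfer acceleration, hence the check below.)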
+			if strings.Contains(bucketName, ".") {
+				return nil, ErrTransferAccelerationBucket(bucketName)
+			}
+			// If transfer acceleration is requested set new host.
+			// For more details about enabling transfer acceleration read here.
+			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+			host = c.s3AccelerateEndpoint
+		} else {
+			// Do not change the host if the endpoint URL is a FIPS S3 endpoint.
+			if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) {
+				// Fetch new host based on the bucket location.
+				host = getS3Endpoint(bucketLocation)
+			}
+		}
+	}
+
+	// Save scheme.
+	scheme := c.endpointURL.Scheme
+
+	// Strip ports 80 and 443 so we won't send them in the Host header.
+	// The reason is that browsers and curl automatically remove :80 and
+	// :443 from generated presigned urls, which then causes a signature
+	// mismatch error.
+	if h, p, err := net.SplitHostPort(host); err == nil {
+		if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
+			host = h
+		}
+	}
+
+	urlStr := scheme + "://" + host + "/"
+	// Make URL only if bucketName is available, otherwise use the
+	// endpoint URL.
+	if bucketName != "" {
+		// If endpoint supports virtual host style use that always.
+		// Currently only S3 and Google Cloud Storage would support
+		// virtual host style.
+		if isVirtualHostStyle {
+			urlStr = scheme + "://" + bucketName + "." + host + "/"
+			if objectName != "" {
+				urlStr = urlStr + s3utils.EncodePath(objectName)
+			}
+		} else {
+			// If not, fall back to using path style.
+			urlStr = urlStr + bucketName + "/"
+			if objectName != "" {
+				urlStr = urlStr + s3utils.EncodePath(objectName)
+			}
+		}
+	}
+
+	// If there are any query values, add them to the end.
+	if len(queryValues) > 0 {
+		urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
+	}
+
+	return url.Parse(urlStr)
+}
+
+// returns true if virtual hosted style requests are to be used.
+func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool {
+	if bucketName == "" {
+		return false
+	}
+
+	if c.lookup == BucketLookupDNS {
+		return true
+	}
+	if c.lookup == BucketLookupPath {
+		return false
+	}
+
+	// default to virtual only for Amazon/Google storage. In all other
+	// cases use path style requests.
+	return s3utils.IsVirtualHostSupported(url, bucketName)
+}
diff --git a/vendor/github.com/minio/minio-go/v6/appveyor.yml b/vendor/github.com/minio/minio-go/v6/appveyor.yml
new file mode 100644
index 0000000000..39a33d876d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/appveyor.yml
@@ -0,0 +1,35 @@
+# version format
+version: "{build}"
+
+# Operating system (build VM template)
+os: Windows Server 2012 R2
+
+clone_folder: c:\gopath\src\github.com\minio\minio-go
+
+# environment variables
+environment:
+  GOPATH: c:\gopath
+  GO111MODULE: on
+
+# scripts that run after cloning repository
+install:
+  - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+  - go version
+  - go env
+  - go get golang.org/x/lint/golint
+  - go get honnef.co/go/tools/cmd/staticcheck
+
+# to run your custom scripts instead of automatic MSBuild
+build_script:
+  - go vet ./...
+  - gofmt -s -l .
+  - golint -set_exit_status github.com/minio/minio-go/...
+  - staticcheck
+  - go test -short -v ./...
+  - go test -short -race -v ./...
+ +# to disable automatic tests +test: off + +# to disable deployment +deploy: off diff --git a/vendor/github.com/minio/minio-go/v6/bucket-cache.go b/vendor/github.com/minio/minio-go/v6/bucket-cache.go new file mode 100644 index 0000000000..53a8a9c7fa --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/bucket-cache.go @@ -0,0 +1,230 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "net" + "net/http" + "net/url" + "path" + "sync" + + "github.com/minio/minio-go/v6/pkg/credentials" + "github.com/minio/minio-go/v6/pkg/s3signer" + "github.com/minio/minio-go/v6/pkg/s3utils" +) + +// bucketLocationCache - Provides simple mechanism to hold bucket +// locations in memory. +type bucketLocationCache struct { + // mutex is used for handling the concurrent + // read/write requests for cache. + sync.RWMutex + + // items holds the cached bucket locations. + items map[string]string +} + +// newBucketLocationCache - Provides a new bucket location cache to be +// used internally with the client object. +func newBucketLocationCache() *bucketLocationCache { + return &bucketLocationCache{ + items: make(map[string]string), + } +} + +// Get - Returns a value of a given key if it exists. +func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { + r.RLock() + defer r.RUnlock() + location, ok = r.items[bucketName] + return +} + +// Set - Will persist a value into cache. +func (r *bucketLocationCache) Set(bucketName string, location string) { + r.Lock() + defer r.Unlock() + r.items[bucketName] = location +} + +// Delete - Deletes a bucket name from cache. +func (r *bucketLocationCache) Delete(bucketName string) { + r.Lock() + defer r.Unlock() + delete(r.items, bucketName) +} + +// GetBucketLocation - get location for the bucket name from location cache, if not +// fetch freshly by making a new request. +func (c Client) GetBucketLocation(bucketName string) (string, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + return c.getBucketLocation(bucketName) +} + +// getBucketLocation - Get location for the bucketName from location map cache, if not +// fetch freshly by making a new request. +func (c Client) getBucketLocation(bucketName string) (string, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + + // Region set then no need to fetch bucket location. + if c.region != "" { + return c.region, nil + } + + if location, ok := c.bucketLocCache.Get(bucketName); ok { + return location, nil + } + + // Initialize a new request. + req, err := c.getBucketLocationRequest(bucketName) + if err != nil { + return "", err + } + + // Initiate the request. 
+	resp, err := c.do(req)
+	defer closeResponse(resp)
+	if err != nil {
+		return "", err
+	}
+	location, err := processBucketLocationResponse(resp, bucketName)
+	if err != nil {
+		return "", err
+	}
+	c.bucketLocCache.Set(bucketName, location)
+	return location, nil
+}
+
+// processes the getBucketLocation http response from the server.
+func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) {
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			err = httpRespToErrorResponse(resp, bucketName, "")
+			errResp := ToErrorResponse(err)
+			// For access denied error, it could be an anonymous
+			// request. Move forward and let the top level callers
+			// succeed if possible based on their policy.
+			if errResp.Code == "AccessDenied" {
+				return "us-east-1", nil
+			}
+			return "", err
+		}
+	}
+
+	// Extract location.
+	var locationConstraint string
+	err = xmlDecoder(resp.Body, &locationConstraint)
+	if err != nil {
+		return "", err
+	}
+
+	location := locationConstraint
+	// If location is empty it will be 'us-east-1'.
+	if location == "" {
+		location = "us-east-1"
+	}
+
+	// Location can be 'EU', convert it to the meaningful 'eu-west-1'.
+	if location == "EU" {
+		location = "eu-west-1"
+	}
+
+	// Return the location; the caller is responsible for caching it.
+	return location, nil
+}
+
+// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
+func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) {
+	// Set location query.
+	urlValues := make(url.Values)
+	urlValues.Set("location", "")
+
+	// Set get bucket location always as path style.
+	targetURL := c.endpointURL
+
+	// Strip ports 80 and 443, as done in the makeTargetURL method in api.go.
+	if h, p, err := net.SplitHostPort(targetURL.Host); err == nil {
+		if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" {
+			targetURL.Host = h
+		}
+	}
+
+	targetURL.Path = path.Join(bucketName, "") + "/"
+	targetURL.RawQuery = urlValues.Encode()
+
+	// Get a new HTTP request for the method.
+	req, err := http.NewRequest("GET", targetURL.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set UserAgent for the request.
+	c.setUserAgent(req)
+
+	// Get credentials from the configured credentials provider.
+	value, err := c.credsProvider.Get()
+	if err != nil {
+		return nil, err
+	}
+
+	var (
+		signerType      = value.SignerType
+		accessKeyID     = value.AccessKeyID
+		secretAccessKey = value.SecretAccessKey
+		sessionToken    = value.SessionToken
+	)
+
+	// Custom signer set then override the behavior.
+	if c.overrideSignerType != credentials.SignatureDefault {
+		signerType = c.overrideSignerType
+	}
+
+	// If signerType returned by credentials helper is anonymous,
+	// then do not sign regardless of signerType override.
+	if value.SignerType == credentials.SignatureAnonymous {
+		signerType = credentials.SignatureAnonymous
+	}
+
+	if signerType.IsAnonymous() {
+		return req, nil
+	}
+
+	if signerType.IsV2() {
+		// Get Bucket Location calls should always be path style.
+		isVirtualHost := false
+		req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
+		return req, nil
+	}
+
+	// Set sha256 sum for signature calculation only with signature version '4'.
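+	// (Over TLS the payload hash may be skipped with UNSIGNED-PAYLOAD;
+	// over plain HTTP the SHA-256 of the empty body is sent instead.)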
+	contentSha256 := emptySHA256Hex
+	if c.secure {
+		contentSha256 = unsignedPayload
+	}
+
+	req.Header.Set("X-Amz-Content-Sha256", contentSha256)
+	req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
+	return req, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v6/bucket-notification.go b/vendor/github.com/minio/minio-go/v6/bucket-notification.go
new file mode 100644
index 0000000000..4714eadad5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/bucket-notification.go
@@ -0,0 +1,273 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"encoding/xml"
+
+	"github.com/minio/minio-go/v6/pkg/set"
+)
+
+// NotificationEventType is a S3 notification event associated to the bucket notification configuration
+type NotificationEventType string
+
+// The role of all event types is described in:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
+const (
+	ObjectCreatedAll                     NotificationEventType = "s3:ObjectCreated:*"
+	ObjectCreatedPut                                           = "s3:ObjectCreated:Put"
+	ObjectCreatedPost                                          = "s3:ObjectCreated:Post"
+	ObjectCreatedCopy                                          = "s3:ObjectCreated:Copy"
+	ObjectCreatedCompleteMultipartUpload                       = "s3:ObjectCreated:CompleteMultipartUpload"
+	ObjectAccessedGet                                          = "s3:ObjectAccessed:Get"
+	ObjectAccessedHead                                         = "s3:ObjectAccessed:Head"
+	ObjectAccessedAll                                          = "s3:ObjectAccessed:*"
+	ObjectRemovedAll                                           = "s3:ObjectRemoved:*"
+	ObjectRemovedDelete                                        = "s3:ObjectRemoved:Delete"
+	ObjectRemovedDeleteMarkerCreated                           = "s3:ObjectRemoved:DeleteMarkerCreated"
+	ObjectReducedRedundancyLostObject                          = "s3:ReducedRedundancyLostObject"
+)
+
+// FilterRule - child of S3Key, a tag in the notification xml which
+// carries suffix/prefix filters
+type FilterRule struct {
+	Name  string `xml:"Name"`
+	Value string `xml:"Value"`
+}
+
+// S3Key - child of Filter, a tag in the notification xml which
+// carries suffix/prefix filters
+type S3Key struct {
+	FilterRules []FilterRule `xml:"FilterRule,omitempty"`
+}
+
+// Filter - a tag in the notification xml structure which carries
+// suffix/prefix filters
+type Filter struct {
+	S3Key S3Key `xml:"S3Key,omitempty"`
+}
+
+// Arn - holds ARN information that will be sent to the web service.
+// An ARN description can be found at
+// http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+type Arn struct {
+	Partition string
+	Service   string
+	Region    string
+	AccountID string
+	Resource  string
+}
+
+// NewArn creates a new ARN based on the given partition, service, region, account id and resource
+func NewArn(partition, service, region, accountID, resource string) Arn {
+	return Arn{Partition: partition,
+		Service:   service,
+		Region:    region,
+		AccountID: accountID,
+		Resource:  resource}
+}
+
+// String returns the string format of the ARN
+func (arn Arn) String() string {
+	return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region +
":" + arn.AccountID + ":" + arn.Resource +} + +// NotificationConfig - represents one single notification configuration +// such as topic, queue or lambda configuration. +type NotificationConfig struct { + ID string `xml:"Id,omitempty"` + Arn Arn `xml:"-"` + Events []NotificationEventType `xml:"Event"` + Filter *Filter `xml:"Filter,omitempty"` +} + +// NewNotificationConfig creates one notification config and sets the given ARN +func NewNotificationConfig(arn Arn) NotificationConfig { + return NotificationConfig{Arn: arn, Filter: &Filter{}} +} + +// AddEvents adds one event to the current notification config +func (t *NotificationConfig) AddEvents(events ...NotificationEventType) { + t.Events = append(t.Events, events...) +} + +// AddFilterSuffix sets the suffix configuration to the current notification config +func (t *NotificationConfig) AddFilterSuffix(suffix string) { + if t.Filter == nil { + t.Filter = &Filter{} + } + newFilterRule := FilterRule{Name: "suffix", Value: suffix} + // Replace any suffix rule if existing and add to the list otherwise + for index := range t.Filter.S3Key.FilterRules { + if t.Filter.S3Key.FilterRules[index].Name == "suffix" { + t.Filter.S3Key.FilterRules[index] = newFilterRule + return + } + } + t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) +} + +// AddFilterPrefix sets the prefix configuration to the current notification config +func (t *NotificationConfig) AddFilterPrefix(prefix string) { + if t.Filter == nil { + t.Filter = &Filter{} + } + newFilterRule := FilterRule{Name: "prefix", Value: prefix} + // Replace any prefix rule if existing and add to the list otherwise + for index := range t.Filter.S3Key.FilterRules { + if t.Filter.S3Key.FilterRules[index].Name == "prefix" { + t.Filter.S3Key.FilterRules[index] = newFilterRule + return + } + } + t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) +} + +// TopicConfig carries one single topic notification configuration +type TopicConfig struct { + NotificationConfig + Topic string `xml:"Topic"` +} + +// QueueConfig carries one single queue notification configuration +type QueueConfig struct { + NotificationConfig + Queue string `xml:"Queue"` +} + +// LambdaConfig carries one single cloudfunction notification configuration +type LambdaConfig struct { + NotificationConfig + Lambda string `xml:"CloudFunction"` +} + +// BucketNotification - the struct that represents the whole XML to be sent to the web service +type BucketNotification struct { + XMLName xml.Name `xml:"NotificationConfiguration"` + LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"` + TopicConfigs []TopicConfig `xml:"TopicConfiguration"` + QueueConfigs []QueueConfig `xml:"QueueConfiguration"` +} + +// AddTopic adds a given topic config to the general bucket notification config +func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) bool { + newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()} + for _, n := range b.TopicConfigs { + // If new config matches existing one + if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range topicConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.TopicConfigs = append(b.TopicConfigs, newTopicConfig) + return true +} + 
+// AddQueue adds a given queue config to the general bucket notification config +func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) bool { + newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()} + for _, n := range b.QueueConfigs { + if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range queueConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.QueueConfigs = append(b.QueueConfigs, newQueueConfig) + return true +} + +// AddLambda adds a given lambda config to the general bucket notification config +func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) bool { + newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()} + for _, n := range b.LambdaConfigs { + if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range lambdaConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig) + return true +} + +// RemoveTopicByArn removes all topic configurations that match the exact specified ARN +func (b *BucketNotification) RemoveTopicByArn(arn Arn) { + var topics []TopicConfig + for _, topic := range b.TopicConfigs { + if topic.Topic != arn.String() { + topics = append(topics, topic) + } + } + b.TopicConfigs = topics +} + +// RemoveQueueByArn removes all queue configurations that match the exact specified ARN +func (b *BucketNotification) RemoveQueueByArn(arn Arn) { + var queues []QueueConfig + for _, queue := range b.QueueConfigs { + if queue.Queue != arn.String() { + queues = append(queues, queue) + } + } + b.QueueConfigs = queues +} + +// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN +func (b *BucketNotification) RemoveLambdaByArn(arn Arn) { + var lambdas []LambdaConfig + for _, lambda := range b.LambdaConfigs { + if lambda.Lambda != arn.String() { + lambdas = append(lambdas, lambda) + } + } + b.LambdaConfigs = lambdas +} diff --git a/vendor/github.com/minio/minio-go/v6/constants.go b/vendor/github.com/minio/minio-go/v6/constants.go new file mode 100644 index 0000000000..ac472a631d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/constants.go @@ -0,0 +1,62 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +/// Multipart upload defaults. 
+
+// absMinPartSize - absolute minimum part size (5 MiB) below which
+// a part in a multipart upload may not be uploaded.
+const absMinPartSize = 1024 * 1024 * 5
+
+// minPartSize - minimum part size 128MiB per object after which
+// putObject behaves internally as multipart.
+const minPartSize = 1024 * 1024 * 128
+
+// maxPartsCount - maximum number of parts for a single multipart session.
+const maxPartsCount = 10000
+
+// maxPartSize - maximum part size 5GiB for a single multipart upload
+// operation.
+const maxPartSize = 1024 * 1024 * 1024 * 5
+
+// maxSinglePutObjectSize - maximum size 5GiB of object per PUT
+// operation.
+const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
+
+// maxMultipartPutObjectSize - maximum size 5TiB of object for
+// Multipart operation.
+const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
+
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
+// we don't want to sign the request payload
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// Total number of parallel workers used for multipart operation.
+const totalWorkers = 4
+
+// Signature related constants.
+const (
+	signV4Algorithm   = "AWS4-HMAC-SHA256"
+	iso8601DateFormat = "20060102T150405Z"
+)
+
+// Storage class header constant.
+const amzStorageClass = "X-Amz-Storage-Class"
+
+// Website redirect location header constant
+const amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location"
diff --git a/vendor/github.com/minio/minio-go/v6/core.go b/vendor/github.com/minio/minio-go/v6/core.go
new file mode 100644
index 0000000000..ba574c44ec
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/core.go
@@ -0,0 +1,153 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"io"
+	"strings"
+
+	"github.com/minio/minio-go/v6/pkg/encrypt"
+)
+
+// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
+type Core struct {
+	*Client
+}
+
+// NewCore - Returns a new initialized Core client. It should only be used
+// under special conditions, such as needing access to lower-level primitives
+// to write your own wrappers.
+func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) {
+	var s3Client Core
+	client, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure)
+	if err != nil {
+		return nil, err
+	}
+	s3Client.Client = client
+	return &s3Client, nil
+}
+
+// ListObjects - List all the objects at a prefix, optionally with marker and
+// delimiter, with which you can further filter the results.
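+// A minimal usage sketch (bucket name, prefix and the core variable are
+// placeholders; core would come from NewCore above):
+//
+//	result, err := core.ListObjects("mybucket", "photos/", "", "/", 1000)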
+func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { + return c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys) +} + +// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses +// continuationToken instead of marker to support iteration over the results. +func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) { + return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys, startAfter) +} + +// CopyObject - copies an object from source object to destination object on server side. +func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) { + return c.copyObjectDo(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata) +} + +// CopyObjectPart - creates a part in a multipart upload by copying (a +// part of) an existing object. +func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, + partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) { + + return c.copyObjectPartDo(context.Background(), srcBucket, srcObject, destBucket, destObject, uploadID, + partID, startOffset, length, metadata) +} + +// PutObject - Upload object. Uploads using single PUT call. +func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string, sse encrypt.ServerSide) (ObjectInfo, error) { + opts := PutObjectOptions{} + m := make(map[string]string) + for k, v := range metadata { + if strings.ToLower(k) == "content-encoding" { + opts.ContentEncoding = v + } else if strings.ToLower(k) == "content-disposition" { + opts.ContentDisposition = v + } else if strings.ToLower(k) == "content-language" { + opts.ContentLanguage = v + } else if strings.ToLower(k) == "content-type" { + opts.ContentType = v + } else if strings.ToLower(k) == "cache-control" { + opts.CacheControl = v + } else if strings.EqualFold(k, amzWebsiteRedirectLocation) { + opts.WebsiteRedirectLocation = v + } else { + m[k] = metadata[k] + } + } + opts.UserMetadata = m + opts.ServerSideEncryption = sse + return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts) +} + +// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID. +func (c Core) NewMultipartUpload(bucket, object string, opts PutObjectOptions) (uploadID string, err error) { + result, err := c.initiateMultipartUpload(context.Background(), bucket, object, opts) + return result.UploadID, err +} + +// ListMultipartUploads - List incomplete uploads. +func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) { + return c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads) +} + +// PutObjectPart - Upload an object part. 
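+// A minimal usage sketch (all identifiers are placeholders; the empty
+// md5Base64/sha256Hex arguments skip client-side checksums, and a nil
+// sse disables server-side encryption):
+//
+//	part, err := core.PutObjectPart("mybucket", "myobject", uploadID, 1, reader, size, "", "", nil)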
+func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
+	return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse)
+}
+
+// ListObjectParts - List uploaded parts of an incomplete upload.
+func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {
+	return c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts)
+}
+
+// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
+func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) (string, error) {
+	res, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{
+		Parts: parts,
+	})
+	return res.ETag, err
+}
+
+// AbortMultipartUpload - Abort an incomplete upload.
+func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {
+	return c.abortMultipartUpload(context.Background(), bucket, object, uploadID)
+}
+
+// GetBucketPolicy - fetches bucket access policy for a given bucket.
+func (c Core) GetBucketPolicy(bucket string) (string, error) {
+	return c.getBucketPolicy(bucket)
+}
+
+// PutBucketPolicy - applies a new bucket access policy for a given bucket.
+func (c Core) PutBucketPolicy(bucket, bucketPolicy string) error {
+	return c.putBucketPolicy(bucket, bucketPolicy)
+}
+
+// GetObject is a lower level API implemented to support reading
+// partial objects and also downloading objects with special conditions
+// matching etag, modtime etc.
+func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) {
+	return c.getObject(context.Background(), bucketName, objectName, opts)
+}
+
+// StatObject is a lower level API implemented to support special
+// conditions matching etag, modtime on a request.
+func (c Core) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { + return c.statObject(context.Background(), bucketName, objectName, opts) +} diff --git a/vendor/github.com/minio/minio-go/v6/go.mod b/vendor/github.com/minio/minio-go/v6/go.mod new file mode 100644 index 0000000000..9d342468df --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/go.mod @@ -0,0 +1,16 @@ +module github.com/minio/minio-go/v6 + +go 1.12 + +require ( + github.com/a8m/mark v0.1.1-0.20170507133748-44f2db618845 // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/gernest/wow v0.1.0 // indirect + github.com/minio/cli v1.20.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 + github.com/sirupsen/logrus v1.4.2 // indirect + github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a // indirect + golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f + golang.org/x/net v0.0.0-20190522155817-f3200d17e092 + gopkg.in/ini.v1 v1.42.0 +) diff --git a/vendor/github.com/minio/minio-go/v6/go.sum b/vendor/github.com/minio/minio-go/v6/go.sum new file mode 100644 index 0000000000..62a4f31fac --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/go.sum @@ -0,0 +1,48 @@ +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/a8m/mark v0.1.1-0.20170507133748-44f2db618845 h1:hIjQrEARcc9LcH8igte3JBpWBZ7+SpinU70dOjU/afo= +github.com/a8m/mark v0.1.1-0.20170507133748-44f2db618845/go.mod h1:c8Mh99Cw82nrsAnPgxQSZHkswVOJF7/MqZb1ZdvriLM= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/gernest/wow v0.1.0 h1:g9xdwCwP0+xgVYlA2sopI0gZHqXe7HjI/7/LykG4fks= +github.com/gernest/wow v0.1.0/go.mod h1:dEPabJRi5BneI1Nev1VWo0ZlcTWibHWp43qxKms4elY= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/minio/cli v1.20.0 h1:OVNIt8Rg5+mpYb8siWT2gBV5hvUyFbRvBikC+Ytvf5A= +github.com/minio/cli v1.20.0/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= +github.com/smartystreets/goconvey 
v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/sys v0.0.0-20190116161447-11f53e031339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/minio/minio-go/v6/hook-reader.go b/vendor/github.com/minio/minio-go/v6/hook-reader.go new file mode 100644 index 0000000000..f251c1e95d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/hook-reader.go @@ -0,0 +1,85 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "fmt" + "io" +) + +// hookReader hooks additional reader in the source stream. It is +// useful for making progress bars. 
Second reader is appropriately
+// notified about the exact number of bytes read from the primary
+// source on each Read operation.
+type hookReader struct {
+	source io.Reader
+	hook   io.Reader
+}
+
+// Seek implements io.Seeker. Seeks the source first, and then seeks the
+// hook as well, if it also implements io.Seeker.
+func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
+	// If the source has an embedded Seeker, use it.
+	sourceSeeker, ok := hr.source.(io.Seeker)
+	if ok {
+		n, err = sourceSeeker.Seek(offset, whence)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	// If the hook has an embedded Seeker, use it.
+	hookSeeker, ok := hr.hook.(io.Seeker)
+	if ok {
+		var m int64
+		m, err = hookSeeker.Seek(offset, whence)
+		if err != nil {
+			return 0, err
+		}
+		if n != m {
+			return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n)
+		}
+	}
+	return n, nil
+}
+
+// Read implements io.Reader. Always reads from the source; the number of
+// bytes read, 'n', is reported through the hook. Returns an error for all
+// non io.EOF conditions.
+func (hr *hookReader) Read(b []byte) (n int, err error) {
+	n, err = hr.source.Read(b)
+	if err != nil && err != io.EOF {
+		return n, err
+	}
+	// Progress the hook with the total read bytes from the source.
+	if _, herr := hr.hook.Read(b[:n]); herr != nil {
+		if herr != io.EOF {
+			return n, herr
+		}
+	}
+	return n, err
+}
+
+// newHook returns an io.Reader wrapping a hookReader that
+// reports the data read from the source to the hook.
+func newHook(source, hook io.Reader) io.Reader {
+	if hook == nil {
+		return source
+	}
+	return &hookReader{source, hook}
+}
diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/chain.go
new file mode 100644
index 0000000000..6dc8e9d052
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/pkg/credentials/chain.go
@@ -0,0 +1,89 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Chain will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The Chain provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the
+// Providers in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the no credentials value.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again after IsExpired() is true.
+//
+// creds := credentials.NewChainCredentials(
+//     []credentials.Provider{
+//         &credentials.EnvAWSS3{},
+//         &credentials.EnvMinio{},
+//     })
+//
+// // Usage of ChainCredentials.
+// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") +// if err != nil { +// log.Fatalln(err) +// } +// +type Chain struct { + Providers []Provider + curr Provider +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return New(&Chain{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value, or no credentials (anonymous) +// if no credentials provider returned any value. +// +// If a provider is found with credentials, it will be cached and any calls +// to IsExpired() will return the expired state of the cached provider. +func (c *Chain) Retrieve() (Value, error) { + for _, p := range c.Providers { + creds, _ := p.Retrieve() + // Always prioritize non-anonymous providers, if any. + if creds.AccessKeyID == "" && creds.SecretAccessKey == "" { + continue + } + c.curr = p + return creds, nil + } + // At this point we have exhausted all the providers and + // are left without any credentials; return anonymous. + return Value{ + SignerType: SignatureAnonymous, + }, nil +} + +// IsExpired will return the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *Chain) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/v6/pkg/credentials/config.json.sample new file mode 100644 index 0000000000..0affa58cf5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/credentials/config.json.sample @@ -0,0 +1,17 @@ +{ + "version": "8", + "hosts": { + "play": { + "url": "https://play.min.io:9000", + "accessKey": "Q3AM3UQ867SPQQA43P2F", + "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + "api": "S3v2" + }, + "s3": { + "url": "https://s3.amazonaws.com", + "accessKey": "accessKey", + "secretKey": "secret", + "api": "S3v4" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/credentials.go new file mode 100644 index 0000000000..1a48751b5d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/credentials/credentials.go @@ -0,0 +1,175 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "sync" + "time" +) + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Signature Type. + SignerType SignatureType +} + +// A Provider is the interface for any component which will provide credentials +// Value.
A Provider is required to manage its own expired state, and what it +// means to be expired. +type Provider interface { + // Retrieve returns nil if it successfully retrieved the value. + // An error is returned if the value was not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// An Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type IAMCredentialProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time at which the credentials expire. + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// Credentials - A container for concurrency-safe retrieval of credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronization state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + sync.Mutex + + creds Value + forceRefresh bool + provider Provider +} + +// New returns a pointer to a new Credentials with the provider set. +func New(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or an error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.Lock() + defer c.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve().
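Since Get, Expire, and the Provider interface above form the core contract of this package, here is a minimal, hedged sketch of a call site exercising the cache/refresh cycle (the chain provider and all values are illustrative, not prescribed by this diff):

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	// First Get() always calls the provider's Retrieve().
	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvAWS{},
		&credentials.EnvMinio{},
	})
	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println("signer:", v.SignerType)

	// Expire() forces the next Get() to call Retrieve() again.
	creds.Expire()
	if _, err := creds.Get(); err != nil {
		log.Fatalln(err)
	}
}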
+func (c *Credentials) Expire() { + c.Lock() + defer c.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be refreshed. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.Lock() + defer c.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v6/pkg/credentials/credentials.sample new file mode 100644 index 0000000000..7fc91d9d20 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/credentials/credentials.sample @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/doc.go new file mode 100644 index 0000000000..0c94477b75 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/credentials/doc.go @@ -0,0 +1,62 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package credentials provides credential retrieval and management +// for S3 compatible object storage. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := NewFromEnv() +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewFromIAM("") +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. 
+// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/env_aws.go new file mode 100644 index 0000000000..b6e60d0e16 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/credentials/env_aws.go @@ -0,0 +1,71 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import "os" + +// An EnvAWS retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY. +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY. +// * Secret Token: AWS_SESSION_TOKEN. +type EnvAWS struct { + retrieved bool +} + +// NewEnvAWS returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvAWS() *Credentials { + return New(&EnvAWS{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvAWS) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + signerType := SignatureV4 + if id == "" || secret == "" { + signerType = SignatureAnonymous + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + SignerType: signerType, + }, nil +} + +// IsExpired returns true if the credentials have not been retrieved yet. +func (e *EnvAWS) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/env_minio.go new file mode 100644 index 0000000000..5f1ae0d258 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/credentials/env_minio.go @@ -0,0 +1,62 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package credentials + +import "os" + +// An EnvMinio retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: MINIO_ACCESS_KEY. +// * Secret Access Key: MINIO_SECRET_KEY. +type EnvMinio struct { + retrieved bool +} + +// NewEnvMinio returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvMinio() *Credentials { + return New(&EnvMinio{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvMinio) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("MINIO_ACCESS_KEY") + secret := os.Getenv("MINIO_SECRET_KEY") + + signerType := SignatureV4 + if id == "" || secret == "" { + signerType = SignatureAnonymous + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SignerType: signerType, + }, nil +} + +// IsExpired returns true if the credentials have not been retrieved yet. +func (e *EnvMinio) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/file_aws_credentials.go new file mode 100644 index 0000000000..ff07bc55b9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/credentials/file_aws_credentials.go @@ -0,0 +1,120 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "os" + "path/filepath" + + homedir "github.com/mitchellh/go-homedir" + ini "gopkg.in/ini.v1" +) + +// A FileAWSCredentials retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type FileAWSCredentials struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewFileAWSCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewFileAWSCredentials(filename string, profile string) *Credentials { + return New(&FileAWSCredentials{ + filename: filename, + profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// user's home directory.
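For orientation, a brief hedged sketch of driving this file-based provider; the profile name matches the credentials.sample shown further down, and the empty filename falls back to the env-var/home-directory lookup documented above:

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	// "" => AWS_SHARED_CREDENTIALS_FILE, then ~/.aws/credentials.
	creds := credentials.NewFileAWSCredentials("", "no_token")
	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(v.AccessKeyID, v.SignerType)
}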
+func (p *FileAWSCredentials) Retrieve() (Value, error) { + if p.filename == "" { + p.filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") + if p.filename == "" { + homeDir, err := homedir.Dir() + if err != nil { + return Value{}, err + } + p.filename = filepath.Join(homeDir, ".aws", "credentials") + } + } + if p.profile == "" { + p.profile = os.Getenv("AWS_PROFILE") + if p.profile == "" { + p.profile = "default" + } + } + + p.retrieved = false + + iniProfile, err := loadProfile(p.filename, p.profile) + if err != nil { + return Value{}, err + } + + // Default to empty string if not found. + id := iniProfile.Key("aws_access_key_id") + // Default to empty string if not found. + secret := iniProfile.Key("aws_secret_access_key") + // Default to empty string if not found. + token := iniProfile.Key("aws_session_token") + + p.retrieved = true + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + SignerType: SignatureV4, + }, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *FileAWSCredentials) IsExpired() bool { + return !p.retrieved +} + +// loadProfile loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned, or an error. An error will +// be returned if it fails to read from the file, or if the data is invalid. +func loadProfile(filename, profile string) (*ini.Section, error) { + config, err := ini.Load(filename) + if err != nil { + return nil, err + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return nil, err + } + return iniProfile, nil +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/file_minio_client.go new file mode 100644 index 0000000000..117ceb6efb --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/credentials/file_minio_client.go @@ -0,0 +1,133 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + homedir "github.com/mitchellh/go-homedir" +) + +// A FileMinioClient retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Configuration file example: $HOME/.mc/config.json +type FileMinioClient struct { + // Path to the shared credentials file. + // + // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.mc/config.json" + // Windows: "%USERALIAS%\mc\config.json" + filename string + + // MinIO Alias to extract credentials from the shared credentials file. If empty + // will default to environment variable "MINIO_ALIAS" or "s3" if the + // environment variable is also not set.
+ alias string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewFileMinioClient returns a pointer to a new Credentials object +// wrapping the Alias file provider. +func NewFileMinioClient(filename string, alias string) *Credentials { + return New(&FileMinioClient{ + filename: filename, + alias: alias, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// user's home directory. +func (p *FileMinioClient) Retrieve() (Value, error) { + if p.filename == "" { + if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok { + p.filename = value + } else { + homeDir, err := homedir.Dir() + if err != nil { + return Value{}, err + } + p.filename = filepath.Join(homeDir, ".mc", "config.json") + if runtime.GOOS == "windows" { + p.filename = filepath.Join(homeDir, "mc", "config.json") + } + } + } + + if p.alias == "" { + p.alias = os.Getenv("MINIO_ALIAS") + if p.alias == "" { + p.alias = "s3" + } + } + + p.retrieved = false + + hostCfg, err := loadAlias(p.filename, p.alias) + if err != nil { + return Value{}, err + } + + p.retrieved = true + return Value{ + AccessKeyID: hostCfg.AccessKey, + SecretAccessKey: hostCfg.SecretKey, + SignerType: parseSignatureType(hostCfg.API), + }, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *FileMinioClient) IsExpired() bool { + return !p.retrieved +} + +// hostConfig is the configuration of a host. +type hostConfig struct { + URL string `json:"url"` + AccessKey string `json:"accessKey"` + SecretKey string `json:"secretKey"` + API string `json:"api"` +} + +// config holds the config file version and host entries. +type config struct { + Version string `json:"version"` + Hosts map[string]hostConfig `json:"hosts"` +} + +// loadAlias loads from the file pointed to by shared credentials filename for alias. +// The credentials retrieved from the alias will be returned, or an error. An error will +// be returned if it fails to read from the file. +func loadAlias(filename, alias string) (hostConfig, error) { + cfg := &config{} + configBytes, err := ioutil.ReadFile(filename) + if err != nil { + return hostConfig{}, err + } + if err = json.Unmarshal(configBytes, cfg); err != nil { + return hostConfig{}, err + } + return cfg.Hosts[alias], nil +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/iam_aws.go new file mode 100644 index 0000000000..310785209d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/credentials/iam_aws.go @@ -0,0 +1,250 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path" + "time" +) + +// DefaultExpiryWindow - Default expiry window. +// ExpiryWindow will allow the credentials to trigger refreshing +// prior to the credentials actually expiring.
This is beneficial +// so race conditions with expiring credentials do not cause +// requests to fail unexpectedly due to ExpiredTokenException exceptions. +const DefaultExpiryWindow = time.Second * 10 // 10 secs + +// An IAM retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +type IAM struct { + Expiry + + // Required http Client to use when connecting to IAM metadata service. + Client *http.Client + + // Custom endpoint to fetch IAM role credentials. + endpoint string +} + +// IAM Roles for Amazon EC2 +// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +const ( + defaultIAMRoleEndpoint = "http://169.254.169.254" + defaultECSRoleEndpoint = "http://169.254.170.2" + defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials" +) + +// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html +func getEndpoint(endpoint string) (string, bool) { + if endpoint != "" { + return endpoint, os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") != "" + } + if ecsURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); ecsURI != "" { + return fmt.Sprintf("%s%s", defaultECSRoleEndpoint, ecsURI), true + } + return defaultIAMRoleEndpoint, false +} + +// NewIAM returns a pointer to a new Credentials object wrapping the IAM. +func NewIAM(endpoint string) *Credentials { + p := &IAM{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + endpoint: endpoint, + } + return New(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// An error will be returned if the request fails, or if the desired +// credentials cannot be extracted from the response. +func (m *IAM) Retrieve() (Value, error) { + endpoint, isEcsTask := getEndpoint(m.endpoint) + var roleCreds ec2RoleCredRespBody + var err error + if isEcsTask { + roleCreds, err = getEcsTaskCredentials(m.Client, endpoint) + } else { + roleCreds, err = getCredentials(m.Client, endpoint) + } + if err != nil { + return Value{}, err + } + // Expiry window is set to 10secs. + m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + SignerType: SignatureV4, + }, nil +} + +// An ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string + + // Unused params. + LastUpdated time.Time + Type string +} + +// Get the final IAM role URL where the request will +// be sent to fetch the rolling access credentials. +// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +func getIAMRoleURL(endpoint string) (*url.URL, error) { + u, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + u.Path = defaultIAMSecurityCredsPath + return u, nil +} + +// listRoleNames lists the credential role names associated +// with the current EC2 service, returning an error if the +// request cannot be made or its response cannot be read.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +func listRoleNames(client *http.Client, u *url.URL) ([]string, error) { + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, errors.New(resp.Status) + } + + credsList := []string{} + s := bufio.NewScanner(resp.Body) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, err + } + + return credsList, nil +} + +func getEcsTaskCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { + req, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + resp, err := client.Do(req) + if err != nil { + return ec2RoleCredRespBody{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return ec2RoleCredRespBody{}, errors.New(resp.Status) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, err + } + + return respCreds, nil +} + +// getCredentials - obtains the credentials from the IAM role name associated with +// the current EC2 service. +// +// If the credentials cannot be found, or there is an error +// reading the response an error will be returned. +func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + u, err := getIAMRoleURL(endpoint) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + roleNames, err := listRoleNames(client, u) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + if len(roleNames) == 0 { + return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service") + } + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + // - An instance profile can contain only one IAM role. This limit cannot be increased. + roleName := roleNames[0] + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + // The following command retrieves the security credentials for an + // IAM role named `s3access`. + // + // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access + // + u.Path = path.Join(u.Path, roleName) + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + resp, err := client.Do(req) + if err != nil { + return ec2RoleCredRespBody{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return ec2RoleCredRespBody{}, errors.New(resp.Status) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, err + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. 
+ return ec2RoleCredRespBody{}, errors.New(respCreds.Message) + } + + return respCreds, nil +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/signature-type.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/signature-type.go new file mode 100644 index 0000000000..b794333056 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/credentials/signature-type.go @@ -0,0 +1,77 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import "strings" + +// SignatureType is type of Authorization requested for a given HTTP request. +type SignatureType int + +// Different types of supported signatures - default is SignatureV4 or SignatureDefault. +const ( + // SignatureDefault is always set to v4. + SignatureDefault SignatureType = iota + SignatureV4 + SignatureV2 + SignatureV4Streaming + SignatureAnonymous // Anonymous signature signifies, no signature. +) + +// IsV2 - is signature SignatureV2? +func (s SignatureType) IsV2() bool { + return s == SignatureV2 +} + +// IsV4 - is signature SignatureV4? +func (s SignatureType) IsV4() bool { + return s == SignatureV4 || s == SignatureDefault +} + +// IsStreamingV4 - is signature SignatureV4Streaming? +func (s SignatureType) IsStreamingV4() bool { + return s == SignatureV4Streaming +} + +// IsAnonymous - is signature empty? +func (s SignatureType) IsAnonymous() bool { + return s == SignatureAnonymous +} + +// Stringer humanized version of signature type, +// strings returned here are case insensitive. +func (s SignatureType) String() string { + if s.IsV2() { + return "S3v2" + } else if s.IsV4() { + return "S3v4" + } else if s.IsStreamingV4() { + return "S3v4Streaming" + } + return "Anonymous" +} + +func parseSignatureType(str string) SignatureType { + if strings.EqualFold(str, "S3v4") { + return SignatureV4 + } else if strings.EqualFold(str, "S3v2") { + return SignatureV2 + } else if strings.EqualFold(str, "S3v4Streaming") { + return SignatureV4Streaming + } + return SignatureAnonymous +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/static.go new file mode 100644 index 0000000000..7dde00b0a1 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/credentials/static.go @@ -0,0 +1,67 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +// A Static is a set of credentials which are set programmatically, +// and will never expire. +type Static struct { + Value +} + +// NewStaticV2 returns a pointer to a new Credentials object +// wrapping a static credentials value provider, with the signature +// set to v2. If access and secret are not specified, then regardless +// of the signature type set, Value will be returned as anonymous. +func NewStaticV2(id, secret, token string) *Credentials { + return NewStatic(id, secret, token, SignatureV2) +} + +// NewStaticV4 is similar to NewStaticV2 with similar considerations. +func NewStaticV4(id, secret, token string) *Credentials { + return NewStatic(id, secret, token, SignatureV4) +} + +// NewStatic returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStatic(id, secret, token string, signerType SignatureType) *Credentials { + return New(&Static{ + Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + SignerType: signerType, + }, + }) +} + +// Retrieve returns the static credentials. +func (s *Static) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + // Anonymous is not an error + return Value{SignerType: SignatureAnonymous}, nil + } + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For Static, the credentials never expire. +func (s *Static) IsExpired() bool { + return false +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_client_grants.go new file mode 100644 index 0000000000..03134c3d2c --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_client_grants.go @@ -0,0 +1,162 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "encoding/xml" + "errors" + "fmt" + "net/http" + "net/url" + "time" +) + +// AssumedRoleUser - The identifiers for the temporary security credentials that +// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser +type AssumedRoleUser struct { + Arn string + AssumedRoleID string `xml:"AssumeRoleId"` +} + +// AssumeRoleWithClientGrantsResponse contains the result of a successful AssumeRoleWithClientGrants request.
+type AssumeRoleWithClientGrantsResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"` + Result ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants +// request, including temporary credentials that can be used to make MinIO API requests. +type ClientGrantsResult struct { + AssumedRoleUser AssumedRoleUser `xml:",omitempty"` + Audience string `xml:",omitempty"` + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:",omitempty"` + PackedPolicySize int `xml:",omitempty"` + Provider string `xml:",omitempty"` + SubjectFromClientGrantsToken string `xml:",omitempty"` +} + +// ClientGrantsToken - client grants token with expiry. +type ClientGrantsToken struct { + Token string + Expiry int +} + +// An STSClientGrants retrieves credentials from the MinIO STS service, and +// keeps track if those credentials are expired. +type STSClientGrants struct { + Expiry + + // Required http Client to use when connecting to MinIO STS service. + Client *http.Client + + // MinIO endpoint to fetch STS credentials. + stsEndpoint string + + // getClientGrantsTokenExpiry is a function to retrieve tokens + // from the IDP. It should return two values: an accessToken, + // which is a self-contained access token (JWT), and the expiry + // associated with this token. This is a customer-provided + // function and is mandatory. + getClientGrantsTokenExpiry func() (*ClientGrantsToken, error) +} + +// NewSTSClientGrants returns a pointer to a new +// Credentials object wrapping the STSClientGrants.
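Since the token callback is the one mandatory moving part here, a hedged sketch of wiring it up may help; the endpoint and token values below are hypothetical placeholders, and a real callback would fetch a JWT from your IDP:

package main

import (
	"log"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	// Stub callback: returns a placeholder JWT and a one-hour expiry.
	getToken := func() (*credentials.ClientGrantsToken, error) {
		return &credentials.ClientGrantsToken{Token: "<jwt>", Expiry: 3600}, nil
	}
	creds, err := credentials.NewSTSClientGrants("https://minio.example.com:9000", getToken)
	if err != nil {
		log.Fatalln(err)
	}
	// Get() performs the AssumeRoleWithClientGrants exchange.
	if _, err := creds.Get(); err != nil {
		log.Fatalln(err)
	}
}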
+func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) { + if stsEndpoint == "" { + return nil, errors.New("STS endpoint cannot be empty") + } + if getClientGrantsTokenExpiry == nil { + return nil, errors.New("Client grants access token and expiry retrieval function should be defined") + } + return New(&STSClientGrants{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + stsEndpoint: stsEndpoint, + getClientGrantsTokenExpiry: getClientGrantsTokenExpiry, + }), nil +} + +func getClientGrantsCredentials(clnt *http.Client, endpoint string, + getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (AssumeRoleWithClientGrantsResponse, error) { + + accessToken, err := getClientGrantsTokenExpiry() + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithClientGrants") + v.Set("Token", accessToken.Token) + v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry)) + v.Set("Version", "2011-06-15") + + u, err := url.Parse(endpoint) + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + u.RawQuery = v.Encode() + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + resp, err := clnt.Do(req) + if err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return AssumeRoleWithClientGrantsResponse{}, errors.New(resp.Status) + } + + a := AssumeRoleWithClientGrantsResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + return a, nil +} + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSClientGrants) Retrieve() (Value, error) { + a, err := getClientGrantsCredentials(m.Client, m.stsEndpoint, m.getClientGrantsTokenExpiry) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + SignerType: SignatureV4, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_web_identity.go new file mode 100644 index 0000000000..4d53bd2367 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/credentials/sts_web_identity.go @@ -0,0 +1,158 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package credentials + +import ( + "encoding/xml" + "errors" + "fmt" + "net/http" + "net/url" + "time" +) + +// AssumeRoleWithWebIdentityResponse contains the result of a successful AssumeRoleWithWebIdentity request. +type AssumeRoleWithWebIdentityResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"` + Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity +// request, including temporary credentials that can be used to make MinIO API requests. +type WebIdentityResult struct { + AssumedRoleUser AssumedRoleUser `xml:",omitempty"` + Audience string `xml:",omitempty"` + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:",omitempty"` + PackedPolicySize int `xml:",omitempty"` + Provider string `xml:",omitempty"` + SubjectFromWebIdentityToken string `xml:",omitempty"` +} + +// WebIdentityToken - web identity token with expiry. +type WebIdentityToken struct { + Token string + Expiry int +} + +// An STSWebIdentity retrieves credentials from the MinIO STS service, and +// keeps track if those credentials are expired. +type STSWebIdentity struct { + Expiry + + // Required http Client to use when connecting to MinIO STS service. + Client *http.Client + + // MinIO endpoint to fetch STS credentials. + stsEndpoint string + + // getWebIDTokenExpiry is a function which returns ID tokens + // from the IDP. It should return two values: an ID token, + // which is a self-contained ID token (JWT), and the expiry + // associated with this token. + // This is a customer-provided function and is mandatory. + getWebIDTokenExpiry func() (*WebIdentityToken, error) +} + +// NewSTSWebIdentity returns a pointer to a new +// Credentials object wrapping the STSWebIdentity.
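The web-identity flow mirrors the client-grants one above; a short hedged sketch follows, with a hypothetical endpoint and a stubbed IDP callback:

package main

import (
	"log"

	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	// Stub callback: a real one would return an OIDC ID token from your IDP.
	getIDToken := func() (*credentials.WebIdentityToken, error) {
		return &credentials.WebIdentityToken{Token: "<oidc-id-token>", Expiry: 3600}, nil
	}
	creds, err := credentials.NewSTSWebIdentity("https://minio.example.com:9000", getIDToken)
	if err != nil {
		log.Fatalln(err)
	}
	// Get() performs the AssumeRoleWithWebIdentity exchange.
	if _, err := creds.Get(); err != nil {
		log.Fatalln(err)
	}
}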
+func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) { + if stsEndpoint == "" { + return nil, errors.New("STS endpoint cannot be empty") + } + if getWebIDTokenExpiry == nil { + return nil, errors.New("Web ID token and expiry retrieval function should be defined") + } + return New(&STSWebIdentity{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + stsEndpoint: stsEndpoint, + getWebIDTokenExpiry: getWebIDTokenExpiry, + }), nil +} + +func getWebIdentityCredentials(clnt *http.Client, endpoint string, + getWebIDTokenExpiry func() (*WebIdentityToken, error)) (AssumeRoleWithWebIdentityResponse, error) { + idToken, err := getWebIDTokenExpiry() + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithWebIdentity") + v.Set("WebIdentityToken", idToken.Token) + v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry)) + v.Set("Version", "2011-06-15") + + u, err := url.Parse(endpoint) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + u.RawQuery = v.Encode() + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + resp, err := clnt.Do(req) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return AssumeRoleWithWebIdentityResponse{}, errors.New(resp.Status) + } + + a := AssumeRoleWithWebIdentityResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + return a, nil +} + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSWebIdentity) Retrieve() (Value, error) { + a, err := getWebIdentityCredentials(m.Client, m.stsEndpoint, m.getWebIDTokenExpiry) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + SignerType: SignatureV4, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v6/pkg/encrypt/server-side.go new file mode 100644 index 0000000000..ac0b69a02a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/encrypt/server-side.go @@ -0,0 +1,195 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +import ( + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "net/http" + + "golang.org/x/crypto/argon2" +) + +const ( + // sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS. 
+ sseGenericHeader = "X-Amz-Server-Side-Encryption" + + // sseKmsKeyID is the AWS SSE-KMS key id. + sseKmsKeyID = sseGenericHeader + "-Aws-Kms-Key-Id" + // sseEncryptionContext is the AWS SSE-KMS Encryption Context data. + sseEncryptionContext = sseGenericHeader + "-Encryption-Context" + + // sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key. + sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm" + // sseCustomerKey is the AWS SSE-C encryption key HTTP header key. + sseCustomerKey = sseGenericHeader + "-Customer-Key" + // sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key. + sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5" + + // sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API. + sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" + // sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API. + sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" + // sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API. + sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5" +) + +// PBKDF creates a SSE-C key from the provided password and salt. +// PBKDF is a password-based key derivation function +// which can be used to derive a high-entropy cryptographic +// key from a low-entropy password and a salt. +type PBKDF func(password, salt []byte) ServerSide + +// DefaultPBKDF is the default PBKDF. It uses Argon2id with the +// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads). +var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { + sse := ssec{} + copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32)) + return sse +} + +// Type is the server-side-encryption method. It represents one of +// the following encryption methods: +// - SSE-C: server-side-encryption with customer provided keys +// - KMS: server-side-encryption with managed keys +// - S3: server-side-encryption using S3 storage encryption +type Type string + +const ( + // SSEC represents server-side-encryption with customer provided keys + SSEC Type = "SSE-C" + // KMS represents server-side-encryption with managed keys + KMS Type = "KMS" + // S3 represents server-side-encryption using S3 storage encryption + S3 Type = "S3" +) + +// ServerSide is a form of S3 server-side-encryption. +type ServerSide interface { + // Type returns the server-side-encryption method. + Type() Type + + // Marshal adds encryption headers to the provided HTTP headers. + // It marks an HTTP request as server-side-encryption request + // and inserts the required data into the headers. + Marshal(h http.Header) +} + +// NewSSE returns a server-side-encryption using S3 storage encryption. +// Using SSE-S3 the server will encrypt the object with server-managed keys. +func NewSSE() ServerSide { return s3{} } + +// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context. +func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) { + if context == nil { + return kms{key: keyID, hasContext: false}, nil + } + serializedContext, err := json.Marshal(context) + if err != nil { + return nil, err + } + return kms{key: keyID, context: serializedContext, hasContext: true}, nil +} + +// NewSSEC returns a new server-side-encryption using SSE-C and the provided key. +// The key must be 32 bytes long. 
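The PBKDF and SSE-C pieces above compose naturally; here is a small hedged sketch (password and salt are placeholders) showing the headers an SSE-C request would carry after Marshal:

package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio-go/v6/pkg/encrypt"
)

func main() {
	// Derive a 256-bit SSE-C key from a password and salt using the
	// Argon2id-based DefaultPBKDF defined above.
	sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte("object-name-as-salt"))

	// Marshal inserts the SSE-C headers a request would carry.
	h := make(http.Header)
	sse.Marshal(h)
	fmt.Println(h.Get("X-Amz-Server-Side-Encryption-Customer-Algorithm")) // AES256
}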
+func NewSSEC(key []byte) (ServerSide, error) { + if len(key) != 32 { + return nil, errors.New("encrypt: SSE-C key must be 256 bit long") + } + sse := ssec{} + copy(sse[:], key) + return sse, nil +} + +// SSE transforms an SSE-C copy encryption into an SSE-C encryption. +// It is the inverse of SSECopy(...). +// +// If the provided sse is not an SSE-C copy encryption, SSE returns +// sse unmodified. +func SSE(sse ServerSide) ServerSide { + if sse == nil || sse.Type() != SSEC { + return sse + } + if sse, ok := sse.(ssecCopy); ok { + return ssec(sse) + } + return sse +} + +// SSECopy transforms an SSE-C encryption into an SSE-C copy +// encryption. This is required for SSE-C key rotation or an SSE-C +// copy where the source and the destination should be encrypted. +// +// If the provided sse is not an SSE-C encryption, SSECopy returns +// sse unmodified. +func SSECopy(sse ServerSide) ServerSide { + if sse == nil || sse.Type() != SSEC { + return sse + } + if sse, ok := sse.(ssec); ok { + return ssecCopy(sse) + } + return sse +} + +type ssec [32]byte + +func (s ssec) Type() Type { return SSEC } + +func (s ssec) Marshal(h http.Header) { + keyMD5 := md5.Sum(s[:]) + h.Set(sseCustomerAlgorithm, "AES256") + h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:])) + h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) +} + +type ssecCopy [32]byte + +func (s ssecCopy) Type() Type { return SSEC } + +func (s ssecCopy) Marshal(h http.Header) { + keyMD5 := md5.Sum(s[:]) + h.Set(sseCopyCustomerAlgorithm, "AES256") + h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:])) + h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) +} + +type s3 struct{} + +func (s s3) Type() Type { return S3 } + +func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") } + +type kms struct { + key string + context []byte + hasContext bool +} + +func (s kms) Type() Type { return KMS } + +func (s kms) Marshal(h http.Header) { + h.Set(sseGenericHeader, "aws:kms") + h.Set(sseKmsKeyID, s.key) + if s.hasContext { + h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context)) + } +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-streaming.go new file mode 100644 index 0000000000..c82e6bd372 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-streaming.go @@ -0,0 +1,306 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package s3signer + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" + "time" +) + +// Reference for constants used below - +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming +const ( + streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + payloadChunkSize = 64 * 1024 + chunkSigConstLen = 17 // ";chunk-signature=" + signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" + crlfLen = 2 // CRLF +) + +// Request headers to be ignored while calculating seed signature for +// a request. +var ignoredStreamingHeaders = map[string]bool{ + "Authorization": true, + "User-Agent": true, + "Content-Type": true, +} + +// getSignedChunkLength - calculates the length of chunk metadata +func getSignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + chunkSigConstLen + + signatureStrLen + + crlfLen + + chunkDataSize + + crlfLen +} + +// getStreamLength - calculates the length of the overall stream (data + metadata) +func getStreamLength(dataLen, chunkSize int64) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getSignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getSignedChunkLength(remainingBytes) + } + streamLen += getSignedChunkLength(0) + return streamLen +} + +// buildChunkStringToSign - returns the string to sign given chunk data +// and previous signature. +func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { + stringToSignParts := []string{ + streamingPayloadHdr, + t.Format(iso8601DateFormat), + getScope(region, t), + previousSig, + emptySHA256, + hex.EncodeToString(sum256(chunkData)), + } + + return strings.Join(stringToSignParts, "\n") +} + +// prepareStreamingRequest - prepares a request with appropriate +// headers before computing the seed signature. +func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { + // Set x-amz-content-sha256 header. + req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + // Set content length with streaming signature for each chunk included. + req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) +} + +// buildChunkHeader - returns the chunk header. +// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n +func buildChunkHeader(chunkLen int64, signature string) []byte { + return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n") +} + +// buildChunkSignature - returns chunk signature for a given chunk and previous signature. 
+func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
+	previousSignature, secretAccessKey string) string {
+
+	chunkStringToSign := buildChunkStringToSign(reqTime, region,
+		previousSignature, chunkData)
+	signingKey := getSigningKey(secretAccessKey, region, reqTime)
+	return getSignature(signingKey, chunkStringToSign)
+}
+
+// setSeedSignature - computes and stores the seed signature for a given request.
+func (s *StreamingReader) setSeedSignature(req *http.Request) {
+	// Get canonical request
+	canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders)
+
+	// Get string to sign from canonical request.
+	stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest)
+
+	signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime)
+
+	// Calculate signature.
+	s.seedSignature = getSignature(signingKey, stringToSign)
+}
+
+// StreamingReader implements chunked upload signing as a reader on
+// top of req.Body's io.ReadCloser; the stream it produces is a repeating
+// sequence of chunk header, chunk data, and so on.
+type StreamingReader struct {
+	accessKeyID     string
+	secretAccessKey string
+	sessionToken    string
+	region          string
+	prevSignature   string
+	seedSignature   string
+	contentLen      int64         // Content-Length from req header
+	baseReadCloser  io.ReadCloser // underlying io.Reader
+	bytesRead       int64         // bytes read from underlying io.Reader
+	buf             bytes.Buffer  // holds signed chunk
+	chunkBuf        []byte        // holds raw data read from req Body
+	chunkBufLen     int           // no. of bytes read so far into chunkBuf
+	done            bool          // done reading the underlying reader to EOF
+	reqTime         time.Time
+	chunkNum        int
+	totalChunks     int
+	lastChunkSize   int
+}
+
+// signChunk - signs a chunk read from s.baseReader of chunkLen size.
+func (s *StreamingReader) signChunk(chunkLen int) {
+	// Compute chunk signature for next header
+	signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
+		s.region, s.prevSignature, s.secretAccessKey)
+
+	// For next chunk signature computation
+	s.prevSignature = signature
+
+	// Write chunk header into streaming buffer
+	chunkHdr := buildChunkHeader(int64(chunkLen), signature)
+	s.buf.Write(chunkHdr)
+
+	// Write chunk data into streaming buffer
+	s.buf.Write(s.chunkBuf[:chunkLen])
+
+	// Write the chunk trailer.
+	s.buf.Write([]byte("\r\n"))
+
+	// Reset chunkBufLen for next chunk read.
+	s.chunkBufLen = 0
+	s.chunkNum++
+}
+
+// setStreamingAuthHeader - builds and sets authorization header value
+// for streaming signature.
+func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
+	credential := GetCredential(s.accessKeyID, s.region, s.reqTime)
+	authParts := []string{
+		signV4Algorithm + " Credential=" + credential,
+		"SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders),
+		"Signature=" + s.seedSignature,
+	}
+
+	// Set authorization header.
+	auth := strings.Join(authParts, ",")
+	req.Header.Set("Authorization", auth)
+}
+
+// StreamingSignV4 - provides chunked upload signatureV4 support by
+// implementing io.Reader.
+func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
+	region string, dataLen int64, reqTime time.Time) *http.Request {
+
+	// Set headers needed for streaming signature.
+ prepareStreamingRequest(req, sessionToken, dataLen, reqTime) + + if req.Body == nil { + req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) + } + + stReader := &StreamingReader{ + baseReadCloser: req.Body, + accessKeyID: accessKeyID, + secretAccessKey: secretAccessKey, + sessionToken: sessionToken, + region: region, + reqTime: reqTime, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + } + + // Add the request headers required for chunk upload signing. + + // Compute the seed signature. + stReader.setSeedSignature(req) + + // Set the authorization header with the seed signature. + stReader.setStreamingAuthHeader(req) + + // Set seed signature as prevSignature for subsequent + // streaming signing process. + stReader.prevSignature = stReader.seedSignature + req.Body = stReader + + return req +} + +// Read - this method performs chunk upload signature providing a +// io.Reader interface. +func (s *StreamingReader) Read(buf []byte) (int, error) { + switch { + // After the last chunk is read from underlying reader, we + // never re-fill s.buf. + case s.done: + + // s.buf will be (re-)filled with next chunk when has lesser + // bytes than asked for. + case s.buf.Len() < len(buf): + s.chunkBufLen = 0 + for { + n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) + // Usually we validate `err` first, but in this case + // we are validating n > 0 for the following reasons. + // + // 1. n > 0, err is one of io.EOF, nil (near end of stream) + // A Reader returning a non-zero number of bytes at the end + // of the input stream may return either err == EOF or err == nil + // + // 2. n == 0, err is io.EOF (actual end of stream) + // + // Callers should always process the n > 0 bytes returned + // before considering the error err. + if n1 > 0 { + s.chunkBufLen += n1 + s.bytesRead += int64(n1) + + if s.chunkBufLen == payloadChunkSize || + (s.chunkNum == s.totalChunks-1 && + s.chunkBufLen == s.lastChunkSize) { + // Sign the chunk and write it to s.buf. + s.signChunk(s.chunkBufLen) + break + } + } + if err != nil { + if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. + if s.bytesRead != s.contentLen { + return 0, io.ErrUnexpectedEOF + } + + // Sign the chunk and write it to s.buf. + s.signChunk(0) + break + } + return 0, err + } + + } + } + return s.buf.Read(buf) +} + +// Close - this method makes underlying io.ReadCloser's Close method available. +func (s *StreamingReader) Close() error { + return s.baseReadCloser.Close() +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-v2.go new file mode 100644 index 0000000000..40ba07130a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-v2.go @@ -0,0 +1,316 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/sha1"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/minio/minio-go/v6/pkg/s3utils"
+)
+
+// Signature and API related constants.
+const (
+	signV2Algorithm = "AWS"
+)
+
+// Encode input URL path to URL encoded path.
+func encodeURL2Path(req *http.Request, virtualHost bool) (path string) {
+	if virtualHost {
+		reqHost := getHostAddr(req)
+		dotPos := strings.Index(reqHost, ".")
+		if dotPos > -1 {
+			bucketName := reqHost[:dotPos]
+			path = "/" + bucketName
+			path += req.URL.Path
+			path = s3utils.EncodePath(path)
+			return
+		}
+	}
+	path = s3utils.EncodePath(req.URL.Path)
+	return
+}
+
+// PreSignV2 - presign the request in following style.
+// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
+func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request {
+	// Presign is not needed for anonymous credentials.
+	if accessKeyID == "" || secretAccessKey == "" {
+		return &req
+	}
+
+	d := time.Now().UTC()
+	// Find epoch expires when the request will expire.
+	epochExpires := d.Unix() + expires
+
+	// Add expires header if not present.
+	if expiresStr := req.Header.Get("Expires"); expiresStr == "" {
+		req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10))
+	}
+
+	// Get presigned string to sign.
+	stringToSign := preStringToSignV2(req, virtualHost)
+	hm := hmac.New(sha1.New, []byte(secretAccessKey))
+	hm.Write([]byte(stringToSign))
+
+	// Calculate signature.
+	signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+
+	query := req.URL.Query()
+	// Handle specially for Google Cloud Storage.
+	if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") {
+		query.Set("GoogleAccessId", accessKeyID)
+	} else {
+		query.Set("AWSAccessKeyId", accessKeyID)
+	}
+
+	// Fill in Expires for presigned query.
+	query.Set("Expires", strconv.FormatInt(epochExpires, 10))
+
+	// Encode query and save.
+	req.URL.RawQuery = s3utils.QueryEncode(query)
+
+	// Save signature finally.
+	req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
+
+	// Return.
+	return &req
+}
+
+// PostPresignSignatureV2 - presigned signature for PostPolicy
+// request.
+func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
+	hm := hmac.New(sha1.New, []byte(secretAccessKey))
+	hm.Write([]byte(policyBase64))
+	signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+	return signature
+}
+
+// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
+// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
+//
+// StringToSign = HTTP-Verb + "\n" +
+//	Content-Md5 + "\n" +
+//	Content-Type + "\n" +
+//	Date + "\n" +
+//	CanonicalizedProtocolHeaders +
+//	CanonicalizedResource;
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+//	<HTTP-Request-URI, from the protocol name up to the query string> +
+//	[ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+//
+// CanonicalizedProtocolHeaders = <described below>
+
+// SignV2 sign the request before Do() (AWS Signature Version 2).
+func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request {
+	// Signature calculation is not needed for anonymous credentials.
+	if accessKeyID == "" || secretAccessKey == "" {
+		return &req
+	}
+
+	// Initial time.
+	d := time.Now().UTC()
+
+	// Add date if not present.
+	if date := req.Header.Get("Date"); date == "" {
+		req.Header.Set("Date", d.Format(http.TimeFormat))
+	}
+
+	// Calculate HMAC for secretAccessKey.
+	stringToSign := stringToSignV2(req, virtualHost)
+	hm := hmac.New(sha1.New, []byte(secretAccessKey))
+	hm.Write([]byte(stringToSign))
+
+	// Prepare auth header.
+	authHeader := new(bytes.Buffer)
+	authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
+	encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
+	encoder.Write(hm.Sum(nil))
+	encoder.Close()
+
+	// Set Authorization header.
+	req.Header.Set("Authorization", authHeader.String())
+
+	return &req
+}
+
+// From the Amazon docs:
+//
+// StringToSign = HTTP-Verb + "\n" +
+//	Content-Md5 + "\n" +
+//	Content-Type + "\n" +
+//	Expires + "\n" +
+//	CanonicalizedProtocolHeaders +
+//	CanonicalizedResource;
+func preStringToSignV2(req http.Request, virtualHost bool) string {
+	buf := new(bytes.Buffer)
+	// Write standard headers.
+	writePreSignV2Headers(buf, req)
+	// Write canonicalized protocol headers if any.
+	writeCanonicalizedHeaders(buf, req)
+	// Write canonicalized Query resources if any.
+	writeCanonicalizedResource(buf, req, virtualHost)
+	return buf.String()
+}
+
+// writePreSignV2Headers - write preSign v2 required headers.
+func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
+	buf.WriteString(req.Method + "\n")
+	buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+	buf.WriteString(req.Header.Get("Content-Type") + "\n")
+	buf.WriteString(req.Header.Get("Expires") + "\n")
+}
+
+// From the Amazon docs:
+//
+// StringToSign = HTTP-Verb + "\n" +
+//	Content-Md5 + "\n" +
+//	Content-Type + "\n" +
+//	Date + "\n" +
+//	CanonicalizedProtocolHeaders +
+//	CanonicalizedResource;
+func stringToSignV2(req http.Request, virtualHost bool) string {
+	buf := new(bytes.Buffer)
+	// Write standard headers.
+	writeSignV2Headers(buf, req)
+	// Write canonicalized protocol headers if any.
+	writeCanonicalizedHeaders(buf, req)
+	// Write canonicalized Query resources if any.
+	writeCanonicalizedResource(buf, req, virtualHost)
+	return buf.String()
+}
+
+// writeSignV2Headers - write signV2 required headers.
+func writeSignV2Headers(buf *bytes.Buffer, req http.Request) {
+	buf.WriteString(req.Method + "\n")
+	buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+	buf.WriteString(req.Header.Get("Content-Type") + "\n")
+	buf.WriteString(req.Header.Get("Date") + "\n")
+}
+
+// writeCanonicalizedHeaders - write canonicalized headers.
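+//
+// The canonicalized block it emits is the lowercased, sorted x-amz-*
+// headers, one "name:value" line per header, with multiple values joined
+// by commas. A sketch with made-up header values, not output captured
+// from this code:
+//
+//	x-amz-acl:public-read
+//	x-amz-meta-project:alpha,beta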
+func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { + var protoHeaders []string + vals := make(map[string][]string) + for k, vv := range req.Header { + // All the AMZ headers should be lowercase + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-amz") { + protoHeaders = append(protoHeaders, lk) + vals[lk] = vv + } + } + sort.Strings(protoHeaders) + for _, k := range protoHeaders { + buf.WriteString(k) + buf.WriteByte(':') + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + if strings.Contains(v, "\n") { + // TODO: "Unfold" long headers that + // span multiple lines (as allowed by + // RFC 2616, section 4.2) by replacing + // the folding white-space (including + // new-line) by a single space. + buf.WriteString(v) + } else { + buf.WriteString(v) + } + } + buf.WriteByte('\n') + } +} + +// AWS S3 Signature V2 calculation rule is give here: +// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign + +// Whitelist resource list that will be used in query string for signature-V2 calculation. +// The list should be alphabetically sorted +var resourceList = []string{ + "acl", + "delete", + "lifecycle", + "location", + "logging", + "notification", + "partNumber", + "policy", + "requestPayment", + "response-cache-control", + "response-content-disposition", + "response-content-encoding", + "response-content-language", + "response-content-type", + "response-expires", + "torrent", + "uploadId", + "uploads", + "versionId", + "versioning", + "versions", + "website", +} + +// From the Amazon docs: +// +// CanonicalizedResource = [ "/" + Bucket ] + +// + +// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) { + // Save request URL. + requestURL := req.URL + // Get encoded URL path. + buf.WriteString(encodeURL2Path(&req, virtualHost)) + if requestURL.RawQuery != "" { + var n int + vals, _ := url.ParseQuery(requestURL.RawQuery) + // Verify if any sub resource queries are present, if yes + // canonicallize them. + for _, resource := range resourceList { + if vv, ok := vals[resource]; ok && len(vv) > 0 { + n++ + // First element + switch n { + case 1: + buf.WriteByte('?') + // The rest + default: + buf.WriteByte('&') + } + buf.WriteString(resource) + // Request parameters + if len(vv[0]) > 0 { + buf.WriteByte('=') + buf.WriteString(vv[0]) + } + } + } + } +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-v4.go new file mode 100644 index 0000000000..ab96b58c52 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/s3signer/request-signature-v4.go @@ -0,0 +1,315 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package s3signer + +import ( + "bytes" + "encoding/hex" + "net/http" + "sort" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/v6/pkg/s3utils" +) + +// Signature and API related constants. +const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" + yyyymmdd = "20060102" +) + +/// +/// Excerpts from @lsegal - +/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. +/// +/// User-Agent: +/// +/// This is ignored from signing because signing this causes +/// problems with generating pre-signed URLs (that are executed +/// by other agents) or when customers pass requests through +/// proxies, which may modify the user-agent. +/// +/// Content-Length: +/// +/// This is ignored from signing because generating a pre-signed +/// URL should not provide a content-length constraint, +/// specifically when vending a S3 pre-signed PUT URL. The +/// corollary to this is that when sending regular requests +/// (non-pre-signed), the signature contains a checksum of the +/// body, which implicitly validates the payload length (since +/// changing the number of bytes would change the checksum) +/// and therefore this header is not valuable in the signature. +/// +/// Content-Type: +/// +/// Signing this header causes quite a number of problems in +/// browser environments, where browsers like to modify and +/// normalize the content-type header in different ways. There is +/// more information on this in https://goo.gl/2E9gyy. Avoiding +/// this field simplifies logic and reduces the possibility of +/// future bugs. +/// +/// Authorization: +/// +/// Is skipped for obvious reasons +/// +var v4IgnoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +// getSigningKey hmac seed to calculate final signature. +func getSigningKey(secret, loc string, t time.Time) []byte { + date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) + location := sumHMAC(date, []byte(loc)) + service := sumHMAC(location, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// getSignature final signature in hexadecimal form. +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// getScope generate a string of a specific date, an AWS region, and a +// service. +func getScope(location string, t time.Time) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + location, + "s3", + "aws4_request", + }, "/") + return scope +} + +// GetCredential generate a credential string. +func GetCredential(accessKeyID, location string, t time.Time) string { + scope := getScope(location, t) + return accessKeyID + "/" + scope +} + +// getHashedPayload get the hexadecimal value of the SHA256 hash of +// the request payload. +func getHashedPayload(req http.Request) string { + hashedPayload := req.Header.Get("X-Amz-Content-Sha256") + if hashedPayload == "" { + // Presign does not have a payload, use S3 recommended value. + hashedPayload = unsignedPayload + } + return hashedPayload +} + +// getCanonicalHeaders generate a list of request headers for +// signature. 
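+//
+// The output is one lowercased "name:value" line per signed header plus
+// host, sorted by name. An illustrative sketch with made-up values, not
+// output captured from this library:
+//
+//	host:s3.amazonaws.com
+//	x-amz-content-sha256:UNSIGNED-PAYLOAD
+//	x-amz-date:20190815T000000Z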
+func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+	var headers []string
+	vals := make(map[string][]string)
+	for k, vv := range req.Header {
+		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+			continue // ignored header
+		}
+		headers = append(headers, strings.ToLower(k))
+		vals[strings.ToLower(k)] = vv
+	}
+	headers = append(headers, "host")
+	sort.Strings(headers)
+
+	var buf bytes.Buffer
+	// Save all the headers in canonical form <header>:<value> newline
+	// separated for each header.
+	for _, k := range headers {
+		buf.WriteString(k)
+		buf.WriteByte(':')
+		switch {
+		case k == "host":
+			buf.WriteString(getHostAddr(&req))
+			fallthrough
+		default:
+			for idx, v := range vals[k] {
+				if idx > 0 {
+					buf.WriteByte(',')
+				}
+				buf.WriteString(signV4TrimAll(v))
+			}
+			buf.WriteByte('\n')
+		}
+	}
+	return buf.String()
+}
+
+// getSignedHeaders generate all signed request headers.
+// i.e lexically sorted, semicolon-separated list of lowercase
+// request header names.
+func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+	var headers []string
+	for k := range req.Header {
+		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+			continue // Ignored header found continue.
+		}
+		headers = append(headers, strings.ToLower(k))
+	}
+	headers = append(headers, "host")
+	sort.Strings(headers)
+	return strings.Join(headers, ";")
+}
+
+// getCanonicalRequest generate a canonical request of style.
+//
+// canonicalRequest =
+//	<HTTPMethod>\n
+//	<CanonicalURI>\n
+//	<CanonicalQueryString>\n
+//	<CanonicalHeaders>\n
+//	<SignedHeaders>\n
+//	<HashedPayload>
+func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool) string {
+	req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+	canonicalRequest := strings.Join([]string{
+		req.Method,
+		s3utils.EncodePath(req.URL.Path),
+		req.URL.RawQuery,
+		getCanonicalHeaders(req, ignoredHeaders),
+		getSignedHeaders(req, ignoredHeaders),
+		getHashedPayload(req),
+	}, "\n")
+	return canonicalRequest
+}
+
+// getStringToSignV4 a string based on selected query values.
+func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
+	stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
+	stringToSign = stringToSign + getScope(location, t) + "\n"
+	stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
+	return stringToSign
+}
+
+// PreSignV4 presign the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
+	// Presign is not needed for anonymous credentials.
+	if accessKeyID == "" || secretAccessKey == "" {
+		return &req
+	}
+
+	// Initial time.
+	t := time.Now().UTC()
+
+	// Get credential string.
+	credential := GetCredential(accessKeyID, location, t)
+
+	// Get all signed headers.
+	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
+
+	// Set URL query.
+	query := req.URL.Query()
+	query.Set("X-Amz-Algorithm", signV4Algorithm)
+	query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+	query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+	query.Set("X-Amz-SignedHeaders", signedHeaders)
+	query.Set("X-Amz-Credential", credential)
+	// Set session token if available.
+	if sessionToken != "" {
+		query.Set("X-Amz-Security-Token", sessionToken)
+	}
+	req.URL.RawQuery = query.Encode()
+
+	// Get canonical request.
+	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
+
+	// Get string to sign from canonical request.
+	stringToSign := getStringToSignV4(t, location, canonicalRequest)
+
+	// Get hmac signing key.
+	signingKey := getSigningKey(secretAccessKey, location, t)
+
+	// Calculate signature.
+	signature := getSignature(signingKey, stringToSign)
+
+	// Add signature header to RawQuery.
+	req.URL.RawQuery += "&X-Amz-Signature=" + signature
+
+	return &req
+}
+
+// PostPresignSignatureV4 - presigned signature for PostPolicy
+// requests.
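+//
+// A sketch of how this is typically used (assumed flow, per the AWS POST
+// policy docs rather than code in this file): the browser form carries the
+// base64-encoded policy in its "policy" field and the value returned here
+// in its "x-amz-signature" field.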
+func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { + // Get signining key. + signingkey := getSigningKey(secretAccessKey, location, t) + // Calculate signature. + signature := getSignature(signingkey, policyBase64) + return signature +} + +// SignV4 sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. +func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { + // Signature calculation is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + // Initial time. + t := time.Now().UTC() + + // Set x-amz-date. + req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) + + // Set session token if available. + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + // Get canonical request. + canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(t, location, canonicalRequest) + + // Get hmac signing key. + signingKey := getSigningKey(secretAccessKey, location, t) + + // Get credential string. + credential := GetCredential(accessKeyID, location, t) + + // Get all signed headers. + signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) + + // Calculate signature. + signature := getSignature(signingKey, stringToSign) + + // If regular request, construct the final authorization header. + parts := []string{ + signV4Algorithm + " Credential=" + credential, + "SignedHeaders=" + signedHeaders, + "Signature=" + signature, + } + + // Set authorization header. + auth := strings.Join(parts, ", ") + req.Header.Set("Authorization", auth) + + return &req +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/s3signer/utils.go b/vendor/github.com/minio/minio-go/v6/pkg/s3signer/utils.go new file mode 100644 index 0000000000..d07ff6dec7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/s3signer/utils.go @@ -0,0 +1,58 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3signer + +import ( + "crypto/hmac" + "crypto/sha256" + "net/http" + "strings" +) + +// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when +const unsignedPayload = "UNSIGNED-PAYLOAD" + +// sum256 calculate sha256 sum for an input byte array. +func sum256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +// sumHMAC calculate hmac between two input byte array. 
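+//
+// It is the building block of the SigV4 key-derivation chain implemented
+// by getSigningKey in request-signature-v4.go; an illustrative sketch with
+// made-up inputs:
+//
+//	date    := sumHMAC([]byte("AWS4"+"SECRET"), []byte("20190815"))
+//	region  := sumHMAC(date, []byte("us-east-1"))
+//	service := sumHMAC(region, []byte("s3"))
+//	signing := sumHMAC(service, []byte("aws4_request"))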
+func sumHMAC(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// getHostAddr returns host header if available, otherwise returns host from URL
+func getHostAddr(req *http.Request) string {
+	if req.Host != "" {
+		return req.Host
+	}
+	return req.URL.Host
+}
+
+// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall()
+// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+func signV4TrimAll(input string) string {
+	// Compress adjacent spaces (a space is determined by
+	// unicode.IsSpace() internally here) to one space and return
+	return strings.Join(strings.Fields(input), " ")
+}
diff --git a/vendor/github.com/minio/minio-go/v6/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v6/pkg/s3utils/utils.go
new file mode 100644
index 0000000000..9af2997b74
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/pkg/s3utils/utils.go
@@ -0,0 +1,331 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3utils
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"net"
+	"net/url"
+	"regexp"
+	"sort"
+	"strings"
+	"unicode/utf8"
+)
+
+// Sentinel URL is the default url value which is invalid.
+var sentinelURL = url.URL{}
+
+// IsValidDomain validates if input string is a valid domain name.
+func IsValidDomain(host string) bool {
+	// See RFC 1035, RFC 3696.
+	host = strings.TrimSpace(host)
+	if len(host) == 0 || len(host) > 255 {
+		return false
+	}
+	// host cannot start or end with "-"
+	if host[len(host)-1:] == "-" || host[:1] == "-" {
+		return false
+	}
+	// host cannot start or end with "_"
+	if host[len(host)-1:] == "_" || host[:1] == "_" {
+		return false
+	}
+	// host cannot start with a "."
+	if host[:1] == "." {
+		return false
+	}
+	// All non alphanumeric characters are invalid.
+	if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:<>,?/") {
+		return false
+	}
+	// No need to regexp match, since the list is non-exhaustive.
+	// We let it valid and fail later.
+	return true
+}
+
+// IsValidIP parses input string for ip address validity.
+func IsValidIP(ip string) bool {
+	return net.ParseIP(ip) != nil
+}
+
+// IsVirtualHostSupported - verifies if bucketName can be part of
+// virtual host. Currently only Amazon S3 and Google Cloud Storage
+// would support this.
+func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
+	if endpointURL == sentinelURL {
+		return false
+	}
+	// bucketName can be valid but '.' in the hostname will fail SSL
+	// certificate validation. So do not use host-style for such buckets.
+	if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
+		return false
+	}
+	// Return true for all other cases
+	return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
+}
+
+// Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+
+// amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style.
+var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?).amazonaws.com$`)
+
+// amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack.
+var amazonS3HostDualStack = regexp.MustCompile(`^s3.dualstack.(.*?).amazonaws.com$`)
+
+// amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style.
+var amazonS3HostDot = regexp.MustCompile(`^s3.(.*?).amazonaws.com$`)
+
+// amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host.
+var amazonS3ChinaHost = regexp.MustCompile(`^s3.(cn.*?).amazonaws.com.cn$`)
+
+// GetRegionFromURL - returns a region from url host.
+func GetRegionFromURL(endpointURL url.URL) string {
+	if endpointURL == sentinelURL {
+		return ""
+	}
+	if endpointURL.Host == "s3-external-1.amazonaws.com" {
+		return ""
+	}
+	if IsAmazonGovCloudEndpoint(endpointURL) {
+		return "us-gov-west-1"
+	}
+	parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	return ""
+}
+
+// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
+func IsAmazonEndpoint(endpointURL url.URL) bool {
+	if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" {
+		return true
+	}
+	return GetRegionFromURL(endpointURL) != ""
+}
+
+// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
+func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" || + IsAmazonFIPSGovCloudEndpoint(endpointURL)) +} + +// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint. +// See https://aws.amazon.com/compliance/fips. +func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" || + endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com" +} + +// IsAmazonFIPSUSEastWestEndpoint - Match if it is exactly Amazon S3 FIPS US East/West endpoint. +// See https://aws.amazon.com/compliance/fips. +func IsAmazonFIPSUSEastWestEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + switch endpointURL.Host { + case "s3-fips.us-east-2.amazonaws.com": + case "s3-fips.dualstack.us-west-1.amazonaws.com": + case "s3-fips.dualstack.us-west-2.amazonaws.com": + case "s3-fips.dualstack.us-east-2.amazonaws.com": + case "s3-fips.dualstack.us-east-1.amazonaws.com": + case "s3-fips.us-west-1.amazonaws.com": + case "s3-fips.us-west-2.amazonaws.com": + case "s3-fips.us-east-1.amazonaws.com": + default: + return false + } + return true +} + +// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint. +// See https://aws.amazon.com/compliance/fips. +func IsAmazonFIPSEndpoint(endpointURL url.URL) bool { + return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL) +} + +// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint. +func IsGoogleEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return endpointURL.Host == "storage.googleapis.com" +} + +// Expects ascii encoded strings - from output of urlEncodePath +func percentEncodeSlash(s string) string { + return strings.Replace(s, "/", "%2F", -1) +} + +// QueryEncode - encodes query values in their URL encoded form. In +// addition to the percent encoding performed by urlEncodePath() used +// here, it also percent encodes '/' (forward slash) +func QueryEncode(v url.Values) string { + if v == nil { + return "" + } + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := percentEncodeSlash(EncodePath(k)) + "=" + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + buf.WriteString(percentEncodeSlash(EncodePath(v))) + } + } + return buf.String() +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. 
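+//
+// An illustrative sketch (made-up input, not a test vector from this
+// library):
+//
+//	EncodePath("hello world/文档") == "hello%20world/%E6%96%87%E6%A1%A3"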
+func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} + +// We support '.' with bucket names but we fallback to using path +// style requests instead for such buckets. +var ( + validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`) + validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) + ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) +) + +// Common checker for both stricter and basic validation. +func checkBucketNameCommon(bucketName string, strict bool) (err error) { + if strings.TrimSpace(bucketName) == "" { + return errors.New("Bucket name cannot be empty") + } + if len(bucketName) < 3 { + return errors.New("Bucket name cannot be smaller than 3 characters") + } + if len(bucketName) > 63 { + return errors.New("Bucket name cannot be greater than 63 characters") + } + if ipAddress.MatchString(bucketName) { + return errors.New("Bucket name cannot be an ip address") + } + if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") { + return errors.New("Bucket name contains invalid characters") + } + if strict { + if !validBucketNameStrict.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err + } + if !validBucketName.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err +} + +// CheckValidBucketName - checks if we have a valid input bucket name. +func CheckValidBucketName(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, false) +} + +// CheckValidBucketNameStrict - checks if we have a valid input bucket name. +// This is a stricter version. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html +func CheckValidBucketNameStrict(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, true) +} + +// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectNamePrefix(objectName string) error { + if len(objectName) > 1024 { + return errors.New("Object name cannot be greater than 1024 characters") + } + if !utf8.ValidString(objectName) { + return errors.New("Object name with non UTF-8 strings are not supported") + } + return nil +} + +// CheckValidObjectName - checks if we have a valid input object name. 
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectName(objectName string) error { + if strings.TrimSpace(objectName) == "" { + return errors.New("Object name cannot be empty") + } + return CheckValidObjectNamePrefix(objectName) +} diff --git a/vendor/github.com/minio/minio-go/v6/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v6/pkg/set/stringset.go new file mode 100644 index 0000000000..e220271bb7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/pkg/set/stringset.go @@ -0,0 +1,197 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package set + +import ( + "encoding/json" + "fmt" + "sort" +) + +// StringSet - uses map as set of strings. +type StringSet map[string]struct{} + +// ToSlice - returns StringSet as string slice. +func (set StringSet) ToSlice() []string { + keys := make([]string, 0, len(set)) + for k := range set { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// IsEmpty - returns whether the set is empty or not. +func (set StringSet) IsEmpty() bool { + return len(set) == 0 +} + +// Add - adds string to the set. +func (set StringSet) Add(s string) { + set[s] = struct{}{} +} + +// Remove - removes string in the set. It does nothing if string does not exist in the set. +func (set StringSet) Remove(s string) { + delete(set, s) +} + +// Contains - checks if string is in the set. +func (set StringSet) Contains(s string) bool { + _, ok := set[s] + return ok +} + +// FuncMatch - returns new set containing each value who passes match function. +// A 'matchFn' should accept element in a set as first argument and +// 'matchString' as second argument. The function can do any logic to +// compare both the arguments and should return true to accept element in +// a set to include in output set else the element is ignored. +func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet { + nset := NewStringSet() + for k := range set { + if matchFn(k, matchString) { + nset.Add(k) + } + } + return nset +} + +// ApplyFunc - returns new set containing each value processed by 'applyFn'. +// A 'applyFn' should accept element in a set as a argument and return +// a processed string. The function can do any logic to return a processed +// string. +func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet { + nset := NewStringSet() + for k := range set { + nset.Add(applyFn(k)) + } + return nset +} + +// Equals - checks whether given set is equal to current set or not. +func (set StringSet) Equals(sset StringSet) bool { + // If length of set is not equal to length of given set, the + // set is not equal to given set. + if len(set) != len(sset) { + return false + } + + // As both sets are equal in length, check each elements are equal. 
+ for k := range set { + if _, ok := sset[k]; !ok { + return false + } + } + + return true +} + +// Intersection - returns the intersection with given set as new set. +func (set StringSet) Intersection(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + if _, ok := sset[k]; ok { + nset.Add(k) + } + } + + return nset +} + +// Difference - returns the difference with given set as new set. +func (set StringSet) Difference(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + if _, ok := sset[k]; !ok { + nset.Add(k) + } + } + + return nset +} + +// Union - returns the union with given set as new set. +func (set StringSet) Union(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + nset.Add(k) + } + + for k := range sset { + nset.Add(k) + } + + return nset +} + +// MarshalJSON - converts to JSON data. +func (set StringSet) MarshalJSON() ([]byte, error) { + return json.Marshal(set.ToSlice()) +} + +// UnmarshalJSON - parses JSON data and creates new set with it. +// If 'data' contains JSON string array, the set contains each string. +// If 'data' contains JSON string, the set contains the string as one element. +// If 'data' contains Other JSON types, JSON parse error is returned. +func (set *StringSet) UnmarshalJSON(data []byte) error { + sl := []string{} + var err error + if err = json.Unmarshal(data, &sl); err == nil { + *set = make(StringSet) + for _, s := range sl { + set.Add(s) + } + } else { + var s string + if err = json.Unmarshal(data, &s); err == nil { + *set = make(StringSet) + set.Add(s) + } + } + + return err +} + +// String - returns printable string of the set. +func (set StringSet) String() string { + return fmt.Sprintf("%s", set.ToSlice()) +} + +// NewStringSet - creates new string set. +func NewStringSet() StringSet { + return make(StringSet) +} + +// CreateStringSet - creates new string set with given string values. +func CreateStringSet(sl ...string) StringSet { + set := make(StringSet) + for _, k := range sl { + set.Add(k) + } + return set +} + +// CopyStringSet - returns copy of given set. +func CopyStringSet(set StringSet) StringSet { + nset := NewStringSet() + for k, v := range set { + nset[k] = v + } + return nset +} diff --git a/vendor/github.com/minio/minio-go/v6/post-policy.go b/vendor/github.com/minio/minio-go/v6/post-policy.go new file mode 100644 index 0000000000..f9250b2a89 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/post-policy.go @@ -0,0 +1,270 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/base64" + "fmt" + "strings" + "time" +) + +// expirationDateFormat date format for expiration key in json policy. 
+const expirationDateFormat = "2006-01-02T15:04:05.999Z" + +// policyCondition explanation: +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html +// +// Example: +// +// policyCondition { +// matchType: "$eq", +// key: "$Content-Type", +// value: "image/png", +// } +// +type policyCondition struct { + matchType string + condition string + value string +} + +// PostPolicy - Provides strict static type conversion and validation +// for Amazon S3's POST policy JSON string. +type PostPolicy struct { + // Expiration date and time of the POST policy. + expiration time.Time + // Collection of different policy conditions. + conditions []policyCondition + // ContentLengthRange minimum and maximum allowable size for the + // uploaded content. + contentLengthRange struct { + min int64 + max int64 + } + + // Post form data. + formData map[string]string +} + +// NewPostPolicy - Instantiate new post policy. +func NewPostPolicy() *PostPolicy { + p := &PostPolicy{} + p.conditions = make([]policyCondition, 0) + p.formData = make(map[string]string) + return p +} + +// SetExpires - Sets expiration time for the new policy. +func (p *PostPolicy) SetExpires(t time.Time) error { + if t.IsZero() { + return ErrInvalidArgument("No expiry time set.") + } + p.expiration = t + return nil +} + +// SetKey - Sets an object name for the policy based upload. +func (p *PostPolicy) SetKey(key string) error { + if strings.TrimSpace(key) == "" || key == "" { + return ErrInvalidArgument("Object name is empty.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$key", + value: key, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["key"] = key + return nil +} + +// SetKeyStartsWith - Sets an object name that an policy based upload +// can start with. +func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { + if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { + return ErrInvalidArgument("Object prefix is empty.") + } + policyCond := policyCondition{ + matchType: "starts-with", + condition: "$key", + value: keyStartsWith, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["key"] = keyStartsWith + return nil +} + +// SetBucket - Sets bucket at which objects will be uploaded to. +func (p *PostPolicy) SetBucket(bucketName string) error { + if strings.TrimSpace(bucketName) == "" || bucketName == "" { + return ErrInvalidArgument("Bucket name is empty.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$bucket", + value: bucketName, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["bucket"] = bucketName + return nil +} + +// SetContentType - Sets content-type of the object for this policy +// based upload. +func (p *PostPolicy) SetContentType(contentType string) error { + if strings.TrimSpace(contentType) == "" || contentType == "" { + return ErrInvalidArgument("No content type specified.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$Content-Type", + value: contentType, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["Content-Type"] = contentType + return nil +} + +// SetContentLengthRange - Set new min and max content length +// condition for all incoming uploads. 
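+//
+// For example (an illustrative sketch; the 10 MiB limit is made up):
+//
+//	policy.SetContentLengthRange(1, 10*1024*1024)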
+func (p *PostPolicy) SetContentLengthRange(min, max int64) error { + if min > max { + return ErrInvalidArgument("Minimum limit is larger than maximum limit.") + } + if min < 0 { + return ErrInvalidArgument("Minimum limit cannot be negative.") + } + if max < 0 { + return ErrInvalidArgument("Maximum limit cannot be negative.") + } + p.contentLengthRange.min = min + p.contentLengthRange.max = max + return nil +} + +// SetSuccessStatusAction - Sets the status success code of the object for this policy +// based upload. +func (p *PostPolicy) SetSuccessStatusAction(status string) error { + if strings.TrimSpace(status) == "" || status == "" { + return ErrInvalidArgument("Status is empty") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$success_action_status", + value: status, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["success_action_status"] = status + return nil +} + +// SetUserMetadata - Set user metadata as a key/value couple. +// Can be retrieved through a HEAD request or an event. +func (p *PostPolicy) SetUserMetadata(key string, value string) error { + if strings.TrimSpace(key) == "" || key == "" { + return ErrInvalidArgument("Key is empty") + } + if strings.TrimSpace(value) == "" || value == "" { + return ErrInvalidArgument("Value is empty") + } + headerName := fmt.Sprintf("x-amz-meta-%s", key) + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + +// SetUserData - Set user data as a key/value couple. +// Can be retrieved through a HEAD request or an event. +func (p *PostPolicy) SetUserData(key string, value string) error { + if key == "" { + return ErrInvalidArgument("Key is empty") + } + if value == "" { + return ErrInvalidArgument("Value is empty") + } + headerName := fmt.Sprintf("x-amz-%s", key) + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + +// addNewPolicy - internal helper to validate adding new policies. +func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { + if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { + return ErrInvalidArgument("Policy fields are empty.") + } + p.conditions = append(p.conditions, policyCond) + return nil +} + +// Stringer interface for printing policy in json formatted string. +func (p PostPolicy) String() string { + return string(p.marshalJSON()) +} + +// marshalJSON - Provides Marshalled JSON in bytes. 
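+//
+// A sketch of the shape it produces, with made-up values:
+//
+//	{"expiration":"2019-08-15T10:00:00.999Z","conditions":[["eq","$bucket","mybucket"],["eq","$key","photo.png"]]}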
+func (p PostPolicy) marshalJSON() []byte {
+	expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
+	var conditionsStr string
+	conditions := []string{}
+	for _, po := range p.conditions {
+		conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
+	}
+	if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
+		conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
+			p.contentLengthRange.min, p.contentLengthRange.max))
+	}
+	if len(conditions) > 0 {
+		conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
+	}
+	retStr := "{"
+	retStr = retStr + expirationStr + ","
+	retStr = retStr + conditionsStr
+	retStr = retStr + "}"
+	return []byte(retStr)
+}
+
+// base64 - Produces base64 of PostPolicy's Marshalled json.
+func (p PostPolicy) base64() string {
+	return base64.StdEncoding.EncodeToString(p.marshalJSON())
+}
diff --git a/vendor/github.com/minio/minio-go/v6/retry-continous.go b/vendor/github.com/minio/minio-go/v6/retry-continous.go
new file mode 100644
index 0000000000..3d25883b0c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/retry-continous.go
@@ -0,0 +1,69 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "time"
+
+// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
+func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+	attemptCh := make(chan int)
+
+	// normalize jitter to the range [0, 1.0]
+	if jitter < NoJitter {
+		jitter = NoJitter
+	}
+	if jitter > MaxJitter {
+		jitter = MaxJitter
+	}
+
+	// computes the exponential backoff duration according to
+	// https://www.awsarchitectureblog.com/2015/03/backoff.html
+	exponentialBackoffWait := func(attempt int) time.Duration {
+		// 1<<uint(attempt) below could overflow, so limit the value of attempt
+		maxAttempt := 30
+		if attempt > maxAttempt {
+			attempt = maxAttempt
+		}
+		//sleep = random_between(0, min(cap, base * 2 ** attempt))
+		sleep := unit * time.Duration(1<<uint(attempt))
+		if sleep > cap {
+			sleep = cap
+		}
+		if jitter != NoJitter {
+			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+		}
+		return sleep
+	}
+
+	go func() {
+		defer close(attemptCh)
+		var nextBackoff int
+		for {
+			select {
+			// Attempts starts.
+			case attemptCh <- nextBackoff:
+				nextBackoff++
+			case <-doneCh:
+				// Stop the routine.
+				return
+			}
+			time.Sleep(exponentialBackoffWait(nextBackoff))
+		}
+	}()
+	return attemptCh
+}
diff --git a/vendor/github.com/minio/minio-go/v6/retry.go b/vendor/github.com/minio/minio-go/v6/retry.go
new file mode 100644
index 0000000000..2c608bab54
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v6/retry.go
@@ -0,0 +1,157 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// MaxRetry is the maximum number of retries before stopping.
+var MaxRetry = 10
+
+// MaxJitter will randomize over the full exponential backoff time
+const MaxJitter = 1.0
+
+// NoJitter disables the use of jitter for randomizing the exponential backoff time
+const NoJitter = 0.0
+
+// DefaultRetryUnit - default unit multiplicative per retry.
+// defaults to 1 second.
+const DefaultRetryUnit = time.Second
+
+// DefaultRetryCap - Each retry attempt never waits longer than
+// this maximum time duration.
+const DefaultRetryCap = time.Second * 30
+
+// newRetryTimer creates a timer with exponentially increasing
+// delays until the maximum retry attempts are reached.
+func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+	attemptCh := make(chan int)
+
+	// computes the exponential backoff duration according to
+	// https://www.awsarchitectureblog.com/2015/03/backoff.html
+	exponentialBackoffWait := func(attempt int) time.Duration {
+		// normalize jitter to the range [0, 1.0]
+		if jitter < NoJitter {
+			jitter = NoJitter
+		}
+		if jitter > MaxJitter {
+			jitter = MaxJitter
+		}
+
+		//sleep = random_between(0, min(cap, base * 2 ** attempt))
+		sleep := unit * time.Duration(1<<uint(attempt))
+		if sleep > cap {
+			sleep = cap
+		}
+		if jitter != NoJitter {
+			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+		}
+		return sleep
+	}
+
+	go func() {
+		defer close(attemptCh)
+		for i := 0; i < maxRetry; i++ {
+			select {
+			// Attempts start from 1.
+			case attemptCh <- i + 1:
+			case <-doneCh:
+				// Stop the routine.
+				return
+			}
+			time.Sleep(exponentialBackoffWait(i))
+		}
+	}()
+	return attemptCh
+}
+
+// isHTTPReqErrorRetryable - is http requests error retryable, such
+// as i/o timeout, connection broken etc..
+func isHTTPReqErrorRetryable(err error) bool {
+	if err == nil {
+		return false
+	}
+	switch e := err.(type) {
+	case *url.Error:
+		switch e.Err.(type) {
+		case *net.DNSError, *net.OpError, net.UnknownNetworkError:
+			return true
+		}
+		if strings.Contains(err.Error(), "Connection closed by foreign host") {
+			return true
+		} else if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
+			// If error is - tlsHandshakeTimeoutError, retry.
+			return true
+		} else if strings.Contains(err.Error(), "i/o timeout") {
+			// If error is - tcp timeoutError, retry.
+			return true
+		} else if strings.Contains(err.Error(), "connection timed out") {
+			// If err is a net.Dial timeout, retry.
+			return true
+		} else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") {
+			// If error is transport connection broken, retry.
+ return true + } else if strings.Contains(err.Error(), "net/http: timeout awaiting response headers") { + // Retry errors due to server not sending the response before timeout + return true + } + } + return false +} + +// List of AWS S3 error codes which are retryable. +var retryableS3Codes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "InternalError": {}, + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "SlowDown": {}, + // Add more AWS S3 codes here. +} + +// isS3CodeRetryable - is s3 error code retryable. +func isS3CodeRetryable(s3Code string) (ok bool) { + _, ok = retryableS3Codes[s3Code] + return ok +} + +// List of HTTP status codes which are retryable. +var retryableHTTPStatusCodes = map[int]struct{}{ + 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet + http.StatusInternalServerError: {}, + http.StatusBadGateway: {}, + http.StatusServiceUnavailable: {}, + http.StatusGatewayTimeout: {}, + // Add more HTTP status codes here. +} + +// isHTTPStatusRetryable - is HTTP error code retryable. +func isHTTPStatusRetryable(httpStatusCode int) (ok bool) { + _, ok = retryableHTTPStatusCodes[httpStatusCode] + return ok +} diff --git a/vendor/github.com/minio/minio-go/v6/s3-endpoints.go b/vendor/github.com/minio/minio-go/v6/s3-endpoints.go new file mode 100644 index 0000000000..989f58c7ec --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/s3-endpoints.go @@ -0,0 +1,53 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +// awsS3EndpointMap Amazon S3 endpoint map. 
+var awsS3EndpointMap = map[string]string{ + "us-east-1": "s3.dualstack.us-east-1.amazonaws.com", + "us-east-2": "s3.dualstack.us-east-2.amazonaws.com", + "us-west-2": "s3.dualstack.us-west-2.amazonaws.com", + "us-west-1": "s3.dualstack.us-west-1.amazonaws.com", + "ca-central-1": "s3.dualstack.ca-central-1.amazonaws.com", + "eu-west-1": "s3.dualstack.eu-west-1.amazonaws.com", + "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", + "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", + "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", + "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", + "ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com", + "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", + "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com", + "ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com", + "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", + "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", + "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", + "us-gov-east-1": "s3.dualstack.us-gov-east-1.amazonaws.com", + "cn-north-1": "s3.cn-north-1.amazonaws.com.cn", + "cn-northwest-1": "s3.cn-northwest-1.amazonaws.com.cn", +} + +// getS3Endpoint get Amazon S3 endpoint based on the bucket location. +func getS3Endpoint(bucketLocation string) (s3Endpoint string) { + s3Endpoint, ok := awsS3EndpointMap[bucketLocation] + if !ok { + // Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint. + s3Endpoint = "s3.dualstack.us-east-1.amazonaws.com" + } + return s3Endpoint +} diff --git a/vendor/github.com/minio/minio-go/v6/s3-error.go b/vendor/github.com/minio/minio-go/v6/s3-error.go new file mode 100644 index 0000000000..f365157eea --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/s3-error.go @@ -0,0 +1,61 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package minio
+
+// Non exhaustive list of AWS S3 standard error responses -
+// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+var s3ErrorResponseMap = map[string]string{
+	"AccessDenied": "Access Denied.",
+	"BadDigest": "The Content-Md5 you specified did not match what we received.",
+	"EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.",
+	"EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.",
+	"IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.",
+	"InternalError": "We encountered an internal error, please try again.",
+	"InvalidAccessKeyId": "The access key ID you provided does not exist in our records.",
+	"InvalidBucketName": "The specified bucket is not valid.",
+	"InvalidDigest": "The Content-Md5 you specified is not valid.",
+	"InvalidRange": "The requested range is not satisfiable",
+	"MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.",
+	"MissingContentLength": "You must provide the Content-Length HTTP header.",
+	"MissingContentMD5": "Missing required header for this request: Content-Md5.",
+	"MissingRequestBodyError": "Request body is empty.",
+	"NoSuchBucket": "The specified bucket does not exist.",
+	"NoSuchBucketPolicy": "The bucket policy does not exist",
+	"NoSuchKey": "The specified key does not exist.",
+	"NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
+	"NotImplemented": "A header you provided implies functionality that is not implemented",
+	"PreconditionFailed": "At least one of the pre-conditions you specified did not hold",
+	"RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.",
+	"SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
+	"MethodNotAllowed": "The specified method is not allowed against this resource.",
+	"InvalidPart": "One or more of the specified parts could not be found.",
+	"InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
+	"InvalidObjectState": "The operation is not valid for the current state of the object.",
+	"AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.",
+	"MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.",
+	"BucketNotEmpty": "The bucket you tried to delete is not empty",
+	"AllAccessDisabled": "All access to this bucket has been disabled.",
+	"MalformedPolicy": "Policy has invalid resource.",
+	"MissingFields": "Missing fields in request.",
+	"AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
+	"MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
+	"BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.",
+	"InvalidDuration": "Duration provided in the request is invalid.",
+	"XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.",
+	// Add new API errors here.
+} diff --git a/vendor/github.com/minio/minio-go/v6/staticcheck.conf b/vendor/github.com/minio/minio-go/v6/staticcheck.conf new file mode 100644 index 0000000000..71cc6f536a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all", "-ST1005", "-ST1017", "-SA9004", "-ST1000", "-S1021"] \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v6/transport.go b/vendor/github.com/minio/minio-go/v6/transport.go new file mode 100644 index 0000000000..34efa89802 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/transport.go @@ -0,0 +1,82 @@ +// +build go1.7 go1.8 + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017-2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "crypto/tls" + "crypto/x509" + "net" + "net/http" + "time" + + "golang.org/x/net/http2" +) + +// DefaultTransport - this default transport is similar to +// http.DefaultTransport but with additional param DisableCompression +// is set to true to avoid decompressing content with 'gzip' encoding. +var DefaultTransport = func(secure bool) (http.RoundTripper, error) { + tr := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + MaxIdleConns: 1024, + MaxIdleConnsPerHost: 1024, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + // Set this value so that the underlying transport round-tripper + // doesn't try to auto decode the body of objects with + // content-encoding set to `gzip`. + // + // Refer: + // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 + DisableCompression: true, + } + + if secure { + rootCAs, _ := x509.SystemCertPool() + if rootCAs == nil { + // In some systems (like Windows) system cert pool is + // not supported or no certificates are present on the + // system - so we create a new cert pool. + rootCAs = x509.NewCertPool() + } + + // Keep TLS config. + tlsConfig := &tls.Config{ + RootCAs: rootCAs, + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + MinVersion: tls.VersionTLS12, + } + tr.TLSClientConfig = tlsConfig + + // Because we create a custom TLSClientConfig, we have to opt-in to HTTP/2. + // See https://github.com/golang/go/issues/14275 + if err := http2.ConfigureTransport(tr); err != nil { + return nil, err + } + } + return tr, nil +} diff --git a/vendor/github.com/minio/minio-go/v6/utils.go b/vendor/github.com/minio/minio-go/v6/utils.go new file mode 100644 index 0000000000..fc30c1ab7c --- /dev/null +++ b/vendor/github.com/minio/minio-go/v6/utils.go @@ -0,0 +1,272 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. 
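One detail in transport.go above is worth calling out: because it installs a custom TLSClientConfig, Go's automatic HTTP/2 upgrade is disabled, so the code opts back in explicitly via http2.ConfigureTransport. A hedged usage sketch from outside the package (the panic-based error handling is illustrative only):

```go
package main

import (
	"fmt"
	"net/http"

	minio "github.com/minio/minio-go/v6"
)

func main() {
	// secure=true installs the TLS >= 1.2 config and re-enables HTTP/2.
	tr, err := minio.DefaultTransport(true)
	if err != nil {
		panic(err)
	}
	client := &http.Client{Transport: tr}
	fmt.Printf("transport ready: %T\n", client.Transport)
}
```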
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/xml" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "regexp" + "strings" + "time" + + "github.com/minio/minio-go/v6/pkg/s3utils" +) + +// xmlDecoder provide decoded value in xml. +func xmlDecoder(body io.Reader, v interface{}) error { + d := xml.NewDecoder(body) + return d.Decode(v) +} + +// sum256 calculate sha256sum for an input byte array, returns hex encoded. +func sum256Hex(data []byte) string { + hash := sha256.New() + hash.Write(data) + return hex.EncodeToString(hash.Sum(nil)) +} + +// sumMD5Base64 calculate md5sum for an input byte array, returns base64 encoded. +func sumMD5Base64(data []byte) string { + hash := md5.New() + hash.Write(data) + return base64.StdEncoding.EncodeToString(hash.Sum(nil)) +} + +// getEndpointURL - construct a new endpoint. +func getEndpointURL(endpoint string, secure bool) (*url.URL, error) { + if strings.Contains(endpoint, ":") { + host, _, err := net.SplitHostPort(endpoint) + if err != nil { + return nil, err + } + if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { + msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." + return nil, ErrInvalidArgument(msg) + } + } else { + if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) { + msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." + return nil, ErrInvalidArgument(msg) + } + } + // If secure is false, use 'http' scheme. + scheme := "https" + if !secure { + scheme = "http" + } + + // Construct a secured endpoint URL. + endpointURLStr := scheme + "://" + endpoint + endpointURL, err := url.Parse(endpointURLStr) + if err != nil { + return nil, err + } + + // Validate incoming endpoint URL. + if err := isValidEndpointURL(*endpointURL); err != nil { + return nil, err + } + return endpointURL, nil +} + +// closeResponse close non nil response with any response Body. +// convenient wrapper to drain any remaining data on response body. +// +// Subsequently this allows golang http RoundTripper +// to re-use the same connection for future requests. +func closeResponse(resp *http.Response) { + // Callers should close resp.Body when done reading from it. + // If resp.Body is not closed, the Client's underlying RoundTripper + // (typically Transport) may not be able to re-use a persistent TCP + // connection to the server for a subsequent "keep-alive" request. + if resp != nil && resp.Body != nil { + // Drain any remaining Body and then close the connection. + // Without this closing connection would disallow re-using + // the same connection for future uses. + // - http://stackoverflow.com/a/17961593/4465767 + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } +} + +var ( + // Hex encoded string of nil sha256sum bytes. 
+	emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+
+	// Sentinel URL is the default url value which is invalid.
+	sentinelURL = url.URL{}
+)
+
+// Verify if input endpoint URL is valid.
+func isValidEndpointURL(endpointURL url.URL) error {
+	if endpointURL == sentinelURL {
+		return ErrInvalidArgument("Endpoint url cannot be empty.")
+	}
+	if endpointURL.Path != "/" && endpointURL.Path != "" {
+		return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
+	}
+	if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") {
+		if !s3utils.IsAmazonEndpoint(endpointURL) {
+			return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
+		}
+	}
+	if strings.Contains(endpointURL.Host, ".googleapis.com") {
+		if !s3utils.IsGoogleEndpoint(endpointURL) {
+			return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
+		}
+	}
+	return nil
+}
+
+// Verify if input expires value is valid.
+func isValidExpiry(expires time.Duration) error {
+	expireSeconds := int64(expires / time.Second)
+	if expireSeconds < 1 {
+		return ErrInvalidArgument("Expires cannot be lesser than 1 second.")
+	}
+	if expireSeconds > 604800 {
+		return ErrInvalidArgument("Expires cannot be greater than 7 days.")
+	}
+	return nil
+}
+
+// make a copy of http.Header
+func cloneHeader(h http.Header) http.Header {
+	h2 := make(http.Header, len(h))
+	for k, vv := range h {
+		vv2 := make([]string, len(vv))
+		copy(vv2, vv)
+		h2[k] = vv2
+	}
+	return h2
+}
+
+// Filter relevant response headers from
+// the HEAD, GET http response. The function takes
+// a list of headers which are filtered out and
+// returned as a new http header.
+func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.Header) {
+	filteredHeader = cloneHeader(header)
+	for _, key := range filterKeys {
+		filteredHeader.Del(key)
+	}
+	return filteredHeader
+}
+
+// regCred matches credential string in HTTP header
+var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
+
+// regSign matches signature string in HTTP header
+var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
+
+// Redact out signature value from authorization string.
+func redactSignature(origAuth string) string {
+	if !strings.HasPrefix(origAuth, signV4Algorithm) {
+		// Set a temporary redacted auth
+		return "AWS **REDACTED**:**REDACTED**"
+	}
+
+	/// Signature V4 authorization header.
+
+	// Strip out accessKeyID from:
+	// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
+	newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
+
+	// Strip out 256-bit signature from: Signature=<256-bit signature>
+	return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
+}
+
+// getDefaultLocation returns the location based on the input
+// URL `u`; if a region override is provided, it always takes
+// precedence.
+//
+// If no other case matches, the location falls back to `us-east-1`
+// as a last resort.
+func getDefaultLocation(u url.URL, regionOverride string) (location string) {
+	if regionOverride != "" {
+		return regionOverride
+	}
+	region := s3utils.GetRegionFromURL(u)
+	if region == "" {
+		region = "us-east-1"
+	}
+	return region
+}
+
+var supportedHeaders = []string{
+	"content-type",
+	"cache-control",
+	"content-encoding",
+	"content-disposition",
+	"content-language",
+	"x-amz-website-redirect-location",
+	"expires",
+	// Add more supported headers here.
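redactSignature above is easiest to understand by example. The header values below are fabricated, and signV4Algorithm is assumed to be the "AWS4-HMAC-SHA256" prefix the regexes expect; this is a sketch in the style of a _test Example, not part of the vendored sources:

```go
package minio

import "fmt"

// ExampleRedactSignature is an in-package sketch (redactSignature is
// unexported); all credential values are fabricated for illustration.
func ExampleRedactSignature() {
	auth := "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20190101/us-east-1/s3/aws4_request, " +
		"SignedHeaders=host;x-amz-date, Signature=0123456789abcdef"
	fmt.Println(redactSignature(auth))
	// Output: AWS4-HMAC-SHA256 Credential=**REDACTED**/20190101/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=**REDACTED**
}
```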
+} + +// isStorageClassHeader returns true if the header is a supported storage class header +func isStorageClassHeader(headerKey string) bool { + return strings.EqualFold(amzStorageClass, headerKey) +} + +// isStandardHeader returns true if header is a supported header and not a custom header +func isStandardHeader(headerKey string) bool { + key := strings.ToLower(headerKey) + for _, header := range supportedHeaders { + if strings.ToLower(header) == key { + return true + } + } + return false +} + +// sseHeaders is list of server side encryption headers +var sseHeaders = []string{ + "x-amz-server-side-encryption", + "x-amz-server-side-encryption-aws-kms-key-id", + "x-amz-server-side-encryption-context", + "x-amz-server-side-encryption-customer-algorithm", + "x-amz-server-side-encryption-customer-key", + "x-amz-server-side-encryption-customer-key-MD5", +} + +// isSSEHeader returns true if header is a server side encryption header. +func isSSEHeader(headerKey string) bool { + key := strings.ToLower(headerKey) + for _, h := range sseHeaders { + if strings.ToLower(h) == key { + return true + } + } + return false +} + +// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header. +func isAmzHeader(headerKey string) bool { + key := strings.ToLower(headerKey) + + return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) +} diff --git a/vendor/github.com/thanos-io/thanos/LICENSE b/vendor/github.com/thanos-io/thanos/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/block.go b/vendor/github.com/thanos-io/thanos/pkg/block/block.go new file mode 100644 index 0000000000..96d6434924 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/block/block.go @@ -0,0 +1,155 @@ +// Package block contains common functionality for interacting with TSDB blocks +// in the context of Thanos. +package block + +import ( + "context" + "encoding/json" + "io/ioutil" + "os" + "path" + "path/filepath" + + "github.com/thanos-io/thanos/pkg/block/metadata" + + "fmt" + + "github.com/go-kit/kit/log" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/runutil" +) + +const ( + // MetaFilename is the known JSON filename for meta information. + MetaFilename = "meta.json" + // IndexFilename is the known index file for block index. 
+ IndexFilename = "index" + // IndexCacheFilename is the canonical name for index cache file that stores essential information needed. + IndexCacheFilename = "index.cache.json" + // ChunksDirname is the known dir name for chunks with compressed samples. + ChunksDirname = "chunks" + + // DebugMetas is a directory for debug meta files that happen in the past. Useful for debugging. + DebugMetas = "debug/metas" +) + +// Download downloads directory that is mean to be block directory. +func Download(ctx context.Context, logger log.Logger, bucket objstore.Bucket, id ulid.ULID, dst string) error { + if err := objstore.DownloadDir(ctx, logger, bucket, id.String(), dst); err != nil { + return err + } + + chunksDir := filepath.Join(dst, ChunksDirname) + _, err := os.Stat(chunksDir) + if os.IsNotExist(err) { + // This can happen if block is empty. We cannot easily upload empty directory, so create one here. + return os.Mkdir(chunksDir, os.ModePerm) + } + + if err != nil { + return errors.Wrapf(err, "stat %s", chunksDir) + } + + return nil +} + +// Upload uploads block from given block dir that ends with block id. +// It makes sure cleanup is done on error to avoid partial block uploads. +// It also verifies basic features of Thanos block. +// TODO(bplotka): Ensure bucket operations have reasonable backoff retries. +func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir string) error { + df, err := os.Stat(bdir) + if err != nil { + return errors.Wrap(err, "stat bdir") + } + if !df.IsDir() { + return errors.Errorf("%s is not a directory", bdir) + } + + // Verify dir. + id, err := ulid.Parse(df.Name()) + if err != nil { + return errors.Wrap(err, "not a block dir") + } + + meta, err := metadata.Read(bdir) + if err != nil { + // No meta or broken meta file. + return errors.Wrap(err, "read meta") + } + + if meta.Thanos.Labels == nil || len(meta.Thanos.Labels) == 0 { + return errors.Errorf("empty external labels are not allowed for Thanos block.") + } + + if err := objstore.UploadFile(ctx, logger, bkt, path.Join(bdir, MetaFilename), path.Join(DebugMetas, fmt.Sprintf("%s.json", id))); err != nil { + return errors.Wrap(err, "upload meta file to debug dir") + } + + if err := objstore.UploadDir(ctx, logger, bkt, path.Join(bdir, ChunksDirname), path.Join(id.String(), ChunksDirname)); err != nil { + return cleanUp(bkt, id, errors.Wrap(err, "upload chunks")) + } + + if err := objstore.UploadFile(ctx, logger, bkt, path.Join(bdir, IndexFilename), path.Join(id.String(), IndexFilename)); err != nil { + return cleanUp(bkt, id, errors.Wrap(err, "upload index")) + } + + if meta.Thanos.Source == metadata.CompactorSource { + if err := objstore.UploadFile(ctx, logger, bkt, path.Join(bdir, IndexCacheFilename), path.Join(id.String(), IndexCacheFilename)); err != nil { + return cleanUp(bkt, id, errors.Wrap(err, "upload index cache")) + } + } + + // Meta.json always need to be uploaded as a last item. This will allow to assume block directories without meta file + // to be pending uploads. + if err := objstore.UploadFile(ctx, logger, bkt, path.Join(bdir, MetaFilename), path.Join(id.String(), MetaFilename)); err != nil { + return cleanUp(bkt, id, errors.Wrap(err, "upload meta file")) + } + + return nil +} + +func cleanUp(bkt objstore.Bucket, id ulid.ULID, err error) error { + // Cleanup the dir with an uncancelable context. + cleanErr := Delete(context.Background(), bkt, id) + if cleanErr != nil { + return errors.Wrapf(err, "failed to clean block after upload issue. Partial block in system. 
Err: %s", err.Error()) + } + return err +} + +// Delete removes directory that is mean to be block directory. +// NOTE: Prefer this method instead of objstore.Delete to avoid deleting empty dir (whole bucket) by mistake. +func Delete(ctx context.Context, bucket objstore.Bucket, id ulid.ULID) error { + return objstore.DeleteDir(ctx, bucket, id.String()) +} + +// DownloadMeta downloads only meta file from bucket by block ID. +// TODO(bwplotka): Differentiate between network error & partial upload. +func DownloadMeta(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID) (metadata.Meta, error) { + rc, err := bkt.Get(ctx, path.Join(id.String(), MetaFilename)) + if err != nil { + return metadata.Meta{}, errors.Wrapf(err, "meta.json bkt get for %s", id.String()) + } + defer runutil.CloseWithLogOnErr(logger, rc, "download meta bucket client") + + var m metadata.Meta + + obj, err := ioutil.ReadAll(rc) + if err != nil { + return metadata.Meta{}, errors.Wrapf(err, "read meta.json for block %s", id.String()) + } + + if err = json.Unmarshal(obj, &m); err != nil { + return metadata.Meta{}, errors.Wrapf(err, "unmarshal meta.json for block %s", id.String()) + } + + return m, nil +} + +func IsBlockDir(path string) (id ulid.ULID, ok bool) { + id, err := ulid.Parse(filepath.Base(path)) + return id, err == nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/index.go b/vendor/github.com/thanos-io/thanos/pkg/block/index.go new file mode 100644 index 0000000000..707d922f90 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/block/index.go @@ -0,0 +1,771 @@ +package block + +import ( + "encoding/json" + "fmt" + "hash/crc32" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/thanos-io/thanos/pkg/block/metadata" + + "github.com/prometheus/prometheus/tsdb/fileutil" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/index" + "github.com/prometheus/prometheus/tsdb/labels" + "github.com/thanos-io/thanos/pkg/runutil" +) + +const ( + // IndexCacheVersion is a enumeration of index cache versions supported by Thanos. 
+ IndexCacheVersion1 = iota + 1 +) + +type postingsRange struct { + Name, Value string + Start, End int64 +} + +type indexCache struct { + Version int + CacheVersion int + Symbols map[uint32]string + LabelValues map[string][]string + Postings []postingsRange +} + +type realByteSlice []byte + +func (b realByteSlice) Len() int { + return len(b) +} + +func (b realByteSlice) Range(start, end int) []byte { + return b[start:end] +} + +func (b realByteSlice) Sub(start, end int) index.ByteSlice { + return b[start:end] +} + +func getSymbolTable(b index.ByteSlice) (map[uint32]string, error) { + version := int(b.Range(4, 5)[0]) + + if version != 1 && version != 2 { + return nil, errors.Errorf("unknown index file version %d", version) + } + + toc, err := index.NewTOCFromByteSlice(b) + if err != nil { + return nil, errors.Wrap(err, "read TOC") + } + + symbolsV2, symbolsV1, err := index.ReadSymbols(b, version, int(toc.Symbols)) + if err != nil { + return nil, errors.Wrap(err, "read symbols") + } + + symbolsTable := make(map[uint32]string, len(symbolsV1)+len(symbolsV2)) + for o, s := range symbolsV1 { + symbolsTable[o] = s + } + for o, s := range symbolsV2 { + symbolsTable[uint32(o)] = s + } + + return symbolsTable, nil +} + +// WriteIndexCache writes a cache file containing the first lookup stages +// for an index file. +func WriteIndexCache(logger log.Logger, indexFn string, fn string) error { + indexFile, err := fileutil.OpenMmapFile(indexFn) + if err != nil { + return errors.Wrapf(err, "open mmap index file %s", indexFn) + } + defer runutil.CloseWithLogOnErr(logger, indexFile, "close index cache mmap file from %s", indexFn) + + b := realByteSlice(indexFile.Bytes()) + indexr, err := index.NewReader(b) + if err != nil { + return errors.Wrap(err, "open index reader") + } + defer runutil.CloseWithLogOnErr(logger, indexr, "load index cache reader") + + // We assume reader verified index already. + symbols, err := getSymbolTable(b) + if err != nil { + return err + } + + f, err := os.Create(fn) + if err != nil { + return errors.Wrap(err, "create index cache file") + } + defer runutil.CloseWithLogOnErr(logger, f, "index cache writer") + + v := indexCache{ + Version: indexr.Version(), + CacheVersion: IndexCacheVersion1, + Symbols: symbols, + LabelValues: map[string][]string{}, + } + + // Extract label value indices. + lnames, err := indexr.LabelIndices() + if err != nil { + return errors.Wrap(err, "read label indices") + } + for _, lns := range lnames { + if len(lns) != 1 { + continue + } + ln := lns[0] + + tpls, err := indexr.LabelValues(ln) + if err != nil { + return errors.Wrap(err, "get label values") + } + vals := make([]string, 0, tpls.Len()) + + for i := 0; i < tpls.Len(); i++ { + v, err := tpls.At(i) + if err != nil { + return errors.Wrap(err, "get label value") + } + if len(v) != 1 { + return errors.Errorf("unexpected tuple length %d", len(v)) + } + vals = append(vals, v[0]) + } + v.LabelValues[ln] = vals + } + + // Extract postings ranges. + pranges, err := indexr.PostingsRanges() + if err != nil { + return errors.Wrap(err, "read postings ranges") + } + for l, rng := range pranges { + v.Postings = append(v.Postings, postingsRange{ + Name: l.Name, + Value: l.Value, + Start: rng.Start, + End: rng.End, + }) + } + + if err := json.NewEncoder(f).Encode(&v); err != nil { + return errors.Wrap(err, "encode file") + } + return nil +} + +// ReadIndexCache reads an index cache file. 
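WriteIndexCache above persists only the first lookup stages of an index (symbol table, per-label value lists, postings offset ranges) as JSON, so a store can answer label queries without mmapping the full index file. A hedged sketch of the read side; the file name is illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-kit/kit/log"
	"github.com/thanos-io/thanos/pkg/block"
)

func main() {
	// ReadIndexCache returns the deduplicated tables written by WriteIndexCache.
	version, symbols, lvals, postings, err := block.ReadIndexCache(log.NewNopLogger(), "index.cache.json")
	if err != nil {
		panic(err)
	}
	fmt.Printf("index v%d: %d symbols, %d label names, %d postings ranges\n",
		version, len(symbols), len(lvals), len(postings))
}
```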
+func ReadIndexCache(logger log.Logger, fn string) ( + version int, + symbols []string, + lvals map[string][]string, + postings map[labels.Label]index.Range, + err error, +) { + f, err := os.Open(fn) + if err != nil { + return 0, nil, nil, nil, errors.Wrap(err, "open file") + } + defer runutil.CloseWithLogOnErr(logger, f, "index reader") + + var v indexCache + + bytes, err := ioutil.ReadFile(fn) + if err != nil { + return 0, nil, nil, nil, errors.Wrap(err, "read file") + } + + if err = json.Unmarshal(bytes, &v); err != nil { + return 0, nil, nil, nil, errors.Wrap(err, "unmarshal index cache") + } + + strs := map[string]string{} + lvals = make(map[string][]string, len(v.LabelValues)) + postings = make(map[labels.Label]index.Range, len(v.Postings)) + + var maxSymbolID uint32 + for o := range v.Symbols { + if o > maxSymbolID { + maxSymbolID = o + } + } + symbols = make([]string, maxSymbolID+1) + + // Most strings we encounter are duplicates. Dedup string objects that we keep + // around after the function returns to reduce total memory usage. + // NOTE(fabxc): it could even make sense to deduplicate globally. + getStr := func(s string) string { + if cs, ok := strs[s]; ok { + return cs + } + strs[s] = s + return s + } + + for o, s := range v.Symbols { + symbols[o] = getStr(s) + } + for ln, vals := range v.LabelValues { + for i := range vals { + vals[i] = getStr(vals[i]) + } + lvals[getStr(ln)] = vals + } + for _, e := range v.Postings { + l := labels.Label{ + Name: getStr(e.Name), + Value: getStr(e.Value), + } + postings[l] = index.Range{Start: e.Start, End: e.End} + } + return v.Version, symbols, lvals, postings, nil +} + +// VerifyIndex does a full run over a block index and verifies that it fulfills the order invariants. +func VerifyIndex(logger log.Logger, fn string, minTime int64, maxTime int64) error { + stats, err := GatherIndexIssueStats(logger, fn, minTime, maxTime) + if err != nil { + return err + } + + return stats.AnyErr() +} + +type Stats struct { + // TotalSeries represents total number of series in block. + TotalSeries int + // OutOfOrderSeries represents number of series that have out of order chunks. + OutOfOrderSeries int + + // OutOfOrderChunks represents number of chunks that are out of order (older time range is after younger one) + OutOfOrderChunks int + // DuplicatedChunks represents number of chunks with same time ranges within same series, potential duplicates. + DuplicatedChunks int + // OutsideChunks represents number of all chunks that are before or after time range specified in block meta. + OutsideChunks int + // CompleteOutsideChunks is subset of OutsideChunks that will be never accessed. They are completely out of time range specified in block meta. + CompleteOutsideChunks int + // Issue347OutsideChunks represents subset of OutsideChunks that are outsiders caused by https://github.com/prometheus/tsdb/issues/347 + // and is something that Thanos handle. + // + // Specifically we mean here chunks with minTime == block.maxTime and maxTime > block.MaxTime. These are + // are segregated into separate counters. These chunks are safe to be deleted, since they are duplicated across 2 blocks. + Issue347OutsideChunks int + // OutOfOrderLabels represents the number of postings that contained out + // of order labels, a bug present in Prometheus 2.8.0 and below. + OutOfOrderLabels int +} + +// PrometheusIssue5372Err returns an error if the Stats object indicates +// postings with out of order labels. 
This is corrected by Prometheus Issue +// #5372 and affects Prometheus versions 2.8.0 and below. +func (i Stats) PrometheusIssue5372Err() error { + if i.OutOfOrderLabels > 0 { + return errors.Errorf("index contains %d postings with out of order labels", + i.OutOfOrderLabels) + } + return nil +} + +// Issue347OutsideChunksErr returns error if stats indicates issue347 block issue, that is repaired explicitly before compaction (on plan block). +func (i Stats) Issue347OutsideChunksErr() error { + if i.Issue347OutsideChunks > 0 { + return errors.Errorf("found %d chunks outside the block time range introduced by https://github.com/prometheus/tsdb/issues/347", i.Issue347OutsideChunks) + } + return nil +} + +// CriticalErr returns error if stats indicates critical block issue, that might solved only by manual repair procedure. +func (i Stats) CriticalErr() error { + var errMsg []string + + if i.OutOfOrderSeries > 0 { + errMsg = append(errMsg, fmt.Sprintf( + "%d/%d series have an average of %.3f out-of-order chunks: "+ + "%.3f of these are exact duplicates (in terms of data and time range)", + i.OutOfOrderSeries, + i.TotalSeries, + float64(i.OutOfOrderChunks)/float64(i.OutOfOrderSeries), + float64(i.DuplicatedChunks)/float64(i.OutOfOrderChunks), + )) + } + + n := i.OutsideChunks - (i.CompleteOutsideChunks + i.Issue347OutsideChunks) + if n > 0 { + errMsg = append(errMsg, fmt.Sprintf("found %d chunks non-completely outside the block time range", n)) + } + + if i.CompleteOutsideChunks > 0 { + errMsg = append(errMsg, fmt.Sprintf("found %d chunks completely outside the block time range", i.CompleteOutsideChunks)) + } + + if len(errMsg) > 0 { + return errors.New(strings.Join(errMsg, ", ")) + } + + return nil +} + +// AnyErr returns error if stats indicates any block issue. +func (i Stats) AnyErr() error { + var errMsg []string + + if err := i.CriticalErr(); err != nil { + errMsg = append(errMsg, err.Error()) + } + + if err := i.Issue347OutsideChunksErr(); err != nil { + errMsg = append(errMsg, err.Error()) + } + + if err := i.PrometheusIssue5372Err(); err != nil { + errMsg = append(errMsg, err.Error()) + } + + if len(errMsg) > 0 { + return errors.New(strings.Join(errMsg, ", ")) + } + + return nil +} + +// GatherIndexIssueStats returns useful counters as well as outsider chunks (chunks outside of block time range) that +// helps to assess index health. +// It considers https://github.com/prometheus/tsdb/issues/347 as something that Thanos can handle. +// See Stats.Issue347OutsideChunks for details. +func GatherIndexIssueStats(logger log.Logger, fn string, minTime int64, maxTime int64) (stats Stats, err error) { + r, err := index.NewFileReader(fn) + if err != nil { + return stats, errors.Wrap(err, "open index file") + } + defer runutil.CloseWithErrCapture(&err, r, "gather index issue file reader") + + p, err := r.Postings(index.AllPostingsKey()) + if err != nil { + return stats, errors.Wrap(err, "get all postings") + } + var ( + lastLset labels.Labels + lset labels.Labels + chks []chunks.Meta + ) + + // Per series. + for p.Next() { + lastLset = append(lastLset[:0], lset...) 
+ + id := p.At() + stats.TotalSeries++ + + if err := r.Series(id, &lset, &chks); err != nil { + return stats, errors.Wrap(err, "read series") + } + if len(lset) == 0 { + return stats, errors.Errorf("empty label set detected for series %d", id) + } + if lastLset != nil && labels.Compare(lastLset, lset) >= 0 { + return stats, errors.Errorf("series %v out of order; previous %v", lset, lastLset) + } + l0 := lset[0] + for _, l := range lset[1:] { + if l.Name < l0.Name { + stats.OutOfOrderLabels++ + level.Warn(logger).Log("msg", + "out-of-order label set: known bug in Prometheus 2.8.0 and below", + "labelset", lset.String(), + "series", fmt.Sprintf("%d", id), + ) + } + l0 = l + } + if len(chks) == 0 { + return stats, errors.Errorf("empty chunks for series %d", id) + } + + ooo := 0 + // Per chunk in series. + for i, c := range chks { + // Chunk vs the block ranges. + if c.MinTime < minTime || c.MaxTime > maxTime { + stats.OutsideChunks++ + if c.MinTime > maxTime || c.MaxTime < minTime { + stats.CompleteOutsideChunks++ + } else if c.MinTime == maxTime { + stats.Issue347OutsideChunks++ + } + } + + if i == 0 { + continue + } + + c0 := chks[i-1] + + // Chunk order within block. + if c.MinTime > c0.MaxTime { + continue + } + + if c.MinTime == c0.MinTime && c.MaxTime == c0.MaxTime { + // TODO(bplotka): Calc and check checksum from chunks itself. + // The chunks can overlap 1:1 in time, but does not have same data. + // We assume same data for simplicity, but it can be a symptom of error. + stats.DuplicatedChunks++ + continue + } + // Chunks partly overlaps or out of order. + ooo++ + } + if ooo > 0 { + stats.OutOfOrderSeries++ + stats.OutOfOrderChunks += ooo + } + } + if p.Err() != nil { + return stats, errors.Wrap(err, "walk postings") + } + + return stats, nil +} + +type ignoreFnType func(mint, maxt int64, prev *chunks.Meta, curr *chunks.Meta) (bool, error) + +// Repair open the block with given id in dir and creates a new one with fixed data. +// It: +// - removes out of order duplicates +// - all "complete" outsiders (they will not accessed anyway) +// - removes all near "complete" outside chunks introduced by https://github.com/prometheus/tsdb/issues/347. +// Fixable inconsistencies are resolved in the new block. 
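The Stats helpers split index problems into tiers: CriticalErr needs manual intervention, Issue347OutsideChunksErr is repairable before compaction, and PrometheusIssue5372Err flags the old out-of-order-labels bug. A hedged health-check sketch over one local block; checkBlock is not part of the vendored API, and bdir plus the block's time range are assumed inputs:

```go
package main

import (
	"path/filepath"

	"github.com/go-kit/kit/log"
	"github.com/pkg/errors"
	"github.com/thanos-io/thanos/pkg/block"
)

// checkBlock is a sketch: it gathers index stats and escalates only the
// critical class, leaving the repairable classes to Repair / compaction.
func checkBlock(logger log.Logger, bdir string, minT, maxT int64) error {
	stats, err := block.GatherIndexIssueStats(logger, filepath.Join(bdir, block.IndexFilename), minT, maxT)
	if err != nil {
		return err
	}
	if err := stats.CriticalErr(); err != nil {
		return errors.Wrap(err, "block needs manual repair")
	}
	// Issue-347 outsiders and out-of-order labels have dedicated repair paths.
	return stats.AnyErr()
}

func main() {}
```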
+// TODO(bplotka): https://github.com/thanos-io/thanos/issues/378 +func Repair(logger log.Logger, dir string, id ulid.ULID, source metadata.SourceType, ignoreChkFns ...ignoreFnType) (resid ulid.ULID, err error) { + if len(ignoreChkFns) == 0 { + return resid, errors.New("no ignore chunk function specified") + } + + bdir := filepath.Join(dir, id.String()) + entropy := rand.New(rand.NewSource(time.Now().UnixNano())) + resid = ulid.MustNew(ulid.Now(), entropy) + + meta, err := metadata.Read(bdir) + if err != nil { + return resid, errors.Wrap(err, "read meta file") + } + if meta.Thanos.Downsample.Resolution > 0 { + return resid, errors.New("cannot repair downsampled block") + } + + b, err := tsdb.OpenBlock(logger, bdir, nil) + if err != nil { + return resid, errors.Wrap(err, "open block") + } + defer runutil.CloseWithErrCapture(&err, b, "repair block reader") + + indexr, err := b.Index() + if err != nil { + return resid, errors.Wrap(err, "open index") + } + defer runutil.CloseWithErrCapture(&err, indexr, "repair index reader") + + chunkr, err := b.Chunks() + if err != nil { + return resid, errors.Wrap(err, "open chunks") + } + defer runutil.CloseWithErrCapture(&err, chunkr, "repair chunk reader") + + resdir := filepath.Join(dir, resid.String()) + + chunkw, err := chunks.NewWriter(filepath.Join(resdir, ChunksDirname)) + if err != nil { + return resid, errors.Wrap(err, "open chunk writer") + } + defer runutil.CloseWithErrCapture(&err, chunkw, "repair chunk writer") + + indexw, err := index.NewWriter(filepath.Join(resdir, IndexFilename)) + if err != nil { + return resid, errors.Wrap(err, "open index writer") + } + defer runutil.CloseWithErrCapture(&err, indexw, "repair index writer") + + // TODO(fabxc): adapt so we properly handle the version once we update to an upstream + // that has multiple. + resmeta := *meta + resmeta.ULID = resid + resmeta.Stats = tsdb.BlockStats{} // reset stats + resmeta.Thanos.Source = source // update source + + if err := rewrite(logger, indexr, chunkr, indexw, chunkw, &resmeta, ignoreChkFns); err != nil { + return resid, errors.Wrap(err, "rewrite block") + } + if err := metadata.Write(logger, resdir, &resmeta); err != nil { + return resid, err + } + // TSDB may rewrite metadata in bdir. + // TODO: This is not needed in newer TSDB code. See + // https://github.com/prometheus/tsdb/pull/637 + if err := metadata.Write(logger, bdir, meta); err != nil { + return resid, err + } + return resid, nil +} + +var castagnoli = crc32.MakeTable(crc32.Castagnoli) + +func IgnoreCompleteOutsideChunk(mint int64, maxt int64, _ *chunks.Meta, curr *chunks.Meta) (bool, error) { + if curr.MinTime > maxt || curr.MaxTime < mint { + // "Complete" outsider. Ignore. + return true, nil + } + return false, nil +} + +func IgnoreIssue347OutsideChunk(_ int64, maxt int64, _ *chunks.Meta, curr *chunks.Meta) (bool, error) { + if curr.MinTime == maxt { + // "Near" outsider from issue https://github.com/prometheus/tsdb/issues/347. Ignore. + return true, nil + } + return false, nil +} + +func IgnoreDuplicateOutsideChunk(_ int64, _ int64, last *chunks.Meta, curr *chunks.Meta) (bool, error) { + if last == nil { + return false, nil + } + + if curr.MinTime > last.MaxTime { + return false, nil + } + + // Verify that the overlapping chunks are exact copies so we can safely discard + // the current one. 
+ if curr.MinTime != last.MinTime || curr.MaxTime != last.MaxTime { + return false, errors.Errorf("non-sequential chunks not equal: [%d, %d] and [%d, %d]", + last.MinTime, last.MaxTime, curr.MinTime, curr.MaxTime) + } + ca := crc32.Checksum(last.Chunk.Bytes(), castagnoli) + cb := crc32.Checksum(curr.Chunk.Bytes(), castagnoli) + + if ca != cb { + return false, errors.Errorf("non-sequential chunks not equal: %x and %x", ca, cb) + } + + return true, nil +} + +// sanitizeChunkSequence ensures order of the input chunks and drops any duplicates. +// It errors if the sequence contains non-dedupable overlaps. +func sanitizeChunkSequence(chks []chunks.Meta, mint int64, maxt int64, ignoreChkFns []ignoreFnType) ([]chunks.Meta, error) { + if len(chks) == 0 { + return nil, nil + } + // First, ensure that chunks are ordered by their start time. + sort.Slice(chks, func(i, j int) bool { + return chks[i].MinTime < chks[j].MinTime + }) + + // Remove duplicates, complete outsiders and near outsiders. + repl := make([]chunks.Meta, 0, len(chks)) + var last *chunks.Meta + +OUTER: + // This compares the current chunk to the chunk from the last iteration + // by pointers. If we use "i, c := range chks" the variable c is a new + // variable who's address doesn't change through the entire loop. + // The current element of the chks slice is copied into it. We must take + // the address of the indexed slice instead. + for i := range chks { + for _, ignoreChkFn := range ignoreChkFns { + ignore, err := ignoreChkFn(mint, maxt, last, &chks[i]) + if err != nil { + return nil, errors.Wrap(err, "ignore function") + } + + if ignore { + continue OUTER + } + } + + last = &chks[i] + repl = append(repl, chks[i]) + } + + return repl, nil +} + +type seriesRepair struct { + lset labels.Labels + chks []chunks.Meta +} + +// rewrite writes all data from the readers back into the writers while cleaning +// up mis-ordered and duplicated chunks. +func rewrite( + logger log.Logger, + indexr tsdb.IndexReader, chunkr tsdb.ChunkReader, + indexw tsdb.IndexWriter, chunkw tsdb.ChunkWriter, + meta *metadata.Meta, + ignoreChkFns []ignoreFnType, +) error { + symbols, err := indexr.Symbols() + if err != nil { + return err + } + if err := indexw.AddSymbols(symbols); err != nil { + return err + } + + all, err := indexr.Postings(index.AllPostingsKey()) + if err != nil { + return err + } + all = indexr.SortedPostings(all) + + // We fully rebuild the postings list index from merged series. + var ( + postings = index.NewMemPostings() + values = map[string]stringset{} + i = uint64(0) + series = []seriesRepair{} + ) + + for all.Next() { + var lset labels.Labels + var chks []chunks.Meta + id := all.At() + + if err := indexr.Series(id, &lset, &chks); err != nil { + return err + } + // Make sure labels are in sorted order. + sort.Sort(lset) + + for i, c := range chks { + chks[i].Chunk, err = chunkr.Chunk(c.Ref) + if err != nil { + return err + } + } + + chks, err := sanitizeChunkSequence(chks, meta.MinTime, meta.MaxTime, ignoreChkFns) + if err != nil { + return err + } + + if len(chks) == 0 { + continue + } + + series = append(series, seriesRepair{ + lset: lset, + chks: chks, + }) + } + + if all.Err() != nil { + return errors.Wrap(all.Err(), "iterate series") + } + + // Sort the series, if labels are re-ordered then the ordering of series + // will be different. + sort.Slice(series, func(i, j int) bool { + return labels.Compare(series[i].lset, series[j].lset) < 0 + }) + + lastSet := labels.Labels{} + // Build a new TSDB block. 
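Repair's chunk filtering is pluggable: anything matching the ignoreFnType shape can be passed alongside the stock IgnoreCompleteOutsideChunk, IgnoreIssue347OutsideChunk, and IgnoreDuplicateOutsideChunk filters. A hypothetical extra filter, shown as an in-package sketch:

```go
package block

import (
	"github.com/prometheus/prometheus/tsdb/chunks"
)

// ignoreEmptySampleChunk is a hypothetical filter with the ignoreFnType
// shape; it drops chunks that carry no samples at all. The unused mint,
// maxt, and prev parameters mirror the stock filters' signatures.
func ignoreEmptySampleChunk(mint, maxt int64, prev *chunks.Meta, curr *chunks.Meta) (bool, error) {
	if curr.Chunk != nil && curr.Chunk.NumSamples() == 0 {
		return true, nil
	}
	return false, nil
}
```

Such a function could simply be appended to Repair's variadic filter list; sanitizeChunkSequence consults every filter for each chunk and skips the chunk as soon as one of them returns true.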
+ for _, s := range series { + // The TSDB library will throw an error if we add a series with + // identical labels as the last series. This means that we have + // discovered a duplicate time series in the old block. We drop + // all duplicate series preserving the first one. + // TODO: Add metric to count dropped series if repair becomes a daemon + // rather than a batch job. + if labels.Compare(lastSet, s.lset) == 0 { + level.Warn(logger).Log("msg", + "dropping duplicate series in tsdb block found", + "labelset", s.lset.String(), + ) + continue + } + if err := chunkw.WriteChunks(s.chks...); err != nil { + return errors.Wrap(err, "write chunks") + } + if err := indexw.AddSeries(i, s.lset, s.chks...); err != nil { + return errors.Wrap(err, "add series") + } + + meta.Stats.NumChunks += uint64(len(s.chks)) + meta.Stats.NumSeries++ + + for _, chk := range s.chks { + meta.Stats.NumSamples += uint64(chk.Chunk.NumSamples()) + } + + for _, l := range s.lset { + valset, ok := values[l.Name] + if !ok { + valset = stringset{} + values[l.Name] = valset + } + valset.set(l.Value) + } + postings.Add(i, s.lset) + i++ + lastSet = s.lset + } + + s := make([]string, 0, 256) + for n, v := range values { + s = s[:0] + + for x := range v { + s = append(s, x) + } + if err := indexw.WriteLabelIndex([]string{n}, s); err != nil { + return errors.Wrap(err, "write label index") + } + } + + for _, l := range postings.SortedKeys() { + if err := indexw.WritePostings(l.Name, l.Value, postings.Get(l.Name, l.Value)); err != nil { + return errors.Wrap(err, "write postings") + } + } + return nil +} + +type stringset map[string]struct{} + +func (ss stringset) set(s string) { + ss[s] = struct{}{} +} + +func (ss stringset) String() string { + return strings.Join(ss.slice(), ",") +} + +func (ss stringset) slice() []string { + slice := make([]string, 0, len(ss)) + for k := range ss { + slice = append(slice, k) + } + sort.Strings(slice) + return slice +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go new file mode 100644 index 0000000000..fd664c1aa3 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/block/metadata/meta.go @@ -0,0 +1,146 @@ +package metadata + +// metadata package implements writing and reading wrapped meta.json where Thanos puts its metadata. +// Those metadata contains external labels, downsampling resolution and source type. +// This package is minimal and separated because it used by testutils which limits test helpers we can use in +// this package. + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + "github.com/go-kit/kit/log" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/thanos-io/thanos/pkg/runutil" +) + +type SourceType string + +const ( + UnknownSource SourceType = "" + SidecarSource SourceType = "sidecar" + ReceiveSource SourceType = "receive" + CompactorSource SourceType = "compactor" + CompactorRepairSource SourceType = "compactor.repair" + RulerSource SourceType = "ruler" + BucketRepairSource SourceType = "bucket.repair" + TestSource SourceType = "test" +) + +const ( + // MetaFilename is the known JSON filename for meta information. + MetaFilename = "meta.json" +) + +const ( + // MetaVersion is a enumeration of meta versions supported by Thanos. + MetaVersion1 = iota + 1 +) + +// Meta describes the a block's meta. 
It wraps the known TSDB meta structure and +// extends it by Thanos-specific fields. +type Meta struct { + tsdb.BlockMeta + + Thanos Thanos `json:"thanos"` +} + +// Thanos holds block meta information specific to Thanos. +type Thanos struct { + Labels map[string]string `json:"labels"` + Downsample ThanosDownsample `json:"downsample"` + + // Source is a real upload source of the block. + Source SourceType `json:"source"` +} + +type ThanosDownsample struct { + Resolution int64 `json:"resolution"` +} + +// InjectThanos sets Thanos meta to the block meta JSON and saves it to the disk. +// NOTE: It should be used after writing any block by any Thanos component, otherwise we will miss crucial metadata. +func InjectThanos(logger log.Logger, bdir string, meta Thanos, downsampledMeta *tsdb.BlockMeta) (*Meta, error) { + newMeta, err := Read(bdir) + if err != nil { + return nil, errors.Wrap(err, "read new meta") + } + newMeta.Thanos = meta + + // While downsampling we need to copy original compaction. + if downsampledMeta != nil { + newMeta.Compaction = downsampledMeta.Compaction + } + + if err := Write(logger, bdir, newMeta); err != nil { + return nil, errors.Wrap(err, "write new meta") + } + + return newMeta, nil +} + +// Write writes the given meta into /meta.json. +func Write(logger log.Logger, dir string, meta *Meta) error { + // Make any changes to the file appear atomic. + path := filepath.Join(dir, MetaFilename) + tmp := path + ".tmp" + + f, err := os.Create(tmp) + if err != nil { + return err + } + + enc := json.NewEncoder(f) + enc.SetIndent("", "\t") + + if err := enc.Encode(meta); err != nil { + runutil.CloseWithLogOnErr(logger, f, "close meta") + return err + } + if err := f.Close(); err != nil { + return err + } + return renameFile(logger, tmp, path) +} + +func renameFile(logger log.Logger, from, to string) error { + if err := os.RemoveAll(to); err != nil { + return err + } + if err := os.Rename(from, to); err != nil { + return err + } + + // Directory was renamed; sync parent dir to persist rename. + pdir, err := fileutil.OpenDir(filepath.Dir(to)) + if err != nil { + return err + } + + if err = fileutil.Fdatasync(pdir); err != nil { + runutil.CloseWithLogOnErr(logger, pdir, "close dir") + return err + } + return pdir.Close() +} + +// Read reads the given meta from /meta.json. +func Read(dir string) (*Meta, error) { + b, err := ioutil.ReadFile(filepath.Join(dir, MetaFilename)) + if err != nil { + return nil, err + } + var m Meta + + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + if m.Version != MetaVersion1 { + return nil, errors.Errorf("unexpected meta file version %d", m.Version) + } + return &m, nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/aggr.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/aggr.go new file mode 100644 index 0000000000..9a96e94571 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/aggr.go @@ -0,0 +1,118 @@ +package downsample + +import ( + "encoding/binary" + + "github.com/pkg/errors" + "github.com/prometheus/prometheus/tsdb/chunkenc" +) + +// ChunkEncAggr is the top level encoding byte for the AggrChunk. +// It picks the highest number possible to prevent future collisions with wrapped encodings. +const ChunkEncAggr = chunkenc.Encoding(0xff) + +// AggrChunk is a chunk that is composed of a set of aggregates for the same underlying data. +// Not all aggregates must be present. 
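Stepping back to the meta-file handling just above: Write plus renameFile implement the classic atomic-replace pattern, encode into a temporary file, rename it over the destination, then fsync the parent directory so the rename survives a crash. A condensed stdlib-only sketch of the same idea (the directory fsync is elided here; helper name is illustrative):

package main

import (
	"encoding/json"
	"os"
	"path/filepath"
)

// writeJSONAtomic encodes v into a temporary file and renames it over the
// destination, so readers never observe a half-written meta.json.
func writeJSONAtomic(dir, name string, v interface{}) error {
	path := filepath.Join(dir, name)
	tmp := path + ".tmp"

	f, err := os.Create(tmp)
	if err != nil {
		return err
	}
	enc := json.NewEncoder(f)
	enc.SetIndent("", "\t")
	if err := enc.Encode(v); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	// Rename is atomic on POSIX filesystems; the vendored code additionally
	// fsyncs the parent directory to persist the rename itself.
	return os.Rename(tmp, path)
}

func main() {
	_ = writeJSONAtomic(os.TempDir(), "meta.json", map[string]int{"version": 1})
}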
+type AggrChunk []byte + +// EncodeAggrChunk encodes a new aggregate chunk from the array of chunks for each aggregate. +// Each array entry corresponds to the respective AggrType number. +func EncodeAggrChunk(chks [5]chunkenc.Chunk) *AggrChunk { + var b []byte + buf := [8]byte{} + + for _, c := range chks { + // Unset aggregates are marked with a zero length entry. + if c == nil { + n := binary.PutUvarint(buf[:], 0) + b = append(b, buf[:n]...) + continue + } + l := len(c.Bytes()) + n := binary.PutUvarint(buf[:], uint64(l)) + b = append(b, buf[:n]...) + b = append(b, byte(c.Encoding())) + b = append(b, c.Bytes()...) + } + chk := AggrChunk(b) + return &chk +} + +func (c AggrChunk) Bytes() []byte { + return []byte(c) +} + +func (c AggrChunk) Appender() (chunkenc.Appender, error) { + return nil, errors.New("not implemented") +} + +func (c AggrChunk) Iterator(_ chunkenc.Iterator) chunkenc.Iterator { + return chunkenc.NewNopIterator() +} + +func (c AggrChunk) NumSamples() int { + x, err := c.Get(AggrCount) + if err != nil { + return 0 + } + return x.NumSamples() +} + +// ErrAggrNotExist is returned if a requested aggregation is not present in an AggrChunk. +var ErrAggrNotExist = errors.New("aggregate does not exist") + +func (c AggrChunk) Encoding() chunkenc.Encoding { + return ChunkEncAggr +} + +// Get returns the sub-chunk for the given aggregate type if it exists. +func (c AggrChunk) Get(t AggrType) (chunkenc.Chunk, error) { + b := c[:] + var x []byte + + for i := AggrType(0); i <= t; i++ { + l, n := binary.Uvarint(b) + if n < 1 || len(b[n:]) < int(l)+1 { + return nil, errors.New("invalid size") + } + b = b[n:] + // If length is set to zero explicitly, that means the aggregate is unset. + if l == 0 { + if i == t { + return nil, ErrAggrNotExist + } + continue + } + x = b[:int(l)+1] + b = b[int(l)+1:] + } + return chunkenc.FromData(chunkenc.Encoding(x[0]), x[1:]) +} + +// AggrType represents an aggregation type. +type AggrType uint8 + +// Valid aggregations. +const ( + AggrCount AggrType = iota + AggrSum + AggrMin + AggrMax + AggrCounter +) + +func (t AggrType) String() string { + switch t { + case AggrCount: + return "count" + case AggrSum: + return "sum" + case AggrMin: + return "min" + case AggrMax: + return "max" + case AggrCounter: + return "counter" + } + return "" +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go new file mode 100644 index 0000000000..3c1deb7ef2 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go @@ -0,0 +1,661 @@ +package downsample + +import ( + "math" + "math/rand" + "os" + "path/filepath" + "time" + + "github.com/go-kit/kit/log" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunks" + tsdberrors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/prometheus/prometheus/tsdb/index" + "github.com/prometheus/prometheus/tsdb/labels" + "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/runutil" +) + +// Standard downsampling resolution levels in Thanos. +const ( + ResLevel0 = int64(0) // raw data + ResLevel1 = int64(5 * 60 * 1000) // 5 minutes in milliseconds + ResLevel2 = int64(60 * 60 * 1000) // 1 hour in milliseconds +) + +// Downsample downsamples the given block. 
It writes a new block into dir and returns its ID. +func Downsample( + logger log.Logger, + origMeta *metadata.Meta, + b tsdb.BlockReader, + dir string, + resolution int64, +) (id ulid.ULID, err error) { + if origMeta.Thanos.Downsample.Resolution >= resolution { + return id, errors.New("target resolution not lower than existing one") + } + + indexr, err := b.Index() + if err != nil { + return id, errors.Wrap(err, "open index reader") + } + defer runutil.CloseWithErrCapture(&err, indexr, "downsample index reader") + + chunkr, err := b.Chunks() + if err != nil { + return id, errors.Wrap(err, "open chunk reader") + } + defer runutil.CloseWithErrCapture(&err, chunkr, "downsample chunk reader") + + // Generate new block id. + uid := ulid.MustNew(ulid.Now(), rand.New(rand.NewSource(time.Now().UnixNano()))) + + // Create block directory to populate with chunks, meta and index files into. + blockDir := filepath.Join(dir, uid.String()) + if err := os.MkdirAll(blockDir, 0777); err != nil { + return id, errors.Wrap(err, "mkdir block dir") + } + + // Remove blockDir in case of errors. + defer func() { + if err != nil { + var merr tsdberrors.MultiError + merr.Add(err) + merr.Add(os.RemoveAll(blockDir)) + err = merr.Err() + } + }() + + // Copy original meta to the new one. Update downsampling resolution and ULID for a new block. + newMeta := *origMeta + newMeta.Thanos.Downsample.Resolution = resolution + newMeta.ULID = uid + + // Writes downsampled chunks right into the files, avoiding excess memory allocation. + // Flushes index and meta data after aggregations. + streamedBlockWriter, err := NewStreamedBlockWriter(blockDir, indexr, logger, newMeta) + if err != nil { + return id, errors.Wrap(err, "get streamed block writer") + } + defer runutil.CloseWithErrCapture(&err, streamedBlockWriter, "close stream block writer") + + postings, err := indexr.Postings(index.AllPostingsKey()) + if err != nil { + return id, errors.Wrap(err, "get all postings list") + } + + var ( + aggrChunks []*AggrChunk + all []sample + chks []chunks.Meta + lset labels.Labels + reuseIt chunkenc.Iterator + ) + for postings.Next() { + lset = lset[:0] + chks = chks[:0] + all = all[:0] + aggrChunks = aggrChunks[:0] + + // Get series labels and chunks. Downsampled data is sensitive to chunk boundaries + // and we need to preserve them to properly downsample previously downsampled data. + if err := indexr.Series(postings.At(), &lset, &chks); err != nil { + return id, errors.Wrapf(err, "get series %d", postings.At()) + } + // While #183 exists, we sanitize the chunks we retrieved from the block + // before retrieving their samples. + for i, c := range chks { + chk, err := chunkr.Chunk(c.Ref) + if err != nil { + return id, errors.Wrapf(err, "get chunk %d, series %d", c.Ref, postings.At()) + } + chks[i].Chunk = chk + } + + // Raw and already downsampled data need different processing. + if origMeta.Thanos.Downsample.Resolution == 0 { + for _, c := range chks { + if err := expandChunkIterator(c.Chunk.Iterator(reuseIt), &all); err != nil { + return id, errors.Wrapf(err, "expand chunk %d, series %d", c.Ref, postings.At()) + } + } + if err := streamedBlockWriter.WriteSeries(lset, downsampleRaw(all, resolution)); err != nil { + return id, errors.Wrapf(err, "downsample raw data, series: %d", postings.At()) + } + } else { + // Downsample a block that contains aggregated chunks already. 
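Before the aggregated-chunk path below, a note on the AggrChunk byte layout it relies on: EncodeAggrChunk frames each sub-chunk with a uvarint length prefix (zero length meaning "unset"), and Get walks those frames back. A simplified, runnable sketch of that framing, dropping the per-entry encoding byte the real format also stores:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// encodeParts concatenates byte slices with uvarint length prefixes; a nil
// entry is written as a zero length, like an unset aggregate.
func encodeParts(parts [][]byte) []byte {
	var b []byte
	buf := make([]byte, binary.MaxVarintLen64)
	for _, p := range parts {
		n := binary.PutUvarint(buf, uint64(len(p)))
		b = append(b, buf[:n]...)
		b = append(b, p...)
	}
	return b
}

// part walks the frame and returns the i-th entry, mirroring AggrChunk.Get.
func part(b []byte, i int) ([]byte, error) {
	for ; ; i-- {
		l, n := binary.Uvarint(b)
		if n < 1 || len(b[n:]) < int(l) {
			return nil, errors.New("invalid size")
		}
		b = b[n:]
		if i == 0 {
			return b[:l], nil
		}
		b = b[l:]
	}
}

func main() {
	b := encodeParts([][]byte{[]byte("count"), nil, []byte("max")})
	p, _ := part(b, 2)
	fmt.Printf("%s\n", p) // max
}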
+			for _, c := range chks {
+				aggrChunks = append(aggrChunks, c.Chunk.(*AggrChunk))
+			}
+			downsampledChunks, err := downsampleAggr(
+				aggrChunks,
+				&all,
+				chks[0].MinTime,
+				chks[len(chks)-1].MaxTime,
+				origMeta.Thanos.Downsample.Resolution,
+				resolution,
+			)
+			if err != nil {
+				return id, errors.Wrapf(err, "downsample aggregate block, series: %d", postings.At())
+			}
+			if err := streamedBlockWriter.WriteSeries(lset, downsampledChunks); err != nil {
+				return id, errors.Wrapf(err, "write series: %d", postings.At())
+			}
+		}
+	}
+	if postings.Err() != nil {
+		return id, errors.Wrap(postings.Err(), "iterate series set")
+	}
+
+	id = uid
+	return
+}
+
+// currentWindow returns the end timestamp of the window that t falls into.
+func currentWindow(t, r int64) int64 {
+	// The next timestamp is the next number after s.t that's aligned with window.
+	// We subtract 1 because block ranges are [from, to) and the last sample would
+	// go out of bounds otherwise.
+	return t - (t % r) + r - 1
+}
+
+// rangeFullness returns the fraction of the range [mint, maxt] that is covered
+// by count samples at the given step size.
+// Its return value is bounded to [0, 1].
+func rangeFullness(mint, maxt, step int64, count int) float64 {
+	f := float64(count) / (float64(maxt-mint) / float64(step))
+	if f > 1 {
+		return 1
+	}
+	return f
+}
+
+// targetChunkCount calculates how many chunks should be produced when downsampling a series.
+// It considers the total time range, the number of input samples, and the input and output resolutions.
+func targetChunkCount(mint, maxt, inRes, outRes int64, count int) (x int) {
+	// We compute how many samples we could produce for the given time range and adjust
+	// it by how densely the range is actually filled given the number of input samples and their
+	// resolution.
+	maxSamples := float64((maxt - mint) / outRes)
+	expSamples := int(maxSamples*rangeFullness(mint, maxt, inRes, count)) + 1
+
+	// Increase the number of target chunks until each chunk will have fewer than
+	// 140 samples on average.
+	for x = 1; expSamples/x > 140; x++ {
+	}
+	return x
+}
+
+// aggregator collects cumulative stats for a stream of values.
+type aggregator struct {
+	total   int     // total samples processed
+	count   int     // samples in current window
+	sum     float64 // value sum of current window
+	min     float64 // min of current window
+	max     float64 // max of current window
+	counter float64 // total counter state since beginning
+	resets  int     // number of counter resets since beginning
+	last    float64 // last added value
+}
+
+// reset the stats to start a new aggregation window.
+func (a *aggregator) reset() {
+	a.count = 0
+	a.sum = 0
+	a.min = math.MaxFloat64
+	a.max = -math.MaxFloat64
+}
+
+func (a *aggregator) add(v float64) {
+	if a.total > 0 {
+		if v < a.last {
+			// Counter reset, correct the value.
+			a.counter += v
+			a.resets++
+		} else {
+			// Add delta with last value to the counter.
+			a.counter += v - a.last
+		}
+	} else {
+		// First sample sets the counter.
+		a.counter = v
+	}
+	a.last = v
+
+	a.sum += v
+	a.count++
+	a.total++
+
+	if v < a.min {
+		a.min = v
+	}
+	if v > a.max {
+		a.max = v
+	}
+}
+
+// aggrChunkBuilder builds chunks for multiple different aggregates.
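Before turning to the aggrChunkBuilder below, a quick numeric check of the window helper just defined, showing how samples are binned into inclusive [start, end] resolution windows:

package main

import "fmt"

// currentWindow mirrors the helper above: the inclusive end timestamp of the
// resolution window that t falls into.
func currentWindow(t, r int64) int64 {
	return t - (t % r) + r - 1
}

func main() {
	const res = 5 * 60 * 1000 // ResLevel1: 5 minutes in milliseconds
	// Two samples ten seconds apart land in the same 5m window...
	fmt.Println(currentWindow(61_000, res), currentWindow(71_000, res)) // 299999 299999
	// ...while a sample on the boundary starts the next one.
	fmt.Println(currentWindow(300_000, res)) // 599999
}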
+type aggrChunkBuilder struct { + mint, maxt int64 + added int + + chunks [5]chunkenc.Chunk + apps [5]chunkenc.Appender +} + +func newAggrChunkBuilder() *aggrChunkBuilder { + b := &aggrChunkBuilder{ + mint: math.MaxInt64, + maxt: math.MinInt64, + } + b.chunks[AggrCount] = chunkenc.NewXORChunk() + b.chunks[AggrSum] = chunkenc.NewXORChunk() + b.chunks[AggrMin] = chunkenc.NewXORChunk() + b.chunks[AggrMax] = chunkenc.NewXORChunk() + b.chunks[AggrCounter] = chunkenc.NewXORChunk() + + for i, c := range b.chunks { + if c != nil { + b.apps[i], _ = c.Appender() + } + } + return b +} + +func (b *aggrChunkBuilder) add(t int64, aggr *aggregator) { + if t < b.mint { + b.mint = t + } + if t > b.maxt { + b.maxt = t + } + b.apps[AggrSum].Append(t, aggr.sum) + b.apps[AggrMin].Append(t, aggr.min) + b.apps[AggrMax].Append(t, aggr.max) + b.apps[AggrCount].Append(t, float64(aggr.count)) + b.apps[AggrCounter].Append(t, aggr.counter) + + b.added++ +} + +func (b *aggrChunkBuilder) finalizeChunk(lastT int64, trueSample float64) { + b.apps[AggrCounter].Append(lastT, trueSample) +} + +func (b *aggrChunkBuilder) encode() chunks.Meta { + return chunks.Meta{ + MinTime: b.mint, + MaxTime: b.maxt, + Chunk: EncodeAggrChunk(b.chunks), + } +} + +// downsampleRaw create a series of aggregation chunks for the given sample data. +func downsampleRaw(data []sample, resolution int64) []chunks.Meta { + if len(data) == 0 { + return nil + } + var ( + mint, maxt = data[0].t, data[len(data)-1].t + // We assume a raw resolution of 1 minute. In practice it will often be lower + // but this is sufficient for our heuristic to produce well-sized chunks. + numChunks = targetChunkCount(mint, maxt, 1*60*1000, resolution, len(data)) + chks = make([]chunks.Meta, 0, numChunks) + batchSize = (len(data) / numChunks) + 1 + ) + + for len(data) > 0 { + j := batchSize + if j > len(data) { + j = len(data) + } + curW := currentWindow(data[j-1].t, resolution) + + // The batch we took might end in the middle of a downsampling window. We additionally grab + // all further samples in the window to keep our samples regular. + for ; j < len(data) && data[j].t <= curW; j++ { + } + + ab := newAggrChunkBuilder() + batch := data[:j] + data = data[j:] + + lastT := downsampleBatch(batch, resolution, ab.add) + + // InjectThanosMeta the chunk's counter aggregate with the last true sample. + ab.finalizeChunk(lastT, batch[len(batch)-1].v) + + chks = append(chks, ab.encode()) + } + + return chks +} + +// downsampleBatch aggregates the data over the given resolution and calls add each time +// the end of a resolution was reached. +func downsampleBatch(data []sample, resolution int64, add func(int64, *aggregator)) int64 { + var ( + aggr aggregator + nextT = int64(-1) + lastT = data[len(data)-1].t + ) + // Fill up one aggregate chunk with up to m samples. + for _, s := range data { + if value.IsStaleNaN(s.v) { + continue + } + if s.t > nextT { + if nextT != -1 { + add(nextT, &aggr) + } + aggr.reset() + nextT = currentWindow(s.t, resolution) + // Limit next timestamp to not go beyond the batch. A subsequent batch + // may overlap in time range otherwise. + // We have aligned batches for raw downsamplings but subsequent downsamples + // are forced to be chunk-boundary aligned and cannot guarantee this. + if nextT > lastT { + nextT = lastT + } + } + aggr.add(s.v) + } + // Add the last sample. + add(nextT, &aggr) + + return nextT +} + +// downsampleAggr downsamples a sequence of aggregation chunks to the given resolution. 
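The counter branch of aggregator.add above is the subtle part: a raw value that drops below its predecessor is treated as a counter reset, so the post-reset value is added whole rather than as a delta. A toy replay of that accumulation rule (helper name is illustrative, not from the vendored code):

package main

import "fmt"

// counterTotal accumulates a monotonic counter state from raw samples the
// way aggregator.add does.
func counterTotal(samples []float64) float64 {
	var total, last float64
	for i, v := range samples {
		switch {
		case i == 0:
			total = v // first sample sets the counter state
		case v < last:
			total += v // reset: the new value counts from zero
		default:
			total += v - last
		}
		last = v
	}
	return total
}

func main() {
	// 10 -> 15 -> 2 (reset) -> 7 accumulates 10 + 5 + 2 + 5 = 22.
	fmt.Println(counterTotal([]float64{10, 15, 2, 7}))
}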
+func downsampleAggr(chks []*AggrChunk, buf *[]sample, mint, maxt, inRes, outRes int64) ([]chunks.Meta, error) { + // We downsample aggregates only along chunk boundaries. This is required for counters + // to be downsampled correctly since a chunks' last counter value is the true last value + // of the original series. We need to preserve it even across multiple aggregation iterations. + var numSamples int + for _, c := range chks { + numSamples += c.NumSamples() + } + var ( + numChunks = targetChunkCount(mint, maxt, inRes, outRes, numSamples) + res = make([]chunks.Meta, 0, numChunks) + batchSize = len(chks) / numChunks + ) + + for len(chks) > 0 { + j := batchSize + if j > len(chks) { + j = len(chks) + } + part := chks[:j] + chks = chks[j:] + + chk, err := downsampleAggrBatch(part, buf, outRes) + if err != nil { + return nil, err + } + res = append(res, chk) + } + return res, nil +} + +// expandChunkIterator reads all samples from the iterator and appends them to buf. +// Stale markers and out of order samples are skipped. +func expandChunkIterator(it chunkenc.Iterator, buf *[]sample) error { + // For safety reasons, we check for each sample that it does not go back in time. + // If it does, we skip it. + lastT := int64(0) + + for it.Next() { + t, v := it.At() + if value.IsStaleNaN(v) { + continue + } + if t >= lastT { + *buf = append(*buf, sample{t, v}) + lastT = t + } + } + return it.Err() +} + +func downsampleAggrBatch(chks []*AggrChunk, buf *[]sample, resolution int64) (chk chunks.Meta, err error) { + ab := &aggrChunkBuilder{} + mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) + var reuseIt chunkenc.Iterator + + // do does a generic aggregation for count, sum, min, and max aggregates. + // Counters need special treatment. + do := func(at AggrType, f func(a *aggregator) float64) error { + *buf = (*buf)[:0] + // Expand all samples for the aggregate type. + for _, chk := range chks { + c, err := chk.Get(at) + if err == ErrAggrNotExist { + continue + } else if err != nil { + return err + } + if err := expandChunkIterator(c.Iterator(reuseIt), buf); err != nil { + return err + } + } + if len(*buf) == 0 { + return nil + } + ab.chunks[at] = chunkenc.NewXORChunk() + ab.apps[at], _ = ab.chunks[at].Appender() + + downsampleBatch(*buf, resolution, func(t int64, a *aggregator) { + if t < mint { + mint = t + } else if t > maxt { + maxt = t + } + ab.apps[at].Append(t, f(a)) + }) + return nil + } + if err := do(AggrCount, func(a *aggregator) float64 { + return a.sum + }); err != nil { + return chk, err + } + if err = do(AggrSum, func(a *aggregator) float64 { + return a.sum + }); err != nil { + return chk, err + } + if err := do(AggrMin, func(a *aggregator) float64 { + return a.min + }); err != nil { + return chk, err + } + if err := do(AggrMax, func(a *aggregator) float64 { + return a.max + }); err != nil { + return chk, err + } + + // Handle counters by reading them properly. + acs := make([]chunkenc.Iterator, 0, len(chks)) + for _, achk := range chks { + c, err := achk.Get(AggrCounter) + if err == ErrAggrNotExist { + continue + } else if err != nil { + return chk, err + } + acs = append(acs, c.Iterator(reuseIt)) + } + *buf = (*buf)[:0] + it := NewCounterSeriesIterator(acs...) 
+ + if err := expandChunkIterator(it, buf); err != nil { + return chk, err + } + if len(*buf) == 0 { + ab.mint = mint + ab.maxt = maxt + return ab.encode(), nil + } + ab.chunks[AggrCounter] = chunkenc.NewXORChunk() + ab.apps[AggrCounter], _ = ab.chunks[AggrCounter].Appender() + + lastT := downsampleBatch(*buf, resolution, func(t int64, a *aggregator) { + if t < mint { + mint = t + } else if t > maxt { + maxt = t + } + ab.apps[AggrCounter].Append(t, a.counter) + }) + ab.apps[AggrCounter].Append(lastT, it.lastV) + + ab.mint = mint + ab.maxt = maxt + return ab.encode(), nil +} + +type sample struct { + t int64 + v float64 +} + +// CounterSeriesIterator iterates over an ordered sequence of chunks and treats decreasing +// values as counter reset. +// Additionally, it can deal with downsampled counter chunks, which set the last value of a chunk +// to the original last value. The last value can be detected by checking whether the timestamp +// did not increase w.r.t to the previous sample +type CounterSeriesIterator struct { + chks []chunkenc.Iterator + i int // current chunk + total int // total number of processed samples + lastT int64 // timestamp of the last sample + lastV float64 // value of the last sample + totalV float64 // total counter state since beginning of series +} + +func NewCounterSeriesIterator(chks ...chunkenc.Iterator) *CounterSeriesIterator { + return &CounterSeriesIterator{chks: chks} +} + +func (it *CounterSeriesIterator) Next() bool { + if it.i >= len(it.chks) { + return false + } + if ok := it.chks[it.i].Next(); !ok { + it.i++ + // While iterators are ordered, they are not generally guaranteed to be + // non-overlapping. Ensure that the series does not go back in time by seeking at least + // to the next timestamp. + return it.Seek(it.lastT + 1) + } + t, v := it.chks[it.i].At() + + if math.IsNaN(v) { + return it.Next() + } + // First sample sets the initial counter state. + if it.total == 0 { + it.total++ + it.lastT, it.lastV = t, v + it.totalV = v + return true + } + // If the timestamp increased, it is not the special last sample. + if t > it.lastT { + if v >= it.lastV { + it.totalV += v - it.lastV + } else { + it.totalV += v + } + it.lastT, it.lastV = t, v + it.total++ + return true + } + // We hit a sample that indicates what the true last value was. For the + // next chunk we use it to determine whether there was a counter reset between them. + if t == it.lastT { + it.lastV = v + } + // Otherwise the series went back in time and we just keep moving forward. + + return it.Next() +} + +func (it *CounterSeriesIterator) At() (t int64, v float64) { + return it.lastT, it.totalV +} + +func (it *CounterSeriesIterator) Seek(x int64) bool { + for { + if t, _ := it.At(); t >= x { + return true + } + + ok := it.Next() + if !ok { + return false + } + } +} + +func (it *CounterSeriesIterator) Err() error { + if it.i >= len(it.chks) { + return nil + } + return it.chks[it.i].Err() +} + +// AverageChunkIterator emits an artificial series of average samples based in aggregate +// chunks with sum and count aggregates. 
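The iterator defined below simply divides the two parallel aggregate streams per timestamp. A toy version of that pairing, assuming aligned inputs (a hypothetical helper, not the vendored iterator, which additionally checks that both inputs advance and align):

package main

import (
	"errors"
	"fmt"
)

// averages zips parallel sum and count series into average samples, the same
// per-timestamp division AverageChunkIterator performs.
func averages(sums, counts []float64) ([]float64, error) {
	if len(sums) != len(counts) {
		return nil, errors.New("sum and count series not aligned")
	}
	avgs := make([]float64, len(sums))
	for i := range sums {
		avgs[i] = sums[i] / counts[i]
	}
	return avgs, nil
}

func main() {
	avgs, _ := averages([]float64{10, 30}, []float64{2, 3})
	fmt.Println(avgs) // [5 10]
}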
+type AverageChunkIterator struct { + cntIt chunkenc.Iterator + sumIt chunkenc.Iterator + t int64 + v float64 + err error +} + +func NewAverageChunkIterator(cnt, sum chunkenc.Iterator) *AverageChunkIterator { + return &AverageChunkIterator{cntIt: cnt, sumIt: sum} +} + +func (it *AverageChunkIterator) Next() bool { + cok, sok := it.cntIt.Next(), it.sumIt.Next() + if cok != sok { + it.err = errors.New("sum and count iterator not aligned") + return false + } + if !cok { + return false + } + + cntT, cntV := it.cntIt.At() + sumT, sumV := it.sumIt.At() + if cntT != sumT { + it.err = errors.New("sum and count timestamps not aligned") + return false + } + it.t, it.v = cntT, sumV/cntV + return true +} + +func (it *AverageChunkIterator) At() (int64, float64) { + return it.t, it.v +} + +func (it *AverageChunkIterator) Err() error { + if it.cntIt.Err() != nil { + return it.cntIt.Err() + } + if it.sumIt.Err() != nil { + return it.sumIt.Err() + } + return it.err +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/pool.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/pool.go new file mode 100644 index 0000000000..f591b16548 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/pool.go @@ -0,0 +1,58 @@ +package downsample + +import ( + "sync" + + "github.com/prometheus/prometheus/tsdb/chunkenc" +) + +// Pool is a memory pool of chunk objects, supporting Thanos aggregated chunk encoding. +// It maintains separate pools for xor and aggr chunks. +type pool struct { + wrapped chunkenc.Pool + aggr sync.Pool +} + +// TODO(bwplotka): Add reasonable limits to our sync pooling them to detect OOMs early. +func NewPool() chunkenc.Pool { + return &pool{ + wrapped: chunkenc.NewPool(), + aggr: sync.Pool{ + New: func() interface{} { + return &AggrChunk{} + }, + }, + } +} + +func (p *pool) Get(e chunkenc.Encoding, b []byte) (chunkenc.Chunk, error) { + switch e { + case ChunkEncAggr: + c := p.aggr.Get().(*AggrChunk) + *c = AggrChunk(b) + return c, nil + } + + return p.wrapped.Get(e, b) + +} + +func (p *pool) Put(c chunkenc.Chunk) error { + switch c.Encoding() { + case ChunkEncAggr: + ac, ok := c.(*AggrChunk) + // This may happen often with wrapped chunks. Nothing we can really do about + // it but returning an error would cause a lot of allocations again. Thus, + // we just skip it. + if !ok { + return nil + } + + // Clear []byte. 
+ *ac = AggrChunk(nil) + p.aggr.Put(ac) + return nil + } + + return p.wrapped.Put(c) +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/streamed_block_writer.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/streamed_block_writer.go new file mode 100644 index 0000000000..ddbd87b435 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/streamed_block_writer.go @@ -0,0 +1,278 @@ +package downsample + +import ( + "io" + "path/filepath" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunks" + tsdberrors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/prometheus/prometheus/tsdb/index" + "github.com/prometheus/prometheus/tsdb/labels" + "github.com/thanos-io/thanos/pkg/block" + "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/runutil" +) + +type labelValues map[string]struct{} + +func (lv labelValues) add(value string) { + lv[value] = struct{}{} +} + +func (lv labelValues) get(set *[]string) { + for value := range lv { + *set = append(*set, value) + } +} + +type labelsValues map[string]labelValues + +func (lv labelsValues) add(labelSet labels.Labels) { + for _, label := range labelSet { + values, ok := lv[label.Name] + if !ok { + // Add new label. + values = labelValues{} + lv[label.Name] = values + } + values.add(label.Value) + } +} + +// streamedBlockWriter writes downsampled blocks to a new data block. Implemented to save memory consumption +// by writing chunks data right into the files, omitting keeping them in-memory. Index and meta data should be +// sealed afterwards, when there aren't more series to process. +type streamedBlockWriter struct { + blockDir string + finalized bool // set to true, if Close was called + logger log.Logger + ignoreFinalize bool // if true Close does not finalize block due to internal error. + meta metadata.Meta + totalChunks uint64 + totalSamples uint64 + + chunkWriter tsdb.ChunkWriter + indexWriter tsdb.IndexWriter + indexReader tsdb.IndexReader + closers []io.Closer + + labelsValues labelsValues // labelsValues list of used label sets: name -> []values. + memPostings *index.MemPostings // memPostings contains references from label name:value -> postings. + postings uint64 // postings is a current posting position. +} + +// NewStreamedBlockWriter returns streamedBlockWriter instance, it's not concurrency safe. +// Caller is responsible to Close all io.Closers by calling the Close when downsampling is done. +// In case if error happens outside of the StreamedBlockWriter during the processing, +// index and meta files will be written anyway, so the caller is always responsible for removing block directory with +// a garbage on error. +// This approach simplifies StreamedBlockWriter interface, which is a best trade-off taking into account the error is an +// exception, not a general case. +func NewStreamedBlockWriter( + blockDir string, + indexReader tsdb.IndexReader, + logger log.Logger, + originMeta metadata.Meta, +) (w *streamedBlockWriter, err error) { + closers := make([]io.Closer, 0, 2) + + // We should close any opened Closer up to an error. 
+	defer func() {
+		if err != nil {
+			var merr tsdberrors.MultiError
+			merr.Add(err)
+			for _, cl := range closers {
+				merr.Add(cl.Close())
+			}
+			err = merr.Err()
+		}
+	}()
+
+	chunkWriter, err := chunks.NewWriter(filepath.Join(blockDir, block.ChunksDirname))
+	if err != nil {
+		return nil, errors.Wrap(err, "create chunk writer in streamedBlockWriter")
+	}
+	closers = append(closers, chunkWriter)
+
+	indexWriter, err := index.NewWriter(filepath.Join(blockDir, block.IndexFilename))
+	if err != nil {
+		return nil, errors.Wrap(err, "open index writer in streamedBlockWriter")
+	}
+	closers = append(closers, indexWriter)
+
+	symbols, err := indexReader.Symbols()
+	if err != nil {
+		return nil, errors.Wrap(err, "read symbols")
+	}
+
+	err = indexWriter.AddSymbols(symbols)
+	if err != nil {
+		return nil, errors.Wrap(err, "add symbols")
+	}
+
+	return &streamedBlockWriter{
+		logger:       logger,
+		blockDir:     blockDir,
+		indexReader:  indexReader,
+		indexWriter:  indexWriter,
+		chunkWriter:  chunkWriter,
+		meta:         originMeta,
+		closers:      closers,
+		labelsValues: make(labelsValues, 1024),
+		memPostings:  index.NewUnorderedMemPostings(),
+	}, nil
+}
+
+// WriteSeries writes chunk data to the chunkWriter, writes lset and chunk Metas to the indexWriter, and adds label sets
+// to the labelsValues set and memPostings, which are written out in the finalize stage at the end of the downsampling process.
+func (w *streamedBlockWriter) WriteSeries(lset labels.Labels, chunks []chunks.Meta) error {
+	if w.finalized || w.ignoreFinalize {
+		return errors.Errorf("series can't be added, writers have been closed or an internal error happened")
+	}
+
+	if len(chunks) == 0 {
+		level.Warn(w.logger).Log("empty chunks happened, skip series", lset)
+		return nil
+	}
+
+	if err := w.chunkWriter.WriteChunks(chunks...); err != nil {
+		w.ignoreFinalize = true
+		return errors.Wrap(err, "add chunks")
+	}
+
+	if err := w.indexWriter.AddSeries(w.postings, lset, chunks...); err != nil {
+		w.ignoreFinalize = true
+		return errors.Wrap(err, "add series")
+	}
+
+	w.labelsValues.add(lset)
+	w.memPostings.Add(w.postings, lset)
+	w.postings++
+
+	w.totalChunks += uint64(len(chunks))
+	for i := range chunks {
+		w.totalSamples += uint64(chunks[i].Chunk.NumSamples())
+	}
+
+	return nil
+}
+
+// Close calls the finalizer to complete the index and meta files and closes all io.Closer writers.
+// Idempotent.
+func (w *streamedBlockWriter) Close() error {
+	if w.finalized {
+		return nil
+	}
+	w.finalized = true
+
+	merr := tsdberrors.MultiError{}
+
+	if w.ignoreFinalize {
+		// Close open file descriptors anyway.
+		for _, cl := range w.closers {
+			merr.Add(cl.Close())
+		}
+		return merr.Err()
+	}
+
+	// Finalize saves prepared index and metadata to corresponding files.
+
+	if err := w.writeLabelSets(); err != nil {
+		return errors.Wrap(err, "write label sets")
+	}
+
+	if err := w.writeMemPostings(); err != nil {
+		return errors.Wrap(err, "write mem postings")
+	}
+
+	for _, cl := range w.closers {
+		merr.Add(cl.Close())
+	}
+
+	if err := block.WriteIndexCache(
+		w.logger,
+		filepath.Join(w.blockDir, block.IndexFilename),
+		filepath.Join(w.blockDir, block.IndexCacheFilename),
+	); err != nil {
+		return errors.Wrap(err, "write index cache")
+	}
+
+	if err := w.writeMetaFile(); err != nil {
+		return errors.Wrap(err, "write meta file")
+	}
+
+	if err := w.syncDir(); err != nil {
+		return errors.Wrap(err, "sync blockDir")
+	}
+
+	if err := merr.Err(); err != nil {
+		return errors.Wrap(err, "finalize")
+	}
+
+	// No error, claim success.
+ + level.Info(w.logger).Log( + "msg", "finalized downsampled block", + "mint", w.meta.MinTime, + "maxt", w.meta.MaxTime, + "ulid", w.meta.ULID, + "resolution", w.meta.Thanos.Downsample.Resolution, + ) + return nil +} + +// syncDir syncs blockDir on disk. +func (w *streamedBlockWriter) syncDir() (err error) { + df, err := fileutil.OpenDir(w.blockDir) + if err != nil { + return errors.Wrap(err, "open temporary block blockDir") + } + + defer runutil.CloseWithErrCapture(&err, df, "close temporary block blockDir") + + if err := fileutil.Fdatasync(df); err != nil { + return errors.Wrap(err, "sync temporary blockDir") + } + + return nil +} + +// writeLabelSets fills the index writer with label sets. +func (w *streamedBlockWriter) writeLabelSets() error { + s := make([]string, 0, 256) + for n, v := range w.labelsValues { + s = s[:0] + v.get(&s) + if err := w.indexWriter.WriteLabelIndex([]string{n}, s); err != nil { + return errors.Wrap(err, "write label index") + } + } + return nil +} + +// writeMemPostings fills the index writer with mem postings. +func (w *streamedBlockWriter) writeMemPostings() error { + w.memPostings.EnsureOrder() + for _, l := range w.memPostings.SortedKeys() { + if err := w.indexWriter.WritePostings(l.Name, l.Value, w.memPostings.Get(l.Name, l.Value)); err != nil { + return errors.Wrap(err, "write postings") + } + } + return nil +} + +// writeMetaFile writes meta file. +func (w *streamedBlockWriter) writeMetaFile() error { + w.meta.Version = metadata.MetaVersion1 + w.meta.Thanos.Source = metadata.CompactorSource + w.meta.Stats.NumChunks = w.totalChunks + w.meta.Stats.NumSamples = w.totalSamples + w.meta.Stats.NumSeries = w.postings + + return metadata.Write(w.logger, w.blockDir, &w.meta) +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/component/component.go b/vendor/github.com/thanos-io/thanos/pkg/component/component.go new file mode 100644 index 0000000000..d00966fc2e --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/component/component.go @@ -0,0 +1,95 @@ +package component + +import ( + "strings" + + "github.com/thanos-io/thanos/pkg/store/storepb" +) + +// Component is a generic component interface. +type Component interface { + String() string +} + +// StoreAPI is a component that implements Thanos' gRPC StoreAPI. +type StoreAPI interface { + implementsStoreAPI() + String() string + ToProto() storepb.StoreType +} + +// Source is a Thanos component that produce blocks of metrics. +type Source interface { + producesBlocks() + String() string +} + +// SourceStoreAPI is a component that implements Thanos' gRPC StoreAPI +// and produce blocks of metrics. +type SourceStoreAPI interface { + implementsStoreAPI() + producesBlocks() + String() string + ToProto() storepb.StoreType +} + +type component struct { + name string +} + +func (c component) String() string { return c.name } + +type storeAPI struct { + component +} + +func (storeAPI) implementsStoreAPI() {} + +func (s sourceStoreAPI) ToProto() storepb.StoreType { + return storepb.StoreType(storepb.StoreType_value[strings.ToUpper(s.String())]) +} + +func (s storeAPI) ToProto() storepb.StoreType { + return storepb.StoreType(storepb.StoreType_value[strings.ToUpper(s.String())]) +} + +type source struct { + component +} + +func (source) producesBlocks() {} + +type sourceStoreAPI struct { + component + source + storeAPI +} + +// FromProto converts from a gRPC StoreType to StoreAPI. 
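Before moving on to the component mapping below, the overall contract the streamed writer above implies is worth making explicit: stream series one at a time, then finalize index and meta exactly once on Close, which must stay idempotent. A minimal sketch of that lifecycle with a hypothetical stand-in type (not the vendored writer):

package main

import "fmt"

// demoWriter stands in for the real chunk/index writers; it only counts what
// it would have written and enforces the write-then-finalize ordering.
type demoWriter struct {
	series, chunks int
	finalized      bool
}

func (w *demoWriter) WriteSeries(labels string, chunks int) error {
	if w.finalized {
		return fmt.Errorf("series can't be added, writer has been closed")
	}
	w.series++
	w.chunks += chunks
	return nil
}

// Close is idempotent, like streamedBlockWriter.Close.
func (w *demoWriter) Close() error {
	if w.finalized {
		return nil
	}
	w.finalized = true
	fmt.Printf("finalized: %d series, %d chunks\n", w.series, w.chunks)
	return nil
}

func main() {
	w := &demoWriter{}
	_ = w.WriteSeries(`{job="a"}`, 3)
	_ = w.WriteSeries(`{job="b"}`, 2)
	_ = w.Close()
	_ = w.Close() // safe to call twice
}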
+func FromProto(storeType storepb.StoreType) StoreAPI {
+	switch storeType {
+	case storepb.StoreType_QUERY:
+		return Query
+	case storepb.StoreType_RULE:
+		return Rule
+	case storepb.StoreType_SIDECAR:
+		return Sidecar
+	case storepb.StoreType_STORE:
+		return Store
+	case storepb.StoreType_RECEIVE:
+		return Receive
+	default:
+		return nil
+	}
+}
+
+var (
+	Bucket     = source{component: component{name: "bucket"}}
+	Compact    = source{component: component{name: "compact"}}
+	Downsample = source{component: component{name: "downsample"}}
+	Query      = sourceStoreAPI{component: component{name: "query"}}
+	Rule       = sourceStoreAPI{component: component{name: "rule"}}
+	Sidecar    = sourceStoreAPI{component: component{name: "sidecar"}}
+	Store      = sourceStoreAPI{component: component{name: "store"}}
+	Receive    = sourceStoreAPI{component: component{name: "receive"}}
+)
diff --git a/vendor/github.com/thanos-io/thanos/pkg/extprom/extprom.go b/vendor/github.com/thanos-io/thanos/pkg/extprom/extprom.go
new file mode 100644
index 0000000000..f2d7d09ab1
--- /dev/null
+++ b/vendor/github.com/thanos-io/thanos/pkg/extprom/extprom.go
@@ -0,0 +1,21 @@
+package extprom
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// WrapRegistererWithPrefix is like prometheus.WrapRegistererWithPrefix but it passes nil straight through,
+// which allows a nil check.
+func WrapRegistererWithPrefix(prefix string, reg prometheus.Registerer) prometheus.Registerer {
+	if reg == nil {
+		return nil
+	}
+	return prometheus.WrapRegistererWithPrefix(prefix, reg)
+}
+
+// WrapRegistererWith is like prometheus.WrapRegistererWith but it passes nil straight through,
+// which allows a nil check.
+func WrapRegistererWith(labels prometheus.Labels, reg prometheus.Registerer) prometheus.Registerer {
+	if reg == nil {
+		return nil
+	}
+	return prometheus.WrapRegistererWith(labels, reg)
+}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/model/timeduration.go b/vendor/github.com/thanos-io/thanos/pkg/model/timeduration.go
new file mode 100644
index 0000000000..bbe766043f
--- /dev/null
+++ b/vendor/github.com/thanos-io/thanos/pkg/model/timeduration.go
@@ -0,0 +1,75 @@
+package model
+
+import (
+	"time"
+
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/timestamp"
+	"gopkg.in/alecthomas/kingpin.v2"
+)
+
+// TimeOrDurationValue is a custom kingpin parser for a time in RFC3339 format
+// or a duration in Go's duration format, such as "300ms", "-1.5h" or "2h45m".
+// Only one of the fields will be set.
+type TimeOrDurationValue struct {
+	Time *time.Time
+	Dur  *model.Duration
+}
+
+// Set converts a string to a TimeOrDurationValue.
+func (tdv *TimeOrDurationValue) Set(s string) error {
+	t, err := time.Parse(time.RFC3339, s)
+	if err == nil {
+		tdv.Time = &t
+		return nil
+	}
+
+	// Error parsing time, let's try duration.
+	var minus bool
+	if s[0] == '-' {
+		minus = true
+		s = s[1:]
+	}
+	dur, err := model.ParseDuration(s)
+	if err != nil {
+		return err
+	}
+
+	if minus {
+		dur = dur * -1
+	}
+	tdv.Dur = &dur
+	return nil
+}
+
+// String returns either the time or the duration.
+func (tdv *TimeOrDurationValue) String() string {
+	switch {
+	case tdv.Time != nil:
+		return tdv.Time.String()
+	case tdv.Dur != nil:
+		return tdv.Dur.String()
+	}
+
+	return "nil"
+}
+
+// PrometheusTimestamp returns the TimeOrDurationValue converted to a Prometheus timestamp;
+// if a duration is set, now+duration is converted to a timestamp.
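The parse-time-then-fall-back-to-duration behaviour of Set above can be reproduced with the standard library alone; here time.ParseDuration stands in for Prometheus' model.ParseDuration (which also accepts units like "1d"), so treat this as a sketch of the control flow rather than the exact grammar:

package main

import (
	"fmt"
	"time"
)

// parseTimeOrDuration returns exactly one non-nil result: an absolute time if
// s is RFC3339, otherwise a (possibly negative) duration.
func parseTimeOrDuration(s string) (*time.Time, *time.Duration, error) {
	if t, err := time.Parse(time.RFC3339, s); err == nil {
		return &t, nil, nil
	}
	d, err := time.ParseDuration(s) // stdlib already understands a leading "-"
	if err != nil {
		return nil, nil, err
	}
	return nil, &d, nil
}

func main() {
	t, _, _ := parseTimeOrDuration("2019-08-01T00:00:00Z")
	_, d, _ := parseTimeOrDuration("-1.5h")
	fmt.Println(t, d)
}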
+func (tdv *TimeOrDurationValue) PrometheusTimestamp() int64 { + switch { + case tdv.Time != nil: + return timestamp.FromTime(*tdv.Time) + case tdv.Dur != nil: + return timestamp.FromTime(time.Now().Add(time.Duration(*tdv.Dur))) + } + + return 0 +} + +// TimeOrDuration helper for parsing TimeOrDuration with kingpin. +func TimeOrDuration(flags *kingpin.FlagClause) *TimeOrDurationValue { + value := new(TimeOrDurationValue) + flags.SetValue(value) + return value +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/objstore.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/objstore.go new file mode 100644 index 0000000000..79a443fa78 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/objstore.go @@ -0,0 +1,382 @@ +package objstore + +import ( + "context" + "io" + "os" + "path/filepath" + "strings" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/thanos/pkg/runutil" +) + +// Bucket provides read and write access to an object storage bucket. +// NOTE: We assume strong consistency for write-read flow. +type Bucket interface { + io.Closer + BucketReader + + // Upload the contents of the reader as an object into the bucket. + Upload(ctx context.Context, name string, r io.Reader) error + + // Delete removes the object with the given name. + Delete(ctx context.Context, name string) error + + // Name returns the bucket name for the provider. + Name() string +} + +// BucketReader provides read access to an object storage bucket. +type BucketReader interface { + // Iter calls f for each entry in the given directory (not recursive.). The argument to f is the full + // object name including the prefix of the inspected directory. + Iter(ctx context.Context, dir string, f func(string) error) error + + // Get returns a reader for the given object name. + Get(ctx context.Context, name string) (io.ReadCloser, error) + + // GetRange returns a new range reader for the given object name and range. + GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) + + // Exists checks if the given object exists in the bucket. + // TODO(bplotka): Consider removing Exists in favor of helper that do Get & IsObjNotFoundErr (less code to maintain). + Exists(ctx context.Context, name string) (bool, error) + + // IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. + IsObjNotFoundErr(err error) bool +} + +// UploadDir uploads all files in srcdir to the bucket with into a top-level directory +// named dstdir. It is a caller responsibility to clean partial upload in case of failure. +func UploadDir(ctx context.Context, logger log.Logger, bkt Bucket, srcdir, dstdir string) error { + df, err := os.Stat(srcdir) + if err != nil { + return errors.Wrap(err, "stat dir") + } + if !df.IsDir() { + return errors.Errorf("%s is not a directory", srcdir) + } + return filepath.Walk(srcdir, func(src string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + if fi.IsDir() { + return nil + } + dst := filepath.Join(dstdir, strings.TrimPrefix(src, srcdir)) + + return UploadFile(ctx, logger, bkt, src, dst) + }) +} + +// UploadFile uploads the file with the given name to the bucket. 
+// It is a caller responsibility to clean partial upload in case of failure +func UploadFile(ctx context.Context, logger log.Logger, bkt Bucket, src, dst string) error { + r, err := os.Open(src) + if err != nil { + return errors.Wrapf(err, "open file %s", src) + } + defer runutil.CloseWithLogOnErr(logger, r, "close file %s", src) + + if err := bkt.Upload(ctx, dst, r); err != nil { + return errors.Wrapf(err, "upload file %s as %s", src, dst) + } + return nil +} + +// DirDelim is the delimiter used to model a directory structure in an object store bucket. +const DirDelim = "/" + +// DeleteDir removes all objects prefixed with dir from the bucket. +func DeleteDir(ctx context.Context, bkt Bucket, dir string) error { + return bkt.Iter(ctx, dir, func(name string) error { + // If we hit a directory, call DeleteDir recursively. + if strings.HasSuffix(name, DirDelim) { + return DeleteDir(ctx, bkt, name) + } + return bkt.Delete(ctx, name) + }) +} + +// DownloadFile downloads the src file from the bucket to dst. If dst is an existing +// directory, a file with the same name as the source is created in dst. +// If destination file is already existing, download file will overwrite it. +func DownloadFile(ctx context.Context, logger log.Logger, bkt BucketReader, src, dst string) (err error) { + if fi, err := os.Stat(dst); err == nil { + if fi.IsDir() { + dst = filepath.Join(dst, filepath.Base(src)) + } + } else if !os.IsNotExist(err) { + return err + } + + rc, err := bkt.Get(ctx, src) + if err != nil { + return errors.Wrap(err, "get file") + } + defer runutil.CloseWithLogOnErr(logger, rc, "download block's file reader") + + f, err := os.Create(dst) + if err != nil { + return errors.Wrap(err, "create file") + } + defer func() { + if err != nil { + if rerr := os.Remove(dst); rerr != nil { + level.Warn(logger).Log("msg", "failed to remove partially downloaded file", "file", dst, "err", rerr) + } + } + }() + defer runutil.CloseWithLogOnErr(logger, f, "download block's output file") + + if _, err = io.Copy(f, rc); err != nil { + return errors.Wrap(err, "copy object to file") + } + return nil +} + +// DownloadDir downloads all object found in the directory into the local directory. +func DownloadDir(ctx context.Context, logger log.Logger, bkt BucketReader, src, dst string) error { + if err := os.MkdirAll(dst, 0777); err != nil { + return errors.Wrap(err, "create dir") + } + + var downloadedFiles []string + if err := bkt.Iter(ctx, src, func(name string) error { + if strings.HasSuffix(name, DirDelim) { + return DownloadDir(ctx, logger, bkt, name, filepath.Join(dst, filepath.Base(name))) + } + if err := DownloadFile(ctx, logger, bkt, name, dst); err != nil { + return err + } + + downloadedFiles = append(downloadedFiles, dst) + return nil + }); err != nil { + // Best-effort cleanup if the download failed. + for _, f := range downloadedFiles { + if rerr := os.Remove(f); rerr != nil { + level.Warn(logger).Log("msg", "failed to remove file on partial dir download error", "file", f, "err", rerr) + } + } + return err + } + + return nil +} + +// Exists returns true, if file exists, otherwise false and nil error if presence IsObjNotFoundErr, otherwise false with +// returning error. 
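The UploadDir helper above is a straightforward filepath.Walk over the source tree, mapping every regular file to an object name under the destination prefix. A dry-run sketch of that traversal, with the actual bucket upload stubbed out by a print (helper name is illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// listUploads walks srcdir the way UploadDir does and reports the object name
// each regular file would be uploaded under.
func listUploads(srcdir, dstdir string) error {
	return filepath.Walk(srcdir, func(src string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if fi.IsDir() {
			return nil
		}
		dst := filepath.Join(dstdir, strings.TrimPrefix(src, srcdir))
		fmt.Printf("would upload %s as %s\n", src, dst)
		return nil
	})
}

func main() {
	_ = listUploads(os.TempDir(), "01DEADBEEF/chunks")
}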
+func Exists(ctx context.Context, bkt Bucket, src string) (bool, error) { + rc, err := bkt.Get(ctx, src) + if rc != nil { + _ = rc.Close() + } + if err != nil { + if bkt.IsObjNotFoundErr(err) { + return false, nil + } + return false, errors.Wrap(err, "stat object") + } + + return true, nil +} + +// BucketWithMetrics takes a bucket and registers metrics with the given registry for +// operations run against the bucket. +func BucketWithMetrics(name string, b Bucket, r prometheus.Registerer) Bucket { + bkt := &metricBucket{ + bkt: b, + + ops: prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_objstore_bucket_operations_total", + Help: "Total number of operations against a bucket.", + ConstLabels: prometheus.Labels{"bucket": name}, + }, []string{"operation"}), + + opsFailures: prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_objstore_bucket_operation_failures_total", + Help: "Total number of operations against a bucket that failed.", + ConstLabels: prometheus.Labels{"bucket": name}, + }, []string{"operation"}), + + opsDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "thanos_objstore_bucket_operation_duration_seconds", + Help: "Duration of operations against the bucket", + ConstLabels: prometheus.Labels{"bucket": name}, + Buckets: []float64{0.005, 0.01, 0.02, 0.04, 0.08, 0.15, 0.3, 0.6, 1, 1.5, 2.5, 5, 10, 20, 30}, + }, []string{"operation"}), + lastSuccessfullUploadTime: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "thanos_objstore_bucket_last_successful_upload_time", + Help: "Second timestamp of the last successful upload to the bucket.", + }, []string{"bucket"}), + } + if r != nil { + r.MustRegister(bkt.ops, bkt.opsFailures, bkt.opsDuration, bkt.lastSuccessfullUploadTime) + } + return bkt +} + +type metricBucket struct { + bkt Bucket + + ops *prometheus.CounterVec + opsFailures *prometheus.CounterVec + opsDuration *prometheus.HistogramVec + lastSuccessfullUploadTime *prometheus.GaugeVec +} + +func (b *metricBucket) Iter(ctx context.Context, dir string, f func(name string) error) error { + const op = "iter" + + err := b.bkt.Iter(ctx, dir, f) + if err != nil { + b.opsFailures.WithLabelValues(op).Inc() + } + b.ops.WithLabelValues(op).Inc() + + return err +} + +func (b *metricBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { + const op = "get" + b.ops.WithLabelValues(op).Inc() + + rc, err := b.bkt.Get(ctx, name) + if err != nil { + b.opsFailures.WithLabelValues(op).Inc() + return nil, err + } + rc = newTimingReadCloser( + rc, + op, + b.opsDuration, + b.opsFailures, + ) + + return rc, nil +} + +func (b *metricBucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { + const op = "get_range" + b.ops.WithLabelValues(op).Inc() + + rc, err := b.bkt.GetRange(ctx, name, off, length) + if err != nil { + b.opsFailures.WithLabelValues(op).Inc() + return nil, err + } + rc = newTimingReadCloser( + rc, + op, + b.opsDuration, + b.opsFailures, + ) + + return rc, nil +} + +func (b *metricBucket) Exists(ctx context.Context, name string) (bool, error) { + const op = "exists" + start := time.Now() + + ok, err := b.bkt.Exists(ctx, name) + if err != nil { + b.opsFailures.WithLabelValues(op).Inc() + } + b.ops.WithLabelValues(op).Inc() + b.opsDuration.WithLabelValues(op).Observe(time.Since(start).Seconds()) + + return ok, err +} + +func (b *metricBucket) Upload(ctx context.Context, name string, r io.Reader) error { + const op = "upload" + start := time.Now() + + err := b.bkt.Upload(ctx, name, r) + if 
err != nil { + b.opsFailures.WithLabelValues(op).Inc() + } else { + //TODO: Use SetToCurrentTime() once we update the Prometheus client_golang + b.lastSuccessfullUploadTime.WithLabelValues(b.bkt.Name()).Set(float64(time.Now().UnixNano()) / 1e9) + } + b.ops.WithLabelValues(op).Inc() + b.opsDuration.WithLabelValues(op).Observe(time.Since(start).Seconds()) + + return err +} + +func (b *metricBucket) Delete(ctx context.Context, name string) error { + const op = "delete" + start := time.Now() + + err := b.bkt.Delete(ctx, name) + if err != nil { + b.opsFailures.WithLabelValues(op).Inc() + } + b.ops.WithLabelValues(op).Inc() + b.opsDuration.WithLabelValues(op).Observe(time.Since(start).Seconds()) + + return err +} + +func (b *metricBucket) IsObjNotFoundErr(err error) bool { + return b.bkt.IsObjNotFoundErr(err) +} + +func (b *metricBucket) Close() error { + return b.bkt.Close() +} + +func (b *metricBucket) Name() string { + return b.bkt.Name() +} + +type timingReadCloser struct { + io.ReadCloser + + ok bool + start time.Time + op string + duration *prometheus.HistogramVec + failed *prometheus.CounterVec +} + +func newTimingReadCloser(rc io.ReadCloser, op string, dur *prometheus.HistogramVec, failed *prometheus.CounterVec) *timingReadCloser { + // Initialize the metrics with 0. + dur.WithLabelValues(op) + failed.WithLabelValues(op) + return &timingReadCloser{ + ReadCloser: rc, + ok: true, + start: time.Now(), + op: op, + duration: dur, + failed: failed, + } +} + +func (rc *timingReadCloser) Close() error { + err := rc.ReadCloser.Close() + rc.duration.WithLabelValues(rc.op).Observe(time.Since(rc.start).Seconds()) + if rc.ok && err != nil { + rc.failed.WithLabelValues(rc.op).Inc() + rc.ok = false + } + return err +} + +func (rc *timingReadCloser) Read(b []byte) (n int, err error) { + n, err = rc.ReadCloser.Read(b) + if rc.ok && err != nil && err != io.EOF { + rc.failed.WithLabelValues(rc.op).Inc() + rc.ok = false + } + return n, err +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go new file mode 100644 index 0000000000..96e0bf0082 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go @@ -0,0 +1,423 @@ +// Package s3 implements common object storage abstractions against s3-compatible APIs. +package s3 + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "math/rand" + "net" + "net/http" + "os" + "runtime" + "strconv" + "strings" + "testing" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + minio "github.com/minio/minio-go/v6" + "github.com/minio/minio-go/v6/pkg/credentials" + "github.com/minio/minio-go/v6/pkg/encrypt" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + "github.com/prometheus/common/version" + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/runutil" + yaml "gopkg.in/yaml.v2" +) + +// DirDelim is the delimiter used to model a directory structure in an object store bucket. +const DirDelim = "/" + +// Minimum file size after which an HTTP multipart request should be used to upload objects to storage. +// Set to 128 MiB as in the minio client. +const defaultMinPartSize = 1024 * 1024 * 128 + +// Config stores the configuration for s3 bucket. 
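Before the s3 Config type below, note the pattern every metricBucket method above repeats: count the operation, count the failure, observe the duration. Factored out, it looks like the following sketch, using the same client_golang types the vendored code imports (the wrapper function itself is hypothetical):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// instrument wraps an operation with the three signals metricBucket tracks:
// total ops, failed ops, and duration in seconds.
func instrument(ops, failures *prometheus.CounterVec, dur *prometheus.HistogramVec, op string, f func() error) error {
	start := time.Now()
	err := f()
	if err != nil {
		failures.WithLabelValues(op).Inc()
	}
	ops.WithLabelValues(op).Inc()
	dur.WithLabelValues(op).Observe(time.Since(start).Seconds())
	return err
}

func main() {
	ops := prometheus.NewCounterVec(prometheus.CounterOpts{Name: "ops_total", Help: "Total operations."}, []string{"operation"})
	failures := prometheus.NewCounterVec(prometheus.CounterOpts{Name: "ops_failures_total", Help: "Failed operations."}, []string{"operation"})
	dur := prometheus.NewHistogramVec(prometheus.HistogramOpts{Name: "ops_duration_seconds", Help: "Operation duration."}, []string{"operation"})
	prometheus.MustRegister(ops, failures, dur)

	fmt.Println(instrument(ops, failures, dur, "upload", func() error { return nil }))
}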
+type Config struct { + Bucket string `yaml:"bucket"` + Endpoint string `yaml:"endpoint"` + Region string `yaml:"region"` + AccessKey string `yaml:"access_key"` + Insecure bool `yaml:"insecure"` + SignatureV2 bool `yaml:"signature_version2"` + SSEEncryption bool `yaml:"encrypt_sse"` + SecretKey string `yaml:"secret_key"` + PutUserMetadata map[string]string `yaml:"put_user_metadata"` + HTTPConfig HTTPConfig `yaml:"http_config"` + TraceConfig TraceConfig `yaml:"trace"` + PartSize uint64 `yaml:"part_size"` +} + +type TraceConfig struct { + Enable bool `yaml:"enable"` +} + +// HTTPConfig stores the http.Transport configuration for the s3 minio client. +type HTTPConfig struct { + IdleConnTimeout model.Duration `yaml:"idle_conn_timeout"` + ResponseHeaderTimeout model.Duration `yaml:"response_header_timeout"` + InsecureSkipVerify bool `yaml:"insecure_skip_verify"` +} + +// Bucket implements the store.Bucket interface against s3-compatible APIs. +type Bucket struct { + logger log.Logger + name string + client *minio.Client + sse encrypt.ServerSide + putUserMetadata map[string]string + partSize uint64 +} + +// parseConfig unmarshals a buffer into a Config with default HTTPConfig values. +func parseConfig(conf []byte) (Config, error) { + defaultHTTPConfig := HTTPConfig{ + IdleConnTimeout: model.Duration(90 * time.Second), + ResponseHeaderTimeout: model.Duration(2 * time.Minute), + } + config := Config{HTTPConfig: defaultHTTPConfig} + if err := yaml.Unmarshal(conf, &config); err != nil { + return Config{}, err + } + + if config.PutUserMetadata == nil { + config.PutUserMetadata = make(map[string]string) + } + + if config.PartSize == 0 { + config.PartSize = defaultMinPartSize + } + + return config, nil +} + +// NewBucket returns a new Bucket using the provided s3 config values. +func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) { + config, err := parseConfig(conf) + if err != nil { + return nil, err + } + + return NewBucketWithConfig(logger, config, component) +} + +// NewBucketWithConfig returns a new Bucket using the provided s3 config values. +func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) { + var chain []credentials.Provider + + if err := validate(config); err != nil { + return nil, err + } + if config.AccessKey != "" { + signature := credentials.SignatureV4 + // TODO(bwplotka): Don't do flags, use actual v2, v4 params. 
+ if config.SignatureV2 { + signature = credentials.SignatureV2 + } + + chain = []credentials.Provider{&credentials.Static{ + Value: credentials.Value{ + AccessKeyID: config.AccessKey, + SecretAccessKey: config.SecretKey, + SignerType: signature, + }, + }} + } else { + chain = []credentials.Provider{ + &credentials.EnvAWS{}, + &credentials.FileAWSCredentials{}, + &credentials.IAM{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + }, + } + } + + client, err := minio.NewWithCredentials(config.Endpoint, credentials.NewChainCredentials(chain), !config.Insecure, config.Region) + if err != nil { + return nil, errors.Wrap(err, "initialize s3 client") + } + client.SetAppInfo(fmt.Sprintf("thanos-%s", component), fmt.Sprintf("%s (%s)", version.Version, runtime.Version())) + client.SetCustomTransport(&http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: time.Duration(config.HTTPConfig.IdleConnTimeout), + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + // The ResponseHeaderTimeout here is the only change + // from the default minio transport, it was introduced + // to cover cases where the tcp connection works but + // the server never answers. Defaults to 2 minutes. + ResponseHeaderTimeout: time.Duration(config.HTTPConfig.ResponseHeaderTimeout), + // Set this value so that the underlying transport round-tripper + // doesn't try to auto decode the body of objects with + // content-encoding set to `gzip`. + // + // Refer: + // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 + DisableCompression: true, + TLSClientConfig: &tls.Config{InsecureSkipVerify: config.HTTPConfig.InsecureSkipVerify}, + }) + + var sse encrypt.ServerSide + if config.SSEEncryption { + sse = encrypt.NewSSE() + } + + if config.TraceConfig.Enable { + logWriter := log.NewStdlibAdapter(level.Debug(logger), log.MessageKey("s3TraceMsg")) + client.TraceOn(logWriter) + } + + bkt := &Bucket{ + logger: logger, + name: config.Bucket, + client: client, + sse: sse, + putUserMetadata: config.PutUserMetadata, + partSize: config.PartSize, + } + return bkt, nil +} + +// Name returns the bucket name for s3. +func (b *Bucket) Name() string { + return b.name +} + +// validate checks to see the config options are set. +func validate(conf Config) error { + if conf.Endpoint == "" { + return errors.New("no s3 endpoint in config file") + } + + if conf.AccessKey == "" && conf.SecretKey != "" { + return errors.New("no s3 acccess_key specified while secret_key is present in config file; either both should be present in config or envvars/IAM should be used.") + } + + if conf.AccessKey != "" && conf.SecretKey == "" { + return errors.New("no s3 secret_key specified while access_key is present in config file; either both should be present in config or envvars/IAM should be used.") + } + return nil +} + +// ValidateForTests checks to see the config options for tests are set. +func ValidateForTests(conf Config) error { + if conf.Endpoint == "" || + conf.AccessKey == "" || + conf.SecretKey == "" { + return errors.New("insufficient s3 test configuration information") + } + return nil +} + +// Iter calls f for each entry in the given directory. The argument to f is the full +// object name including the prefix of the inspected directory. 
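The transport configured above departs from minio's default in two deliberate ways: ResponseHeaderTimeout guards against a server that accepts the connection but never answers, and DisableCompression stops the round-tripper from transparently gunzipping object bodies. A stdlib-only reduction of those knobs (constructor name is illustrative):

package main

import (
	"net"
	"net/http"
	"time"
)

// newTransport reproduces the key settings the s3 client applies above.
func newTransport(idle, responseHeader time.Duration) *http.Transport {
	return &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		MaxIdleConns:          100,
		IdleConnTimeout:       idle,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
		// Fail requests where the server never sends headers.
		ResponseHeaderTimeout: responseHeader,
		// Keep gzip-encoded object bodies byte-exact.
		DisableCompression: true,
	}
}

func main() {
	client := &http.Client{Transport: newTransport(90*time.Second, 2*time.Minute)}
	_ = client
}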
+func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error) error {
+	// Ensure the object name actually ends with a dir suffix. Otherwise we'll just iterate the
+	// object itself as one prefix item.
+	if dir != "" {
+		dir = strings.TrimSuffix(dir, DirDelim) + DirDelim
+	}
+
+	for object := range b.client.ListObjects(b.name, dir, false, ctx.Done()) {
+		// Return any error encountered while listing objects.
+		if object.Err != nil {
+			return object.Err
+		}
+		// This sometimes happens with empty buckets.
+		if object.Key == "" {
+			continue
+		}
+		if err := f(object.Key); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
+	opts := &minio.GetObjectOptions{ServerSideEncryption: b.sse}
+	if length != -1 {
+		if err := opts.SetRange(off, off+length-1); err != nil {
+			return nil, err
+		}
+	}
+	r, err := b.client.GetObjectWithContext(ctx, b.name, name, *opts)
+	if err != nil {
+		return nil, err
+	}
+
+	// A NotFoundObject error is only revealed after the first Read, which performs the
+	// initial GET request. Trigger it here for convenience.
+	if _, err := r.Read(nil); err != nil {
+		runutil.CloseWithLogOnErr(b.logger, r, "s3 get range obj close")
+
+		// First GET Object request error.
+		return nil, err
+	}
+
+	return r, nil
+}
+
+// Get returns a reader for the given object name.
+func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
+	return b.getRange(ctx, name, 0, -1)
+}
+
+// GetRange returns a new range reader for the given object name and range.
+func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
+	return b.getRange(ctx, name, off, length)
+}
+
+// Exists checks if the given object exists.
+func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) {
+	_, err := b.client.StatObject(b.name, name, minio.StatObjectOptions{})
+	if err != nil {
+		if b.IsObjNotFoundErr(err) {
+			return false, nil
+		}
+		return false, errors.Wrap(err, "stat s3 object")
+	}
+
+	return true, nil
+}
+
+func (b *Bucket) guessFileSize(name string, r io.Reader) int64 {
+	if f, ok := r.(*os.File); ok {
+		fileInfo, err := f.Stat()
+		if err == nil {
+			return fileInfo.Size()
+		}
+		level.Warn(b.logger).Log("msg", "could not stat file for multipart upload", "name", name, "err", err)
+		return -1
+	}
+
+	level.Warn(b.logger).Log("msg", "could not guess file size for multipart upload", "name", name)
+	return -1
+}
+
+// Upload the contents of the reader as an object into the bucket.
+func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error {
+	// TODO(https://github.com/thanos-io/thanos/issues/678): Remove guessing length when minio provider will support multipart upload without this.
+	fileSize := b.guessFileSize(name, r)
+
+	if _, err := b.client.PutObjectWithContext(
+		ctx,
+		b.name,
+		name,
+		r,
+		fileSize,
+		minio.PutObjectOptions{
+			PartSize:             b.partSize,
+			ServerSideEncryption: b.sse,
+			UserMetadata:         b.putUserMetadata,
+		},
+	); err != nil {
+		return errors.Wrap(err, "upload s3 object")
+	}
+
+	return nil
+}
+
+// Delete removes the object with the given name.
+func (b *Bucket) Delete(ctx context.Context, name string) error {
+	return b.client.RemoveObject(b.name, name)
+}
+
+// IsObjNotFoundErr returns true if the error means that the object is not found. Relevant to Get operations.
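+//
+// A sketch of typical use (the object name and logger are illustrative):
+//
+//	r, err := b.Get(ctx, "chunks/000001")
+//	if err != nil {
+//		if b.IsObjNotFoundErr(err) {
+//			// The object does not exist; handle accordingly.
+//		}
+//		return err
+//	}
+//	defer runutil.CloseWithLogOnErr(b.logger, r, "close s3 object")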
+func (b *Bucket) IsObjNotFoundErr(err error) bool {
+	return minio.ToErrorResponse(err).Code == "NoSuchKey"
+}
+
+func (b *Bucket) Close() error { return nil }
+
+func configFromEnv() Config {
+	c := Config{
+		Bucket:    os.Getenv("S3_BUCKET"),
+		Endpoint:  os.Getenv("S3_ENDPOINT"),
+		AccessKey: os.Getenv("S3_ACCESS_KEY"),
+		SecretKey: os.Getenv("S3_SECRET_KEY"),
+	}
+
+	c.Insecure, _ = strconv.ParseBool(os.Getenv("S3_INSECURE"))
+	c.HTTPConfig.InsecureSkipVerify, _ = strconv.ParseBool(os.Getenv("S3_INSECURE_SKIP_VERIFY"))
+	c.SignatureV2, _ = strconv.ParseBool(os.Getenv("S3_SIGNATURE_VERSION2"))
+	return c
+}
+
+// NewTestBucket creates a test bucket client, creating a temporary bucket before returning.
+// The returned close function empties and deletes the bucket.
+func NewTestBucket(t testing.TB, location string) (objstore.Bucket, func(), error) {
+	c := configFromEnv()
+	if err := ValidateForTests(c); err != nil {
+		return nil, nil, err
+	}
+
+	if c.Bucket != "" && os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" {
+		return nil, nil, errors.New("S3_BUCKET is defined. Normally these tests create a temporary bucket " +
+			"and delete it after the test. Unset the S3_BUCKET env variable to use the default logic. If you really want to run " +
+			"tests against the provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That bucket " +
+			"needs to be manually cleared. This means that it is only useful to run one test at a time. This is due " +
+			"to safety (accidentally pointing tests at a production bucket) as well as aws s3 not being fully strongly consistent.")
+	}
+
+	return NewTestBucketFromConfig(t, location, c, true)
+}
+
+func NewTestBucketFromConfig(t testing.TB, location string, c Config, reuseBucket bool) (objstore.Bucket, func(), error) {
+	bc, err := yaml.Marshal(c)
+	if err != nil {
+		return nil, nil, err
+	}
+	b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test")
+	if err != nil {
+		return nil, nil, err
+	}
+
+	bktToCreate := c.Bucket
+	if c.Bucket != "" && reuseBucket {
+		if err := b.Iter(context.Background(), "", func(f string) error {
+			return errors.Errorf("bucket %s is not empty", c.Bucket)
+		}); err != nil {
+			return nil, nil, errors.Wrapf(err, "s3 check bucket %s", c.Bucket)
+		}
+
+		t.Log("WARNING. Reusing", c.Bucket, "AWS bucket for AWS tests. Manual cleanup afterwards is required")
Manual cleanup afterwards is required") + return b, func() {}, nil + } + + if c.Bucket == "" { + src := rand.NewSource(time.Now().UnixNano()) + + // Bucket name need to conform: https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html + bktToCreate = strings.Replace(fmt.Sprintf("test_%s_%x", strings.ToLower(t.Name()), src.Int63()), "_", "-", -1) + if len(bktToCreate) >= 63 { + bktToCreate = bktToCreate[:63] + } + } + + if err := b.client.MakeBucket(bktToCreate, location); err != nil { + return nil, nil, err + } + b.name = bktToCreate + t.Log("created temporary AWS bucket for AWS tests with name", bktToCreate, "in", location) + + return b, func() { + objstore.EmptyBucket(t, context.Background(), b) + if err := b.client.RemoveBucket(bktToCreate); err != nil { + t.Logf("deleting bucket %s failed: %s", bktToCreate, err) + } + }, nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/testing.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/testing.go new file mode 100644 index 0000000000..18010aa5f3 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/testing.go @@ -0,0 +1,43 @@ +package objstore + +import ( + "context" + "strings" + "sync" + "testing" +) + +// EmptyBucket deletes all objects from bucket. This operation is required to properly delete bucket as a whole. +// It is used for testing only. +// TODO(bplotka): Add retries. +func EmptyBucket(t testing.TB, ctx context.Context, bkt Bucket) { + var wg sync.WaitGroup + + queue := []string{""} + for len(queue) > 0 { + elem := queue[0] + queue = queue[1:] + + err := bkt.Iter(ctx, elem, func(p string) error { + if strings.HasSuffix(p, DirDelim) { + queue = append(queue, p) + return nil + } + + wg.Add(1) + go func() { + if err := bkt.Delete(ctx, p); err != nil { + t.Logf("deleting object %s failed: %s", p, err) + } + wg.Done() + }() + return nil + }) + if err != nil { + t.Logf("iterating over bucket objects failed: %s", err) + wg.Wait() + return + } + } + wg.Wait() +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go b/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go new file mode 100644 index 0000000000..9c4a2f2212 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/pool/pool.go @@ -0,0 +1,109 @@ +package pool + +import ( + "sync" + + "github.com/pkg/errors" +) + +// BytesPool is a bucketed pool for variably sized byte slices. It can be configured to not allow +// more than a maximum number of bytes being used at a given time. +// Every byte slice obtained from the pool must be returned. +type BytesPool struct { + buckets []sync.Pool + sizes []int + maxTotal uint64 + usedTotal uint64 + mtx sync.Mutex + + new func(s int) *[]byte +} + +// NewBytesPool returns a new BytesPool with size buckets for minSize to maxSize +// increasing by the given factor and maximum number of used bytes. +// No more than maxTotal bytes can be used at any given time unless maxTotal is set to 0. 
+func NewBytesPool(minSize, maxSize int, factor float64, maxTotal uint64) (*BytesPool, error) {
+	if minSize < 1 {
+		return nil, errors.New("invalid minimum pool size")
+	}
+	if maxSize < 1 {
+		return nil, errors.New("invalid maximum pool size")
+	}
+	if factor < 1 {
+		return nil, errors.New("invalid factor")
+	}
+
+	var sizes []int
+
+	for s := minSize; s <= maxSize; s = int(float64(s) * factor) {
+		sizes = append(sizes, s)
+	}
+	p := &BytesPool{
+		buckets:  make([]sync.Pool, len(sizes)),
+		sizes:    sizes,
+		maxTotal: maxTotal,
+		new: func(sz int) *[]byte {
+			s := make([]byte, 0, sz)
+			return &s
+		},
+	}
+	return p, nil
+}
+
+// ErrPoolExhausted is returned if the pool cannot provide the requested bytes.
+var ErrPoolExhausted = errors.New("pool exhausted")
+
+// Get returns a byte slice that fits the given size.
+func (p *BytesPool) Get(sz int) (*[]byte, error) {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	if p.maxTotal > 0 && p.usedTotal+uint64(sz) > p.maxTotal {
+		return nil, ErrPoolExhausted
+	}
+
+	for i, bktSize := range p.sizes {
+		if sz > bktSize {
+			continue
+		}
+		b, ok := p.buckets[i].Get().(*[]byte)
+		if !ok {
+			b = p.new(bktSize)
+		}
+
+		p.usedTotal += uint64(cap(*b))
+		return b, nil
+	}
+
+	// The requested size exceeds that of our highest bucket, allocate it directly.
+	p.usedTotal += uint64(sz)
+	return p.new(sz), nil
+}
+
+// Put returns a byte slice to the right bucket in the pool.
+func (p *BytesPool) Put(b *[]byte) {
+	if b == nil {
+		return
+	}
+
+	for i, bktSize := range p.sizes {
+		if cap(*b) > bktSize {
+			continue
+		}
+		*b = (*b)[:0]
+		p.buckets[i].Put(b)
+		break
+	}
+
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	// We could assume here that our users will not make the slices larger,
+	// but let's be on the safe side to avoid an underflow of p.usedTotal.
+	sz := uint64(cap(*b))
+	if sz >= p.usedTotal {
+		p.usedTotal = 0
+	} else {
+		p.usedTotal -= sz
+	}
+}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go
new file mode 100644
index 0000000000..e5f7fa2ef6
--- /dev/null
+++ b/vendor/github.com/thanos-io/thanos/pkg/runutil/runutil.go
@@ -0,0 +1,151 @@
+// Package runutil provides helpers for advanced function scheduling control like repeat or retry.
+//
+// It is very often the case that you need to execute some code at fixed intervals or have it retried automatically.
+// To do this reliably and with proper timeouts, you need to carefully arrange some boilerplate.
+// The functions below do it for you.
+//
+// For repeated execution, use Repeat:
+//
+//	err := runutil.Repeat(10*time.Second, stopc, func() error {
+//		// ...
+//	})
+//
+// Retry starts executing the closure function f until no error is returned from f:
+//
+//	err := runutil.Retry(10*time.Second, stopc, func() error {
+//		// ...
+//	})
+//
+// For logging an error on each f error, use RetryWithLog:
+//
+//	err := runutil.RetryWithLog(logger, 10*time.Second, stopc, func() error {
+//		// ...
+//	})
+//
+// Another use case for the runutil package is closing a `Closer`. As we all know, we should close every implementation of `Closer`, such as *os.File. Commonly we will use:
+//
+//	defer closer.Close()
+//
+// The problem is that Close() can return an important error; e.g. for os.File the actual file flush might happen (and fail) in the `Close` method. It is important to *always* check the error.
+// Thanos provides utility functions to log every error like those, allowing you to put them in a convenient `defer`:
+//
+//	defer runutil.CloseWithLogOnErr(logger, closer, "log format message")
+//
+// For capturing the error, use CloseWithErrCapture:
+//
+//	var err error
+//	defer runutil.CloseWithErrCapture(&err, closer, "log format message")
+//
+//	// ...
+//
+// If Close() returns an error, err will capture it and return it by argument.
+//
+// The runutil.Exhaust* family of functions provides the same functionality, but
+// takes an io.ReadCloser and exhausts the whole reader before closing it.
+// They are useful when trying to use http keep-alive connections, because
+// for the same connection to be re-used the whole response body needs to be
+// exhausted.
+package runutil
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/errors"
+	tsdberrors "github.com/prometheus/prometheus/tsdb/errors"
+)
+
+// Repeat executes f at every interval until stopc is closed or f returns an error.
+// It executes f once right after being called.
+func Repeat(interval time.Duration, stopc <-chan struct{}, f func() error) error {
+	tick := time.NewTicker(interval)
+	defer tick.Stop()
+
+	for {
+		if err := f(); err != nil {
+			return err
+		}
+		select {
+		case <-stopc:
+			return nil
+		case <-tick.C:
+		}
+	}
+}
+
+// Retry executes f at every interval until stopc is closed or no error is returned from f.
+func Retry(interval time.Duration, stopc <-chan struct{}, f func() error) error {
+	return RetryWithLog(log.NewNopLogger(), interval, stopc, f)
+}
+
+// RetryWithLog executes f at every interval until stopc is closed or no error is returned from f. It logs an error on each f error.
+func RetryWithLog(logger log.Logger, interval time.Duration, stopc <-chan struct{}, f func() error) error {
+	tick := time.NewTicker(interval)
+	defer tick.Stop()
+
+	var err error
+	for {
+		if err = f(); err == nil {
+			return nil
+		}
+		level.Error(logger).Log("msg", "function failed. Retrying in next tick", "err", err)
+		select {
+		case <-stopc:
+			return err
+		case <-tick.C:
+		}
+	}
+}
+
+// CloseWithLogOnErr makes sure we log every error, even those from best effort tiny closers.
+func CloseWithLogOnErr(logger log.Logger, closer io.Closer, format string, a ...interface{}) {
+	err := closer.Close()
+	if err == nil {
+		return
+	}
+
+	if logger == nil {
+		logger = log.NewLogfmtLogger(os.Stderr)
+	}
+
+	level.Warn(logger).Log("msg", "detected close error", "err", errors.Wrap(err, fmt.Sprintf(format, a...)))
+}
+
+// ExhaustCloseWithLogOnErr closes the io.ReadCloser with a log message on error, but exhausts the reader first.
+func ExhaustCloseWithLogOnErr(logger log.Logger, r io.ReadCloser, format string, a ...interface{}) {
+	_, err := io.Copy(ioutil.Discard, r)
+	if err != nil {
+		level.Warn(logger).Log("msg", "failed to exhaust reader, performance may be impeded", "err", err)
+	}
+
+	CloseWithLogOnErr(logger, r, format, a...)
+}
+
+// CloseWithErrCapture closes the closer and, on error, returns the error by argument, combined with
+// the given error (usually from the caller function).
+func CloseWithErrCapture(err *error, closer io.Closer, format string, a ...interface{}) {
+	merr := tsdberrors.MultiError{}
+
+	merr.Add(*err)
+	merr.Add(errors.Wrapf(closer.Close(), format, a...))
+
+	*err = merr.Err()
+}
+
+// ExhaustCloseWithErrCapture closes the io.ReadCloser with error capture, but exhausts the reader first.
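+//
+// A sketch of typical use with an http response body (function and names are illustrative):
+//
+//	func decode(resp *http.Response) (err error) {
+//		defer runutil.ExhaustCloseWithErrCapture(&err, resp.Body, "close response body")
+//		// ... read from resp.Body ...
+//		return nil
+//	}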
+func ExhaustCloseWithErrCapture(err *error, r io.ReadCloser, format string, a ...interface{}) {
+	_, copyErr := io.Copy(ioutil.Discard, r)
+
+	CloseWithErrCapture(err, r, format, a...)
+
+	// Prepend the io.Copy error.
+	merr := tsdberrors.MultiError{}
+	merr.Add(copyErr)
+	merr.Add(*err)
+
+	*err = merr.Err()
+}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go b/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go
new file mode 100644
index 0000000000..3a83d2490a
--- /dev/null
+++ b/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go
@@ -0,0 +1,516 @@
+// Package shipper detects directories on the local file system and uploads
+// them to block storage.
+package shipper
+
+import (
+	"context"
+	"encoding/json"
+	"io/ioutil"
+	"math"
+	"os"
+	"path"
+	"path/filepath"
+	"sort"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/oklog/ulid"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/thanos-io/thanos/pkg/block"
+	"github.com/thanos-io/thanos/pkg/block/metadata"
+	"github.com/thanos-io/thanos/pkg/objstore"
+	"github.com/thanos-io/thanos/pkg/runutil"
+)
+
+type metrics struct {
+	dirSyncs          prometheus.Counter
+	dirSyncFailures   prometheus.Counter
+	uploads           prometheus.Counter
+	uploadFailures    prometheus.Counter
+	uploadedCompacted prometheus.Gauge
+}
+
+func newMetrics(r prometheus.Registerer, uploadCompacted bool) *metrics {
+	var m metrics
+
+	m.dirSyncs = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "thanos_shipper_dir_syncs_total",
+		Help: "Total dir sync attempts",
+	})
+	m.dirSyncFailures = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "thanos_shipper_dir_sync_failures_total",
+		Help: "Total number of failed dir syncs",
+	})
+	m.uploads = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "thanos_shipper_uploads_total",
+		Help: "Total object upload attempts",
+	})
+	m.uploadFailures = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "thanos_shipper_upload_failures_total",
+		Help: "Total number of failed object uploads",
+	})
+	m.uploadedCompacted = prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "thanos_shipper_upload_compacted_done",
+		Help: "If 1 it means shipper uploaded all compacted blocks from the filesystem.",
+	})
+
+	if r != nil {
+		r.MustRegister(
+			m.dirSyncs,
+			m.dirSyncFailures,
+			m.uploads,
+			m.uploadFailures,
+		)
+		if uploadCompacted {
+			r.MustRegister(m.uploadedCompacted)
+		}
+	}
+	return &m
+}
+
+// Shipper watches a directory for matching files and directories and uploads
+// them to a remote data store.
+type Shipper struct {
+	logger          log.Logger
+	dir             string
+	metrics         *metrics
+	bucket          objstore.Bucket
+	labels          func() labels.Labels
+	source          metadata.SourceType
+	uploadCompacted bool
+}
+
+// New creates a new shipper that detects new TSDB blocks in dir and uploads them
+// to remote if necessary. It attaches the Thanos metadata section in each meta JSON file.
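+//
+// A usage sketch (directory, interval, and source type are illustrative, not prescriptive):
+//
+//	s := shipper.New(logger, reg, "/var/prometheus/data", bkt, lblsFn, metadata.SidecarSource)
+//	// Periodically ship new blocks until stopc is closed.
+//	_ = runutil.Repeat(30*time.Second, stopc, func() error {
+//		if _, err := s.Sync(ctx); err != nil {
+//			level.Warn(logger).Log("msg", "shipper sync failed", "err", err)
+//		}
+//		return nil
+//	})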
+func New(
+	logger log.Logger,
+	r prometheus.Registerer,
+	dir string,
+	bucket objstore.Bucket,
+	lbls func() labels.Labels,
+	source metadata.SourceType,
+) *Shipper {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+	if lbls == nil {
+		lbls = func() labels.Labels { return nil }
+	}
+
+	return &Shipper{
+		logger:  logger,
+		dir:     dir,
+		bucket:  bucket,
+		labels:  lbls,
+		metrics: newMetrics(r, false),
+		source:  source,
+	}
+}
+
+// NewWithCompacted creates a new shipper that detects new TSDB blocks in dir and uploads them
+// to remote if necessary, including compacted blocks which are already on the filesystem.
+// It attaches the Thanos metadata section in each meta JSON file.
+func NewWithCompacted(
+	logger log.Logger,
+	r prometheus.Registerer,
+	dir string,
+	bucket objstore.Bucket,
+	lbls func() labels.Labels,
+	source metadata.SourceType,
+) *Shipper {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+	if lbls == nil {
+		lbls = func() labels.Labels { return nil }
+	}
+
+	return &Shipper{
+		logger:          logger,
+		dir:             dir,
+		bucket:          bucket,
+		labels:          lbls,
+		metrics:         newMetrics(r, true),
+		source:          source,
+		uploadCompacted: true,
+	}
+}
+
+// Timestamps returns the minimum timestamp for which data is available and the highest timestamp
+// of blocks that were successfully uploaded.
+func (s *Shipper) Timestamps() (minTime, maxSyncTime int64, err error) {
+	meta, err := ReadMetaFile(s.dir)
+	if err != nil {
+		return 0, 0, errors.Wrap(err, "read shipper meta file")
+	}
+	// Build a map of blocks we already uploaded.
+	hasUploaded := make(map[ulid.ULID]struct{}, len(meta.Uploaded))
+	for _, id := range meta.Uploaded {
+		hasUploaded[id] = struct{}{}
+	}
+
+	minTime = math.MaxInt64
+	maxSyncTime = math.MinInt64
+
+	if err := s.iterBlockMetas(func(m *metadata.Meta) error {
+		if m.MinTime < minTime {
+			minTime = m.MinTime
+		}
+		if _, ok := hasUploaded[m.ULID]; ok && m.MaxTime > maxSyncTime {
+			maxSyncTime = m.MaxTime
+		}
+		return nil
+	}); err != nil {
+		return 0, 0, errors.Wrap(err, "iter block metas for timestamp")
+	}
+
+	if minTime == math.MaxInt64 {
+		// No block yet found. We cannot assume any min block size so propagate 0 minTime.
+		minTime = 0
+	}
+
+	return minTime, maxSyncTime, nil
+}
+
+type lazyOverlapChecker struct {
+	synced bool
+	logger log.Logger
+	bucket objstore.Bucket
+	labels func() labels.Labels
+
+	metas       []tsdb.BlockMeta
+	lookupMetas map[ulid.ULID]struct{}
+}
+
+func newLazyOverlapChecker(logger log.Logger, bucket objstore.Bucket, labels func() labels.Labels) *lazyOverlapChecker {
+	return &lazyOverlapChecker{
+		logger: logger,
+		bucket: bucket,
+		labels: labels,
+
+		lookupMetas: map[ulid.ULID]struct{}{},
+	}
+}
+
+func (c *lazyOverlapChecker) sync(ctx context.Context) error {
+	if err := c.bucket.Iter(ctx, "", func(path string) error {
+		id, ok := block.IsBlockDir(path)
+		if !ok {
+			return nil
+		}
+
+		m, err := block.DownloadMeta(ctx, c.logger, c.bucket, id)
+		if err != nil {
+			return err
+		}
+
+		if !labels.FromMap(m.Thanos.Labels).Equals(c.labels()) {
+			return nil
+		}
+
+		c.metas = append(c.metas, m.BlockMeta)
+		c.lookupMetas[m.ULID] = struct{}{}
+		return nil
+
+	}); err != nil {
+		return errors.Wrap(err, "get all block meta.")
+	}
+
+	c.synced = true
+	return nil
+}
+
+func (c *lazyOverlapChecker) IsOverlapping(ctx context.Context, newMeta tsdb.BlockMeta) error {
+	if !c.synced {
+		level.Info(c.logger).Log("msg", "gathering all existing blocks from the remote bucket for check", "id", newMeta.ULID.String())
+		if err := c.sync(ctx); err != nil {
+			return err
+		}
+	}
+
+	// TODO(bwplotka) so confusing! we need to sort it first. Add comment to TSDB code.
+	metas := append([]tsdb.BlockMeta{newMeta}, c.metas...)
+	sort.Slice(metas, func(i, j int) bool {
+		return metas[i].MinTime < metas[j].MinTime
+	})
+	if o := tsdb.OverlappingBlocks(metas); len(o) > 0 {
+		// TODO(bwplotka): Consider checking if overlaps relates to block in concern?
+		return errors.Errorf("shipping compacted block %s is blocked; overlap spotted: %s", newMeta.ULID, o.String())
+	}
+	return nil
+}
+
+// Sync performs a single synchronization, which ensures all non-compacted local blocks have been uploaded
+// to the object bucket once.
+//
+// If uploadCompacted is enabled, it also uploads compacted blocks.
+//
+// It is not concurrency-safe, however it is compactor-safe (running concurrently with a compactor is ok).
+func (s *Shipper) Sync(ctx context.Context) (uploaded int, err error) {
+	meta, err := ReadMetaFile(s.dir)
+	if err != nil {
+		// If we encounter any error, proceed with an empty meta file and overwrite it later.
+		// The meta file is only used to avoid unnecessary bucket.Exists calls,
+		// which are properly handled by the system if they occur anyway.
+		if !os.IsNotExist(err) {
+			level.Warn(s.logger).Log("msg", "reading meta file failed, will override it", "err", err)
+		}
+		meta = &Meta{Version: MetaVersion1}
+	}
+
+	// Build a map of blocks we already uploaded.
+	hasUploaded := make(map[ulid.ULID]struct{}, len(meta.Uploaded))
+	for _, id := range meta.Uploaded {
+		hasUploaded[id] = struct{}{}
+	}
+
+	// Reset the uploaded slice so we can rebuild it only with blocks that still exist locally.
+	meta.Uploaded = nil
+
+	var (
+		checker    = newLazyOverlapChecker(s.logger, s.bucket, s.labels)
+		uploadErrs int
+	)
+	// Sync non-compacted blocks first.
+	if err := s.iterBlockMetas(func(m *metadata.Meta) error {
+		// Do not sync a block if we already uploaded or ignored it. If it's no longer found in the bucket,
+		// it was generally removed by the compaction process.
+		if _, uploaded := hasUploaded[m.ULID]; uploaded {
+			meta.Uploaded = append(meta.Uploaded, m.ULID)
+			return nil
+		}
+
+		if m.Stats.NumSamples == 0 {
+			// Ignore empty blocks.
+ level.Debug(s.logger).Log("msg", "ignoring empty block", "block", m.ULID) + return nil + } + + // Check against bucket if the meta file for this block exists. + ok, err := s.bucket.Exists(ctx, path.Join(m.ULID.String(), block.MetaFilename)) + if err != nil { + return errors.Wrap(err, "check exists") + } + if ok { + return nil + } + + // We only ship of the first compacted block level as normal flow. + if m.Compaction.Level > 1 { + if !s.uploadCompacted { + return nil + } + + if err := checker.IsOverlapping(ctx, m.BlockMeta); err != nil { + level.Error(s.logger).Log("msg", "found overlap or error during sync, cannot upload compacted block", "err", err) + uploadErrs++ + return nil + } + } + + if err := s.upload(ctx, m); err != nil { + level.Error(s.logger).Log("msg", "shipping failed", "block", m.ULID, "err", err) + // No error returned, just log line. This is because we want other blocks to be uploaded even + // though this one failed. It will be retried on second Sync iteration. + uploadErrs++ + return nil + } + meta.Uploaded = append(meta.Uploaded, m.ULID) + + uploaded++ + s.metrics.uploads.Inc() + return nil + }); err != nil { + s.metrics.dirSyncFailures.Inc() + return uploaded, errors.Wrap(err, "iter local block metas") + } + + if err := WriteMetaFile(s.logger, s.dir, meta); err != nil { + level.Warn(s.logger).Log("msg", "updating meta file failed", "err", err) + } + + s.metrics.dirSyncs.Inc() + + if uploadErrs > 0 { + s.metrics.uploadFailures.Add(float64(uploadErrs)) + return uploaded, errors.Errorf("failed to sync %v blocks", uploadErrs) + } else if s.uploadCompacted { + s.metrics.uploadedCompacted.Set(1) + } + return uploaded, nil +} + +// sync uploads the block if not exists in remote storage. +func (s *Shipper) upload(ctx context.Context, meta *metadata.Meta) error { + level.Info(s.logger).Log("msg", "upload new block", "id", meta.ULID) + + // We hard-link the files into a temporary upload directory so we are not affected + // by other operations happening against the TSDB directory. + updir := filepath.Join(s.dir, "thanos", "upload", meta.ULID.String()) + + // Remove updir just in case. + if err := os.RemoveAll(updir); err != nil { + return errors.Wrap(err, "clean upload directory") + } + if err := os.MkdirAll(updir, 0777); err != nil { + return errors.Wrap(err, "create upload dir") + } + defer func() { + if err := os.RemoveAll(updir); err != nil { + level.Error(s.logger).Log("msg", "failed to clean upload directory", "err", err) + } + }() + + dir := filepath.Join(s.dir, meta.ULID.String()) + if err := hardlinkBlock(dir, updir); err != nil { + return errors.Wrap(err, "hard link block") + } + // Attach current labels and write a new meta file with Thanos extensions. + if lset := s.labels(); lset != nil { + meta.Thanos.Labels = lset.Map() + } + meta.Thanos.Source = s.source + if err := metadata.Write(s.logger, updir, meta); err != nil { + return errors.Wrap(err, "write meta file") + } + return block.Upload(ctx, s.logger, s.bucket, updir) +} + +// iterBlockMetas calls f with the block meta for each block found in dir. It logs +// an error and continues if it cannot access a meta.json file. +// If f returns an error, the function returns with the same error. 
+func (s *Shipper) iterBlockMetas(f func(m *metadata.Meta) error) error {
+	names, err := fileutil.ReadDir(s.dir)
+	if err != nil {
+		return errors.Wrap(err, "read dir")
+	}
+	for _, n := range names {
+		if _, ok := block.IsBlockDir(n); !ok {
+			continue
+		}
+		dir := filepath.Join(s.dir, n)
+
+		fi, err := os.Stat(dir)
+		if err != nil {
+			level.Warn(s.logger).Log("msg", "open file failed", "err", err)
+			continue
+		}
+		if !fi.IsDir() {
+			continue
+		}
+		m, err := metadata.Read(dir)
+		if err != nil {
+			level.Warn(s.logger).Log("msg", "reading meta file failed", "err", err)
+			continue
+		}
+		if err := f(m); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func hardlinkBlock(src, dst string) error {
+	chunkDir := filepath.Join(dst, block.ChunksDirname)
+
+	if err := os.MkdirAll(chunkDir, 0777); err != nil {
+		return errors.Wrap(err, "create chunks dir")
+	}
+
+	files, err := fileutil.ReadDir(filepath.Join(src, block.ChunksDirname))
+	if err != nil {
+		return errors.Wrap(err, "read chunk dir")
+	}
+	for i, fn := range files {
+		files[i] = filepath.Join(block.ChunksDirname, fn)
+	}
+	files = append(files, block.MetaFilename, block.IndexFilename)
+
+	for _, fn := range files {
+		if err := os.Link(filepath.Join(src, fn), filepath.Join(dst, fn)); err != nil {
+			return errors.Wrapf(err, "hard link file %s", fn)
+		}
+	}
+	return nil
+}
+
+// Meta defines the format of the thanos.shipper.json file that the shipper places in the data directory.
+type Meta struct {
+	Version  int         `json:"version"`
+	Uploaded []ulid.ULID `json:"uploaded"`
+}
+
+const (
+	// MetaFilename is the known JSON filename for meta information.
+	MetaFilename = "thanos.shipper.json"
+
+	// MetaVersion1 represents version 1 of meta.
+	MetaVersion1 = 1
+)
+
+// WriteMetaFile writes the given meta into <dir>/thanos.shipper.json.
+func WriteMetaFile(logger log.Logger, dir string, meta *Meta) error {
+	// Make any changes to the file appear atomic.
+	path := filepath.Join(dir, MetaFilename)
+	tmp := path + ".tmp"
+
+	f, err := os.Create(tmp)
+	if err != nil {
+		return err
+	}
+
+	enc := json.NewEncoder(f)
+	enc.SetIndent("", "\t")
+
+	if err := enc.Encode(meta); err != nil {
+		runutil.CloseWithLogOnErr(logger, f, "write meta file close")
+		return err
+	}
+	if err := f.Close(); err != nil {
+		return err
+	}
+	return renameFile(logger, tmp, path)
+}
+
+// ReadMetaFile reads the given meta from <dir>/thanos.shipper.json.
+func ReadMetaFile(dir string) (*Meta, error) {
+	b, err := ioutil.ReadFile(filepath.Join(dir, MetaFilename))
+	if err != nil {
+		return nil, err
+	}
+	var m Meta
+
+	if err := json.Unmarshal(b, &m); err != nil {
+		return nil, err
+	}
+	if m.Version != MetaVersion1 {
+		return nil, errors.Errorf("unexpected meta file version %d", m.Version)
+	}
+
+	return &m, nil
+}
+
+func renameFile(logger log.Logger, from, to string) error {
+	if err := os.RemoveAll(to); err != nil {
+		return err
+	}
+	if err := os.Rename(from, to); err != nil {
+		return err
+	}
+
+	// Directory was renamed; sync parent dir to persist rename.
+	pdir, err := fileutil.OpenDir(filepath.Dir(to))
+	if err != nil {
+		return err
+	}
+
+	if err = fileutil.Fdatasync(pdir); err != nil {
+		runutil.CloseWithLogOnErr(logger, pdir, "rename file dir close")
+		return err
+	}
+	return pdir.Close()
+}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go
new file mode 100644
index 0000000000..100b063c41
--- /dev/null
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go
@@ -0,0 +1,1951 @@
+package store
+
+import (
+	"bytes"
+	"context"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"path"
+	"path/filepath"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/oklog/run"
+	"github.com/oklog/ulid"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/index"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/thanos-io/thanos/pkg/block"
+	"github.com/thanos-io/thanos/pkg/block/metadata"
+	"github.com/thanos-io/thanos/pkg/compact/downsample"
+	"github.com/thanos-io/thanos/pkg/component"
+	"github.com/thanos-io/thanos/pkg/extprom"
+	"github.com/thanos-io/thanos/pkg/model"
+	"github.com/thanos-io/thanos/pkg/objstore"
+	"github.com/thanos-io/thanos/pkg/pool"
+	"github.com/thanos-io/thanos/pkg/runutil"
+	"github.com/thanos-io/thanos/pkg/store/storepb"
+	"github.com/thanos-io/thanos/pkg/strutil"
+	"github.com/thanos-io/thanos/pkg/tracing"
+	"golang.org/x/sync/errgroup"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// maxSamplesPerChunk is approximately the max number of samples that we may have in any given chunk. This is needed
+// for precalculating the number of samples that we may have to retrieve and decode for any given query
+// without downloading them. Please take a look at https://github.com/prometheus/tsdb/pull/397 to know
+// where this number comes from. Long story short: TSDB is built this way because you barely get any
+// improvement in compression once the number of samples exceeds this.
+// Take a look at Figure 6 in this whitepaper http://www.vldb.org/pvldb/vol8/p1816-teller.pdf.
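+// As a worked example (scrape interval is illustrative): at a typical 15s scrape
+// interval, 120 samples cover 120 * 15s = 30 minutes of data per chunk.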
+const maxSamplesPerChunk = 120
+
+type bucketStoreMetrics struct {
+	blocksLoaded          prometheus.Gauge
+	blockLoads            prometheus.Counter
+	blockLoadFailures     prometheus.Counter
+	blockDrops            prometheus.Counter
+	blockDropFailures     prometheus.Counter
+	seriesDataTouched     *prometheus.SummaryVec
+	seriesDataFetched     *prometheus.SummaryVec
+	seriesDataSizeTouched *prometheus.SummaryVec
+	seriesDataSizeFetched *prometheus.SummaryVec
+	seriesBlocksQueried   prometheus.Summary
+	seriesGetAllDuration  prometheus.Histogram
+	seriesMergeDuration   prometheus.Histogram
+	resultSeriesCount     prometheus.Summary
+	chunkSizeBytes        prometheus.Histogram
+	queriesDropped        prometheus.Counter
+	queriesLimit          prometheus.Gauge
+}
+
+func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics {
+	var m bucketStoreMetrics
+
+	m.blockLoads = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "thanos_bucket_store_block_loads_total",
+		Help: "Total number of remote block loading attempts.",
+	})
+	m.blockLoadFailures = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "thanos_bucket_store_block_load_failures_total",
+		Help: "Total number of failed remote block loading attempts.",
+	})
+	m.blockDrops = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "thanos_bucket_store_block_drops_total",
+		Help: "Total number of local blocks that were dropped.",
+	})
+	m.blockDropFailures = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "thanos_bucket_store_block_drop_failures_total",
+		Help: "Total number of local blocks that failed to be dropped.",
+	})
+	m.blocksLoaded = prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "thanos_bucket_store_blocks_loaded",
+		Help: "Number of currently loaded blocks.",
+	})
+
+	m.seriesDataTouched = prometheus.NewSummaryVec(prometheus.SummaryOpts{
+		Name: "thanos_bucket_store_series_data_touched",
+		Help: "How many items of a data type in a block were touched for a single series request.",
+	}, []string{"data_type"})
+	m.seriesDataFetched = prometheus.NewSummaryVec(prometheus.SummaryOpts{
+		Name: "thanos_bucket_store_series_data_fetched",
+		Help: "How many items of a data type in a block were fetched for a single series request.",
+	}, []string{"data_type"})
+
+	m.seriesDataSizeTouched = prometheus.NewSummaryVec(prometheus.SummaryOpts{
+		Name: "thanos_bucket_store_series_data_size_touched_bytes",
+		Help: "Size of all items of a data type in a block that were touched for a single series request.",
+	}, []string{"data_type"})
+	m.seriesDataSizeFetched = prometheus.NewSummaryVec(prometheus.SummaryOpts{
+		Name: "thanos_bucket_store_series_data_size_fetched_bytes",
+		Help: "Size of all items of a data type in a block that were fetched for a single series request.",
+	}, []string{"data_type"})
+
+	m.seriesBlocksQueried = prometheus.NewSummary(prometheus.SummaryOpts{
+		Name: "thanos_bucket_store_series_blocks_queried",
+		Help: "Number of blocks in a bucket store that were touched to satisfy a query.",
+	})
+	m.seriesGetAllDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Name: "thanos_bucket_store_series_get_all_duration_seconds",
+		Help: "Time it takes until all per-block prepares and preloads for a query are finished.",
+		Buckets: []float64{
+			0.01, 0.05, 0.1, 0.25, 0.6, 1, 2, 3.5, 5, 7.5, 10, 15, 30, 60,
+		},
+	})
+	m.seriesMergeDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Name: "thanos_bucket_store_series_merge_duration_seconds",
+		Help: "Time it takes to merge sub-results from all queried blocks into a single result.",
+		Buckets: []float64{
+			0.01, 0.05, 0.1, 0.25, 0.6, 1, 2, 3.5, 5, 7.5, 10, 15, 30, 60,
+		},
+	})
+	m.resultSeriesCount = prometheus.NewSummary(prometheus.SummaryOpts{
+		Name: "thanos_bucket_store_series_result_series",
+		Help: "Number of series observed in the final result of a query.",
+	})
+
+	m.chunkSizeBytes = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Name: "thanos_bucket_store_sent_chunk_size_bytes",
+		Help: "Size in bytes of the chunks for a single series, which corresponds to the gRPC message size sent to the querier.",
+		Buckets: []float64{
+			32, 256, 512, 1024, 32 * 1024, 256 * 1024, 512 * 1024, 1024 * 1024, 32 * 1024 * 1024, 256 * 1024 * 1024, 512 * 1024 * 1024,
+		},
+	})
+
+	m.queriesDropped = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "thanos_bucket_store_queries_dropped_total",
+		Help: "Number of queries that were dropped due to the sample limit.",
+	})
+	m.queriesLimit = prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "thanos_bucket_store_queries_concurrent_max",
+		Help: "Number of maximum concurrent queries.",
+	})
+
+	if reg != nil {
+		reg.MustRegister(
+			m.blockLoads,
+			m.blockLoadFailures,
+			m.blockDrops,
+			m.blockDropFailures,
+			m.blocksLoaded,
+			m.seriesDataTouched,
+			m.seriesDataFetched,
+			m.seriesDataSizeTouched,
+			m.seriesDataSizeFetched,
+			m.seriesBlocksQueried,
+			m.seriesGetAllDuration,
+			m.seriesMergeDuration,
+			m.resultSeriesCount,
+			m.chunkSizeBytes,
+			m.queriesDropped,
+			m.queriesLimit,
+		)
+	}
+	return &m
+}
+
+type indexCache interface {
+	SetPostings(b ulid.ULID, l labels.Label, v []byte)
+	Postings(b ulid.ULID, l labels.Label) ([]byte, bool)
+	SetSeries(b ulid.ULID, id uint64, v []byte)
+	Series(b ulid.ULID, id uint64) ([]byte, bool)
+}
+
+// FilterConfig is a configuration which Store uses for filtering metrics.
+type FilterConfig struct {
+	MinTime, MaxTime model.TimeOrDurationValue
+}
+
+// BucketStore implements the store API backed by a bucket. It loads all index
+// files to local disk.
+type BucketStore struct {
+	logger     log.Logger
+	metrics    *bucketStoreMetrics
+	bucket     objstore.BucketReader
+	dir        string
+	indexCache indexCache
+	chunkPool  *pool.BytesPool
+
+	// Sets of blocks that have the same labels. They are indexed by a hash over their label set.
+	mtx       sync.RWMutex
+	blocks    map[ulid.ULID]*bucketBlock
+	blockSets map[uint64]*bucketBlockSet
+
+	// debugLogging enables verbose additional logging.
+	debugLogging bool
+	// Number of goroutines to use when syncing blocks from object storage.
+	blockSyncConcurrency int
+
+	// Query gate which limits the maximum number of concurrent queries.
+	queryGate *Gate
+
+	// samplesLimiter limits the number of samples per Series() call.
+	samplesLimiter *Limiter
+	partitioner    partitioner
+
+	filterConfig *FilterConfig
+}
+
+// NewBucketStore creates a new bucket-backed store that implements the store API against
+// an object store bucket. It is optimized to work against high latency backends.
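+//
+// A rough construction sketch (values are illustrative, not recommendations):
+//
+//	store, err := NewBucketStore(
+//		logger, reg, bkt,
+//		"/var/thanos/store",  // local cache dir
+//		indexCache,
+//		2*1024*1024*1024, // max chunk pool bytes
+//		0,                // no sample limit
+//		20,               // max concurrent queries
+//		false,            // debug logging
+//		20,               // block sync concurrency
+//		filterConf,
+//	)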
+func NewBucketStore(
+	logger log.Logger,
+	reg prometheus.Registerer,
+	bucket objstore.BucketReader,
+	dir string,
+	indexCache indexCache,
+	maxChunkPoolBytes uint64,
+	maxSampleCount uint64,
+	maxConcurrent int,
+	debugLogging bool,
+	blockSyncConcurrency int,
+	filterConf *FilterConfig,
+) (*BucketStore, error) {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+
+	if maxConcurrent < 0 {
+		return nil, errors.Errorf("max concurrency value cannot be lower than 0 (got %v)", maxConcurrent)
+	}
+
+	chunkPool, err := pool.NewBytesPool(2e5, 50e6, 2, maxChunkPoolBytes)
+	if err != nil {
+		return nil, errors.Wrap(err, "create chunk pool")
+	}
+
+	const maxGapSize = 512 * 1024
+
+	metrics := newBucketStoreMetrics(reg)
+	s := &BucketStore{
+		logger:               logger,
+		bucket:               bucket,
+		dir:                  dir,
+		indexCache:           indexCache,
+		chunkPool:            chunkPool,
+		blocks:               map[ulid.ULID]*bucketBlock{},
+		blockSets:            map[uint64]*bucketBlockSet{},
+		debugLogging:         debugLogging,
+		blockSyncConcurrency: blockSyncConcurrency,
+		queryGate: NewGate(
+			maxConcurrent,
+			extprom.WrapRegistererWithPrefix("thanos_bucket_store_series_", reg),
+		),
+		samplesLimiter: NewLimiter(maxSampleCount, metrics.queriesDropped),
+		partitioner:    gapBasedPartitioner{maxGapSize: maxGapSize},
+		filterConfig:   filterConf,
+	}
+	s.metrics = metrics
+
+	if err := os.MkdirAll(dir, 0777); err != nil {
+		return nil, errors.Wrap(err, "create dir")
+	}
+
+	s.metrics.queriesLimit.Set(float64(maxConcurrent))
+
+	return s, nil
+}
+
+// Close the store.
+func (s *BucketStore) Close() (err error) {
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+
+	for _, b := range s.blocks {
+		if e := b.Close(); e != nil {
+			level.Warn(s.logger).Log("msg", "closing Bucket block failed", "err", e)
+			err = e
+		}
+	}
+	return err
+}
+
+// SyncBlocks synchronizes the store's state with the blocks in the bucket.
+// It will reuse disk space as a persistent cache based on the s.dir param.
+func (s *BucketStore) SyncBlocks(ctx context.Context) error {
+	var wg sync.WaitGroup
+	blockc := make(chan ulid.ULID)
+
+	for i := 0; i < s.blockSyncConcurrency; i++ {
+		wg.Add(1)
+		go func() {
+			for id := range blockc {
+				if err := s.addBlock(ctx, id); err != nil {
+					level.Warn(s.logger).Log("msg", "loading block failed", "id", id, "err", err)
+					continue
+				}
+			}
+			wg.Done()
+		}()
+	}
+
+	allIDs := map[ulid.ULID]struct{}{}
+
+	err := s.bucket.Iter(ctx, "", func(name string) error {
+		// Strip trailing slash indicating a directory.
+		id, err := ulid.Parse(name[:len(name)-1])
+		if err != nil {
+			return nil
+		}
+
+		inRange, err := s.isBlockInMinMaxRange(ctx, id)
+		if err != nil {
+			level.Warn(s.logger).Log("msg", "error parsing block range", "block", id, "err", err)
+			return nil
+		}
+
+		if !inRange {
+			return nil
+		}
+
+		allIDs[id] = struct{}{}
+
+		if b := s.getBlock(id); b != nil {
+			return nil
+		}
+		select {
+		case <-ctx.Done():
+		case blockc <- id:
+		}
+		return nil
+	})
+
+	close(blockc)
+	wg.Wait()
+
+	if err != nil {
+		return errors.Wrap(err, "iter")
+	}
+	// Drop all blocks that are no longer present in the bucket.
+	for id := range s.blocks {
+		if _, ok := allIDs[id]; ok {
+			continue
+		}
+		if err := s.removeBlock(id); err != nil {
+			level.Warn(s.logger).Log("msg", "drop outdated block", "block", id, "err", err)
+			s.metrics.blockDropFailures.Inc()
+		}
+		s.metrics.blockDrops.Inc()
+	}
+
+	return nil
+}
+
+// InitialSync performs a blocking sync with an extra step at the end to delete locally saved blocks that are no longer
+// present in the bucket.
The mismatch of these can only happen between restarts, so we can do that only once per startup. +func (s *BucketStore) InitialSync(ctx context.Context) error { + if err := s.SyncBlocks(ctx); err != nil { + return errors.Wrap(err, "sync block") + } + + names, err := fileutil.ReadDir(s.dir) + if err != nil { + return errors.Wrap(err, "read dir") + } + for _, n := range names { + id, ok := block.IsBlockDir(n) + if !ok { + continue + } + if b := s.getBlock(id); b != nil { + continue + } + + // No such block loaded, remove the local dir. + if err := os.RemoveAll(path.Join(s.dir, id.String())); err != nil { + level.Warn(s.logger).Log("msg", "failed to remove block which is not needed", "err", err) + } + } + + return nil +} + +func (s *BucketStore) numBlocks() int { + s.mtx.RLock() + defer s.mtx.RUnlock() + return len(s.blocks) +} + +func (s *BucketStore) isBlockInMinMaxRange(ctx context.Context, id ulid.ULID) (bool, error) { + dir := filepath.Join(s.dir, id.String()) + + err, meta := loadMeta(ctx, s.logger, s.bucket, dir, id) + if err != nil { + return false, err + } + + // We check for blocks in configured minTime, maxTime range. + switch { + case meta.MaxTime <= s.filterConfig.MinTime.PrometheusTimestamp(): + return false, nil + + case meta.MinTime >= s.filterConfig.MaxTime.PrometheusTimestamp(): + return false, nil + } + + return true, nil +} + +func (s *BucketStore) getBlock(id ulid.ULID) *bucketBlock { + s.mtx.RLock() + defer s.mtx.RUnlock() + return s.blocks[id] +} + +func (s *BucketStore) addBlock(ctx context.Context, id ulid.ULID) (err error) { + dir := filepath.Join(s.dir, id.String()) + + defer func() { + if err != nil { + s.metrics.blockLoadFailures.Inc() + if err2 := os.RemoveAll(dir); err2 != nil { + level.Warn(s.logger).Log("msg", "failed to remove block we cannot load", "err", err2) + } + } + }() + s.metrics.blockLoads.Inc() + + b, err := newBucketBlock( + ctx, + log.With(s.logger, "block", id), + s.bucket, + id, + dir, + s.indexCache, + s.chunkPool, + s.partitioner, + ) + if err != nil { + return errors.Wrap(err, "new bucket block") + } + s.mtx.Lock() + defer s.mtx.Unlock() + + lset := labels.FromMap(b.meta.Thanos.Labels) + h := lset.Hash() + + set, ok := s.blockSets[h] + if !ok { + set = newBucketBlockSet(lset) + s.blockSets[h] = set + } + + if err = set.add(b); err != nil { + return errors.Wrap(err, "add block to set") + } + s.blocks[b.meta.ULID] = b + + s.metrics.blocksLoaded.Inc() + + return nil +} + +func (s *BucketStore) removeBlock(id ulid.ULID) error { + s.mtx.Lock() + b, ok := s.blocks[id] + if ok { + lset := labels.FromMap(b.meta.Thanos.Labels) + s.blockSets[lset.Hash()].remove(id) + delete(s.blocks, id) + } + s.mtx.Unlock() + + if !ok { + return nil + } + + s.metrics.blocksLoaded.Dec() + if err := b.Close(); err != nil { + return errors.Wrap(err, "close block") + } + return os.RemoveAll(b.dir) +} + +// TimeRange returns the minimum and maximum timestamp of data available in the store. +func (s *BucketStore) TimeRange() (mint, maxt int64) { + s.mtx.RLock() + defer s.mtx.RUnlock() + + mint = math.MaxInt64 + maxt = math.MinInt64 + + for _, b := range s.blocks { + if b.meta.MinTime < mint { + mint = b.meta.MinTime + } + if b.meta.MaxTime > maxt { + maxt = b.meta.MaxTime + } + } + + mint = s.limitMinTime(mint) + maxt = s.limitMaxTime(maxt) + + return mint, maxt +} + +// Info implements the storepb.StoreServer interface. 
+func (s *BucketStore) Info(context.Context, *storepb.InfoRequest) (*storepb.InfoResponse, error) { + mint, maxt := s.TimeRange() + // Store nodes hold global data and thus have no labels. + return &storepb.InfoResponse{ + StoreType: component.Store.ToProto(), + MinTime: mint, + MaxTime: maxt, + }, nil +} + +func (s *BucketStore) limitMinTime(mint int64) int64 { + filterMinTime := s.filterConfig.MinTime.PrometheusTimestamp() + + if mint < filterMinTime { + return filterMinTime + } + + return mint +} + +func (s *BucketStore) limitMaxTime(maxt int64) int64 { + filterMaxTime := s.filterConfig.MaxTime.PrometheusTimestamp() + + if maxt > filterMaxTime { + maxt = filterMaxTime + } + + return maxt +} + +type seriesEntry struct { + lset []storepb.Label + refs []uint64 + chks []storepb.AggrChunk +} + +type bucketSeriesSet struct { + set []seriesEntry + i int + err error +} + +func newBucketSeriesSet(set []seriesEntry) *bucketSeriesSet { + return &bucketSeriesSet{ + set: set, + i: -1, + } +} + +func (s *bucketSeriesSet) Next() bool { + if s.i >= len(s.set)-1 { + return false + } + s.i++ + return true +} + +func (s *bucketSeriesSet) At() ([]storepb.Label, []storepb.AggrChunk) { + return s.set[s.i].lset, s.set[s.i].chks +} + +func (s *bucketSeriesSet) Err() error { + return s.err +} + +func blockSeries( + ctx context.Context, + ulid ulid.ULID, + extLset map[string]string, + indexr *bucketIndexReader, + chunkr *bucketChunkReader, + matchers []labels.Matcher, + req *storepb.SeriesRequest, + samplesLimiter *Limiter, +) (storepb.SeriesSet, *queryStats, error) { + ps, err := indexr.ExpandedPostings(matchers) + if err != nil { + return nil, nil, errors.Wrap(err, "expanded matching posting") + } + + if len(ps) == 0 { + return storepb.EmptySeriesSet(), indexr.stats, nil + } + + // Preload all series index data. + // TODO(bwplotka): Consider not keeping all series in memory all the time. + // TODO(bwplotka): Do lazy loading in one step as `ExpandingPostings` method. + if err := indexr.PreloadSeries(ps); err != nil { + return nil, nil, errors.Wrap(err, "preload series") + } + + // Transform all series into the response types and mark their relevant chunks + // for preloading. + var ( + res []seriesEntry + lset labels.Labels + chks []chunks.Meta + ) + for _, id := range ps { + if err := indexr.LoadedSeries(id, &lset, &chks); err != nil { + return nil, nil, errors.Wrap(err, "read series") + } + s := seriesEntry{ + lset: make([]storepb.Label, 0, len(lset)), + refs: make([]uint64, 0, len(chks)), + chks: make([]storepb.AggrChunk, 0, len(chks)), + } + for _, l := range lset { + // Skip if the external labels of the block overrule the series' label. + // NOTE(fabxc): maybe move it to a prefixed version to still ensure uniqueness of series? 
+ if extLset[l.Name] != "" { + continue + } + s.lset = append(s.lset, storepb.Label{ + Name: l.Name, + Value: l.Value, + }) + } + for ln, lv := range extLset { + s.lset = append(s.lset, storepb.Label{ + Name: ln, + Value: lv, + }) + } + sort.Slice(s.lset, func(i, j int) bool { + return s.lset[i].Name < s.lset[j].Name + }) + + for _, meta := range chks { + if meta.MaxTime < req.MinTime { + continue + } + if meta.MinTime > req.MaxTime { + break + } + + if err := chunkr.addPreload(meta.Ref); err != nil { + return nil, nil, errors.Wrap(err, "add chunk preload") + } + s.chks = append(s.chks, storepb.AggrChunk{ + MinTime: meta.MinTime, + MaxTime: meta.MaxTime, + }) + s.refs = append(s.refs, meta.Ref) + } + if len(s.chks) > 0 { + res = append(res, s) + } + } + + // Preload all chunks that were marked in the previous stage. + if err := chunkr.preload(samplesLimiter); err != nil { + return nil, nil, errors.Wrap(err, "preload chunks") + } + + // Transform all chunks into the response format. + for _, s := range res { + for i, ref := range s.refs { + chk, err := chunkr.Chunk(ref) + if err != nil { + return nil, nil, errors.Wrap(err, "get chunk") + } + if err := populateChunk(&s.chks[i], chk, req.Aggregates); err != nil { + return nil, nil, errors.Wrap(err, "populate chunk") + } + } + } + + return newBucketSeriesSet(res), indexr.stats.merge(chunkr.stats), nil +} + +func populateChunk(out *storepb.AggrChunk, in chunkenc.Chunk, aggrs []storepb.Aggr) error { + if in.Encoding() == chunkenc.EncXOR { + out.Raw = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: in.Bytes()} + return nil + } + if in.Encoding() != downsample.ChunkEncAggr { + return errors.Errorf("unsupported chunk encoding %d", in.Encoding()) + } + + ac := downsample.AggrChunk(in.Bytes()) + + for _, at := range aggrs { + switch at { + case storepb.Aggr_COUNT: + x, err := ac.Get(downsample.AggrCount) + if err != nil { + return errors.Errorf("aggregate %s does not exist", downsample.AggrCount) + } + out.Count = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: x.Bytes()} + case storepb.Aggr_SUM: + x, err := ac.Get(downsample.AggrSum) + if err != nil { + return errors.Errorf("aggregate %s does not exist", downsample.AggrSum) + } + out.Sum = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: x.Bytes()} + case storepb.Aggr_MIN: + x, err := ac.Get(downsample.AggrMin) + if err != nil { + return errors.Errorf("aggregate %s does not exist", downsample.AggrMin) + } + out.Min = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: x.Bytes()} + case storepb.Aggr_MAX: + x, err := ac.Get(downsample.AggrMax) + if err != nil { + return errors.Errorf("aggregate %s does not exist", downsample.AggrMax) + } + out.Max = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: x.Bytes()} + case storepb.Aggr_COUNTER: + x, err := ac.Get(downsample.AggrCounter) + if err != nil { + return errors.Errorf("aggregate %s does not exist", downsample.AggrCounter) + } + out.Counter = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: x.Bytes()} + } + } + return nil +} + +// debugFoundBlockSetOverview logs on debug level what exactly blocks we used for query in terms of +// labels and resolution. This is important because we allow mixed resolution results, so it is quite crucial +// to be aware what exactly resolution we see on query. +// TODO(bplotka): Consider adding resolution label to all results to propagate that info to UI and Query API. 
+func debugFoundBlockSetOverview(logger log.Logger, mint, maxt, maxResolutionMillis int64, lset labels.Labels, bs []*bucketBlock) { + if len(bs) == 0 { + level.Debug(logger).Log("msg", "No block found", "mint", mint, "maxt", maxt, "lset", lset.String()) + return + } + + var ( + parts []string + currRes = int64(-1) + currMin, currMax int64 + ) + for _, b := range bs { + if currRes == b.meta.Thanos.Downsample.Resolution { + currMax = b.meta.MaxTime + continue + } + + if currRes != -1 { + parts = append(parts, fmt.Sprintf("Range: %d-%d Resolution: %d", currMin, currMax, currRes)) + } + + currRes = b.meta.Thanos.Downsample.Resolution + currMin = b.meta.MinTime + currMax = b.meta.MaxTime + } + + parts = append(parts, fmt.Sprintf("Range: %d-%d Resolution: %d", currMin, currMax, currRes)) + + level.Debug(logger).Log("msg", "Blocks source resolutions", "blocks", len(bs), "Maximum Resolution", maxResolutionMillis, "mint", mint, "maxt", maxt, "lset", lset.String(), "spans", strings.Join(parts, "\n")) +} + +// Series implements the storepb.StoreServer interface. +func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_SeriesServer) (err error) { + { + span, _ := tracing.StartSpan(srv.Context(), "store_query_gate_ismyturn") + err := s.queryGate.IsMyTurn(srv.Context()) + span.Finish() + if err != nil { + return errors.Wrapf(err, "failed to wait for turn") + } + } + defer s.queryGate.Done() + + matchers, err := translateMatchers(req.Matchers) + if err != nil { + return status.Error(codes.InvalidArgument, err.Error()) + } + req.MinTime = s.limitMinTime(req.MinTime) + req.MaxTime = s.limitMaxTime(req.MaxTime) + + var ( + stats = &queryStats{} + g run.Group + res []storepb.SeriesSet + mtx sync.Mutex + ) + s.mtx.RLock() + + for _, bs := range s.blockSets { + blockMatchers, ok := bs.labelMatchers(matchers...) + if !ok { + continue + } + blocks := bs.getFor(req.MinTime, req.MaxTime, req.MaxResolutionWindow) + + if s.debugLogging { + debugFoundBlockSetOverview(s.logger, req.MinTime, req.MaxTime, req.MaxResolutionWindow, bs.labels, blocks) + } + + for _, b := range blocks { + stats.blocksQueried++ + + b := b + ctx, cancel := context.WithCancel(srv.Context()) + + // We must keep the readers open until all their data has been sent. + indexr := b.indexReader(ctx) + chunkr := b.chunkReader(ctx) + + // Defer all closes to the end of Series method. 
+ defer runutil.CloseWithLogOnErr(s.logger, indexr, "series block") + defer runutil.CloseWithLogOnErr(s.logger, chunkr, "series block") + + g.Add(func() error { + part, pstats, err := blockSeries(ctx, + b.meta.ULID, + b.meta.Thanos.Labels, + indexr, + chunkr, + blockMatchers, + req, + s.samplesLimiter, + ) + if err != nil { + return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID) + } + + mtx.Lock() + res = append(res, part) + stats = stats.merge(pstats) + mtx.Unlock() + + return nil + }, func(err error) { + if err != nil { + cancel() + } + }) + } + } + + s.mtx.RUnlock() + + defer func() { + s.metrics.seriesDataTouched.WithLabelValues("postings").Observe(float64(stats.postingsTouched)) + s.metrics.seriesDataFetched.WithLabelValues("postings").Observe(float64(stats.postingsFetched)) + s.metrics.seriesDataSizeTouched.WithLabelValues("postings").Observe(float64(stats.postingsTouchedSizeSum)) + s.metrics.seriesDataSizeFetched.WithLabelValues("postings").Observe(float64(stats.postingsFetchedSizeSum)) + s.metrics.seriesDataTouched.WithLabelValues("series").Observe(float64(stats.seriesTouched)) + s.metrics.seriesDataFetched.WithLabelValues("series").Observe(float64(stats.seriesFetched)) + s.metrics.seriesDataSizeTouched.WithLabelValues("series").Observe(float64(stats.seriesTouchedSizeSum)) + s.metrics.seriesDataSizeFetched.WithLabelValues("series").Observe(float64(stats.seriesFetchedSizeSum)) + s.metrics.seriesDataTouched.WithLabelValues("chunks").Observe(float64(stats.chunksTouched)) + s.metrics.seriesDataFetched.WithLabelValues("chunks").Observe(float64(stats.chunksFetched)) + s.metrics.seriesDataSizeTouched.WithLabelValues("chunks").Observe(float64(stats.chunksTouchedSizeSum)) + s.metrics.seriesDataSizeFetched.WithLabelValues("chunks").Observe(float64(stats.chunksFetchedSizeSum)) + s.metrics.resultSeriesCount.Observe(float64(stats.mergedSeriesCount)) + + level.Debug(s.logger).Log("msg", "stats query processed", + "stats", fmt.Sprintf("%+v", stats), "err", err) + }() + + // Concurrently get data from all blocks. + { + span, _ := tracing.StartSpan(srv.Context(), "bucket_store_preload_all") + begin := time.Now() + err := g.Run() + span.Finish() + + if err != nil { + return status.Error(codes.Aborted, err.Error()) + } + stats.getAllDuration = time.Since(begin) + s.metrics.seriesGetAllDuration.Observe(stats.getAllDuration.Seconds()) + s.metrics.seriesBlocksQueried.Observe(float64(stats.blocksQueried)) + } + // Merge the sub-results from each selected block. + { + span, _ := tracing.StartSpan(srv.Context(), "bucket_store_merge_all") + defer span.Finish() + + begin := time.Now() + + // Merge series set into an union of all block sets. This exposes all blocks are single seriesSet. + // Chunks of returned series might be out of order w.r.t to their time range. + // This must be accounted for later by clients. + set := storepb.MergeSeriesSets(res...) 
+ for set.Next() { + var series storepb.Series + + series.Labels, series.Chunks = set.At() + + stats.mergedSeriesCount++ + stats.mergedChunksCount += len(series.Chunks) + s.metrics.chunkSizeBytes.Observe(float64(chunksSize(series.Chunks))) + + if err := srv.Send(storepb.NewSeriesResponse(&series)); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) + } + } + if set.Err() != nil { + return status.Error(codes.Unknown, errors.Wrap(set.Err(), "expand series set").Error()) + } + stats.mergeDuration = time.Since(begin) + s.metrics.seriesMergeDuration.Observe(stats.mergeDuration.Seconds()) + } + return nil +} + +func chunksSize(chks []storepb.AggrChunk) (size int) { + for _, chk := range chks { + size += chk.Size() // This gets the encoded proto size. + } + return size +} + +// LabelNames implements the storepb.StoreServer interface. +func (s *BucketStore) LabelNames(ctx context.Context, _ *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { + g, gctx := errgroup.WithContext(ctx) + + s.mtx.RLock() + + var mtx sync.Mutex + var sets [][]string + + for _, b := range s.blocks { + indexr := b.indexReader(gctx) + g.Go(func() error { + defer runutil.CloseWithLogOnErr(s.logger, indexr, "label names") + + res := indexr.LabelNames() + sort.Strings(res) + + mtx.Lock() + sets = append(sets, res) + mtx.Unlock() + + return nil + }) + } + + s.mtx.RUnlock() + + if err := g.Wait(); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + return &storepb.LabelNamesResponse{ + Names: strutil.MergeSlices(sets...), + }, nil +} + +// LabelValues implements the storepb.StoreServer interface. +func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { + g, gctx := errgroup.WithContext(ctx) + + s.mtx.RLock() + + var mtx sync.Mutex + var sets [][]string + + for _, b := range s.blocks { + indexr := b.indexReader(gctx) + // TODO(fabxc): only aggregate chunk metas first and add a subsequent fetch stage + // where we consolidate requests. + g.Go(func() error { + defer runutil.CloseWithLogOnErr(s.logger, indexr, "label values") + + res := indexr.LabelValues(req.Label) + + mtx.Lock() + sets = append(sets, res) + mtx.Unlock() + + return nil + }) + } + + s.mtx.RUnlock() + + if err := g.Wait(); err != nil { + return nil, status.Error(codes.Aborted, err.Error()) + } + return &storepb.LabelValuesResponse{ + Values: strutil.MergeSlices(sets...), + }, nil +} + +// bucketBlockSet holds all blocks of an equal label set. It internally splits +// them up by downsampling resolution and allows querying +type bucketBlockSet struct { + labels labels.Labels + mtx sync.RWMutex + resolutions []int64 // available resolution, high to low (in milliseconds) + blocks [][]*bucketBlock // ordered buckets for the existing resolutions +} + +// newBucketBlockSet initializes a new set with the known downsampling windows hard-configured. +// The set currently does not support arbitrary ranges. 
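+// Resolutions are stored from highest (most downsampled) to lowest (raw) so that
+// getFor can recursively fall back to finer-resolution blocks to fill gaps.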
+func newBucketBlockSet(lset labels.Labels) *bucketBlockSet {
+	return &bucketBlockSet{
+		labels:      lset,
+		resolutions: []int64{downsample.ResLevel2, downsample.ResLevel1, downsample.ResLevel0},
+		blocks:      make([][]*bucketBlock, 3),
+	}
+}
+
+func (s *bucketBlockSet) add(b *bucketBlock) error {
+	if !s.labels.Equals(labels.FromMap(b.meta.Thanos.Labels)) {
+		return errors.New("block's label set does not match set")
+	}
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+
+	i := int64index(s.resolutions, b.meta.Thanos.Downsample.Resolution)
+	if i < 0 {
+		return errors.Errorf("unsupported downsampling resolution %d", b.meta.Thanos.Downsample.Resolution)
+	}
+	bs := append(s.blocks[i], b)
+	s.blocks[i] = bs
+
+	sort.Slice(bs, func(j, k int) bool {
+		return bs[j].meta.MinTime < bs[k].meta.MinTime
+	})
+	return nil
+}
+
+func (s *bucketBlockSet) remove(id ulid.ULID) {
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+
+	for i, bs := range s.blocks {
+		for j, b := range bs {
+			if b.meta.ULID != id {
+				continue
+			}
+			s.blocks[i] = append(bs[:j], bs[j+1:]...)
+			return
+		}
+	}
+}
+
+func int64index(s []int64, x int64) int {
+	for i, v := range s {
+		if v == x {
+			return i
+		}
+	}
+	return -1
+}
+
+// getFor returns a time-ordered list of blocks that cover the time range between mint and maxt.
+// Blocks with the biggest resolution possible, but not bigger than the given max resolution, are returned.
+func (s *bucketBlockSet) getFor(mint, maxt, maxResolutionMillis int64) (bs []*bucketBlock) {
+	if mint == maxt {
+		return nil
+	}
+
+	s.mtx.RLock()
+	defer s.mtx.RUnlock()
+
+	// Find first matching resolution.
+	i := 0
+	for ; i < len(s.resolutions) && s.resolutions[i] > maxResolutionMillis; i++ {
+	}
+
+	// Fill the given interval with the blocks for the current resolution.
+	// Our current resolution might not cover all data, so recursively fill the gaps with higher-resolution blocks if there are any.
+	start := mint
+	for _, b := range s.blocks[i] {
+		if b.meta.MaxTime <= mint {
+			continue
+		}
+		if b.meta.MinTime >= maxt {
+			break
+		}
+
+		if i+1 < len(s.resolutions) {
+			bs = append(bs, s.getFor(start, b.meta.MinTime, s.resolutions[i+1])...)
+		}
+		bs = append(bs, b)
+		start = b.meta.MaxTime
+	}
+
+	if i+1 < len(s.resolutions) {
+		bs = append(bs, s.getFor(start, maxt, s.resolutions[i+1])...)
+	}
+	return bs
+}
+
+// labelMatchers verifies whether the block set matches the given matchers and returns a new
+// set of matchers that is equivalent when querying data within the block.
+func (s *bucketBlockSet) labelMatchers(matchers ...labels.Matcher) ([]labels.Matcher, bool) {
+	res := make([]labels.Matcher, 0, len(matchers))
+
+	for _, m := range matchers {
+		v := s.labels.Get(m.Name())
+		if v == "" {
+			res = append(res, m)
+			continue
+		}
+		if !m.Matches(v) {
+			return nil, false
+		}
+	}
+	return res, true
+}
+
+// bucketBlock represents a block that is located in a bucket. It holds intermediate
+// state for the block on local disk.
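+// pendingReaders tracks open index and chunk readers so Close can wait for them to finish.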
+type bucketBlock struct {
+	logger     log.Logger
+	bucket     objstore.BucketReader
+	meta       *metadata.Meta
+	dir        string
+	indexCache indexCache
+	chunkPool  *pool.BytesPool
+
+	indexVersion int
+	symbols      []string
+	lvals        map[string][]string
+	postings     map[labels.Label]index.Range
+
+	id        ulid.ULID
+	chunkObjs []string
+
+	pendingReaders sync.WaitGroup
+
+	partitioner partitioner
+}
+
+func newBucketBlock(
+	ctx context.Context,
+	logger log.Logger,
+	bkt objstore.BucketReader,
+	id ulid.ULID,
+	dir string,
+	indexCache indexCache,
+	chunkPool *pool.BytesPool,
+	p partitioner,
+) (b *bucketBlock, err error) {
+	b = &bucketBlock{
+		logger:      logger,
+		bucket:      bkt,
+		id:          id,
+		indexCache:  indexCache,
+		chunkPool:   chunkPool,
+		dir:         dir,
+		partitioner: p,
+	}
+	meta, err := loadMeta(ctx, logger, bkt, dir, id)
+	if err != nil {
+		return nil, errors.Wrap(err, "load meta")
+	}
+	b.meta = meta
+
+	if err = b.loadIndexCacheFile(ctx); err != nil {
+		return nil, errors.Wrap(err, "load index cache")
+	}
+	// Get object handles for all chunk files.
+	err = bkt.Iter(ctx, path.Join(id.String(), block.ChunksDirname), func(n string) error {
+		b.chunkObjs = append(b.chunkObjs, n)
+		return nil
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "list chunk files")
+	}
+	return b, nil
+}
+
+func (b *bucketBlock) indexFilename() string {
+	return path.Join(b.id.String(), block.IndexFilename)
+}
+
+func (b *bucketBlock) indexCacheFilename() string {
+	return path.Join(b.id.String(), block.IndexCacheFilename)
+}
+
+// loadMeta returns the block metadata, downloading meta.json from the bucket on first use.
+func loadMeta(ctx context.Context, logger log.Logger, bucket objstore.BucketReader, dir string, id ulid.ULID) (*metadata.Meta, error) {
+	// If we haven't seen the block before, download the meta.json file.
+	if _, err := os.Stat(dir); os.IsNotExist(err) {
+		if err := os.MkdirAll(dir, 0777); err != nil {
+			return nil, errors.Wrap(err, "create dir")
+		}
+		src := path.Join(id.String(), block.MetaFilename)
+
+		if err := objstore.DownloadFile(ctx, logger, bucket, src, dir); err != nil {
+			return nil, errors.Wrap(err, "download meta.json")
+		}
+	} else if err != nil {
+		return nil, err
+	}
+	meta, err := metadata.Read(dir)
+	if err != nil {
+		return nil, errors.Wrap(err, "read meta.json")
+	}
+
+	return meta, nil
+}
+
+func (b *bucketBlock) loadIndexCacheFile(ctx context.Context) (err error) {
+	cachefn := filepath.Join(b.dir, block.IndexCacheFilename)
+	if err = b.loadIndexCacheFileFromFile(ctx, cachefn); err == nil {
+		return nil
+	}
+	if !os.IsNotExist(errors.Cause(err)) {
+		return errors.Wrap(err, "read index cache")
+	}
+
+	// Try to download index cache file from object store.
+	if err = objstore.DownloadFile(ctx, b.logger, b.bucket, b.indexCacheFilename(), cachefn); err == nil {
+		return b.loadIndexCacheFileFromFile(ctx, cachefn)
+	}
+
+	if !b.bucket.IsObjNotFoundErr(errors.Cause(err)) {
+		return errors.Wrap(err, "download index cache file")
+	}
+
+	// No cache exists on disk yet, build it from the downloaded index and retry.
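+	// This is a one-time cost per block: the full index is downloaded, the cache file
+	// is generated from it, and the raw index file is removed again afterwards.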
+	fn := filepath.Join(b.dir, block.IndexFilename)
+
+	if err := objstore.DownloadFile(ctx, b.logger, b.bucket, b.indexFilename(), fn); err != nil {
+		return errors.Wrap(err, "download index file")
+	}
+
+	defer func() {
+		if rerr := os.Remove(fn); rerr != nil {
+			level.Error(b.logger).Log("msg", "failed to remove temp index file", "path", fn, "err", rerr)
+		}
+	}()
+
+	if err := block.WriteIndexCache(b.logger, fn, cachefn); err != nil {
+		return errors.Wrap(err, "write index cache")
+	}
+
+	return errors.Wrap(b.loadIndexCacheFileFromFile(ctx, cachefn), "read index cache")
+}
+
+func (b *bucketBlock) loadIndexCacheFileFromFile(ctx context.Context, cache string) (err error) {
+	b.indexVersion, b.symbols, b.lvals, b.postings, err = block.ReadIndexCache(b.logger, cache)
+	return err
+}
+
+func (b *bucketBlock) readIndexRange(ctx context.Context, off, length int64) ([]byte, error) {
+	r, err := b.bucket.GetRange(ctx, b.indexFilename(), off, length)
+	if err != nil {
+		return nil, errors.Wrap(err, "get range reader")
+	}
+	defer runutil.CloseWithLogOnErr(b.logger, r, "readIndexRange close range reader")
+
+	c, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, errors.Wrap(err, "read range")
+	}
+	return c, nil
+}
+
+func (b *bucketBlock) readChunkRange(ctx context.Context, seq int, off, length int64) (*[]byte, error) {
+	c, err := b.chunkPool.Get(int(length))
+	if err != nil {
+		return nil, errors.Wrap(err, "allocate chunk bytes")
+	}
+	buf := bytes.NewBuffer(*c)
+
+	r, err := b.bucket.GetRange(ctx, b.chunkObjs[seq], off, length)
+	if err != nil {
+		return nil, errors.Wrap(err, "get range reader")
+	}
+	defer runutil.CloseWithLogOnErr(b.logger, r, "readChunkRange close range reader")
+
+	if _, err = io.Copy(buf, r); err != nil {
+		return nil, errors.Wrap(err, "read range")
+	}
+	internalBuf := buf.Bytes()
+	return &internalBuf, nil
+}
+
+func (b *bucketBlock) indexReader(ctx context.Context) *bucketIndexReader {
+	b.pendingReaders.Add(1)
+	return newBucketIndexReader(ctx, b.logger, b, b.indexCache)
+}
+
+func (b *bucketBlock) chunkReader(ctx context.Context) *bucketChunkReader {
+	b.pendingReaders.Add(1)
+	return newBucketChunkReader(ctx, b)
+}
+
+// Close waits for all pending readers to finish and then closes all underlying resources.
+func (b *bucketBlock) Close() error {
+	b.pendingReaders.Wait()
+	return nil
+}
+
+// bucketIndexReader is a custom index reader (not conforming to the index.Reader interface) that gets postings
+// and series by fetching only the required ranges of the index from object storage.
+type bucketIndexReader struct {
+	logger log.Logger
+	ctx    context.Context
+	block  *bucketBlock
+	dec    *index.Decoder
+	stats  *queryStats
+	cache  indexCache
+
+	mtx          sync.Mutex
+	loadedSeries map[uint64][]byte
+}
+
+func newBucketIndexReader(ctx context.Context, logger log.Logger, block *bucketBlock, cache indexCache) *bucketIndexReader {
+	r := &bucketIndexReader{
+		logger:       logger,
+		ctx:          ctx,
+		block:        block,
+		dec:          &index.Decoder{},
+		stats:        &queryStats{},
+		cache:        cache,
+		loadedSeries: map[uint64][]byte{},
+	}
+	r.dec.LookupSymbol = r.lookupSymbol
+	return r
+}
+
+func (r *bucketIndexReader) lookupSymbol(o uint32) (string, error) {
+	idx := int(o)
+	if idx >= len(r.block.symbols) {
+		return "", errors.Errorf("bucketIndexReader: unknown symbol offset %d", o)
+	}
+
+	return r.block.symbols[idx], nil
+}
+
+// ExpandedPostings returns postings as an expanded list instead of index.Postings.
+// This is because we need to have them buffered anyway to perform efficient lookup
+// on object storage.
+// Found posting IDs (ps) are not strictly required to point to a valid Series, e.g. during
+// background garbage collections.
+//
+// Reminder: A posting is a reference (represented as a uint64) to a series reference, which in turn points to the first
+// chunk where the series contains the matching label-value pair for a given block of data. Postings can be fetched by
+// single label name=value.
+func (r *bucketIndexReader) ExpandedPostings(ms []labels.Matcher) ([]uint64, error) {
+	var postingGroups []*postingGroup
+
+	// NOTE: Derived from tsdb.PostingsForMatchers.
+	for _, m := range ms {
+		// Each group is separate to tell later what postings are intersecting with what.
+		postingGroups = append(postingGroups, toPostingGroup(r.LabelValues, m))
+	}
+
+	if len(postingGroups) == 0 {
+		return nil, nil
+	}
+
+	if err := r.fetchPostings(postingGroups); err != nil {
+		return nil, errors.Wrap(err, "get postings")
+	}
+
+	var postings []index.Postings
+	for _, g := range postingGroups {
+		postings = append(postings, g.Postings())
+	}
+
+	ps, err := index.ExpandPostings(index.Intersect(postings...))
+	if err != nil {
+		return nil, errors.Wrap(err, "expand")
+	}
+
+	// As of version two all series entries are 16 byte padded. All references
+	// we get have to account for that to get the correct offset.
+	if r.block.indexVersion >= 2 {
+		for i, id := range ps {
+			ps[i] = id * 16
+		}
+	}
+
+	return ps, nil
+}
+
+type postingGroup struct {
+	keys     labels.Labels
+	postings []index.Postings
+
+	aggregate func(postings []index.Postings) index.Postings
+}
+
+func newPostingGroup(keys labels.Labels, aggr func(postings []index.Postings) index.Postings) *postingGroup {
+	return &postingGroup{
+		keys:      keys,
+		postings:  make([]index.Postings, len(keys)),
+		aggregate: aggr,
+	}
+}
+
+func (p *postingGroup) Fill(i int, posting index.Postings) {
+	p.postings[i] = posting
+}
+
+func (p *postingGroup) Postings() index.Postings {
+	if len(p.keys) == 0 {
+		return index.EmptyPostings()
+	}
+
+	for i, posting := range p.postings {
+		if posting == nil {
+			// This should not happen. Debug for https://github.com/thanos-io/thanos/issues/874.
+			return index.ErrPostings(errors.Errorf("at least one of %d postings is nil for %s. It was never fetched.", i, p.keys[i]))
+		}
+	}
+
+	return p.aggregate(p.postings)
+}
+
+func merge(p []index.Postings) index.Postings {
+	return index.Merge(p...)
+}
+
+func allWithout(p []index.Postings) index.Postings {
+	return index.Without(p[0], index.Merge(p[1:]...))
+}
+
+// NOTE: Derived from tsdb.postingsForMatcher. index.Merge is equivalent to map duplication.
+func toPostingGroup(lvalsFn func(name string) []string, m labels.Matcher) *postingGroup {
+	var matchingLabels labels.Labels
+
+	// If the matcher selects an empty value, it selects all the series which don't
+	// have the label name set too. See: https://github.com/prometheus/prometheus/issues/3575
+	// and https://github.com/prometheus/prometheus/pull/3578#issuecomment-351653555
+	if m.Matches("") {
+		allName, allValue := index.AllPostingsKey()
+
+		matchingLabels = append(matchingLabels, labels.Label{Name: allName, Value: allValue})
+		for _, val := range lvalsFn(m.Name()) {
+			if !m.Matches(val) {
+				matchingLabels = append(matchingLabels, labels.Label{Name: m.Name(), Value: val})
+			}
+		}
+
+		if len(matchingLabels) == 1 {
+			// This is a known hack to return all series.
+			// Ask for x != "". Allow for that as Prometheus does,
+			// even though it is expensive.
+			return newPostingGroup(matchingLabels, merge)
+		}
+
+		return newPostingGroup(matchingLabels, allWithout)
+	}
+
+	// Fast-path for equal matching.
+	if em, ok := m.(*labels.EqualMatcher); ok {
+		return newPostingGroup(labels.Labels{{Name: em.Name(), Value: em.Value()}}, merge)
+	}
+
+	for _, val := range lvalsFn(m.Name()) {
+		if m.Matches(val) {
+			matchingLabels = append(matchingLabels, labels.Label{Name: m.Name(), Value: val})
+		}
+	}
+
+	return newPostingGroup(matchingLabels, merge)
+}
+
+type postingPtr struct {
+	groupID int
+	keyID   int
+	ptr     index.Range
+}
+
+// fetchPostings fills the postings requested by the posting groups.
+func (r *bucketIndexReader) fetchPostings(groups []*postingGroup) error {
+	var ptrs []postingPtr
+
+	// Iterate over all groups and fetch posting from cache.
+	// If we have a miss, mark key to be fetched in `ptrs` slice.
+	// Overlaps are well handled by partitioner, so we don't need to deduplicate keys.
+	for i, g := range groups {
+		for j, key := range g.keys {
+			// Get postings for the given key from cache first.
+			if b, ok := r.cache.Postings(r.block.meta.ULID, key); ok {
+				r.stats.postingsTouched++
+				r.stats.postingsTouchedSizeSum += len(b)
+
+				_, l, err := r.dec.Postings(b)
+				if err != nil {
+					return errors.Wrap(err, "decode postings")
+				}
+				g.Fill(j, l)
+				continue
+			}
+
+			// Cache miss; save pointer for actual posting in index stored in object store.
+			ptr, ok := r.block.postings[key]
+			if !ok {
+				// This block does not have any posting for given key.
+				g.Fill(j, index.EmptyPostings())
+				continue
+			}
+
+			r.stats.postingsToFetch++
+			ptrs = append(ptrs, postingPtr{ptr: ptr, groupID: i, keyID: j})
+		}
+	}
+
+	sort.Slice(ptrs, func(i, j int) bool {
+		return ptrs[i].ptr.Start < ptrs[j].ptr.Start
+	})
+
+	// TODO(bwplotka): Assess how large this can be in the worst-case scenario (e.g. a fetch for AllPostingsKeys).
+	// Consider a sub-split if too big.
+	parts := r.block.partitioner.Partition(len(ptrs), func(i int) (start, end uint64) {
+		return uint64(ptrs[i].ptr.Start), uint64(ptrs[i].ptr.End)
+	})
+
+	var g run.Group
+	for _, part := range parts {
+		ctx, cancel := context.WithCancel(r.ctx)
+		i, j := part.elemRng[0], part.elemRng[1]
+
+		start := int64(part.start)
+		// We assume the index does not have any ptrs that have 0 length.
+		length := int64(part.end) - start
+
+		// Fetch from object storage concurrently and update stats and posting list.
+		g.Add(func() error {
+			begin := time.Now()
+
+			b, err := r.block.readIndexRange(ctx, start, length)
+			if err != nil {
+				return errors.Wrap(err, "read postings range")
+			}
+			fetchTime := time.Since(begin)
+
+			r.mtx.Lock()
+			defer r.mtx.Unlock()
+
+			r.stats.postingsFetchCount++
+			r.stats.postingsFetched += j - i
+			r.stats.postingsFetchDurationSum += fetchTime
+			r.stats.postingsFetchedSizeSum += int(length)
+
+			for _, p := range ptrs[i:j] {
+				c := b[p.ptr.Start-start : p.ptr.End-start]
+
+				_, fetchedPostings, err := r.dec.Postings(c)
+				if err != nil {
+					return errors.Wrap(err, "read postings list")
+				}
+
+				// Return postings and fill LRU cache.
+				groups[p.groupID].Fill(p.keyID, fetchedPostings)
+				r.cache.SetPostings(r.block.meta.ULID, groups[p.groupID].keys[p.keyID], c)
+
+				// If we just fetched it we still have to update the stats for touched postings.
+ r.stats.postingsTouched++ + r.stats.postingsTouchedSizeSum += len(c) + } + return nil + }, func(err error) { + if err != nil { + cancel() + } + }) + } + + return g.Run() +} + +func (r *bucketIndexReader) PreloadSeries(ids []uint64) error { + const maxSeriesSize = 64 * 1024 + + var newIDs []uint64 + + for _, id := range ids { + if b, ok := r.cache.Series(r.block.meta.ULID, id); ok { + r.loadedSeries[id] = b + continue + } + newIDs = append(newIDs, id) + } + ids = newIDs + + parts := r.block.partitioner.Partition(len(ids), func(i int) (start, end uint64) { + return ids[i], ids[i] + maxSeriesSize + }) + var g run.Group + + for _, p := range parts { + ctx, cancel := context.WithCancel(r.ctx) + s, e := p.start, p.end + i, j := p.elemRng[0], p.elemRng[1] + + g.Add(func() error { + return r.loadSeries(ctx, ids[i:j], s, e) + }, func(err error) { + if err != nil { + cancel() + } + }) + } + return g.Run() +} + +func (r *bucketIndexReader) loadSeries(ctx context.Context, ids []uint64, start, end uint64) error { + begin := time.Now() + + b, err := r.block.readIndexRange(ctx, int64(start), int64(end-start)) + if err != nil { + return errors.Wrap(err, "read series range") + } + + r.mtx.Lock() + defer r.mtx.Unlock() + + r.stats.seriesFetchCount++ + r.stats.seriesFetched += len(ids) + r.stats.seriesFetchDurationSum += time.Since(begin) + r.stats.seriesFetchedSizeSum += int(end - start) + + for _, id := range ids { + c := b[id-start:] + + l, n := binary.Uvarint(c) + if n < 1 { + return errors.New("reading series length failed") + } + if len(c) < n+int(l) { + return errors.Errorf("invalid remaining size %d, expected %d", len(c), n+int(l)) + } + c = c[n : n+int(l)] + r.loadedSeries[id] = c + r.cache.SetSeries(r.block.meta.ULID, id, c) + } + return nil +} + +type part struct { + start uint64 + end uint64 + + elemRng [2]int +} + +type partitioner interface { + // Partition partitions length entries into n <= length ranges that cover all + // input ranges + // It supports overlapping ranges. + // NOTE: It expects range to be sorted by start time. + Partition(length int, rng func(int) (uint64, uint64)) []part +} + +type gapBasedPartitioner struct { + maxGapSize uint64 +} + +// Partition partitions length entries into n <= length ranges that cover all +// input ranges by combining entries that are separated by reasonably small gaps. +// It is used to combine multiple small ranges from object storage into bigger, more efficient/cheaper ones. +func (g gapBasedPartitioner) Partition(length int, rng func(int) (uint64, uint64)) (parts []part) { + j := 0 + k := 0 + for k < length { + j = k + k++ + + p := part{} + p.start, p.end = rng(j) + + // Keep growing the range until the end or we encounter a large gap. + for ; k < length; k++ { + s, e := rng(k) + + if p.end+g.maxGapSize < s { + break + } + + if p.end <= e { + p.end = e + } + } + p.elemRng = [2]int{j, k} + parts = append(parts, p) + } + return parts +} + +// LoadedSeries populates the given labels and chunk metas for the series identified +// by the reference. +// Returns ErrNotFound if the ref does not resolve to a known series. +func (r *bucketIndexReader) LoadedSeries(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error { + b, ok := r.loadedSeries[ref] + if !ok { + return errors.Errorf("series %d not found", ref) + } + + r.stats.seriesTouched++ + r.stats.seriesTouchedSizeSum += len(b) + + return r.dec.Series(b, lset, chks) +} + +// LabelValues returns label values for single name. 
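+// The returned slice is a copy, so callers may modify it freely.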
+func (r *bucketIndexReader) LabelValues(name string) []string {
+	res := make([]string, 0, len(r.block.lvals[name]))
+	return append(res, r.block.lvals[name]...)
+}
+
+// LabelNames returns a list of label names.
+func (r *bucketIndexReader) LabelNames() []string {
+	res := make([]string, 0, len(r.block.lvals))
+	for ln := range r.block.lvals {
+		res = append(res, ln)
+	}
+	return res
+}
+
+// Close releases the underlying resources of the reader.
+func (r *bucketIndexReader) Close() error {
+	r.block.pendingReaders.Done()
+	return nil
+}
+
+type bucketChunkReader struct {
+	ctx   context.Context
+	block *bucketBlock
+	stats *queryStats
+
+	preloads [][]uint32
+	mtx      sync.Mutex
+	chunks   map[uint64]chunkenc.Chunk
+
+	// Byte slice to return to the chunk pool on close.
+	chunkBytes []*[]byte
+}
+
+func newBucketChunkReader(ctx context.Context, block *bucketBlock) *bucketChunkReader {
+	return &bucketChunkReader{
+		ctx:      ctx,
+		block:    block,
+		stats:    &queryStats{},
+		preloads: make([][]uint32, len(block.chunkObjs)),
+		chunks:   map[uint64]chunkenc.Chunk{},
+	}
+}
+
+// addPreload adds the chunk with id to the data set that will be fetched on calling preload.
+func (r *bucketChunkReader) addPreload(id uint64) error {
+	var (
+		seq = int(id >> 32)
+		off = uint32(id)
+	)
+	if seq >= len(r.preloads) {
+		return errors.Errorf("reference sequence %d out of range", seq)
+	}
+	r.preloads[seq] = append(r.preloads[seq], off)
+	return nil
+}
+
+// preload all added chunk IDs. Must be called before the first call to Chunk is made.
+func (r *bucketChunkReader) preload(samplesLimiter *Limiter) error {
+	const maxChunkSize = 16000
+
+	var g run.Group
+
+	numChunks := uint64(0)
+	for _, offsets := range r.preloads {
+		for range offsets {
+			numChunks++
+		}
+	}
+	if err := samplesLimiter.Check(numChunks * maxSamplesPerChunk); err != nil {
+		return errors.Wrap(err, "exceeded samples limit")
+	}
+
+	for seq, offsets := range r.preloads {
+		sort.Slice(offsets, func(i, j int) bool {
+			return offsets[i] < offsets[j]
+		})
+		parts := r.block.partitioner.Partition(len(offsets), func(i int) (start, end uint64) {
+			return uint64(offsets[i]), uint64(offsets[i]) + maxChunkSize
+		})
+
+		seq := seq
+		offsets := offsets
+
+		for _, p := range parts {
+			ctx, cancel := context.WithCancel(r.ctx)
+			s, e := uint32(p.start), uint32(p.end)
+			m, n := p.elemRng[0], p.elemRng[1]
+
+			g.Add(func() error {
+				return r.loadChunks(ctx, offsets[m:n], seq, s, e)
+			}, func(err error) {
+				if err != nil {
+					cancel()
+				}
+			})
+		}
+	}
+	return g.Run()
+}
+
+func (r *bucketChunkReader) loadChunks(ctx context.Context, offs []uint32, seq int, start, end uint32) error {
+	begin := time.Now()
+
+	b, err := r.block.readChunkRange(ctx, seq, int64(start), int64(end-start))
+	if err != nil {
+		return errors.Wrapf(err, "read range for %d", seq)
+	}
+
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+
+	r.chunkBytes = append(r.chunkBytes, b)
+	r.stats.chunksFetchCount++
+	r.stats.chunksFetched += len(offs)
+	r.stats.chunksFetchDurationSum += time.Since(begin)
+	r.stats.chunksFetchedSizeSum += int(end - start)
+
+	for _, o := range offs {
+		cb := (*b)[o-start:]
+
+		l, n := binary.Uvarint(cb)
+		if n < 1 {
+			return errors.Errorf("reading chunk length failed")
+		}
+		if len(cb) < n+int(l)+1 {
+			return errors.Errorf("preloaded chunk too small, expecting %d", n+int(l)+1)
+		}
+		cid := uint64(seq<<32) | uint64(o)
+		r.chunks[cid] = rawChunk(cb[n : n+int(l)+1])
+	}
+	return nil
+}
+
+func (r *bucketChunkReader) Chunk(id uint64) (chunkenc.Chunk, error) {
+	c, ok := r.chunks[id]
+	if !ok
{ + return nil, errors.Errorf("chunk with ID %d not found", id) + } + + r.stats.chunksTouched++ + r.stats.chunksTouchedSizeSum += len(c.Bytes()) + + return c, nil +} + +// rawChunk is a helper type that wraps a chunk's raw bytes and implements the chunkenc.Chunk +// interface over it. +// It is used to Store API responses which don't need to introspect and validate the chunk's contents. +type rawChunk []byte + +func (b rawChunk) Encoding() chunkenc.Encoding { + return chunkenc.Encoding(b[0]) +} + +func (b rawChunk) Bytes() []byte { + return b[1:] +} + +func (b rawChunk) Iterator(_ chunkenc.Iterator) chunkenc.Iterator { + panic("invalid call") +} + +func (b rawChunk) Appender() (chunkenc.Appender, error) { + panic("invalid call") +} + +func (b rawChunk) NumSamples() int { + panic("invalid call") +} + +func (r *bucketChunkReader) Close() error { + r.block.pendingReaders.Done() + + for _, b := range r.chunkBytes { + r.block.chunkPool.Put(b) + } + return nil +} + +type queryStats struct { + blocksQueried int + + postingsTouched int + postingsTouchedSizeSum int + postingsToFetch int + postingsFetched int + postingsFetchedSizeSum int + postingsFetchCount int + postingsFetchDurationSum time.Duration + + seriesTouched int + seriesTouchedSizeSum int + seriesFetched int + seriesFetchedSizeSum int + seriesFetchCount int + seriesFetchDurationSum time.Duration + + chunksTouched int + chunksTouchedSizeSum int + chunksFetched int + chunksFetchedSizeSum int + chunksFetchCount int + chunksFetchDurationSum time.Duration + + getAllDuration time.Duration + mergedSeriesCount int + mergedChunksCount int + mergeDuration time.Duration +} + +func (s queryStats) merge(o *queryStats) *queryStats { + s.blocksQueried += o.blocksQueried + + s.postingsTouched += o.postingsTouched + s.postingsTouchedSizeSum += o.postingsTouchedSizeSum + s.postingsFetched += o.postingsFetched + s.postingsFetchedSizeSum += o.postingsFetchedSizeSum + s.postingsFetchCount += o.postingsFetchCount + s.postingsFetchDurationSum += o.postingsFetchDurationSum + + s.seriesTouched += o.seriesTouched + s.seriesTouchedSizeSum += o.seriesTouchedSizeSum + s.seriesFetched += o.seriesFetched + s.seriesFetchedSizeSum += o.seriesFetchedSizeSum + s.seriesFetchCount += o.seriesFetchCount + s.seriesFetchDurationSum += o.seriesFetchDurationSum + + s.chunksTouched += o.chunksTouched + s.chunksTouchedSizeSum += o.chunksTouchedSizeSum + s.chunksFetched += o.chunksFetched + s.chunksFetchedSizeSum += o.chunksFetchedSizeSum + s.chunksFetchCount += o.chunksFetchCount + s.chunksFetchDurationSum += o.chunksFetchDurationSum + + s.getAllDuration += o.getAllDuration + s.mergedSeriesCount += o.mergedSeriesCount + s.mergedChunksCount += o.mergedChunksCount + s.mergeDuration += o.mergeDuration + + return &s +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go new file mode 100644 index 0000000000..e945a94458 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/cache.go @@ -0,0 +1,292 @@ +package storecache + +import ( + "math" + "sync" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + lru "github.com/hashicorp/golang-lru/simplelru" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/tsdb/labels" +) + +const ( + cacheTypePostings string = "Postings" + cacheTypeSeries string = "Series" + + sliceHeaderSize = 16 +) + +type cacheKey struct { + block ulid.ULID + key 
interface{} +} + +func (c cacheKey) keyType() string { + switch c.key.(type) { + case cacheKeyPostings: + return cacheTypePostings + case cacheKeySeries: + return cacheTypeSeries + } + return "" +} + +func (c cacheKey) size() uint64 { + switch k := c.key.(type) { + case cacheKeyPostings: + // ULID + 2 slice headers + number of chars in value and name. + return 16 + 2*sliceHeaderSize + uint64(len(k.Value)+len(k.Name)) + case cacheKeySeries: + return 16 + 8 // ULID + uint64 + } + return 0 +} + +type cacheKeyPostings labels.Label +type cacheKeySeries uint64 + +type IndexCache struct { + mtx sync.Mutex + + logger log.Logger + lru *lru.LRU + maxSizeBytes uint64 + maxItemSizeBytes uint64 + + curSize uint64 + + evicted *prometheus.CounterVec + requests *prometheus.CounterVec + hits *prometheus.CounterVec + added *prometheus.CounterVec + current *prometheus.GaugeVec + currentSize *prometheus.GaugeVec + totalCurrentSize *prometheus.GaugeVec + overflow *prometheus.CounterVec +} + +type Opts struct { + // MaxSizeBytes represents overall maximum number of bytes cache can contain. + MaxSizeBytes uint64 + // MaxItemSizeBytes represents maximum size of single item. + MaxItemSizeBytes uint64 +} + +// NewIndexCache creates a new thread-safe LRU cache for index entries and ensures the total cache +// size approximately does not exceed maxBytes. +func NewIndexCache(logger log.Logger, reg prometheus.Registerer, opts Opts) (*IndexCache, error) { + if opts.MaxItemSizeBytes > opts.MaxSizeBytes { + return nil, errors.Errorf("max item size (%v) cannot be bigger than overall cache size (%v)", opts.MaxItemSizeBytes, opts.MaxSizeBytes) + } + + c := &IndexCache{ + logger: logger, + maxSizeBytes: opts.MaxSizeBytes, + maxItemSizeBytes: opts.MaxItemSizeBytes, + } + + c.evicted = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_store_index_cache_items_evicted_total", + Help: "Total number of items that were evicted from the index cache.", + }, []string{"item_type"}) + c.evicted.WithLabelValues(cacheTypePostings) + c.evicted.WithLabelValues(cacheTypeSeries) + + c.added = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_store_index_cache_items_added_total", + Help: "Total number of items that were added to the index cache.", + }, []string{"item_type"}) + c.added.WithLabelValues(cacheTypePostings) + c.added.WithLabelValues(cacheTypeSeries) + + c.requests = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_store_index_cache_requests_total", + Help: "Total number of requests to the cache.", + }, []string{"item_type"}) + c.requests.WithLabelValues(cacheTypePostings) + c.requests.WithLabelValues(cacheTypeSeries) + + c.overflow = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_store_index_cache_items_overflowed_total", + Help: "Total number of items that could not be added to the cache due to being too big.", + }, []string{"item_type"}) + c.overflow.WithLabelValues(cacheTypePostings) + c.overflow.WithLabelValues(cacheTypeSeries) + + c.hits = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_store_index_cache_hits_total", + Help: "Total number of requests to the cache that were a hit.", + }, []string{"item_type"}) + c.hits.WithLabelValues(cacheTypePostings) + c.hits.WithLabelValues(cacheTypeSeries) + + c.current = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "thanos_store_index_cache_items", + Help: "Current number of items in the index cache.", + }, []string{"item_type"}) + c.current.WithLabelValues(cacheTypePostings) + 
c.current.WithLabelValues(cacheTypeSeries) + + c.currentSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "thanos_store_index_cache_items_size_bytes", + Help: "Current byte size of items in the index cache.", + }, []string{"item_type"}) + c.currentSize.WithLabelValues(cacheTypePostings) + c.currentSize.WithLabelValues(cacheTypeSeries) + + c.totalCurrentSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "thanos_store_index_cache_total_size_bytes", + Help: "Current byte size of items (both value and key) in the index cache.", + }, []string{"item_type"}) + c.totalCurrentSize.WithLabelValues(cacheTypePostings) + c.totalCurrentSize.WithLabelValues(cacheTypeSeries) + + if reg != nil { + reg.MustRegister(prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "thanos_store_index_cache_max_size_bytes", + Help: "Maximum number of bytes to be held in the index cache.", + }, func() float64 { + return float64(c.maxSizeBytes) + })) + reg.MustRegister(prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "thanos_store_index_cache_max_item_size_bytes", + Help: "Maximum number of bytes for single entry to be held in the index cache.", + }, func() float64 { + return float64(c.maxItemSizeBytes) + })) + reg.MustRegister(c.requests, c.hits, c.added, c.evicted, c.current, c.currentSize, c.totalCurrentSize, c.overflow) + } + + // Initialize LRU cache with a high size limit since we will manage evictions ourselves + // based on stored size using `RemoveOldest` method. + l, err := lru.NewLRU(math.MaxInt64, c.onEvict) + if err != nil { + return nil, err + } + c.lru = l + + level.Info(logger).Log( + "msg", "created index cache", + "maxItemSizeBytes", c.maxItemSizeBytes, + "maxSizeBytes", c.maxSizeBytes, + "maxItems", "math.MaxInt64", + ) + return c, nil +} + +func (c *IndexCache) onEvict(key, val interface{}) { + k := key.(cacheKey).keyType() + entrySize := sliceHeaderSize + uint64(len(val.([]byte))) + + c.evicted.WithLabelValues(string(k)).Inc() + c.current.WithLabelValues(string(k)).Dec() + c.currentSize.WithLabelValues(string(k)).Sub(float64(entrySize)) + c.totalCurrentSize.WithLabelValues(string(k)).Sub(float64(entrySize + key.(cacheKey).size())) + + c.curSize -= entrySize +} + +func (c *IndexCache) get(typ string, key cacheKey) ([]byte, bool) { + c.requests.WithLabelValues(typ).Inc() + + c.mtx.Lock() + defer c.mtx.Unlock() + + v, ok := c.lru.Get(key) + if !ok { + return nil, false + } + c.hits.WithLabelValues(typ).Inc() + return v.([]byte), true +} + +func (c *IndexCache) set(typ string, key cacheKey, val []byte) { + var size = sliceHeaderSize + uint64(len(val)) + + c.mtx.Lock() + defer c.mtx.Unlock() + + if _, ok := c.lru.Get(key); ok { + return + } + + if !c.ensureFits(size, typ) { + c.overflow.WithLabelValues(typ).Inc() + return + } + + // The caller may be passing in a sub-slice of a huge array. Copy the data + // to ensure we don't waste huge amounts of space for something small. + v := make([]byte, len(val)) + copy(v, val) + c.lru.Add(key, v) + + c.added.WithLabelValues(typ).Inc() + c.currentSize.WithLabelValues(typ).Add(float64(size)) + c.totalCurrentSize.WithLabelValues(typ).Add(float64(size + key.size())) + c.current.WithLabelValues(typ).Inc() + c.curSize += size +} + +// ensureFits tries to make sure that the passed slice will fit into the LRU cache. +// Returns true if it will fit. +func (c *IndexCache) ensureFits(size uint64, typ string) bool { + if size > c.maxItemSizeBytes { + level.Debug(c.logger).Log( + "msg", "item bigger than maxItemSizeBytes. 
Ignoring..",
+			"maxItemSizeBytes", c.maxItemSizeBytes,
+			"maxSizeBytes", c.maxSizeBytes,
+			"curSize", c.curSize,
+			"itemSize", size,
+			"cacheType", typ,
+		)
+		return false
+	}
+
+	for c.curSize+size > c.maxSizeBytes {
+		if _, _, ok := c.lru.RemoveOldest(); !ok {
+			level.Error(c.logger).Log(
+				"msg", "LRU has nothing more to evict, but we still cannot allocate the item. Resetting cache.",
+				"maxItemSizeBytes", c.maxItemSizeBytes,
+				"maxSizeBytes", c.maxSizeBytes,
+				"curSize", c.curSize,
+				"itemSize", size,
+				"cacheType", typ,
+			)
+			c.reset()
+		}
+	}
+	return true
+}
+
+func (c *IndexCache) reset() {
+	c.lru.Purge()
+	c.current.Reset()
+	c.currentSize.Reset()
+	c.totalCurrentSize.Reset()
+	c.curSize = 0
+}
+
+// SetPostings sets the postings identified by the ulid and label to the value v;
+// if the postings already exist in the cache they are not mutated.
+func (c *IndexCache) SetPostings(b ulid.ULID, l labels.Label, v []byte) {
+	c.set(cacheTypePostings, cacheKey{b, cacheKeyPostings(l)}, v)
+}
+
+func (c *IndexCache) Postings(b ulid.ULID, l labels.Label) ([]byte, bool) {
+	return c.get(cacheTypePostings, cacheKey{b, cacheKeyPostings(l)})
+}
+
+// SetSeries sets the series identified by the ulid and id to the value v;
+// if the series already exists in the cache it is not mutated.
+func (c *IndexCache) SetSeries(b ulid.ULID, id uint64, v []byte) {
+	c.set(cacheTypeSeries, cacheKey{b, cacheKeySeries(id)}, v)
+}
+
+func (c *IndexCache) Series(b ulid.ULID, id uint64) ([]byte, bool) {
+	return c.get(cacheTypeSeries, cacheKey{b, cacheKeySeries(id)})
+}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/gate.go b/vendor/github.com/thanos-io/thanos/pkg/store/gate.go
new file mode 100644
index 0000000000..cdb9ea3712
--- /dev/null
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/gate.go
@@ -0,0 +1,61 @@
+package store
+
+import (
+	"context"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/pkg/gate"
+)
+
+// Gate wraps the Prometheus gate with extra metrics.
+type Gate struct {
+	g               *gate.Gate
+	inflightQueries prometheus.Gauge
+	gateTiming      prometheus.Histogram
+}
+
+// NewGate returns a new query gate.
+func NewGate(maxConcurrent int, reg prometheus.Registerer) *Gate {
+	g := &Gate{
+		g: gate.New(maxConcurrent),
+		inflightQueries: prometheus.NewGauge(prometheus.GaugeOpts{
+			Name: "gate_queries_in_flight",
+			Help: "Number of queries that are currently in flight.",
+		}),
+		gateTiming: prometheus.NewHistogram(prometheus.HistogramOpts{
+			Name: "gate_duration_seconds",
+			Help: "How many seconds it took for queries to wait at the gate.",
+			Buckets: []float64{
+				0.01, 0.05, 0.1, 0.25, 0.6, 1, 2, 3.5, 5, 10,
+			},
+		}),
+	}
+
+	if reg != nil {
+		reg.MustRegister(g.inflightQueries, g.gateTiming)
+	}
+
+	return g
+}
+
+// IsMyTurn initiates a new query and waits until it's our turn to fulfill a query request.
+func (g *Gate) IsMyTurn(ctx context.Context) error {
+	start := time.Now()
+	defer func() {
+		// The histogram is in seconds, so record the elapsed time as seconds.
+		g.gateTiming.Observe(time.Since(start).Seconds())
+	}()
+
+	if err := g.g.Start(ctx); err != nil {
+		return err
+	}
+
+	g.inflightQueries.Inc()
+	return nil
+}
+
+// Done finishes a query.
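+// It must be called exactly once after a successful IsMyTurn.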
+func (g *Gate) Done() { + g.inflightQueries.Dec() + g.g.Done() +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go b/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go new file mode 100644 index 0000000000..2c332a2c6b --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go @@ -0,0 +1,31 @@ +package store + +import ( + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" +) + +// Limiter is a simple mechanism for checking if something has passed a certain threshold. +type Limiter struct { + limit uint64 + + // Counter metric which we will increase if Check() fails. + failedCounter prometheus.Counter +} + +// NewLimiter returns a new limiter with a specified limit. 0 disables the limit. +func NewLimiter(limit uint64, ctr prometheus.Counter) *Limiter { + return &Limiter{limit: limit, failedCounter: ctr} +} + +// Check checks if the passed number exceeds the limits or not. +func (l *Limiter) Check(num uint64) error { + if l.limit == 0 { + return nil + } + if num > l.limit { + l.failedCounter.Inc() + return errors.Errorf("limit %v violated (got %v)", l.limit, num) + } + return nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/matchers.go b/vendor/github.com/thanos-io/thanos/pkg/store/matchers.go new file mode 100644 index 0000000000..437576d603 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/store/matchers.go @@ -0,0 +1,39 @@ +package store + +import ( + "github.com/pkg/errors" + "github.com/prometheus/prometheus/tsdb/labels" + "github.com/thanos-io/thanos/pkg/store/storepb" +) + +func translateMatcher(m storepb.LabelMatcher) (labels.Matcher, error) { + switch m.Type { + case storepb.LabelMatcher_EQ: + return labels.NewEqualMatcher(m.Name, m.Value), nil + + case storepb.LabelMatcher_NEQ: + return labels.Not(labels.NewEqualMatcher(m.Name, m.Value)), nil + + case storepb.LabelMatcher_RE: + return labels.NewRegexpMatcher(m.Name, "^(?:"+m.Value+")$") + + case storepb.LabelMatcher_NRE: + m, err := labels.NewRegexpMatcher(m.Name, "^(?:"+m.Value+")$") + if err != nil { + return nil, err + } + return labels.Not(m), nil + } + return nil, errors.Errorf("unknown label matcher type %d", m.Type) +} + +func translateMatchers(ms []storepb.LabelMatcher) (res []labels.Matcher, err error) { + for _, m := range ms { + r, err := translateMatcher(m) + if err != nil { + return nil, err + } + res = append(res, r) + } + return res, nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go new file mode 100644 index 0000000000..66460d1b49 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go @@ -0,0 +1,604 @@ +package store + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "math" + "net/http" + "net/url" + "path" + "sort" + "strings" + "sync" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage/remote" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/labels" + "github.com/thanos-io/thanos/pkg/component" + "github.com/thanos-io/thanos/pkg/runutil" + "github.com/thanos-io/thanos/pkg/store/storepb" + "github.com/thanos-io/thanos/pkg/tracing" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" 
+) + +var statusToCode = map[int]codes.Code{ + http.StatusBadRequest: codes.InvalidArgument, + http.StatusNotFound: codes.NotFound, + http.StatusUnprocessableEntity: codes.Internal, + http.StatusServiceUnavailable: codes.Unavailable, + http.StatusInternalServerError: codes.Internal, +} + +// PrometheusStore implements the store node API on top of the Prometheus remote read API. +type PrometheusStore struct { + logger log.Logger + base *url.URL + client *http.Client + buffers sync.Pool + component component.StoreAPI + externalLabels func() labels.Labels + timestamps func() (mint int64, maxt int64) +} + +// NewPrometheusStore returns a new PrometheusStore that uses the given HTTP client +// to talk to Prometheus. +// It attaches the provided external labels to all results. +func NewPrometheusStore( + logger log.Logger, + client *http.Client, + baseURL *url.URL, + component component.StoreAPI, + externalLabels func() labels.Labels, + timestamps func() (mint int64, maxt int64), +) (*PrometheusStore, error) { + if logger == nil { + logger = log.NewNopLogger() + } + if client == nil { + client = &http.Client{ + Transport: tracing.HTTPTripperware(logger, http.DefaultTransport), + } + } + p := &PrometheusStore{ + logger: logger, + base: baseURL, + client: client, + component: component, + externalLabels: externalLabels, + timestamps: timestamps, + } + return p, nil +} + +// Info returns store information about the Prometheus instance. +// NOTE(bwplotka): MaxTime & MinTime are not accurate nor adjusted dynamically. +// This is fine for now, but might be needed in future. +func (p *PrometheusStore) Info(ctx context.Context, r *storepb.InfoRequest) (*storepb.InfoResponse, error) { + lset := p.externalLabels() + mint, maxt := p.timestamps() + + res := &storepb.InfoResponse{ + Labels: make([]storepb.Label, 0, len(lset)), + StoreType: p.component.ToProto(), + MinTime: mint, + MaxTime: maxt, + } + for _, l := range lset { + res.Labels = append(res.Labels, storepb.Label{ + Name: l.Name, + Value: l.Value, + }) + } + + // Until we deprecate the single labels in the reply, we just duplicate + // them here for migration/compatibility purposes. + res.LabelSets = []storepb.LabelSet{} + if len(res.Labels) > 0 { + res.LabelSets = append(res.LabelSets, storepb.LabelSet{ + Labels: res.Labels, + }) + } + return res, nil +} + +func (p *PrometheusStore) getBuffer() *[]byte { + b := p.buffers.Get() + if b == nil { + buf := make([]byte, 0, 32*1024) // 32KB seems like a good minimum starting size. + return &buf + } + return b.(*[]byte) +} + +func (p *PrometheusStore) putBuffer(b *[]byte) { + p.buffers.Put(b) +} + +// Series returns all series for a requested time range and label matcher. 
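+// The matchers are translated into a remote read request. Depending on the Content-Type
+// Prometheus returns, the response is handled either as a sampled (protobuf) response or
+// as a streamed chunked read, and external labels are attached to every returned series.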
+func (p *PrometheusStore) Series(r *storepb.SeriesRequest, s storepb.Store_SeriesServer) error { + externalLabels := p.externalLabels() + + match, newMatchers, err := matchesExternalLabels(r.Matchers, externalLabels) + if err != nil { + return status.Error(codes.InvalidArgument, err.Error()) + } + + if !match { + return nil + } + + if len(newMatchers) == 0 { + return status.Error(codes.InvalidArgument, errors.New("no matchers specified (excluding external labels)").Error()) + } + + q := &prompb.Query{StartTimestampMs: r.MinTime, EndTimestampMs: r.MaxTime} + + for _, m := range newMatchers { + pm := &prompb.LabelMatcher{Name: m.Name, Value: m.Value} + + switch m.Type { + case storepb.LabelMatcher_EQ: + pm.Type = prompb.LabelMatcher_EQ + case storepb.LabelMatcher_NEQ: + pm.Type = prompb.LabelMatcher_NEQ + case storepb.LabelMatcher_RE: + pm.Type = prompb.LabelMatcher_RE + case storepb.LabelMatcher_NRE: + pm.Type = prompb.LabelMatcher_NRE + default: + return errors.New("unrecognized matcher type") + } + q.Matchers = append(q.Matchers, pm) + } + + queryPrometheusSpan, ctx := tracing.StartSpan(s.Context(), "query_prometheus") + + httpResp, err := p.startPromSeries(ctx, q) + if err != nil { + queryPrometheusSpan.Finish() + return errors.Wrap(err, "query Prometheus") + } + + // Negotiate content. We requested streamed chunked response type, but still we need to support old versions of + // remote read. + contentType := httpResp.Header.Get("Content-Type") + if strings.HasPrefix(contentType, "application/x-protobuf") { + return p.handleSampledPrometheusResponse(s, httpResp, queryPrometheusSpan, externalLabels) + } + + if !strings.HasPrefix(contentType, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse") { + return errors.Errorf("not supported remote read content type: %s", contentType) + } + return p.handleStreamedPrometheusResponse(s, httpResp, queryPrometheusSpan, externalLabels) +} + +func (p *PrometheusStore) handleSampledPrometheusResponse(s storepb.Store_SeriesServer, httpResp *http.Response, querySpan opentracing.Span, externalLabels labels.Labels) error { + ctx := s.Context() + + level.Debug(p.logger).Log("msg", "started handling ReadRequest_SAMPLED response type.") + + resp, err := p.fetchSampledResponse(ctx, httpResp) + querySpan.Finish() + if err != nil { + return err + } + + span, _ := tracing.StartSpan(ctx, "transform_and_respond") + defer span.Finish() + span.SetTag("series_count", len(resp.Results[0].Timeseries)) + + for _, e := range resp.Results[0].Timeseries { + lset := p.translateAndExtendLabels(e.Labels, externalLabels) + + if len(e.Samples) == 0 { + // As found in https://github.com/thanos-io/thanos/issues/381 + // Prometheus can give us completely empty time series. Ignore these with log until we figure out that + // this is expected from Prometheus perspective. + level.Warn(p.logger).Log( + "msg", + "found timeseries without any chunk. 
See https://github.com/thanos-io/thanos/issues/381 for details", + "lset", + fmt.Sprintf("%v", lset), + ) + continue + } + + // XOR encoding supports a max size of 2^16 - 1 samples, so we need + // to chunk all samples into groups of no more than 2^16 - 1 + // See: https://github.com/thanos-io/thanos/pull/718 + aggregatedChunks, err := p.chunkSamples(e, math.MaxUint16) + if err != nil { + return err + } + + if err := s.Send(storepb.NewSeriesResponse(&storepb.Series{ + Labels: lset, + Chunks: aggregatedChunks, + })); err != nil { + return err + } + } + level.Debug(p.logger).Log("msg", "handled ReadRequest_SAMPLED request.", "series", len(resp.Results[0].Timeseries)) + return nil +} + +func (p *PrometheusStore) handleStreamedPrometheusResponse(s storepb.Store_SeriesServer, httpResp *http.Response, querySpan opentracing.Span, externalLabels labels.Labels) error { + level.Debug(p.logger).Log("msg", "started handling ReadRequest_STREAMED_XOR_CHUNKS streamed read response.") + + framesNum := 0 + seriesNum := 0 + + defer func() { + querySpan.SetTag("frames", framesNum) + querySpan.SetTag("series", seriesNum) + querySpan.Finish() + }() + defer runutil.CloseWithLogOnErr(p.logger, httpResp.Body, "prom series request body") + + var ( + lastSeries string + currSeries string + tmp []string + data = p.getBuffer() + ) + defer p.putBuffer(data) + + // TODO(bwplotka): Put read limit as a flag. + stream := remote.NewChunkedReader(httpResp.Body, remote.DefaultChunkedReadLimit, *data) + for { + res := &prompb.ChunkedReadResponse{} + err := stream.NextProto(res) + if err == io.EOF { + break + } + if err != nil { + return errors.Wrap(err, "next proto") + } + + if len(res.ChunkedSeries) != 1 { + level.Warn(p.logger).Log("msg", "Prometheus ReadRequest_STREAMED_XOR_CHUNKS returned non 1 series in frame", "series", len(res.ChunkedSeries)) + } + + framesNum++ + for _, series := range res.ChunkedSeries { + { + // Calculate hash of series for counting. + tmp = tmp[:0] + for _, l := range series.Labels { + tmp = append(tmp, l.String()) + } + currSeries = strings.Join(tmp, ";") + if currSeries != lastSeries { + seriesNum++ + lastSeries = currSeries + } + } + + thanosChks := make([]storepb.AggrChunk, len(series.Chunks)) + for i, chk := range series.Chunks { + thanosChks[i] = storepb.AggrChunk{ + MaxTime: chk.MaxTimeMs, + MinTime: chk.MinTimeMs, + Raw: &storepb.Chunk{ + Data: chk.Data, + // Prometheus ChunkEncoding vs ours https://github.com/thanos-io/thanos/blob/master/pkg/store/storepb/types.proto#L19 + // has one difference. Prometheus has Chunk_UNKNOWN Chunk_Encoding = 0 vs we start from + // XOR as 0. Compensate for that here: + Type: storepb.Chunk_Encoding(chk.Type - 1), + }, + } + // Drop the reference to data from non protobuf for GC. 
+				series.Chunks[i].Data = nil
+			}
+
+			if err := s.Send(storepb.NewSeriesResponse(&storepb.Series{
+				Labels: p.translateAndExtendLabels(series.Labels, externalLabels),
+				Chunks: thanosChks,
+			})); err != nil {
+				return err
+			}
+		}
+	}
+	level.Debug(p.logger).Log("msg", "handled ReadRequest_STREAMED_XOR_CHUNKS request.", "frames", framesNum, "series", seriesNum)
+	return nil
+}
+
+func (p *PrometheusStore) fetchSampledResponse(ctx context.Context, resp *http.Response) (*prompb.ReadResponse, error) {
+	defer runutil.ExhaustCloseWithLogOnErr(p.logger, resp.Body, "prom series request body")
+
+	b := p.getBuffer()
+	buf := bytes.NewBuffer(*b)
+	defer p.putBuffer(b)
+	if _, err := io.Copy(buf, resp.Body); err != nil {
+		return nil, errors.Wrap(err, "copy response")
+	}
+	spanSnappyDecode, ctx := tracing.StartSpan(ctx, "decompress_response")
+	sb := p.getBuffer()
+	decomp, err := snappy.Decode(*sb, buf.Bytes())
+	spanSnappyDecode.Finish()
+	defer p.putBuffer(sb)
+	if err != nil {
+		return nil, errors.Wrap(err, "decompress response")
+	}
+
+	var data prompb.ReadResponse
+	spanUnmarshal, _ := tracing.StartSpan(ctx, "unmarshal_response")
+	if err := proto.Unmarshal(decomp, &data); err != nil {
+		return nil, errors.Wrap(err, "unmarshal response")
+	}
+	spanUnmarshal.Finish()
+	if len(data.Results) != 1 {
+		return nil, errors.Errorf("unexpected result size %d", len(data.Results))
+	}
+
+	return &data, nil
+}
+
+func (p *PrometheusStore) chunkSamples(series *prompb.TimeSeries, maxSamplesPerChunk int) (chks []storepb.AggrChunk, err error) {
+	samples := series.Samples
+
+	for len(samples) > 0 {
+		chunkSize := len(samples)
+		if chunkSize > maxSamplesPerChunk {
+			chunkSize = maxSamplesPerChunk
+		}
+
+		enc, cb, err := p.encodeChunk(samples[:chunkSize])
+		if err != nil {
+			return nil, status.Error(codes.Unknown, err.Error())
+		}
+
+		chks = append(chks, storepb.AggrChunk{
+			MinTime: int64(samples[0].Timestamp),
+			MaxTime: int64(samples[chunkSize-1].Timestamp),
+			Raw:     &storepb.Chunk{Type: enc, Data: cb},
+		})
+
+		samples = samples[chunkSize:]
+	}
+
+	return chks, nil
+}
+
+func (p *PrometheusStore) startPromSeries(ctx context.Context, q *prompb.Query) (*http.Response, error) {
+	reqb, err := proto.Marshal(&prompb.ReadRequest{
+		Queries:               []*prompb.Query{q},
+		AcceptedResponseTypes: []prompb.ReadRequest_ResponseType{prompb.ReadRequest_STREAMED_XOR_CHUNKS},
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "marshal read request")
+	}
+
+	u := *p.base
+	u.Path = path.Join(u.Path, "api/v1/read")
+
+	preq, err := http.NewRequest("POST", u.String(), bytes.NewReader(snappy.Encode(nil, reqb)))
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to create request")
+	}
+	preq.Header.Add("Content-Encoding", "snappy")
+	preq.Header.Set("Content-Type", "application/x-stream-protobuf")
+	spanReqDo, ctx := tracing.StartSpan(ctx, "query_prometheus_request")
+	preq = preq.WithContext(ctx)
+	presp, err := p.client.Do(preq)
+	if err != nil {
+		return nil, errors.Wrap(err, "send request")
+	}
+	spanReqDo.Finish()
+	if presp.StatusCode/100 != 2 {
+		// Best effort read.
+		b, err := ioutil.ReadAll(presp.Body)
+		if err != nil {
+			level.Error(p.logger).Log("msg", "failed to read response from non 2XX remote read request", "err", err)
+		}
+		_ = presp.Body.Close()
+		return nil, errors.Errorf("request failed with code %s; msg %s", presp.Status, string(b))
+	}
+
+	return presp, nil
+}
+
+// matchesExternalLabels filters out matchers that match external labels, since the local storage does not have them.
+// It also returns false if the given matchers do not match the external labels.
+func matchesExternalLabels(ms []storepb.LabelMatcher, externalLabels labels.Labels) (bool, []storepb.LabelMatcher, error) {
+	if len(externalLabels) == 0 {
+		return true, ms, nil
+	}
+
+	var newMatcher []storepb.LabelMatcher
+	for _, m := range ms {
+		// Validate all matchers.
+		tm, err := translateMatcher(m)
+		if err != nil {
+			return false, nil, err
+		}
+
+		extValue := externalLabels.Get(m.Name)
+		if extValue == "" {
+			// Agnostic to external labels.
+			newMatcher = append(newMatcher, m)
+			continue
+		}
+
+		if !tm.Matches(extValue) {
+			// External label does not match. This should not happen - it should be filtered out on the query node,
+			// but let's do that anyway here.
+			return false, nil, nil
+		}
+	}
+
+	return true, newMatcher, nil
+}
+
+// encodeChunk translates the sample pairs into a chunk.
+func (p *PrometheusStore) encodeChunk(ss []prompb.Sample) (storepb.Chunk_Encoding, []byte, error) {
+	c := chunkenc.NewXORChunk()
+
+	a, err := c.Appender()
+	if err != nil {
+		return 0, nil, err
+	}
+	for _, s := range ss {
+		a.Append(int64(s.Timestamp), float64(s.Value))
+	}
+	return storepb.Chunk_XOR, c.Bytes(), nil
+}
+
+// translateAndExtendLabels transforms a metric's labels into a protobuf label set. It additionally
+// attaches the given labels to it, overwriting existing ones on collision.
+func (p *PrometheusStore) translateAndExtendLabels(m []prompb.Label, extend labels.Labels) []storepb.Label {
+	lset := make([]storepb.Label, 0, len(m)+len(extend))
+
+	for _, l := range m {
+		if extend.Get(l.Name) != "" {
+			continue
+		}
+		lset = append(lset, storepb.Label{
+			Name:  l.Name,
+			Value: l.Value,
+		})
+	}
+
+	return extendLset(lset, extend)
+}
+
+func extendLset(lset []storepb.Label, extend labels.Labels) []storepb.Label {
+	for _, l := range extend {
+		lset = append(lset, storepb.Label{
+			Name:  l.Name,
+			Value: l.Value,
+		})
+	}
+	sort.Slice(lset, func(i, j int) bool {
+		return lset[i].Name < lset[j].Name
+	})
+	return lset
+}
+
+// LabelNames returns all known label names.
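+// They are fetched from the Prometheus /api/v1/labels endpoint; a non-"success" API
+// status is mapped to a gRPC code via statusToCode where possible.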
+func (p *PrometheusStore) LabelNames(ctx context.Context, _ *storepb.LabelNamesRequest) (
+	*storepb.LabelNamesResponse, error,
+) {
+	u := *p.base
+	u.Path = path.Join(u.Path, "/api/v1/labels")
+
+	req, err := http.NewRequest("GET", u.String(), nil)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
+	span, ctx := tracing.StartSpan(ctx, "/prom_label_names HTTP[client]")
+	defer span.Finish()
+
+	resp, err := p.client.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+	defer runutil.ExhaustCloseWithLogOnErr(p.logger, resp.Body, "label names request body")
+
+	if resp.StatusCode/100 != 2 {
+		return nil, status.Error(codes.Internal, fmt.Sprintf("request Prometheus server failed, code %s", resp.Status))
+	}
+
+	if resp.StatusCode == http.StatusNoContent {
+		return &storepb.LabelNamesResponse{Names: []string{}}, nil
+	}
+
+	var m struct {
+		Data   []string `json:"data"`
+		Status string   `json:"status"`
+		Error  string   `json:"error"`
+	}
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
+	if err = json.Unmarshal(body, &m); err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
+	if m.Status != "success" {
+		code, exists := statusToCode[resp.StatusCode]
+		if !exists {
+			return nil, status.Error(codes.Internal, m.Error)
+		}
+		return nil, status.Error(code, m.Error)
+	}
+
+	return &storepb.LabelNamesResponse{Names: m.Data}, nil
+}
+
+// LabelValues returns all known label values for a given label name.
+func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) {
+	externalLset := p.externalLabels()
+
+	// First check for a matching external label, which has priority.
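+	// For illustration (editor's note): with external labels {region="eu-1"},
+	// a LabelValues request for "region" is answered as ["eu-1"] directly,
+	// without querying Prometheus, since the sidecar attaches that label to
+	// every series it serves.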
+	if l := externalLset.Get(r.Label); l != "" {
+		return &storepb.LabelValuesResponse{Values: []string{l}}, nil
+	}
+
+	u := *p.base
+	u.Path = path.Join(u.Path, "/api/v1/label/", r.Label, "/values")
+
+	req, err := http.NewRequest("GET", u.String(), nil)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
+	span, ctx := tracing.StartSpan(ctx, "/prom_label_values HTTP[client]")
+	defer span.Finish()
+
+	resp, err := p.client.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+	defer runutil.ExhaustCloseWithLogOnErr(p.logger, resp.Body, "label values request body")
+
+	if resp.StatusCode/100 != 2 {
+		return nil, status.Error(codes.Internal, fmt.Sprintf("request Prometheus server failed, code %s", resp.Status))
+	}
+
+	if resp.StatusCode == http.StatusNoContent {
+		return &storepb.LabelValuesResponse{Values: []string{}}, nil
+	}
+
+	var m struct {
+		Data   []string `json:"data"`
+		Status string   `json:"status"`
+		Error  string   `json:"error"`
+	}
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
+	if err = json.Unmarshal(body, &m); err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
+	sort.Strings(m.Data)
+
+	if m.Status != "success" {
+		code, exists := statusToCode[resp.StatusCode]
+		if !exists {
+			return nil, status.Error(codes.Internal, m.Error)
+		}
+		return nil, status.Error(code, m.Error)
+	}
+
+	return &storepb.LabelValuesResponse{Values: m.Data}, nil
+}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go
new file mode 100644
index 0000000000..907d25fd8a
--- /dev/null
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go
@@ -0,0 +1,578 @@
+package store
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"math"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
+	"github.com/opentracing/opentracing-go"
+	"github.com/pkg/errors"
+	"github.com/prometheus/prometheus/tsdb/labels"
+	"github.com/thanos-io/thanos/pkg/component"
+	"github.com/thanos-io/thanos/pkg/store/storepb"
+	"github.com/thanos-io/thanos/pkg/strutil"
+	"github.com/thanos-io/thanos/pkg/tracing"
+	"golang.org/x/sync/errgroup"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// Client holds meta information about a store.
+type Client interface {
+	// Client to access the store.
+	storepb.StoreClient
+
+	// LabelSets that each apply to some data exposed by the backing store.
+	LabelSets() []storepb.LabelSet
+
+	// Minimum and maximum time range of data in the store.
+	TimeRange() (mint int64, maxt int64)
+
+	String() string
+	// Addr returns the address of a Client.
+	Addr() string
+}
+
+// ProxyStore implements the store API that proxies requests to all given underlying stores.
+type ProxyStore struct {
+	logger         log.Logger
+	stores         func() []Client
+	component      component.StoreAPI
+	selectorLabels labels.Labels
+
+	responseTimeout time.Duration
+}
+
+// NewProxyStore returns a new ProxyStore that uses the given clients (which implement the store API) to fan-in all series to the caller.
+// Note that there is no deduplication support.
+// Deduplication should be done at the highest level (just before PromQL evaluation).
+func NewProxyStore(
+	logger log.Logger,
+	stores func() []Client,
+	component component.StoreAPI,
+	selectorLabels labels.Labels,
+	responseTimeout time.Duration,
+) *ProxyStore {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+
+	s := &ProxyStore{
+		logger:          logger,
+		stores:          stores,
+		component:       component,
+		selectorLabels:  selectorLabels,
+		responseTimeout: responseTimeout,
+	}
+	return s
+}
+
+// Info returns store information about the external labels this store has.
+func (s *ProxyStore) Info(ctx context.Context, r *storepb.InfoRequest) (*storepb.InfoResponse, error) {
+	res := &storepb.InfoResponse{
+		Labels:    make([]storepb.Label, 0, len(s.selectorLabels)),
+		StoreType: s.component.ToProto(),
+	}
+
+	minTime := int64(math.MaxInt64)
+	maxTime := int64(0)
+	stores := s.stores()
+
+	// Edge case: we have all of the data if there are no stores.
+	if len(stores) == 0 {
+		res.MaxTime = math.MaxInt64
+		res.MinTime = 0
+
+		return res, nil
+	}
+
+	for _, s := range stores {
+		mint, maxt := s.TimeRange()
+		if mint < minTime {
+			minTime = mint
+		}
+		if maxt > maxTime {
+			maxTime = maxt
+		}
+	}
+
+	res.MaxTime = maxTime
+	res.MinTime = minTime
+
+	for _, l := range s.selectorLabels {
+		res.Labels = append(res.Labels, storepb.Label{
+			Name:  l.Name,
+			Value: l.Value,
+		})
+	}
+
+	labelSets := make(map[uint64][]storepb.Label, len(stores))
+	for _, st := range stores {
+		for _, labelSet := range st.LabelSets() {
+			mergedLabelSet := mergeLabels(labelSet.Labels, s.selectorLabels)
+			ls := storepb.LabelsToPromLabels(mergedLabelSet)
+			sort.Sort(ls)
+			labelSets[ls.Hash()] = mergedLabelSet
+		}
+	}
+
+	res.LabelSets = make([]storepb.LabelSet, 0, len(labelSets))
+	for _, v := range labelSets {
+		res.LabelSets = append(res.LabelSets, storepb.LabelSet{Labels: v})
+	}
+
+	// We always want to enforce announcing the subset of data that
+	// selector-labels represents. If no label-sets are announced by the
+	// store-proxy's discovered stores, then we still want to enforce
+	// announcing this subset by announcing the selector as the label-set.
+	if len(res.LabelSets) == 0 && len(res.Labels) > 0 {
+		res.LabelSets = append(res.LabelSets, storepb.LabelSet{Labels: res.Labels})
+	}
+
+	return res, nil
+}
+
+// mergeLabels merges label-set a and label-selector b, with the selector's
+// labels taking precedence. The types are distinct because of the inputs at
+// hand where this function is used.
+func mergeLabels(a []storepb.Label, b labels.Labels) []storepb.Label {
+	ls := map[string]string{}
+	for _, l := range a {
+		ls[l.Name] = l.Value
+	}
+	for _, l := range b {
+		ls[l.Name] = l.Value
+	}
+
+	res := []storepb.Label{}
+	for k, v := range ls {
+		res = append(res, storepb.Label{Name: k, Value: v})
+	}
+
+	return res
+}
+
+type ctxRespSender struct {
+	ctx context.Context
+	ch  chan<- *storepb.SeriesResponse
+}
+
+func newRespCh(ctx context.Context, buffer int) (*ctxRespSender, <-chan *storepb.SeriesResponse, func()) {
+	respCh := make(chan *storepb.SeriesResponse, buffer)
+	return &ctxRespSender{ctx: ctx, ch: respCh}, respCh, func() { close(respCh) }
+}
+
+func (s ctxRespSender) send(r *storepb.SeriesResponse) {
+	select {
+	case <-s.ctx.Done():
+		return
+	case s.ch <- r:
+		return
+	}
+}
+
+// Series returns all series for a requested time range and label matchers. Requested series are taken from other
+// stores and proxied to the RPC client. NOTE: The resulting data is not trimmed exactly to the min and max time range.
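+//
+// Illustrative client-side sketch (editor's addition; conn is an assumed
+// *grpc.ClientConn pointing at this store):
+//
+//	sc, _ := storepb.NewStoreClient(conn).Series(ctx, &storepb.SeriesRequest{
+//		MinTime: mint, MaxTime: maxt, Matchers: ms,
+//	})
+//	for {
+//		r, err := sc.Recv()
+//		if err == io.EOF {
+//			break
+//		}
+//		if w := r.GetWarning(); w != "" {
+//			continue // partial-response warning, delivered out of band
+//		}
+//		handle(r.GetSeries()) // handle is a placeholder
+//	}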
+func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error {
+	match, newMatchers, err := matchesExternalLabels(r.Matchers, s.selectorLabels)
+	if err != nil {
+		return status.Error(codes.InvalidArgument, err.Error())
+	}
+	if !match {
+		return nil
+	}
+
+	if len(newMatchers) == 0 {
+		return status.Error(codes.InvalidArgument, errors.New("no matchers specified (excluding external labels)").Error())
+	}
+
+	var (
+		g, gctx = errgroup.WithContext(srv.Context())
+
+		// Allow buffering of up to 10 series responses.
+		// Each might be quite large (multi-chunk long series given by sidecar).
+		respSender, respRecv, closeFn = newRespCh(gctx, 10)
+	)
+
+	g.Go(func() error {
+		var (
+			seriesSet      []storepb.SeriesSet
+			storeDebugMsgs []string
+			r              = &storepb.SeriesRequest{
+				MinTime:                 r.MinTime,
+				MaxTime:                 r.MaxTime,
+				Matchers:                newMatchers,
+				Aggregates:              r.Aggregates,
+				MaxResolutionWindow:     r.MaxResolutionWindow,
+				PartialResponseDisabled: r.PartialResponseDisabled,
+			}
+			wg = &sync.WaitGroup{}
+		)
+
+		defer func() {
+			wg.Wait()
+			closeFn()
+		}()
+
+		for _, st := range s.stores() {
+			// We might be able to skip the store if its meta information indicates
+			// it cannot have series matching our query.
+			// NOTE: all matchers are validated in the matchesExternalLabels method, so we explicitly ignore the error.
+			spanStoreMatches, gctx := tracing.StartSpan(gctx, "store_matches")
+			ok, _ := storeMatches(st, r.MinTime, r.MaxTime, r.Matchers...)
+			spanStoreMatches.Finish()
+			if !ok {
+				storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("store %s filtered out", st))
+				continue
+			}
+			storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("store %s queried", st))
+
+			// This is used to cancel this stream when one operation takes too long.
+			seriesCtx, closeSeries := context.WithCancel(gctx)
+			seriesCtx = grpc_opentracing.ClientAddContextTags(seriesCtx, opentracing.Tags{
+				"target": st.Addr(),
+			})
+			defer closeSeries()
+
+			sc, err := st.Series(seriesCtx, r)
+			if err != nil {
+				storeID := storepb.LabelSetsToString(st.LabelSets())
+				if storeID == "" {
+					storeID = "Store Gateway"
+				}
+				err = errors.Wrapf(err, "fetch series for %s %s", storeID, st)
+				if r.PartialResponseDisabled {
+					level.Error(s.logger).Log("err", err, "msg", "partial response disabled; aborting request")
+					return err
+				}
+				respSender.send(storepb.NewWarnSeriesResponse(err))
+				continue
+			}
+
+			// Schedule streamSeriesSet that translates the gRPC streamed response
+			// into seriesSet (if series) or respCh (if warnings).
+			seriesSet = append(seriesSet, startStreamSeriesSet(seriesCtx, s.logger, closeSeries,
+				wg, sc, respSender, st.String(), !r.PartialResponseDisabled, s.responseTimeout))
+		}
+
+		level.Debug(s.logger).Log("msg", strings.Join(storeDebugMsgs, ";"))
+		if len(seriesSet) == 0 {
+			// This indicates that the configured StoreAPIs are not the ones the end user expects.
+			err := errors.New("No store matched for this query")
+			level.Warn(s.logger).Log("err", err, "stores", strings.Join(storeDebugMsgs, ";"))
+			respSender.send(storepb.NewWarnSeriesResponse(err))
+			return nil
+		}
+
+		mergedSet := storepb.MergeSeriesSets(seriesSet...)
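+		// Editor's note: MergeSeriesSets builds a binary tree of pairwise merges
+		// over the per-store streams. Next() below always yields the series with
+		// the smallest label set and concatenates chunks when the same series
+		// arrives from more than one store (see storepb.MergeSeriesSets).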
+		for mergedSet.Next() {
+			var series storepb.Series
+			series.Labels, series.Chunks = mergedSet.At()
+			respSender.send(storepb.NewSeriesResponse(&series))
+		}
+		return mergedSet.Err()
+	})
+
+	for resp := range respRecv {
+		if err := srv.Send(resp); err != nil {
+			return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error())
+		}
+	}
+
+	if err := g.Wait(); err != nil {
+		level.Error(s.logger).Log("err", err)
+		return err
+	}
+	return nil
+}
+
+type warnSender interface {
+	send(*storepb.SeriesResponse)
+}
+
+// streamSeriesSet iterates over an incoming stream of series.
+// All errors are sent out of band via the warning channel.
+type streamSeriesSet struct {
+	ctx    context.Context
+	logger log.Logger
+
+	stream storepb.Store_SeriesClient
+	warnCh warnSender
+
+	currSeries *storepb.Series
+	recvCh     chan *storepb.Series
+
+	errMtx sync.Mutex
+	err    error
+
+	name            string
+	partialResponse bool
+
+	responseTimeout time.Duration
+	closeSeries     context.CancelFunc
+}
+
+func startStreamSeriesSet(
+	ctx context.Context,
+	logger log.Logger,
+	closeSeries context.CancelFunc,
+	wg *sync.WaitGroup,
+	stream storepb.Store_SeriesClient,
+	warnCh warnSender,
+	name string,
+	partialResponse bool,
+	responseTimeout time.Duration,
+) *streamSeriesSet {
+	s := &streamSeriesSet{
+		ctx:             ctx,
+		logger:          logger,
+		closeSeries:     closeSeries,
+		stream:          stream,
+		warnCh:          warnCh,
+		recvCh:          make(chan *storepb.Series, 10),
+		name:            name,
+		partialResponse: partialResponse,
+		responseTimeout: responseTimeout,
+	}
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		defer close(s.recvCh)
+
+		for {
+			r, err := s.stream.Recv()
+
+			if err == io.EOF {
+				return
+			}
+
+			if err != nil {
+				wrapErr := errors.Wrapf(err, "receive series from %s", s.name)
+				if partialResponse {
+					s.warnCh.send(storepb.NewWarnSeriesResponse(wrapErr))
+					return
+				}
+
+				s.errMtx.Lock()
+				s.err = wrapErr
+				s.errMtx.Unlock()
+				return
+			}
+
+			if w := r.GetWarning(); w != "" {
+				s.warnCh.send(storepb.NewWarnSeriesResponse(errors.New(w)))
+				continue
+			}
+
+			select {
+			case s.recvCh <- r.GetSeries():
+				continue
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+	return s
+}
+
+// Next blocks until a new message is received, the stream is closed, or the operation times out.
+func (s *streamSeriesSet) Next() (ok bool) {
+	ctx := s.ctx
+	timeoutMsg := fmt.Sprintf("failed to receive any data from %s", s.name)
+
+	if s.responseTimeout != 0 {
+		timeoutMsg = fmt.Sprintf("failed to receive any data in %s from %s", s.responseTimeout.String(), s.name)
+
+		timeoutCtx, done := context.WithTimeout(s.ctx, s.responseTimeout)
+		defer done()
+		ctx = timeoutCtx
+	}
+
+	select {
+	case s.currSeries, ok = <-s.recvCh:
+		return ok
+	case <-ctx.Done():
+		// Call closeSeries to shut down the receiving goroutine in startStreamSeriesSet.
+		s.closeSeries()
+
+		err := errors.Wrap(ctx.Err(), timeoutMsg)
+		if s.partialResponse {
+			level.Warn(s.logger).Log("err", err, "msg", "returning partial response")
+			s.warnCh.send(storepb.NewWarnSeriesResponse(err))
+			return false
+		}
+		s.errMtx.Lock()
+		s.err = err
+		s.errMtx.Unlock()
+
+		level.Warn(s.logger).Log("err", err, "msg", "partial response disabled; aborting request")
+		return false
+	}
+}
+
+func (s *streamSeriesSet) At() ([]storepb.Label, []storepb.AggrChunk) {
+	if s.currSeries == nil {
+		return nil, nil
+	}
+	return s.currSeries.Labels, s.currSeries.Chunks
+}
+func (s *streamSeriesSet) Err() error {
+	s.errMtx.Lock()
+	defer s.errMtx.Unlock()
+	return errors.Wrap(s.err, s.name)
+}
+
+// storeMatches returns true if the given store may hold data for the given
+// time range and label matchers.
+func storeMatches(s Client, mint, maxt int64, matchers ...storepb.LabelMatcher) (bool, error) {
+	storeMinTime, storeMaxTime := s.TimeRange()
+	if mint > storeMaxTime || maxt < storeMinTime {
+		return false, nil
+	}
+	return labelSetsMatch(s.LabelSets(), matchers)
+}
+
+// labelSetsMatch returns true if at least one of the given label-sets matches
+// the matchers, and false only if none of them do.
+func labelSetsMatch(lss []storepb.LabelSet, matchers []storepb.LabelMatcher) (bool, error) {
+	if len(lss) == 0 {
+		return true, nil
+	}
+
+	res := false
+	for _, ls := range lss {
+		lsMatch, err := labelSetMatches(ls, matchers)
+		if err != nil {
+			return false, err
+		}
+		res = res || lsMatch
+	}
+	return res, nil
+}
+
+// labelSetMatches returns false if any matcher matches negatively against the
+// respective label-value for the matcher's label-name.
+func labelSetMatches(ls storepb.LabelSet, matchers []storepb.LabelMatcher) (bool, error) {
+	for _, m := range matchers {
+		for _, l := range ls.Labels {
+			if l.Name != m.Name {
+				continue
+			}
+
+			m, err := translateMatcher(m)
+			if err != nil {
+				return false, err
+			}
+
+			if !m.Matches(l.Value) {
+				return false, nil
+			}
+		}
+	}
+	return true, nil
+}
+
+// LabelNames returns all known label names.
+func (s *ProxyStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest) (
+	*storepb.LabelNamesResponse, error,
+) {
+	var (
+		warnings []string
+		names    [][]string
+		mtx      sync.Mutex
+		g, gctx  = errgroup.WithContext(ctx)
+	)
+
+	for _, st := range s.stores() {
+		st := st
+		g.Go(func() error {
+			resp, err := st.LabelNames(gctx, &storepb.LabelNamesRequest{
+				PartialResponseDisabled: r.PartialResponseDisabled,
+			})
+			if err != nil {
+				err = errors.Wrapf(err, "fetch label names from store %s", st)
+				if r.PartialResponseDisabled {
+					return err
+				}
+
+				mtx.Lock()
+				warnings = append(warnings, err.Error())
+				mtx.Unlock()
+				return nil
+			}
+
+			mtx.Lock()
+			warnings = append(warnings, resp.Warnings...)
+			names = append(names, resp.Names)
+			mtx.Unlock()
+
+			return nil
+		})
+	}
+
+	if err := g.Wait(); err != nil {
+		return nil, err
+	}
+
+	return &storepb.LabelNamesResponse{
+		Names:    strutil.MergeUnsortedSlices(names...),
+		Warnings: warnings,
+	}, nil
+}
+
+// LabelValues returns all known label values for a given label name.
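+// The request is fanned out to all stores concurrently and the results are
+// merged; per-store failures become warnings unless partial responses are
+// disabled, in which case the first error aborts the call (mirroring
+// LabelNames above).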
+func (s *ProxyStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) (
+	*storepb.LabelValuesResponse, error,
+) {
+	var (
+		warnings []string
+		all      [][]string
+		mtx      sync.Mutex
+		g, gctx  = errgroup.WithContext(ctx)
+	)
+
+	for _, st := range s.stores() {
+		store := st
+		g.Go(func() error {
+			resp, err := store.LabelValues(gctx, &storepb.LabelValuesRequest{
+				Label:                   r.Label,
+				PartialResponseDisabled: r.PartialResponseDisabled,
+			})
+			if err != nil {
+				err = errors.Wrapf(err, "fetch label values from store %s", store)
+				if r.PartialResponseDisabled {
+					return err
+				}
+
+				mtx.Lock()
+				warnings = append(warnings, errors.Wrap(err, "fetch label values").Error())
+				mtx.Unlock()
+				return nil
+			}
+
+			mtx.Lock()
+			warnings = append(warnings, resp.Warnings...)
+			all = append(all, resp.Values)
+			mtx.Unlock()
+
+			return nil
+		})
+	}
+
+	if err := g.Wait(); err != nil {
+		return nil, err
+	}
+
+	return &storepb.LabelValuesResponse{
+		Values:   strutil.MergeUnsortedSlices(all...),
+		Warnings: warnings,
+	}, nil
+}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go
new file mode 100644
index 0000000000..c942dd1355
--- /dev/null
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go
@@ -0,0 +1,189 @@
+package storepb
+
+import (
+	"strings"
+
+	"github.com/prometheus/prometheus/pkg/labels"
+)
+
+var PartialResponseStrategyValues = func() []string {
+	var s []string
+	for k := range PartialResponseStrategy_value {
+		s = append(s, k)
+	}
+	return s
+}()
+
+func NewWarnSeriesResponse(err error) *SeriesResponse {
+	return &SeriesResponse{
+		Result: &SeriesResponse_Warning{
+			Warning: err.Error(),
+		},
+	}
+}
+
+func NewSeriesResponse(series *Series) *SeriesResponse {
+	return &SeriesResponse{
+		Result: &SeriesResponse_Series{
+			Series: series,
+		},
+	}
+}
+
+// CompareLabels compares two sets of labels.
+func CompareLabels(a, b []Label) int {
+	l := len(a)
+	if len(b) < l {
+		l = len(b)
+	}
+	for i := 0; i < l; i++ {
+		if d := strings.Compare(a[i].Name, b[i].Name); d != 0 {
+			return d
+		}
+		if d := strings.Compare(a[i].Value, b[i].Value); d != 0 {
+			return d
+		}
+	}
+	// If all labels so far were in common, the set with fewer labels comes first.
+	return len(a) - len(b)
+}
+
+type emptySeriesSet struct{}
+
+func (emptySeriesSet) Next() bool                 { return false }
+func (emptySeriesSet) At() ([]Label, []AggrChunk) { return nil, nil }
+func (emptySeriesSet) Err() error                 { return nil }
+
+// EmptySeriesSet returns a new series set that contains no series.
+func EmptySeriesSet() SeriesSet {
+	return emptySeriesSet{}
+}
+
+// MergeSeriesSets returns a new series set that is the union of the input sets.
+func MergeSeriesSets(all ...SeriesSet) SeriesSet {
+	switch len(all) {
+	case 0:
+		return emptySeriesSet{}
+	case 1:
+		return all[0]
+	}
+	h := len(all) / 2
+
+	return newMergedSeriesSet(
+		MergeSeriesSets(all[:h]...),
+		MergeSeriesSets(all[h:]...),
+	)
+}
+
+// SeriesSet is a set of series and their corresponding chunks.
+// The set is sorted by the label sets. Chunks may be overlapping or out of order.
+type SeriesSet interface {
+	Next() bool
+	At() ([]Label, []AggrChunk)
+	Err() error
+}
+
+// mergedSeriesSet combines two series sets into a single series set.
+type mergedSeriesSet struct {
+	a, b SeriesSet
+
+	lset         []Label
+	chunks       []AggrChunk
+	adone, bdone bool
+}
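+
+// For intuition (editor's note): merging a set containing {a="1"} with a set
+// containing {a="1"} and {a="2"} yields {a="1"} (with chunks from both inputs
+// concatenated) followed by {a="2"}; label-set order is preserved throughout.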
+// newMergedSeriesSet combines two series sets into a single series set.
+// Series that occur in both sets should have disjoint time ranges.
+// If the ranges overlap, b's samples are appended to a's samples.
+// If a single SeriesSet returns the same series within many iterations,
+// the merged series set will not try to merge those.
+func newMergedSeriesSet(a, b SeriesSet) *mergedSeriesSet {
+	s := &mergedSeriesSet{a: a, b: b}
+	// Initialize first elements of both sets as Next() needs
+	// one element look-ahead.
+	s.adone = !s.a.Next()
+	s.bdone = !s.b.Next()
+
+	return s
+}
+
+func (s *mergedSeriesSet) At() ([]Label, []AggrChunk) {
+	return s.lset, s.chunks
+}
+
+func (s *mergedSeriesSet) Err() error {
+	if s.a.Err() != nil {
+		return s.a.Err()
+	}
+	return s.b.Err()
+}
+
+func (s *mergedSeriesSet) compare() int {
+	if s.adone {
+		return 1
+	}
+	if s.bdone {
+		return -1
+	}
+	lsetA, _ := s.a.At()
+	lsetB, _ := s.b.At()
+	return CompareLabels(lsetA, lsetB)
+}
+
+func (s *mergedSeriesSet) Next() bool {
+	if s.adone && s.bdone || s.Err() != nil {
+		return false
+	}
+
+	d := s.compare()
+
+	if d > 0 {
+		s.lset, s.chunks = s.b.At()
+		s.bdone = !s.b.Next()
+	} else if d < 0 {
+		s.lset, s.chunks = s.a.At()
+		s.adone = !s.a.Next()
+	} else {
+		// Both sets contain the current series. Chain them into a single one.
+		// Concatenate chunks from both series sets. They may be out of order
+		// w.r.t. their time range. This must be accounted for later.
+		lset, chksA := s.a.At()
+		_, chksB := s.b.At()
+
+		s.lset = lset
+		// Slice reuse is not generally safe with nested merge iterators.
+		// We err on the safe side and create a new slice.
+		s.chunks = make([]AggrChunk, 0, len(chksA)+len(chksB))
+		s.chunks = append(s.chunks, chksA...)
+		s.chunks = append(s.chunks, chksB...)
+
+		s.adone = !s.a.Next()
+		s.bdone = !s.b.Next()
+	}
+	return true
+}
+
+func LabelsToPromLabels(lset []Label) labels.Labels {
+	ret := make(labels.Labels, len(lset))
+	for i, l := range lset {
+		ret[i] = labels.Label{Name: l.Name, Value: l.Value}
+	}
+
+	return ret
+}
+
+func LabelsToString(lset []Label) string {
+	var s []string
+	for _, l := range lset {
+		s = append(s, l.String())
+	}
+	return "[" + strings.Join(s, ",") + "]"
+}
+
+func LabelSetsToString(lsets []LabelSet) string {
+	s := []string{}
+	for _, ls := range lsets {
+		s = append(s, LabelsToString(ls.Labels))
+	}
+	return strings.Join(s, "")
+}
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go
new file mode 100644
index 0000000000..3c46313e66
--- /dev/null
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.pb.go
@@ -0,0 +1,2917 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: rpc.proto
+
+package storepb
+
+import (
+	context "context"
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type StoreType int32 + +const ( + StoreType_UNKNOWN StoreType = 0 + StoreType_QUERY StoreType = 1 + StoreType_RULE StoreType = 2 + StoreType_SIDECAR StoreType = 3 + StoreType_STORE StoreType = 4 + StoreType_RECEIVE StoreType = 5 +) + +var StoreType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "QUERY", + 2: "RULE", + 3: "SIDECAR", + 4: "STORE", + 5: "RECEIVE", +} + +var StoreType_value = map[string]int32{ + "UNKNOWN": 0, + "QUERY": 1, + "RULE": 2, + "SIDECAR": 3, + "STORE": 4, + "RECEIVE": 5, +} + +func (x StoreType) String() string { + return proto.EnumName(StoreType_name, int32(x)) +} + +func (StoreType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{0} +} + +/// PartialResponseStrategy controls partial response handling. +type PartialResponseStrategy int32 + +const ( + /// WARN strategy tells server to treat any error that will related to single StoreAPI (e.g missing chunk series because of underlying + /// storeAPI is temporarily not available) as warning which will not fail the whole query (still OK response). + /// Server should produce those as a warnings field in response. + PartialResponseStrategy_WARN PartialResponseStrategy = 0 + /// ABORT strategy tells server to treat any error that will related to single StoreAPI (e.g missing chunk series because of underlying + /// storeAPI is temporarily not available) as the gRPC error that aborts the query. + /// + /// This is especially useful for any rule/alert evaluations on top of StoreAPI which usually does not tolerate partial + /// errors. + PartialResponseStrategy_ABORT PartialResponseStrategy = 1 +) + +var PartialResponseStrategy_name = map[int32]string{ + 0: "WARN", + 1: "ABORT", +} + +var PartialResponseStrategy_value = map[string]int32{ + "WARN": 0, + "ABORT": 1, +} + +func (x PartialResponseStrategy) String() string { + return proto.EnumName(PartialResponseStrategy_name, int32(x)) +} + +func (PartialResponseStrategy) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{1} +} + +type Aggr int32 + +const ( + Aggr_RAW Aggr = 0 + Aggr_COUNT Aggr = 1 + Aggr_SUM Aggr = 2 + Aggr_MIN Aggr = 3 + Aggr_MAX Aggr = 4 + Aggr_COUNTER Aggr = 5 +) + +var Aggr_name = map[int32]string{ + 0: "RAW", + 1: "COUNT", + 2: "SUM", + 3: "MIN", + 4: "MAX", + 5: "COUNTER", +} + +var Aggr_value = map[string]int32{ + "RAW": 0, + "COUNT": 1, + "SUM": 2, + "MIN": 3, + "MAX": 4, + "COUNTER": 5, +} + +func (x Aggr) String() string { + return proto.EnumName(Aggr_name, int32(x)) +} + +func (Aggr) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{2} +} + +type InfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InfoRequest) Reset() { *m = InfoRequest{} } +func (m *InfoRequest) String() string { return proto.CompactTextString(m) } +func (*InfoRequest) ProtoMessage() {} +func (*InfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{0} +} +func (m *InfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InfoRequest) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_InfoRequest.Merge(m, src) +} +func (m *InfoRequest) XXX_Size() int { + return m.Size() +} +func (m *InfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InfoRequest proto.InternalMessageInfo + +type InfoResponse struct { + // Deprecated. Use label_sets instead. + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + MinTime int64 `protobuf:"varint,2,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` + MaxTime int64 `protobuf:"varint,3,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` + StoreType StoreType `protobuf:"varint,4,opt,name=storeType,proto3,enum=thanos.StoreType" json:"storeType,omitempty"` + // label_sets is an unsorted list of `LabelSet`s. + LabelSets []LabelSet `protobuf:"bytes,5,rep,name=label_sets,json=labelSets,proto3" json:"label_sets"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InfoResponse) Reset() { *m = InfoResponse{} } +func (m *InfoResponse) String() string { return proto.CompactTextString(m) } +func (*InfoResponse) ProtoMessage() {} +func (*InfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{1} +} +func (m *InfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoResponse.Merge(m, src) +} +func (m *InfoResponse) XXX_Size() int { + return m.Size() +} +func (m *InfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InfoResponse proto.InternalMessageInfo + +type LabelSet struct { + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelSet) Reset() { *m = LabelSet{} } +func (m *LabelSet) String() string { return proto.CompactTextString(m) } +func (*LabelSet) ProtoMessage() {} +func (*LabelSet) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{2} +} +func (m *LabelSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelSet.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSet.Merge(m, src) +} +func (m *LabelSet) XXX_Size() int { + return m.Size() +} +func (m *LabelSet) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSet.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelSet proto.InternalMessageInfo + +type SeriesRequest struct { + MinTime int64 `protobuf:"varint,1,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` + MaxTime int64 `protobuf:"varint,2,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` + Matchers []LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers"` + MaxResolutionWindow int64 
`protobuf:"varint,4,opt,name=max_resolution_window,json=maxResolutionWindow,proto3" json:"max_resolution_window,omitempty"` + Aggregates []Aggr `protobuf:"varint,5,rep,packed,name=aggregates,proto3,enum=thanos.Aggr" json:"aggregates,omitempty"` + // Deprecated. Use partial_response_strategy instead. + PartialResponseDisabled bool `protobuf:"varint,6,opt,name=partial_response_disabled,json=partialResponseDisabled,proto3" json:"partial_response_disabled,omitempty"` + // TODO(bwplotka): Move Thanos components to use strategy instead. Including QueryAPI. + PartialResponseStrategy PartialResponseStrategy `protobuf:"varint,7,opt,name=partial_response_strategy,json=partialResponseStrategy,proto3,enum=thanos.PartialResponseStrategy" json:"partial_response_strategy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SeriesRequest) Reset() { *m = SeriesRequest{} } +func (m *SeriesRequest) String() string { return proto.CompactTextString(m) } +func (*SeriesRequest) ProtoMessage() {} +func (*SeriesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{3} +} +func (m *SeriesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SeriesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SeriesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeriesRequest.Merge(m, src) +} +func (m *SeriesRequest) XXX_Size() int { + return m.Size() +} +func (m *SeriesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SeriesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SeriesRequest proto.InternalMessageInfo + +type SeriesResponse struct { + // Types that are valid to be assigned to Result: + // *SeriesResponse_Series + // *SeriesResponse_Warning + Result isSeriesResponse_Result `protobuf_oneof:"result"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SeriesResponse) Reset() { *m = SeriesResponse{} } +func (m *SeriesResponse) String() string { return proto.CompactTextString(m) } +func (*SeriesResponse) ProtoMessage() {} +func (*SeriesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{4} +} +func (m *SeriesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeriesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SeriesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SeriesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeriesResponse.Merge(m, src) +} +func (m *SeriesResponse) XXX_Size() int { + return m.Size() +} +func (m *SeriesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SeriesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SeriesResponse proto.InternalMessageInfo + +type isSeriesResponse_Result interface { + isSeriesResponse_Result() + MarshalTo([]byte) (int, error) + Size() int +} + +type SeriesResponse_Series struct { + Series *Series `protobuf:"bytes,1,opt,name=series,proto3,oneof"` +} +type SeriesResponse_Warning struct { + Warning string 
`protobuf:"bytes,2,opt,name=warning,proto3,oneof"` +} + +func (*SeriesResponse_Series) isSeriesResponse_Result() {} +func (*SeriesResponse_Warning) isSeriesResponse_Result() {} + +func (m *SeriesResponse) GetResult() isSeriesResponse_Result { + if m != nil { + return m.Result + } + return nil +} + +func (m *SeriesResponse) GetSeries() *Series { + if x, ok := m.GetResult().(*SeriesResponse_Series); ok { + return x.Series + } + return nil +} + +func (m *SeriesResponse) GetWarning() string { + if x, ok := m.GetResult().(*SeriesResponse_Warning); ok { + return x.Warning + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SeriesResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SeriesResponse_OneofMarshaler, _SeriesResponse_OneofUnmarshaler, _SeriesResponse_OneofSizer, []interface{}{ + (*SeriesResponse_Series)(nil), + (*SeriesResponse_Warning)(nil), + } +} + +func _SeriesResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SeriesResponse) + // result + switch x := m.Result.(type) { + case *SeriesResponse_Series: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Series); err != nil { + return err + } + case *SeriesResponse_Warning: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Warning) + case nil: + default: + return fmt.Errorf("SeriesResponse.Result has unexpected type %T", x) + } + return nil +} + +func _SeriesResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SeriesResponse) + switch tag { + case 1: // result.series + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Series) + err := b.DecodeMessage(msg) + m.Result = &SeriesResponse_Series{msg} + return true, err + case 2: // result.warning + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Result = &SeriesResponse_Warning{x} + return true, err + default: + return false, nil + } +} + +func _SeriesResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SeriesResponse) + // result + switch x := m.Result.(type) { + case *SeriesResponse_Series: + s := proto.Size(x.Series) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *SeriesResponse_Warning: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Warning))) + n += len(x.Warning) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type LabelNamesRequest struct { + PartialResponseDisabled bool `protobuf:"varint,1,opt,name=partial_response_disabled,json=partialResponseDisabled,proto3" json:"partial_response_disabled,omitempty"` + // TODO(bwplotka): Move Thanos components to use strategy instead. Including QueryAPI. 
+ PartialResponseStrategy PartialResponseStrategy `protobuf:"varint,2,opt,name=partial_response_strategy,json=partialResponseStrategy,proto3,enum=thanos.PartialResponseStrategy" json:"partial_response_strategy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } +func (m *LabelNamesRequest) String() string { return proto.CompactTextString(m) } +func (*LabelNamesRequest) ProtoMessage() {} +func (*LabelNamesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{5} +} +func (m *LabelNamesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelNamesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelNamesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelNamesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelNamesRequest.Merge(m, src) +} +func (m *LabelNamesRequest) XXX_Size() int { + return m.Size() +} +func (m *LabelNamesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LabelNamesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelNamesRequest proto.InternalMessageInfo + +type LabelNamesResponse struct { + Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` + Warnings []string `protobuf:"bytes,2,rep,name=warnings,proto3" json:"warnings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelNamesResponse) Reset() { *m = LabelNamesResponse{} } +func (m *LabelNamesResponse) String() string { return proto.CompactTextString(m) } +func (*LabelNamesResponse) ProtoMessage() {} +func (*LabelNamesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{6} +} +func (m *LabelNamesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelNamesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelNamesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelNamesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelNamesResponse.Merge(m, src) +} +func (m *LabelNamesResponse) XXX_Size() int { + return m.Size() +} +func (m *LabelNamesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LabelNamesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelNamesResponse proto.InternalMessageInfo + +type LabelValuesRequest struct { + Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"` + PartialResponseDisabled bool `protobuf:"varint,2,opt,name=partial_response_disabled,json=partialResponseDisabled,proto3" json:"partial_response_disabled,omitempty"` + // TODO(bwplotka): Move Thanos components to use strategy instead. Including QueryAPI. 
+ PartialResponseStrategy PartialResponseStrategy `protobuf:"varint,3,opt,name=partial_response_strategy,json=partialResponseStrategy,proto3,enum=thanos.PartialResponseStrategy" json:"partial_response_strategy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } +func (m *LabelValuesRequest) String() string { return proto.CompactTextString(m) } +func (*LabelValuesRequest) ProtoMessage() {} +func (*LabelValuesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{7} +} +func (m *LabelValuesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelValuesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelValuesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelValuesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValuesRequest.Merge(m, src) +} +func (m *LabelValuesRequest) XXX_Size() int { + return m.Size() +} +func (m *LabelValuesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValuesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelValuesRequest proto.InternalMessageInfo + +type LabelValuesResponse struct { + Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Warnings []string `protobuf:"bytes,2,rep,name=warnings,proto3" json:"warnings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} } +func (m *LabelValuesResponse) String() string { return proto.CompactTextString(m) } +func (*LabelValuesResponse) ProtoMessage() {} +func (*LabelValuesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{8} +} +func (m *LabelValuesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelValuesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelValuesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelValuesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValuesResponse.Merge(m, src) +} +func (m *LabelValuesResponse) XXX_Size() int { + return m.Size() +} +func (m *LabelValuesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValuesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelValuesResponse proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("thanos.StoreType", StoreType_name, StoreType_value) + proto.RegisterEnum("thanos.PartialResponseStrategy", PartialResponseStrategy_name, PartialResponseStrategy_value) + proto.RegisterEnum("thanos.Aggr", Aggr_name, Aggr_value) + proto.RegisterType((*InfoRequest)(nil), "thanos.InfoRequest") + proto.RegisterType((*InfoResponse)(nil), "thanos.InfoResponse") + proto.RegisterType((*LabelSet)(nil), "thanos.LabelSet") + proto.RegisterType((*SeriesRequest)(nil), "thanos.SeriesRequest") + proto.RegisterType((*SeriesResponse)(nil), "thanos.SeriesResponse") + proto.RegisterType((*LabelNamesRequest)(nil), "thanos.LabelNamesRequest") + 
proto.RegisterType((*LabelNamesResponse)(nil), "thanos.LabelNamesResponse") + proto.RegisterType((*LabelValuesRequest)(nil), "thanos.LabelValuesRequest") + proto.RegisterType((*LabelValuesResponse)(nil), "thanos.LabelValuesResponse") +} + +func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } + +var fileDescriptor_77a6da22d6a3feb1 = []byte{ + // 772 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0x6f, 0xfa, 0x46, + 0x10, 0x65, 0x6d, 0x30, 0x78, 0x48, 0x90, 0xb3, 0x21, 0x89, 0x71, 0x25, 0x82, 0x38, 0xa1, 0xb4, + 0x22, 0x2d, 0x55, 0x5b, 0xb5, 0x37, 0x20, 0x8e, 0x82, 0x9a, 0x40, 0xbb, 0x40, 0xe8, 0x9f, 0x03, + 0x35, 0xc9, 0xd6, 0xb1, 0x64, 0x6c, 0xea, 0x35, 0x4d, 0x72, 0xed, 0xe7, 0xe9, 0xb7, 0xe8, 0x25, + 0xc7, 0x5e, 0x7b, 0xa9, 0xda, 0x7c, 0x8a, 0x1e, 0x2b, 0xaf, 0xd7, 0x80, 0xdb, 0x24, 0xd2, 0x4f, + 0xdc, 0x76, 0xde, 0x1b, 0xcf, 0xec, 0x7b, 0x3b, 0xbb, 0x06, 0x35, 0x58, 0xdc, 0x34, 0x17, 0x81, + 0x1f, 0xfa, 0x58, 0x09, 0xef, 0x2c, 0xcf, 0x67, 0x46, 0x31, 0x7c, 0x5c, 0x50, 0x16, 0x83, 0x46, + 0xd9, 0xf6, 0x6d, 0x9f, 0x2f, 0x4f, 0xa3, 0x55, 0x8c, 0xd6, 0x77, 0xa1, 0xd8, 0xf3, 0x7e, 0xf4, + 0x09, 0xfd, 0x69, 0x49, 0x59, 0x58, 0xff, 0x03, 0xc1, 0x4e, 0x1c, 0xb3, 0x85, 0xef, 0x31, 0x8a, + 0xdf, 0x07, 0xc5, 0xb5, 0x66, 0xd4, 0x65, 0x3a, 0xaa, 0xc9, 0x8d, 0x62, 0x6b, 0xb7, 0x19, 0xd7, + 0x6e, 0x5e, 0x46, 0x68, 0x27, 0xfb, 0xf4, 0xe7, 0x71, 0x86, 0x88, 0x14, 0x5c, 0x81, 0xc2, 0xdc, + 0xf1, 0xa6, 0xa1, 0x33, 0xa7, 0xba, 0x54, 0x43, 0x0d, 0x99, 0xe4, 0xe7, 0x8e, 0x37, 0x72, 0xe6, + 0x94, 0x53, 0xd6, 0x43, 0x4c, 0xc9, 0x82, 0xb2, 0x1e, 0x38, 0x75, 0x0a, 0x2a, 0x0b, 0xfd, 0x80, + 0x8e, 0x1e, 0x17, 0x54, 0xcf, 0xd6, 0x50, 0xa3, 0xd4, 0xda, 0x4b, 0xba, 0x0c, 0x13, 0x82, 0xac, + 0x73, 0xf0, 0x27, 0x00, 0xbc, 0xe1, 0x94, 0xd1, 0x90, 0xe9, 0x39, 0xbe, 0x2f, 0x2d, 0xb5, 0xaf, + 0x21, 0x0d, 0xc5, 0xd6, 0x54, 0x57, 0xc4, 0xac, 0xfe, 0x19, 0x14, 0x12, 0xf2, 0x9d, 0x64, 0xd5, + 0xff, 0x91, 0x60, 0x77, 0x48, 0x03, 0x87, 0x32, 0x61, 0x53, 0x4a, 0x28, 0x7a, 0x5d, 0xa8, 0x94, + 0x16, 0xfa, 0x69, 0x44, 0x85, 0x37, 0x77, 0x34, 0x60, 0xba, 0xcc, 0xdb, 0x96, 0x53, 0x6d, 0xaf, + 0x62, 0x52, 0x74, 0x5f, 0xe5, 0xe2, 0x16, 0x1c, 0x44, 0x25, 0x03, 0xca, 0x7c, 0x77, 0x19, 0x3a, + 0xbe, 0x37, 0xbd, 0x77, 0xbc, 0x5b, 0xff, 0x9e, 0x9b, 0x25, 0x93, 0xfd, 0xb9, 0xf5, 0x40, 0x56, + 0xdc, 0x84, 0x53, 0xf8, 0x03, 0x00, 0xcb, 0xb6, 0x03, 0x6a, 0x5b, 0x21, 0x8d, 0x3d, 0x2a, 0xb5, + 0x76, 0x92, 0x6e, 0x6d, 0xdb, 0x0e, 0xc8, 0x06, 0x8f, 0xbf, 0x80, 0xca, 0xc2, 0x0a, 0x42, 0xc7, + 0x72, 0xa3, 0x2e, 0xfc, 0xe4, 0xa7, 0xb7, 0x0e, 0xb3, 0x66, 0x2e, 0xbd, 0xd5, 0x95, 0x1a, 0x6a, + 0x14, 0xc8, 0x91, 0x48, 0x48, 0x26, 0xe3, 0x4c, 0xd0, 0xf8, 0xfb, 0x17, 0xbe, 0x65, 0x61, 0x60, + 0x85, 0xd4, 0x7e, 0xd4, 0xf3, 0xfc, 0x38, 0x8f, 0x93, 0xc6, 0x5f, 0xa5, 0x6b, 0x0c, 0x45, 0xda, + 0xff, 0x8a, 0x27, 0x44, 0xfd, 0x07, 0x28, 0x25, 0xce, 0x8b, 0x81, 0x6c, 0x80, 0xc2, 0x38, 0xc2, + 0x8d, 0x2f, 0xb6, 0x4a, 0xab, 0x51, 0xe1, 0xe8, 0x45, 0x86, 0x08, 0x1e, 0x1b, 0x90, 0xbf, 0xb7, + 0x02, 0xcf, 0xf1, 0x6c, 0x7e, 0x10, 0xea, 0x45, 0x86, 0x24, 0x40, 0xa7, 0x00, 0x4a, 0x40, 0xd9, + 0xd2, 0x0d, 0xeb, 0xbf, 0x22, 0xd8, 0xe3, 0xee, 0xf7, 0xad, 0xf9, 0xfa, 0x80, 0xdf, 0x34, 0x04, + 0x6d, 0x61, 0x88, 0xb4, 0xa5, 0x21, 0xe7, 0x80, 0x37, 0x77, 0x2b, 0x4c, 0x29, 0x43, 0xce, 0x8b, + 0x00, 0x3e, 0xcd, 0x2a, 0x89, 0x03, 0x6c, 0x40, 0x41, 0xe8, 0x65, 0xba, 0xc4, 0x89, 0x55, 0x5c, + 0xff, 0x0d, 0x89, 0x42, 0xd7, 0x96, 0xbb, 0x5c, 0xeb, 0x2e, 0x43, 0x8e, 0x0f, 0x3d, 0xd7, 0xa8, 
+ 0x92, 0x38, 0x78, 0xdb, 0x0d, 0x69, 0x0b, 0x37, 0xe4, 0x2d, 0xdd, 0xe8, 0xc1, 0x7e, 0x4a, 0x84, + 0xb0, 0xe3, 0x10, 0x94, 0x9f, 0x39, 0x22, 0xfc, 0x10, 0xd1, 0x5b, 0x86, 0x9c, 0x10, 0x50, 0x57, + 0x8f, 0x0d, 0x2e, 0x42, 0x7e, 0xdc, 0xff, 0xb2, 0x3f, 0x98, 0xf4, 0xb5, 0x0c, 0x56, 0x21, 0xf7, + 0xf5, 0xd8, 0x24, 0xdf, 0x6a, 0x08, 0x17, 0x20, 0x4b, 0xc6, 0x97, 0xa6, 0x26, 0x45, 0x19, 0xc3, + 0xde, 0x99, 0xd9, 0x6d, 0x13, 0x4d, 0x8e, 0x32, 0x86, 0xa3, 0x01, 0x31, 0xb5, 0x6c, 0x84, 0x13, + 0xb3, 0x6b, 0xf6, 0xae, 0x4d, 0x2d, 0x77, 0xd2, 0x84, 0xa3, 0x57, 0x24, 0x45, 0x95, 0x26, 0x6d, + 0x22, 0xca, 0xb7, 0x3b, 0x03, 0x32, 0xd2, 0xd0, 0x49, 0x07, 0xb2, 0xd1, 0xd5, 0xc4, 0x79, 0x90, + 0x49, 0x7b, 0x12, 0x73, 0xdd, 0xc1, 0xb8, 0x3f, 0xd2, 0x50, 0x84, 0x0d, 0xc7, 0x57, 0x9a, 0x14, + 0x2d, 0xae, 0x7a, 0x7d, 0x4d, 0xe6, 0x8b, 0xf6, 0x37, 0x71, 0x4f, 0x9e, 0x65, 0x12, 0x2d, 0xd7, + 0xfa, 0x45, 0x82, 0x1c, 0x17, 0x82, 0x3f, 0x82, 0x6c, 0xf4, 0x94, 0xe3, 0xfd, 0xc4, 0xde, 0x8d, + 0x87, 0xde, 0x28, 0xa7, 0x41, 0x61, 0xdc, 0xe7, 0xa0, 0xc4, 0xd7, 0x08, 0x1f, 0xa4, 0xaf, 0x55, + 0xf2, 0xd9, 0xe1, 0x7f, 0xe1, 0xf8, 0xc3, 0x0f, 0x11, 0xee, 0x02, 0xac, 0x07, 0x13, 0x57, 0x52, + 0x0f, 0xdb, 0xe6, 0xd5, 0x32, 0x8c, 0x97, 0x28, 0xd1, 0xff, 0x1c, 0x8a, 0x1b, 0xe7, 0x89, 0xd3, + 0xa9, 0xa9, 0x49, 0x35, 0xde, 0x7b, 0x91, 0x8b, 0xeb, 0x74, 0x2a, 0x4f, 0x7f, 0x57, 0x33, 0x4f, + 0xcf, 0x55, 0xf4, 0xfb, 0x73, 0x15, 0xfd, 0xf5, 0x5c, 0x45, 0xdf, 0xe5, 0xf9, 0xef, 0x63, 0x31, + 0x9b, 0x29, 0xfc, 0xbf, 0xf7, 0xf1, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe8, 0xcb, 0x18, 0x01, + 0x2f, 0x07, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// StoreClient is the client API for Store service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type StoreClient interface { + /// Info returns meta information about a store e.g labels that makes that store unique as well as time range that is + /// available. + Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) + /// Series streams each Series (Labels and chunk/downsampling chunk) for given label matchers and time range. + /// + /// Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain + /// partition of the single series, but once a new series is started to be streamed it means that no more data will + /// be sent for previous one. + /// Series has to be sorted. + Series(ctx context.Context, in *SeriesRequest, opts ...grpc.CallOption) (Store_SeriesClient, error) + /// LabelNames returns all label names that is available. + /// Currently unimplemented in all Thanos implementations, because Query API does not implement this either. + LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) + /// LabelValues returns all label values for given label name. 
+ LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) +} + +type storeClient struct { + cc *grpc.ClientConn +} + +func NewStoreClient(cc *grpc.ClientConn) StoreClient { + return &storeClient{cc} +} + +func (c *storeClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) { + out := new(InfoResponse) + err := c.cc.Invoke(ctx, "/thanos.Store/Info", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storeClient) Series(ctx context.Context, in *SeriesRequest, opts ...grpc.CallOption) (Store_SeriesClient, error) { + stream, err := c.cc.NewStream(ctx, &_Store_serviceDesc.Streams[0], "/thanos.Store/Series", opts...) + if err != nil { + return nil, err + } + x := &storeSeriesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Store_SeriesClient interface { + Recv() (*SeriesResponse, error) + grpc.ClientStream +} + +type storeSeriesClient struct { + grpc.ClientStream +} + +func (x *storeSeriesClient) Recv() (*SeriesResponse, error) { + m := new(SeriesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storeClient) LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) { + out := new(LabelNamesResponse) + err := c.cc.Invoke(ctx, "/thanos.Store/LabelNames", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storeClient) LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) { + out := new(LabelValuesResponse) + err := c.cc.Invoke(ctx, "/thanos.Store/LabelValues", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// StoreServer is the server API for Store service. +type StoreServer interface { + /// Info returns meta information about a store e.g labels that makes that store unique as well as time range that is + /// available. + Info(context.Context, *InfoRequest) (*InfoResponse, error) + /// Series streams each Series (Labels and chunk/downsampling chunk) for given label matchers and time range. + /// + /// Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain + /// partition of the single series, but once a new series is started to be streamed it means that no more data will + /// be sent for previous one. + /// Series has to be sorted. + Series(*SeriesRequest, Store_SeriesServer) error + /// LabelNames returns all label names that is available. + /// Currently unimplemented in all Thanos implementations, because Query API does not implement this either. + LabelNames(context.Context, *LabelNamesRequest) (*LabelNamesResponse, error) + /// LabelValues returns all label values for given label name. + LabelValues(context.Context, *LabelValuesRequest) (*LabelValuesResponse, error) +} + +// UnimplementedStoreServer can be embedded to have forward compatible implementations. 
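+// For example (editor's illustrative note, not generated code): a partial
+// server can embed it so that RPCs it does not implement return
+// codes.Unimplemented instead of breaking the build when the Store service
+// grows; infoOnlyStore below is a hypothetical user-defined type:
+//
+//	type infoOnlyStore struct {
+//		UnimplementedStoreServer
+//	}
+//
+//	func (s *infoOnlyStore) Info(ctx context.Context, _ *InfoRequest) (*InfoResponse, error) {
+//		return &InfoResponse{StoreType: StoreType_STORE}, nil
+//	}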
+type UnimplementedStoreServer struct { +} + +func (*UnimplementedStoreServer) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +} +func (*UnimplementedStoreServer) Series(req *SeriesRequest, srv Store_SeriesServer) error { + return status.Errorf(codes.Unimplemented, "method Series not implemented") +} +func (*UnimplementedStoreServer) LabelNames(ctx context.Context, req *LabelNamesRequest) (*LabelNamesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LabelNames not implemented") +} +func (*UnimplementedStoreServer) LabelValues(ctx context.Context, req *LabelValuesRequest) (*LabelValuesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LabelValues not implemented") +} + +func RegisterStoreServer(s *grpc.Server, srv StoreServer) { + s.RegisterService(&_Store_serviceDesc, srv) +} + +func _Store_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/thanos.Store/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).Info(ctx, req.(*InfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Store_Series_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SeriesRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StoreServer).Series(m, &storeSeriesServer{stream}) +} + +type Store_SeriesServer interface { + Send(*SeriesResponse) error + grpc.ServerStream +} + +type storeSeriesServer struct { + grpc.ServerStream +} + +func (x *storeSeriesServer) Send(m *SeriesResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Store_LabelNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LabelNamesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).LabelNames(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/thanos.Store/LabelNames", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).LabelNames(ctx, req.(*LabelNamesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Store_LabelValues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LabelValuesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).LabelValues(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/thanos.Store/LabelValues", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).LabelValues(ctx, req.(*LabelValuesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Store_serviceDesc = grpc.ServiceDesc{ + ServiceName: "thanos.Store", + HandlerType: (*StoreServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Info", + Handler: _Store_Info_Handler, + }, + { + MethodName: "LabelNames", + Handler: _Store_LabelNames_Handler, + }, + { + 
MethodName: "LabelValues", + Handler: _Store_LabelValues_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Series", + Handler: _Store_Series_Handler, + ServerStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +func (m *InfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *InfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.LabelSets) > 0 { + for iNdEx := len(m.LabelSets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LabelSets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.StoreType != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.StoreType)) + i-- + dAtA[i] = 0x20 + } + if m.MaxTime != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MaxTime)) + i-- + dAtA[i] = 0x18 + } + if m.MinTime != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MinTime)) + i-- + dAtA[i] = 0x10 + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LabelSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SeriesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeriesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*SeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.PartialResponseStrategy != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.PartialResponseStrategy)) + i-- + dAtA[i] = 0x38 + } + if m.PartialResponseDisabled { + i-- + if m.PartialResponseDisabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.Aggregates) > 0 { + dAtA2 := make([]byte, len(m.Aggregates)*10) + var j1 int + for _, num := range m.Aggregates { + for num >= 1<<7 { + dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA2[j1] = uint8(num) + j1++ + } + i -= j1 + copy(dAtA[i:], dAtA2[:j1]) + i = encodeVarintRpc(dAtA, i, uint64(j1)) + i-- + dAtA[i] = 0x2a + } + if m.MaxResolutionWindow != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MaxResolutionWindow)) + i-- + dAtA[i] = 0x20 + } + if len(m.Matchers) > 0 { + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.MaxTime != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MaxTime)) + i-- + dAtA[i] = 0x10 + } + if m.MinTime != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.MinTime)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SeriesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeriesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeriesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Result != nil { + { + size := m.Result.Size() + i -= size + if _, err := m.Result.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *SeriesResponse_Series) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *SeriesResponse_Series) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Series != nil { + { + size, err := m.Series.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *SeriesResponse_Warning) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *SeriesResponse_Warning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Warning) + copy(dAtA[i:], m.Warning) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Warning))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} +func (m *LabelNamesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelNamesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.PartialResponseStrategy != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.PartialResponseStrategy)) + i-- + dAtA[i] = 0x10 + } + if m.PartialResponseDisabled { + i-- + if m.PartialResponseDisabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LabelNamesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelNamesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelNamesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Warnings) > 0 { + for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Warnings[iNdEx]) + copy(dAtA[i:], m.Warnings[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Warnings[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LabelValuesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelValuesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.PartialResponseStrategy != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.PartialResponseStrategy)) + i-- + dAtA[i] = 0x18 + } + if m.PartialResponseDisabled { + i-- + if m.PartialResponseDisabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Label) > 0 { + i -= len(m.Label) + copy(dAtA[i:], m.Label) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Label))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LabelValuesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelValuesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelValuesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Warnings) > 0 { + for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Warnings[iNdEx]) + copy(dAtA[i:], m.Warnings[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Warnings[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) 
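// Editor's note on the surrounding generated code: MarshalToSizedBuffer fills
// the buffer back to front. i starts at len(dAtA) and is decremented before
// each write, so for a repeated string field such as Values the bytes end up
// ordered tag | varint(length) | payload, even though the payload is copied
// first and the tag byte last. Because Marshal allocates exactly Size() bytes
// up front, the slice never has to grow during encoding.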
+ copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { + offset -= sovRpc(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *InfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.MinTime != 0 { + n += 1 + sovRpc(uint64(m.MinTime)) + } + if m.MaxTime != 0 { + n += 1 + sovRpc(uint64(m.MaxTime)) + } + if m.StoreType != 0 { + n += 1 + sovRpc(uint64(m.StoreType)) + } + if len(m.LabelSets) > 0 { + for _, e := range m.LabelSets { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LabelSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SeriesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinTime != 0 { + n += 1 + sovRpc(uint64(m.MinTime)) + } + if m.MaxTime != 0 { + n += 1 + sovRpc(uint64(m.MaxTime)) + } + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.MaxResolutionWindow != 0 { + n += 1 + sovRpc(uint64(m.MaxResolutionWindow)) + } + if len(m.Aggregates) > 0 { + l = 0 + for _, e := range m.Aggregates { + l += sovRpc(uint64(e)) + } + n += 1 + sovRpc(uint64(l)) + l + } + if m.PartialResponseDisabled { + n += 2 + } + if m.PartialResponseStrategy != 0 { + n += 1 + sovRpc(uint64(m.PartialResponseStrategy)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SeriesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + n += m.Result.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SeriesResponse_Series) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *SeriesResponse_Warning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Warning) + n += 1 + l + sovRpc(uint64(l)) + return n +} +func (m *LabelNamesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PartialResponseDisabled { + n += 2 + } + if m.PartialResponseStrategy != 0 { + n += 1 + sovRpc(uint64(m.PartialResponseStrategy)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LabelNamesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.Warnings) > 0 { + for _, s := range m.Warnings { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func 
(m *LabelValuesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Label) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.PartialResponseDisabled { + n += 2 + } + if m.PartialResponseStrategy != 0 { + n += 1 + sovRpc(uint64(m.PartialResponseStrategy)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LabelValuesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.Warnings) > 0 { + for _, s := range m.Warnings { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRpc(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRpc(x uint64) (n int) { + return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *InfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
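// Editor's note (a worked example, not generated code): each encoded field
// begins with a varint key = (field_number << 3) | wire_type, decoded by the
// shift loop above 7 bits per byte, least significant group first, until a
// byte with the high bit clear. For InfoResponse's label_sets field (number 5,
// wire type 2, length-delimited) the key fits in one byte:
//
//	(5 << 3) | 2 = 0x2a
//
// which is exactly the tag byte the marshal side writes for that field. Since
// InfoRequest declares no fields, every key reaching this Unmarshal lands in
// the default branch, and the raw bytes are preserved in XXX_unrecognized so
// that re-marshalling does not drop them.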
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTime", wireType) + } + m.MinTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTime", wireType) + } + m.MaxTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StoreType", wireType) + } + m.StoreType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StoreType |= StoreType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSets = append(m.LabelSets, LabelSet{}) + if err := m.LabelSets[len(m.LabelSets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx 
+ skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeriesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTime", wireType) + } + m.MinTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTime", wireType) + } + m.MaxTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = append(m.Matchers, LabelMatcher{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxResolutionWindow", wireType) + } + m.MaxResolutionWindow = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxResolutionWindow |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType == 0 { + var v Aggr + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= Aggr(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Aggregates = append(m.Aggregates, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.Aggregates) == 0 { + m.Aggregates = make([]Aggr, 0, 
elementCount) + } + for iNdEx < postIndex { + var v Aggr + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= Aggr(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Aggregates = append(m.Aggregates, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Aggregates", wireType) + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PartialResponseDisabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PartialResponseDisabled = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PartialResponseStrategy", wireType) + } + m.PartialResponseStrategy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PartialResponseStrategy |= PartialResponseStrategy(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeriesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Series{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Result = &SeriesResponse_Series{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Result = &SeriesResponse_Warning{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelNamesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PartialResponseDisabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PartialResponseDisabled = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PartialResponseStrategy", wireType) + } + m.PartialResponseStrategy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PartialResponseStrategy |= PartialResponseStrategy(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelNamesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValuesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValuesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Label = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PartialResponseDisabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PartialResponseDisabled = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PartialResponseStrategy", wireType) + } + m.PartialResponseStrategy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PartialResponseStrategy |= PartialResponseStrategy(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValuesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValuesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
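// Editor's note ahead of skipRpc below: skipping an unknown field only
// requires its wire type, which fixes the payload size: 0 is a varint, 1 a
// fixed 8-byte value, 2 a varint length followed by that many bytes, 5 a
// fixed 4-byte value; the deprecated group wire types 3 and 4 bracket nested
// fields, which skipRpc skips recursively until the matching end-group key.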
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRpc(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRpc + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthRpc + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRpc(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthRpc + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto new file mode 100644 index 0000000000..8b2098e43c --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto @@ -0,0 +1,134 @@ +syntax = "proto3"; +package thanos; + +import "types.proto"; +import "gogoproto/gogo.proto"; + +option go_package = "storepb"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +/// Store represents an API against an instance that stores XOR encoded values with label set metadata (e.g. Prometheus metrics). +service Store { + /// Info returns meta information about a store, e.g. labels that make that store unique, as well as the time range that is + /// available. + rpc Info(InfoRequest) returns (InfoResponse); + + /// Series streams each Series (Labels and chunk/downsampling chunk) for the given label matchers and time range. + /// + /// Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain + /// a partition of a single series, but once a new series starts to be streamed, no more data will + /// be sent for the previous one. + /// Series has to be sorted.
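/// For illustration (an editor's example, not part of the upstream comment), a
/// valid stream for two series A and B could be:
///
///   SeriesResponse{series: A, chunks covering [t0, t1)}
///   SeriesResponse{series: A, chunks covering [t1, t2)}
///   SeriesResponse{series: B, ...}
///
/// while the order A, B, A would break the contract, because frames for A
/// appear again after B has started.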
+ rpc Series(SeriesRequest) returns (stream SeriesResponse); + + /// LabelNames returns all label names that are available. + /// Currently unimplemented in all Thanos implementations, because the Query API does not implement this either. + rpc LabelNames(LabelNamesRequest) returns (LabelNamesResponse); + + /// LabelValues returns all label values for the given label name. + rpc LabelValues(LabelValuesRequest) returns (LabelValuesResponse); +} + +message InfoRequest { +} + +enum StoreType { + UNKNOWN = 0; + QUERY = 1; + RULE = 2; + SIDECAR = 3; + STORE = 4; + RECEIVE = 5; +} + +message InfoResponse { + // Deprecated. Use label_sets instead. + repeated Label labels = 1 [(gogoproto.nullable) = false]; + int64 min_time = 2; + int64 max_time = 3; + StoreType storeType = 4; + // label_sets is an unsorted list of `LabelSet`s. + repeated LabelSet label_sets = 5 [(gogoproto.nullable) = false]; +} + +message LabelSet { + repeated Label labels = 1 [(gogoproto.nullable) = false]; +} + +/// PartialResponseStrategy controls partial response handling. +enum PartialResponseStrategy { + /// WARN strategy tells the server to treat any error related to a single StoreAPI (e.g. missing chunk series because the underlying + /// StoreAPI is temporarily not available) as a warning which will not fail the whole query (the response is still OK). + /// The server should produce those as a warnings field in the response. + WARN = 0; + /// ABORT strategy tells the server to treat any error related to a single StoreAPI (e.g. missing chunk series because the underlying + /// StoreAPI is temporarily not available) as a gRPC error that aborts the query. + /// + /// This is especially useful for rule/alert evaluations on top of StoreAPI, which usually do not tolerate partial + /// errors. + ABORT = 1; +} + +message SeriesRequest { + int64 min_time = 1; + int64 max_time = 2; + repeated LabelMatcher matchers = 3 [(gogoproto.nullable) = false]; + + int64 max_resolution_window = 4; + repeated Aggr aggregates = 5; + + // Deprecated. Use partial_response_strategy instead. + bool partial_response_disabled = 6; + + // TODO(bwplotka): Move Thanos components to use strategy instead. Including QueryAPI. + PartialResponseStrategy partial_response_strategy = 7; +} + +enum Aggr { + RAW = 0; + COUNT = 1; + SUM = 2; + MIN = 3; + MAX = 4; + COUNTER = 5; +} + +message SeriesResponse { + oneof result { + Series series = 1; + + /// warning is an informational message sent in place of a series. + /// It is used to warn the query client about suspicious cases or a partial response (if enabled). + string warning = 2; + } +} + +message LabelNamesRequest { + bool partial_response_disabled = 1; + + // TODO(bwplotka): Move Thanos components to use strategy instead. Including QueryAPI. + PartialResponseStrategy partial_response_strategy = 2; +} + +message LabelNamesResponse { + repeated string names = 1; + repeated string warnings = 2; +} + +message LabelValuesRequest { + string label = 1; + + bool partial_response_disabled = 2; + + // TODO(bwplotka): Move Thanos components to use strategy instead. Including QueryAPI.
+ PartialResponseStrategy partial_response_strategy = 3; +} + +message LabelValuesResponse { + repeated string values = 1; + repeated string warnings = 2; +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go new file mode 100644 index 0000000000..86a8b06237 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.pb.go @@ -0,0 +1,1672 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: types.proto + +package storepb + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Chunk_Encoding int32 + +const ( + Chunk_XOR Chunk_Encoding = 0 +) + +var Chunk_Encoding_name = map[int32]string{ + 0: "XOR", +} + +var Chunk_Encoding_value = map[string]int32{ + "XOR": 0, +} + +func (x Chunk_Encoding) String() string { + return proto.EnumName(Chunk_Encoding_name, int32(x)) +} + +func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{1, 0} +} + +type LabelMatcher_Type int32 + +const ( + LabelMatcher_EQ LabelMatcher_Type = 0 + LabelMatcher_NEQ LabelMatcher_Type = 1 + LabelMatcher_RE LabelMatcher_Type = 2 + LabelMatcher_NRE LabelMatcher_Type = 3 +) + +var LabelMatcher_Type_name = map[int32]string{ + 0: "EQ", + 1: "NEQ", + 2: "RE", + 3: "NRE", +} + +var LabelMatcher_Type_value = map[string]int32{ + "EQ": 0, + "NEQ": 1, + "RE": 2, + "NRE": 3, +} + +func (x LabelMatcher_Type) String() string { + return proto.EnumName(LabelMatcher_Type_name, int32(x)) +} + +func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{4, 0} +} + +type Label struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Label) Reset() { *m = Label{} } +func (m *Label) String() string { return proto.CompactTextString(m) } +func (*Label) ProtoMessage() {} +func (*Label) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{0} +} +func (m *Label) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Label) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Label.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Label) XXX_Merge(src proto.Message) { + xxx_messageInfo_Label.Merge(m, src) +} +func (m *Label) XXX_Size() int { + return m.Size() +} +func (m *Label) XXX_DiscardUnknown() { + xxx_messageInfo_Label.DiscardUnknown(m) +} + +var xxx_messageInfo_Label proto.InternalMessageInfo + +type Chunk struct { + Type Chunk_Encoding `protobuf:"varint,1,opt,name=type,proto3,enum=thanos.Chunk_Encoding" 
json:"type,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Chunk) Reset() { *m = Chunk{} } +func (m *Chunk) String() string { return proto.CompactTextString(m) } +func (*Chunk) ProtoMessage() {} +func (*Chunk) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{1} +} +func (m *Chunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Chunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_Chunk.Merge(m, src) +} +func (m *Chunk) XXX_Size() int { + return m.Size() +} +func (m *Chunk) XXX_DiscardUnknown() { + xxx_messageInfo_Chunk.DiscardUnknown(m) +} + +var xxx_messageInfo_Chunk proto.InternalMessageInfo + +type Series struct { + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + Chunks []AggrChunk `protobuf:"bytes,2,rep,name=chunks,proto3" json:"chunks"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Series) Reset() { *m = Series{} } +func (m *Series) String() string { return proto.CompactTextString(m) } +func (*Series) ProtoMessage() {} +func (*Series) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{2} +} +func (m *Series) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Series) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Series.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Series) XXX_Merge(src proto.Message) { + xxx_messageInfo_Series.Merge(m, src) +} +func (m *Series) XXX_Size() int { + return m.Size() +} +func (m *Series) XXX_DiscardUnknown() { + xxx_messageInfo_Series.DiscardUnknown(m) +} + +var xxx_messageInfo_Series proto.InternalMessageInfo + +type AggrChunk struct { + MinTime int64 `protobuf:"varint,1,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` + MaxTime int64 `protobuf:"varint,2,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` + Raw *Chunk `protobuf:"bytes,3,opt,name=raw,proto3" json:"raw,omitempty"` + Count *Chunk `protobuf:"bytes,4,opt,name=count,proto3" json:"count,omitempty"` + Sum *Chunk `protobuf:"bytes,5,opt,name=sum,proto3" json:"sum,omitempty"` + Min *Chunk `protobuf:"bytes,6,opt,name=min,proto3" json:"min,omitempty"` + Max *Chunk `protobuf:"bytes,7,opt,name=max,proto3" json:"max,omitempty"` + Counter *Chunk `protobuf:"bytes,8,opt,name=counter,proto3" json:"counter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AggrChunk) Reset() { *m = AggrChunk{} } +func (m *AggrChunk) String() string { return proto.CompactTextString(m) } +func (*AggrChunk) ProtoMessage() {} +func (*AggrChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{3} +} +func (m *AggrChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggrChunk) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + if deterministic { + return xxx_messageInfo_AggrChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AggrChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggrChunk.Merge(m, src) +} +func (m *AggrChunk) XXX_Size() int { + return m.Size() +} +func (m *AggrChunk) XXX_DiscardUnknown() { + xxx_messageInfo_AggrChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_AggrChunk proto.InternalMessageInfo + +// Matcher specifies a rule, which can match or set of labels or not. +type LabelMatcher struct { + Type LabelMatcher_Type `protobuf:"varint,1,opt,name=type,proto3,enum=thanos.LabelMatcher_Type" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } +func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } +func (*LabelMatcher) ProtoMessage() {} +func (*LabelMatcher) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{4} +} +func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelMatcher) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelMatcher.Merge(m, src) +} +func (m *LabelMatcher) XXX_Size() int { + return m.Size() +} +func (m *LabelMatcher) XXX_DiscardUnknown() { + xxx_messageInfo_LabelMatcher.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelMatcher proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("thanos.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value) + proto.RegisterEnum("thanos.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value) + proto.RegisterType((*Label)(nil), "thanos.Label") + proto.RegisterType((*Chunk)(nil), "thanos.Chunk") + proto.RegisterType((*Series)(nil), "thanos.Series") + proto.RegisterType((*AggrChunk)(nil), "thanos.AggrChunk") + proto.RegisterType((*LabelMatcher)(nil), "thanos.LabelMatcher") +} + +func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } + +var fileDescriptor_d938547f84707355 = []byte{ + // 432 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xdd, 0x6e, 0xd3, 0x30, + 0x14, 0xc7, 0xeb, 0x7c, 0x76, 0x67, 0x03, 0x05, 0x33, 0x21, 0x97, 0x8b, 0xae, 0x0a, 0x17, 0x54, + 0x20, 0x32, 0x31, 0x9e, 0x80, 0xa1, 0xdc, 0xf1, 0xa1, 0x99, 0x5d, 0x20, 0x84, 0x84, 0xdc, 0xce, + 0xa4, 0x11, 0x8d, 0x5d, 0xc5, 0x0e, 0x74, 0x8f, 0x81, 0x78, 0xa9, 0x5e, 0xf2, 0x04, 0x08, 0xfa, + 0x24, 0xc8, 0x27, 0x0d, 0x5b, 0xa5, 0xdc, 0x1d, 0x9f, 0xff, 0xef, 0x7c, 0xc8, 0xe7, 0x0f, 0x87, + 0xf6, 0x7a, 0x25, 0x4d, 0xb6, 0xaa, 0xb5, 0xd5, 0x34, 0xb2, 0x0b, 0xa1, 0xb4, 0x79, 0x78, 0x5c, + 0xe8, 0x42, 0x63, 0xea, 0xd4, 0x45, 0xad, 0x9a, 0x3e, 0x87, 0xf0, 0xb5, 0x98, 0xc9, 0x25, 0xa5, + 0x10, 0x28, 0x51, 0x49, 0x46, 0x26, 0x64, 0x7a, 0xc0, 0x31, 0xa6, 0xc7, 0x10, 0x7e, 0x13, 0xcb, + 0x46, 0x32, 0x0f, 0x93, 0xed, 0x23, 0xfd, 0x04, 0xe1, 
0xab, 0x45, 0xa3, 0xbe, 0xd2, 0x27, 0x10, + 0xb8, 0x41, 0x58, 0x72, 0xf7, 0xec, 0x41, 0xd6, 0x0e, 0xca, 0x50, 0xcc, 0x72, 0x35, 0xd7, 0x57, + 0xa5, 0x2a, 0x38, 0x32, 0xae, 0xfd, 0x95, 0xb0, 0x02, 0x3b, 0x1d, 0x71, 0x8c, 0xd3, 0xfb, 0x30, + 0xec, 0x28, 0x1a, 0x83, 0xff, 0xe1, 0x1d, 0x4f, 0x06, 0xe9, 0x17, 0x88, 0xde, 0xcb, 0xba, 0x94, + 0x86, 0x3e, 0x85, 0x68, 0xe9, 0x56, 0x33, 0x8c, 0x4c, 0xfc, 0xe9, 0xe1, 0xd9, 0x9d, 0x6e, 0x00, + 0x2e, 0x7c, 0x1e, 0x6c, 0x7e, 0x9f, 0x0c, 0xf8, 0x0e, 0xa1, 0xa7, 0x10, 0xcd, 0xdd, 0x5c, 0xc3, + 0x3c, 0x84, 0xef, 0x75, 0xf0, 0xcb, 0xa2, 0xa8, 0x71, 0xa3, 0xae, 0xa0, 0xc5, 0xd2, 0x9f, 0x1e, + 0x1c, 0xfc, 0xd7, 0xe8, 0x08, 0x86, 0x55, 0xa9, 0x3e, 0xdb, 0x72, 0xf7, 0x03, 0x3e, 0x8f, 0xab, + 0x52, 0x5d, 0x96, 0x95, 0x44, 0x49, 0xac, 0x5b, 0xc9, 0xdb, 0x49, 0x62, 0x8d, 0xd2, 0x09, 0xf8, + 0xb5, 0xf8, 0xce, 0xfc, 0x09, 0xb9, 0xbd, 0x1e, 0x76, 0xe4, 0x4e, 0xa1, 0x8f, 0x20, 0x9c, 0xeb, + 0x46, 0x59, 0x16, 0xf4, 0x21, 0xad, 0xe6, 0xba, 0x98, 0xa6, 0x62, 0x61, 0x6f, 0x17, 0xd3, 0x54, + 0x0e, 0xa8, 0x4a, 0xc5, 0xa2, 0x5e, 0xa0, 0x2a, 0x15, 0x02, 0x62, 0xcd, 0xe2, 0x7e, 0x40, 0xac, + 0xe9, 0x63, 0x88, 0x71, 0x96, 0xac, 0xd9, 0xb0, 0x0f, 0xea, 0xd4, 0xf4, 0x07, 0x81, 0x23, 0xfc, + 0xde, 0x37, 0xc2, 0xce, 0x17, 0xb2, 0xa6, 0xcf, 0xf6, 0x6e, 0x3c, 0xda, 0x3b, 0xc1, 0x8e, 0xc9, + 0x2e, 0xaf, 0x57, 0xf2, 0xe6, 0xcc, 0xe8, 0x22, 0xaf, 0xcf, 0x45, 0xfe, 0x6d, 0x17, 0x4d, 0x21, + 0x70, 0x75, 0x34, 0x02, 0x2f, 0xbf, 0x48, 0x06, 0xce, 0x00, 0x6f, 0xf3, 0x8b, 0x84, 0xb8, 0x04, + 0xcf, 0x13, 0x0f, 0x13, 0x3c, 0x4f, 0xfc, 0xf3, 0xd1, 0xe6, 0xef, 0x78, 0xb0, 0xd9, 0x8e, 0xc9, + 0xaf, 0xed, 0x98, 0xfc, 0xd9, 0x8e, 0xc9, 0xc7, 0xd8, 0x58, 0x5d, 0xcb, 0xd5, 0x6c, 0x16, 0xa1, + 0x89, 0x5f, 0xfc, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x46, 0x5a, 0xe2, 0x68, 0xf1, 0x02, 0x00, 0x00, +} + +func (m *Label) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Label) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Chunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Series) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Series) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Series) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Chunks) > 0 { + for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AggrChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggrChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggrChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Counter != nil { + { + size, err := m.Counter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Max != nil { + { + size, err := m.Max.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.Min != nil { + { + size, err := m.Min.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Sum != nil { + { + size, err := m.Sum.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Count != nil { + { + size, err := m.Count.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Raw != nil { + { + size, err := m.Raw.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.MaxTime != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.MaxTime)) + i-- + dAtA[i] = 0x10 + } + if m.MinTime != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.MinTime)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelMatcher) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Label) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Chunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Series) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Chunks) > 0 { + for _, e := range m.Chunks { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AggrChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinTime != 0 { + n += 1 + sovTypes(uint64(m.MinTime)) + } + if m.MaxTime != 0 { + n += 1 + sovTypes(uint64(m.MaxTime)) + } + if m.Raw != nil { + l = m.Raw.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Count != nil { + l = m.Count.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Sum != nil { + l = m.Sum.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Min != nil { + l = m.Min.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Max != nil { + l = m.Max.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Counter != nil { + l = m.Counter.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LabelMatcher) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Label) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { 
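+ // High bit clear: this byte ends the varint, so stop accumulating.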
+ break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Label: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Label: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
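+ // Skip past the unknown field; its raw bytes were saved above so a re-marshal round-trips them.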
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Chunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Chunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Chunk_Encoding(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Series) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Series: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Series: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunks = append(m.Chunks, AggrChunk{}) + if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AggrChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggrChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggrChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTime", wireType) + } + m.MinTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTime", wireType) + } + m.MaxTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Raw == nil { + m.Raw = &Chunk{} + } + if err := m.Raw.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Count == nil { + m.Count = &Chunk{} + } + if err := m.Count.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Sum == nil { + m.Sum = &Chunk{} + } + if err := 
m.Sum.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Min == nil { + m.Min = &Chunk{} + } + if err := m.Min.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Max == nil { + m.Max = &Chunk{} + } + if err := m.Max.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Counter == nil { + m.Counter = &Chunk{} + } + if err := m.Counter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelMatcher) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelMatcher: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelMatcher: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= LabelMatcher_Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto new file mode 100644 index 0000000000..4c358b3cc4 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/types.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; +package thanos; + +option go_package = "storepb"; + +import "gogoproto/gogo.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message Label { + string name = 1; + string value = 2; +} + +message Chunk { + enum Encoding { + XOR = 0; + } + Encoding type = 1; + bytes data = 2; +} + +message Series { + repeated Label labels = 1 [(gogoproto.nullable) = false]; + repeated AggrChunk chunks = 2 [(gogoproto.nullable) = false]; +} + +message AggrChunk { + int64 min_time = 1; + int64 max_time = 2; + + Chunk raw = 3; + Chunk count = 4; + Chunk sum = 5; + Chunk min = 6; + Chunk max = 7; + Chunk counter = 8; +} + +// Matcher specifies a rule, which can match a set of labels or not.
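+// For example (illustrative values, not part of this change), a matcher with
+// type=RE, name="job", value="prometheus.*" corresponds to the PromQL
+// selector {job=~"prometheus.*"}.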
+message LabelMatcher { + enum Type { + EQ = 0; // = + NEQ = 1; // != + RE = 2; // =~ + NRE = 3; // !~ + } + Type type = 1; + string name = 2; + string value = 3; +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go new file mode 100644 index 0000000000..38f3b5958d --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go @@ -0,0 +1,232 @@ +package store + +import ( + "context" + "math" + "sort" + + "github.com/go-kit/kit/log" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/labels" + "github.com/thanos-io/thanos/pkg/component" + "github.com/thanos-io/thanos/pkg/runutil" + "github.com/thanos-io/thanos/pkg/store/storepb" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// TSDBStore implements the store API against a local TSDB instance. +// It attaches the provided external labels to all results. It only responds with raw data +// and does not support downsampling. +type TSDBStore struct { + logger log.Logger + db *tsdb.DB + component component.SourceStoreAPI + externalLabels labels.Labels +} + +// NewTSDBStore creates a new TSDBStore. +func NewTSDBStore(logger log.Logger, _ prometheus.Registerer, db *tsdb.DB, component component.SourceStoreAPI, externalLabels labels.Labels) *TSDBStore { + if logger == nil { + logger = log.NewNopLogger() + } + return &TSDBStore{ + logger: logger, + db: db, + component: component, + externalLabels: externalLabels, + } +} + +// Info returns store information about the local TSDB instance. +func (s *TSDBStore) Info(ctx context.Context, r *storepb.InfoRequest) (*storepb.InfoResponse, error) { + res := &storepb.InfoResponse{ + Labels: make([]storepb.Label, 0, len(s.externalLabels)), + StoreType: s.component.ToProto(), + MinTime: 0, + MaxTime: math.MaxInt64, + } + if blocks := s.db.Blocks(); len(blocks) > 0 { + res.MinTime = blocks[0].Meta().MinTime + } + for _, l := range s.externalLabels { + res.Labels = append(res.Labels, storepb.Label{ + Name: l.Name, + Value: l.Value, + }) + } + + // Until we deprecate the single labels in the reply, we just duplicate + // them here for migration/compatibility purposes. + res.LabelSets = []storepb.LabelSet{} + if len(res.Labels) > 0 { + res.LabelSets = append(res.LabelSets, storepb.LabelSet{ + Labels: res.Labels, + }) + } + return res, nil +} + +// Series returns all series for a requested time range and label matchers. The returned data may +// exceed the requested time bounds. +func (s *TSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { + match, newMatchers, err := matchesExternalLabels(r.Matchers, s.externalLabels) + if err != nil { + return status.Error(codes.InvalidArgument, err.Error()) + } + + if !match { + return nil + } + + if len(newMatchers) == 0 { + return status.Error(codes.InvalidArgument, errors.New("no matchers specified (excluding external labels)").Error()) + } + + matchers, err := translateMatchers(newMatchers) + if err != nil { + return status.Error(codes.InvalidArgument, err.Error()) + } + + q, err := s.db.Querier(r.MinTime, r.MaxTime) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb querier series") + + set, err := q.Select(matchers...)
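+ // Each matching series below is re-encoded into XOR chunks and streamed to the client.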
+ if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + var respSeries storepb.Series + + for set.Next() { + series := set.At() + + // TODO(fabxc): An improvement over this trivial approach would be to directly + // use the chunks provided by TSDB in the response. + // But since the sidecar has a similar approach, optimizing here has only + // limited benefit for now. + // NOTE: XOR encoding supports a max size of 2^16 - 1 samples, so we need + // to chunk all samples into groups of no more than 2^16 - 1. + // See: https://github.com/thanos-io/thanos/pull/1038 + c, err := s.encodeChunks(series.Iterator(), math.MaxUint16) + if err != nil { + return status.Errorf(codes.Internal, "encode chunk: %s", err) + } + + respSeries.Labels = s.translateAndExtendLabels(series.Labels(), s.externalLabels) + respSeries.Chunks = append(respSeries.Chunks[:0], c...) + + if err := srv.Send(storepb.NewSeriesResponse(&respSeries)); err != nil { + return status.Error(codes.Aborted, err.Error()) + } + } + return nil +} + +func (s *TSDBStore) encodeChunks(it tsdb.SeriesIterator, maxSamplesPerChunk int) (chks []storepb.AggrChunk, err error) { + var ( + chkMint int64 + chk *chunkenc.XORChunk + app chunkenc.Appender + isNext = it.Next() + ) + + for isNext { + if chk == nil { + chk = chunkenc.NewXORChunk() + app, err = chk.Appender() + if err != nil { + return nil, err + } + chkMint, _ = it.At() + } + + app.Append(it.At()) + chkMaxt, _ := it.At() + + isNext = it.Next() + if isNext && chk.NumSamples() < maxSamplesPerChunk { + continue + } + + // Cut the chunk. + chks = append(chks, storepb.AggrChunk{ + MinTime: chkMint, + MaxTime: chkMaxt, + Raw: &storepb.Chunk{Type: storepb.Chunk_XOR, Data: chk.Bytes()}, + }) + chk = nil + } + if it.Err() != nil { + return nil, errors.Wrap(it.Err(), "read TSDB series") + } + + return chks, nil +} + +// translateAndExtendLabels transforms a metric's labels into a protobuf label set. It additionally +// attaches the given labels to it, overwriting existing ones on collision. +func (s *TSDBStore) translateAndExtendLabels(m, extend labels.Labels) []storepb.Label { + lset := make([]storepb.Label, 0, len(m)+len(extend)) + + for _, l := range m { + if extend.Get(l.Name) != "" { + continue + } + lset = append(lset, storepb.Label{ + Name: l.Name, + Value: l.Value, + }) + } + for _, l := range extend { + lset = append(lset, storepb.Label{ + Name: l.Name, + Value: l.Value, + }) + } + sort.Slice(lset, func(i, j int) bool { + return lset[i].Name < lset[j].Name + }) + return lset +} + +// LabelNames returns all known label names. +func (s *TSDBStore) LabelNames(ctx context.Context, _ *storepb.LabelNamesRequest) ( + *storepb.LabelNamesResponse, error, +) { + q, err := s.db.Querier(math.MinInt64, math.MaxInt64) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb querier label names") + + res, err := q.LabelNames() + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + return &storepb.LabelNamesResponse{Names: res}, nil +} + +// LabelValues returns all known label values for a given label name.
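+// Like LabelNames, it queries over the widest possible time range, so values from all blocks are reported.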
+func (s *TSDBStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) ( + *storepb.LabelValuesResponse, error, +) { + q, err := s.db.Querier(math.MinInt64, math.MaxInt64) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + defer runutil.CloseWithLogOnErr(s.logger, q, "close tsdb querier label values") + + res, err := q.LabelValues(r.Label) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + return &storepb.LabelValuesResponse{Values: res}, nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go b/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go new file mode 100644 index 0000000000..7a7d4f9509 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/strutil/merge.go @@ -0,0 +1,57 @@ +package strutil + +import ( + "sort" + "strings" +) + +// MergeSlices merges a set of sorted string slices into a single one +// while removing all duplicates. +func MergeSlices(a ...[]string) []string { + if len(a) == 0 { + return nil + } + if len(a) == 1 { + return a[0] + } + l := len(a) / 2 + return mergeTwoStringSlices(MergeSlices(a[:l]...), MergeSlices(a[l:]...)) +} + +// MergeUnsortedSlices behaves like MergeSlices, but the input slices are validated +// for sortedness and are sorted first if they are not ordered yet. +func MergeUnsortedSlices(a ...[]string) []string { + for _, s := range a { + if !sort.StringsAreSorted(s) { + sort.Strings(s) + } + } + return MergeSlices(a...) +} + +func mergeTwoStringSlices(a, b []string) []string { + maxl := len(a) + if len(b) > len(a) { + maxl = len(b) + } + res := make([]string, 0, maxl*10/9) + + for len(a) > 0 && len(b) > 0 { + d := strings.Compare(a[0], b[0]) + + if d == 0 { + res = append(res, a[0]) + a, b = a[1:], b[1:] + } else if d < 0 { + res = append(res, a[0]) + a = a[1:] + } else if d > 0 { + res = append(res, b[0]) + b = b[1:] + } + } + // Append all remaining elements. + res = append(res, a...) + res = append(res, b...) + return res +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/tracing/grpc.go b/vendor/github.com/thanos-io/thanos/pkg/tracing/grpc.go new file mode 100644 index 0000000000..ee13810ca7 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/tracing/grpc.go @@ -0,0 +1,37 @@ +package tracing + +import ( + "context" + + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" + opentracing "github.com/opentracing/opentracing-go" + "google.golang.org/grpc" +) + +// UnaryClientInterceptor returns a new unary client interceptor for OpenTracing. +func UnaryClientInterceptor(tracer opentracing.Tracer) grpc.UnaryClientInterceptor { + return grpc_opentracing.UnaryClientInterceptor(grpc_opentracing.WithTracer(tracer)) +} + +// StreamClientInterceptor returns a new streaming client interceptor for OpenTracing. +func StreamClientInterceptor(tracer opentracing.Tracer) grpc.StreamClientInterceptor { + return grpc_opentracing.StreamClientInterceptor(grpc_opentracing.WithTracer(tracer)) +} + +// UnaryServerInterceptor returns a new unary server interceptor for OpenTracing and injects the given tracer.
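+// The tracer is also stored into the request context, so downstream handlers can start child spans via StartSpan.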
+func UnaryServerInterceptor(tracer opentracing.Tracer) grpc.UnaryServerInterceptor { + return func(parentCtx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return grpc_opentracing.UnaryServerInterceptor(grpc_opentracing.WithTracer(tracer))(ContextWithTracer(parentCtx, tracer), req, info, handler) + } +} + +// StreamServerInterceptor returns a new streaming server interceptor for OpenTracing and injects the given tracer. +func StreamServerInterceptor(tracer opentracing.Tracer) grpc.StreamServerInterceptor { + return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + wrappedStream := grpc_middleware.WrapServerStream(stream) + wrappedStream.WrappedContext = ContextWithTracer(stream.Context(), tracer) + + return grpc_opentracing.StreamServerInterceptor(grpc_opentracing.WithTracer(tracer))(srv, wrappedStream, info, handler) + } +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/tracing/http.go b/vendor/github.com/thanos-io/thanos/pkg/tracing/http.go new file mode 100644 index 0000000000..9c17496d6d --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/tracing/http.go @@ -0,0 +1,99 @@ +package tracing + +import ( + "fmt" + "net" + "net/http" + "strconv" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" +) + +// HTTPMiddleware returns an HTTP handler that injects the given tracer and starts a new server span. If a client span is +// propagated over the wire, we include it as our parent. +func HTTPMiddleware(tracer opentracing.Tracer, name string, logger log.Logger, next http.Handler) http.HandlerFunc { + operationName := fmt.Sprintf("/%s HTTP[server]", name) + + return func(w http.ResponseWriter, r *http.Request) { + var span opentracing.Span + wireContext, err := tracer.Extract( + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(r.Header), + ) + if err != nil && err != opentracing.ErrSpanContextNotFound { + level.Error(logger).Log("msg", "failed to extract tracer from request", "operationName", operationName, "err", err) + } + + span = tracer.StartSpan(operationName, ext.RPCServerOption(wireContext)) + ext.HTTPMethod.Set(span, r.Method) + ext.HTTPUrl.Set(span, r.URL.String()) + + // If the client specified the ForceTracingBaggageKey header, ensure the span includes it to force tracing. + span.SetBaggageItem(ForceTracingBaggageKey, r.Header.Get(ForceTracingBaggageKey)) + + if t, ok := tracer.(Tracer); ok { + if traceID, ok := t.GetTraceIDFromSpanContext(span.Context()); ok { + w.Header().Set(traceIDResponseHeader, traceID) + } + } + + next.ServeHTTP(w, r.WithContext(opentracing.ContextWithSpan(ContextWithTracer(r.Context(), tracer), span))) + span.Finish() + } +} + +type tripperware struct { + logger log.Logger + next http.RoundTripper +} + +func (t *tripperware) RoundTrip(r *http.Request) (*http.Response, error) { + tracer := tracerFromContext(r.Context()) + if tracer == nil { + // No tracer, programmatic mistake. + level.Warn(t.logger).Log("msg", "Tracer not found in context.") + return t.next.RoundTrip(r) + } + + span := opentracing.SpanFromContext(r.Context()) + if span == nil { + // No span.
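+ // Without a span there is nothing to annotate; pass the request through unchanged.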
+ return t.next.RoundTrip(r) + } + + ext.HTTPMethod.Set(span, r.Method) + ext.HTTPUrl.Set(span, r.URL.String()) + host, portString, err := net.SplitHostPort(r.URL.Host) + if err == nil { + ext.PeerHostname.Set(span, host) + if port, err := strconv.Atoi(portString); err == nil { + ext.PeerPort.Set(span, uint16(port)) + } + } else { + ext.PeerHostname.Set(span, r.URL.Host) + } + + // An injection failure can only be logged; the request proceeds regardless. + if err = tracer.Inject( + span.Context(), + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(r.Header), + ); err != nil { + level.Warn(t.logger).Log("msg", "failed to inject trace", "err", err) + } + + resp, err := t.next.RoundTrip(r) + return resp, err +} + +// HTTPTripperware returns an HTTP RoundTripper that treats the span found in the request context as the client child span and injects it into the wire. +// NOTE: it assumes a tracer is present in the request context. Also, it is the caller's responsibility to finish the span. +func HTTPTripperware(logger log.Logger, next http.RoundTripper) http.RoundTripper { + return &tripperware{ + logger: logger, + next: next, + } +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/tracing/tracing.go b/vendor/github.com/thanos-io/thanos/pkg/tracing/tracing.go new file mode 100644 index 0000000000..f3031b48a5 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/tracing/tracing.go @@ -0,0 +1,52 @@ +package tracing + +import ( + "context" + + "github.com/opentracing/opentracing-go" +) + +// ForceTracingBaggageKey - force sampling header. +const ForceTracingBaggageKey = "X-Thanos-Force-Tracing" + +// traceIDResponseHeader - Trace ID response header. +const traceIDResponseHeader = "X-Thanos-Trace-Id" + +type contextKey struct{} + +var tracerKey = contextKey{} + +// Tracer is the interface that provides the GetTraceIDFromSpanContext method. +type Tracer interface { + GetTraceIDFromSpanContext(ctx opentracing.SpanContext) (string, bool) +} + +// ContextWithTracer returns a new `context.Context` that holds a reference to the given opentracing.Tracer. +func ContextWithTracer(ctx context.Context, tracer opentracing.Tracer) context.Context { + return context.WithValue(ctx, tracerKey, tracer) +} + +func tracerFromContext(ctx context.Context) opentracing.Tracer { + val := ctx.Value(tracerKey) + if sp, ok := val.(opentracing.Tracer); ok { + return sp + } + return nil +} + +// StartSpan starts and returns a span named `operationName`, as a child of any span found within the given context, +// using the tracer propagated in that context. +func StartSpan(ctx context.Context, operationName string, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) { + tracer := tracerFromContext(ctx) + if tracer == nil { + // No tracer found; return a noop span. + return opentracing.NoopTracer{}.StartSpan(operationName), ctx + } + + var span opentracing.Span + if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { + opts = append(opts, opentracing.ChildOf(parentSpan.Context())) + } + span = tracer.StartSpan(operationName, opts...)
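+ // Return a context carrying the new span so that nested StartSpan calls chain off it.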
+ return span, opentracing.ContextWithSpan(ctx, span) +} diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml index 58957222a3..0f3769e5fa 100644 --- a/vendor/go.uber.org/atomic/.travis.yml +++ b/vendor/go.uber.org/atomic/.travis.yml @@ -3,9 +3,13 @@ language: go go_import_path: go.uber.org/atomic go: - - 1.7 - - 1.8 - - 1.9 + - 1.11.x + - 1.12.x + +matrix: + include: + - go: 1.12.x + env: NO_TEST=yes LINT=yes cache: directories: @@ -15,9 +19,9 @@ install: - make install_ci script: - - make test_ci - - scripts/test-ubergo.sh - - make lint + - test -n "$NO_TEST" || make test_ci + - test -n "$NO_TEST" || scripts/test-ubergo.sh + - test -z "$LINT" || make install_lint lint after_success: - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile index dfc63d9db4..1ef263075d 100644 --- a/vendor/go.uber.org/atomic/Makefile +++ b/vendor/go.uber.org/atomic/Makefile @@ -1,24 +1,13 @@ -PACKAGES := $(shell glide nv) # Many Go tools take file globs or directories as arguments instead of packages. PACKAGE_FILES ?= *.go - -# The linting tools evolve with each Go version, so run them only on the latest -# stable release. -GO_VERSION := $(shell go version | cut -d " " -f 3) -GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) -LINTABLE_MINOR_VERSIONS := 7 8 -ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) -SHOULD_LINT := true -endif - - +# For pre go1.6 export GO15VENDOREXPERIMENT=1 .PHONY: build build: - go build -i $(PACKAGES) + go build -i ./... .PHONY: install @@ -29,7 +18,7 @@ install: .PHONY: test test: - go test -cover -race $(PACKAGES) + go test -cover -race ./... .PHONY: install_ci @@ -37,26 +26,24 @@ install_ci: install go get github.com/wadey/gocovmerge go get github.com/mattn/goveralls go get golang.org/x/tools/cmd/cover -ifdef SHOULD_LINT - go get github.com/golang/lint/golint -endif + +.PHONY: install_lint +install_lint: + go get golang.org/x/lint/golint + .PHONY: lint lint: -ifdef SHOULD_LINT @rm -rf lint.log @echo "Checking formatting..." @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log @echo "Checking vet..." - @$(foreach dir,$(PACKAGE_FILES),go tool vet $(dir) 2>&1 | tee -a lint.log;) + @go vet ./... 2>&1 | tee -a lint.log @echo "Checking lint..." - @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) + @golint $$(go list ./...) 2>&1 | tee -a lint.log @echo "Checking for unresolved FIXMEs..." @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log @[ ! -s lint.log ] -else - @echo "Skipping linters on" $(GO_VERSION) -endif .PHONY: test_ci diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md index 6505abf65c..62eb8e5760 100644 --- a/vendor/go.uber.org/atomic/README.md +++ b/vendor/go.uber.org/atomic/README.md @@ -23,13 +23,13 @@ See the [documentation][doc] for a complete API specification. ## Development Status Stable. -
+___ Released under the [MIT License](LICENSE.txt). [doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg [doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://travis-ci.org/uber-go/atomic.svg?branch=master -[ci]: https://travis-ci.org/uber-go/atomic +[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master +[ci]: https://travis-ci.com/uber-go/atomic [cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/atomic [reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go new file mode 100644 index 0000000000..0489d19bad --- /dev/null +++ b/vendor/go.uber.org/atomic/error.go @@ -0,0 +1,55 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper around Value for errors +type Error struct{ v Value } + +// errorHolder is non-nil holder for error object. +// atomic.Value panics on saving nil object, so err object needs to be +// wrapped with valid object first. +type errorHolder struct{ err error } + +// NewError creates new atomic error object +func NewError(err error) *Error { + e := &Error{} + if err != nil { + e.Store(err) + } + return e +} + +// Load atomically loads the wrapped error +func (e *Error) Load() error { + v := e.v.Load() + if v == nil { + return nil + } + + eh := v.(errorHolder) + return eh.err +} + +// Store atomically stores error. +// NOTE: a holder object is allocated on each Store call. +func (e *Error) Store(err error) { + e.v.Store(errorHolder{err: err}) +} diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go new file mode 100644 index 0000000000..b423feaea9 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package argon2 implements the key derivation function Argon2. +// Argon2 was selected as the winner of the Password Hashing Competition and can +// be used to derive cryptographic keys from passwords. +// +// For a detailed specification of Argon2 see [1]. +// +// If you aren't sure which function you need, use Argon2id (IDKey) and +// the parameter recommendations for your scenario. 
+// +// +// Argon2i +// +// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. +// It uses data-independent memory access, which is preferred for password +// hashing and password-based key derivation. Argon2i requires more passes over +// memory than Argon2id to protect from trade-off attacks. The recommended +// parameters (taken from [2]) for non-interactive operations are time=3 and to +// use the maximum available memory. +// +// +// Argon2id +// +// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining +// Argon2i and Argon2d. It uses data-independent memory access for the first +// half of the first iteration over the memory and data-dependent memory access +// for the rest. Argon2id is side-channel resistant and provides better +// brute-force cost savings due to time-memory tradeoffs than Argon2i. The recommended +// parameters for non-interactive operations (taken from [2]) are time=1 and to +// use the maximum available memory. +// +// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf +// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3 +package argon2 + +import ( + "encoding/binary" + "sync" + + "golang.org/x/crypto/blake2b" +) + +// The Argon2 version implemented by this package. +const Version = 0x13 + +const ( + argon2d = iota + argon2i + argon2id +) + +// Key derives a key from the password, salt, and cost parameters using Argon2i +// returning a byte slice of length keyLen that can be used as a cryptographic +// key. The CPU cost and parallelism degree must be greater than zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) +// +// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. +// If using that amount of memory (32 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be +// adjusted to the number of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. +func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) +} + +// IDKey derives a key from the password, salt, and cost parameters using +// Argon2id returning a byte slice of length keyLen that can be used as a +// cryptographic key. The CPU cost and parallelism degree must be greater than +// zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) +// +// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. +// If using that amount of memory (64 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be +// adjusted to the number of available CPUs.
The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. +func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) +} + +func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + if time < 1 { + panic("argon2: number of rounds too small") + } + if threads < 1 { + panic("argon2: parallelism degree too low") + } + h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) + + memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) + if memory < 2*syncPoints*uint32(threads) { + memory = 2 * syncPoints * uint32(threads) + } + B := initBlocks(&h0, memory, uint32(threads)) + processBlocks(B, time, memory, uint32(threads), mode) + return extractKey(B, memory, uint32(threads), keyLen) +} + +const ( + blockLength = 128 + syncPoints = 4 +) + +type block [blockLength]uint64 + +func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { + var ( + h0 [blake2b.Size + 8]byte + params [24]byte + tmp [4]byte + ) + + b2, _ := blake2b.New512(nil) + binary.LittleEndian.PutUint32(params[0:4], threads) + binary.LittleEndian.PutUint32(params[4:8], keyLen) + binary.LittleEndian.PutUint32(params[8:12], memory) + binary.LittleEndian.PutUint32(params[12:16], time) + binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) + binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) + b2.Write(params[:]) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) + b2.Write(tmp[:]) + b2.Write(password) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) + b2.Write(tmp[:]) + b2.Write(salt) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) + b2.Write(tmp[:]) + b2.Write(key) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) + b2.Write(tmp[:]) + b2.Write(data) + b2.Sum(h0[:0]) + return h0 +} + +func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block { + var block0 [1024]byte + B := make([]block, memory) + for lane := uint32(0); lane < threads; lane++ { + j := lane * (memory / threads) + binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) + blake2bHash(block0[:], h0[:]) + for i := range B[j+0] { + B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) + blake2bHash(block0[:], h0[:]) + for i := range B[j+1] { + B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + } + return B +} + +func processBlocks(B []block, time, memory, threads uint32, mode int) { + lanes := memory / threads + segments := lanes / syncPoints + + processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { + var addresses, in, zero block + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[0] = uint64(n) + in[1] = uint64(lane) + in[2] = uint64(slice) + in[3] = uint64(memory) + in[4] = uint64(time) + in[5] = uint64(mode) + } + + index := uint32(0) + if n == 0 && slice == 0 { + index = 2 // we have already generated the first two blocks + if mode == argon2i || mode == argon2id { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + } + + offset := lane*lanes + slice*segments + index + var random uint64 + for index < 
segments { + prev := offset - 1 + if index == 0 && slice == 0 { + prev += lanes // last block in lane + } + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + if index%blockLength == 0 { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + random = addresses[index%blockLength] + } else { + random = B[prev][0] + } + newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index) + processBlockXOR(&B[offset], &B[prev], &B[newOffset]) + index, offset = index+1, offset+1 + } + wg.Done() + } + + for n := uint32(0); n < time; n++ { + for slice := uint32(0); slice < syncPoints; slice++ { + var wg sync.WaitGroup + for lane := uint32(0); lane < threads; lane++ { + wg.Add(1) + go processSegment(n, slice, lane, &wg) + } + wg.Wait() + } + } + +} + +func extractKey(B []block, memory, threads, keyLen uint32) []byte { + lanes := memory / threads + for lane := uint32(0); lane < threads-1; lane++ { + for i, v := range B[(lane*lanes)+lanes-1] { + B[memory-1][i] ^= v + } + } + + var block [1024]byte + for i, v := range B[memory-1] { + binary.LittleEndian.PutUint64(block[i*8:], v) + } + key := make([]byte, keyLen) + blake2bHash(key, block[:]) + return key +} + +func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { + refLane := uint32(rand>>32) % threads + if n == 0 && slice == 0 { + refLane = lane + } + m, s := 3*segments, ((slice+1)%syncPoints)*segments + if lane == refLane { + m += index + } + if n == 0 { + m, s = slice*segments, 0 + if slice == 0 || lane == refLane { + m += index + } + } + if index == 0 || lane == refLane { + m-- + } + return phi(rand, uint64(m), uint64(s), refLane, lanes) +} + +func phi(rand, m, s uint64, lane, lanes uint32) uint32 { + p := rand & 0xFFFFFFFF + p = (p * p) >> 32 + p = (p * m) >> 32 + return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) +} diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go new file mode 100644 index 0000000000..10f46948dc --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blake2b.go @@ -0,0 +1,53 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +import ( + "encoding/binary" + "hash" + + "golang.org/x/crypto/blake2b" +) + +// blake2bHash computes an arbitrary long hash value of in +// and writes the hash to out. 
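+// Outputs longer than 64 bytes are produced with the chained variable-length construction from the Argon2 spec.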
+func blake2bHash(out []byte, in []byte) { + var b2 hash.Hash + if n := len(out); n < blake2b.Size { + b2, _ = blake2b.New(n, nil) + } else { + b2, _ = blake2b.New512(nil) + } + + var buffer [blake2b.Size]byte + binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) + b2.Write(buffer[:4]) + b2.Write(in) + + if len(out) <= blake2b.Size { + b2.Sum(out[:0]) + return + } + + outLen := len(out) + b2.Sum(buffer[:0]) + b2.Reset() + copy(out, buffer[:32]) + out = out[32:] + for len(out) > blake2b.Size { + b2.Write(buffer[:]) + b2.Sum(buffer[:0]) + copy(out, buffer[:32]) + out = out[32:] + b2.Reset() + } + + if outLen%blake2b.Size > 0 { // outLen > 64 + r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 + b2, _ = blake2b.New(outLen-32*r, nil) + } + b2.Write(buffer[:]) + b2.Sum(out[:0]) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go new file mode 100644 index 0000000000..2fc1ec0312 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -0,0 +1,60 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package argon2 + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func mixBlocksSSE2(out, a, b, c *block) + +//go:noescape +func xorBlocksSSE2(out, a, b, c *block) + +//go:noescape +func blamkaSSE4(b *block) + +func processBlockSSE(out, in1, in2 *block, xor bool) { + var t block + mixBlocksSSE2(&t, in1, in2, &t) + if useSSE4 { + blamkaSSE4(&t) + } else { + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + } + if xor { + xorBlocksSSE2(out, in1, in2, &t) + } else { + mixBlocksSSE2(out, in1, in2, &t) + } +} + +func processBlock(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s new file mode 100644 index 0000000000..74a6e7332a --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -0,0 +1,243 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
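+// +// This file provides the amd64 assembly behind blamka_amd64.go: +// mixBlocksSSE2 and xorBlocksSSE2 XOR 1 KiB blocks together, and +// blamkaSSE4 applies the BlaMka rounds to the rows and then the columns +// of a block.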
+ +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFD $0xB1, v6, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + PSHUFB c40, v2; \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFB c48, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + MOVO v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v7, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + PSHUFB c40, v3; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFB c48, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + MOVO v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG_0(block, off) \ + MOVOU 8*(off+0)(block), X0; \ + MOVOU 8*(off+2)(block), X1; \ + MOVOU 8*(off+4)(block), X2; \ + MOVOU 8*(off+6)(block), X3; \ + MOVOU 8*(off+8)(block), X4; \ + MOVOU 8*(off+10)(block), X5; \ + MOVOU 8*(off+12)(block), X6; \ + MOVOU 8*(off+14)(block), X7 + +#define STORE_MSG_0(block, off) \ + MOVOU X0, 8*(off+0)(block); \ + MOVOU X1, 8*(off+2)(block); \ + MOVOU X2, 8*(off+4)(block); \ + MOVOU X3, 8*(off+6)(block); \ + MOVOU X4, 8*(off+8)(block); \ + MOVOU X5, 8*(off+10)(block); \ + MOVOU X6, 8*(off+12)(block); \ + MOVOU X7, 8*(off+14)(block) + +#define LOAD_MSG_1(block, off) \ + MOVOU 8*off+0*8(block), X0; \ + MOVOU 8*off+16*8(block), X1; \ + MOVOU 8*off+32*8(block), X2; \ + MOVOU 8*off+48*8(block), X3; \ + MOVOU 8*off+64*8(block), X4; \ + MOVOU 8*off+80*8(block), X5; \ + MOVOU 8*off+96*8(block), X6; \ + MOVOU 8*off+112*8(block), X7 + +#define STORE_MSG_1(block, off) \ + MOVOU X0, 8*off+0*8(block); \ + MOVOU X1, 8*off+16*8(block); \ + MOVOU X2, 8*off+32*8(block); \ + MOVOU X3, 8*off+48*8(block); \ + MOVOU X4, 8*off+64*8(block); \ + MOVOU X5, 8*off+80*8(block); \ + MOVOU X6, 8*off+96*8(block); \ + MOVOU X7, 8*off+112*8(block) + +#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ + LOAD_MSG_0(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, 
X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_0(block, off) + +#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ + LOAD_MSG_1(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_1(block, off) + +// func blamkaSSE4(b *block) +TEXT ·blamkaSSE4(SB), 4, $0-8 + MOVQ b+0(FP), AX + + MOVOU ·c40<>(SB), X10 + MOVOU ·c48<>(SB), X11 + + BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) + + BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) + RET + +// func mixBlocksSSE2(out, a, b, c *block) +TEXT ·mixBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ c+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + PXOR X1, X0 + PXOR X2, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET + +// func xorBlocksSSE2(out, a, b, c *block) +TEXT ·xorBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ c+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + MOVOU 0(DX), X3 + PXOR X1, X0 + PXOR X2, X0 + PXOR X3, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go new file mode 100644 index 0000000000..a481b2243f --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
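+// +// BlaMka is BLAKE2b's G function with each modular addition a+b replaced by +// a + b + 2*lo(a)*lo(b), where lo(x) denotes the low 32 bits of x; the extra +// multiplication is intended to raise the cost of hardware attacks on Argon2.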
+ +package argon2 + +var useSSE4 bool + +func processBlockGeneric(out, in1, in2 *block, xor bool) { + var t block + for i := range t { + t[i] = in1[i] ^ in2[i] + } + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + if xor { + for i := range t { + out[i] ^= in1[i] ^ in2[i] ^ t[i] + } + } else { + for i := range t { + out[i] = in1[i] ^ in2[i] ^ t[i] + } + } +} + +func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { + v00, v01, v02, v03 := *t00, *t01, *t02, *t03 + v04, v05, v06, v07 := *t04, *t05, *t06, *t07 + v08, v09, v10, v11 := *t08, *t09, *t10, *t11 + v12, v13, v14, v15 := *t12, *t13, *t14, *t15 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>32 | v12<<32 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>24 | v04<<40 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>16 | v12<<48 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>63 | v04<<1 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>32 | v13<<32 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>24 | v05<<40 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>16 | v13<<48 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>63 | v05<<1 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>32 | v14<<32 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>24 | v06<<40 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>16 | v14<<48 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>63 | v06<<1 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>32 | v15<<32 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>24 | v07<<40 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>16 | v15<<48 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>63 | v07<<1 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>32 | v15<<32 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>24 | v05<<40 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>16 | v15<<48 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>63 | v05<<1 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>32 | v12<<32 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>24 | v06<<40 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>16 | v12<<48 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>63 | v06<<1 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>32 | v13<<32 + v08 += v13 + 
2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>24 | v07<<40 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>16 | v13<<48 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>63 | v07<<1 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>32 | v14<<32 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>24 | v04<<40 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>16 | v14<<48 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>63 | v04<<1 + + *t00, *t01, *t02, *t03 = v00, v01, v02, v03 + *t04, *t05, *t06, *t07 = v04, v05, v06, v07 + *t08, *t09, *t10, *t11 = v08, v09, v10, v11 + *t12, *t13, *t14, *t15 = v12, v13, v14, v15 +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go new file mode 100644 index 0000000000..baf7b551da --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package argon2 + +func processBlock(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 0000000000..c160e1a4e3 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,289 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xb. +// +// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf +// and for BLAKE2Xb see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). +// If you need a secret-key MAC (message authentication code), use the New512 +// function with a non-nil key. +// +// BLAKE2X is a construction to compute hash values larger than 64 bytes. It +// can produce hash values between 0 and 4 GiB. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. 
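+// The digest length is mixed into the initialization vector (see checkSum), +// so the result is a native BLAKE2b-384 value rather than a truncated +// BLAKE2b-512 digest.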
+func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. +// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). +// When the key is nil, the returned hash.Hash implements BinaryMarshaler +// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. +func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +const ( + magic = "b2b" + marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + if d.keyLen != 0 { + return nil, errors.New("crypto/blake2b: cannot marshal MACs") + } + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + for i := 0; i < 8; i++ { + b = appendUint64(b, d.h[i]) + } + b = appendUint64(b, d.c[0]) + b = appendUint64(b, d.c[1]) + // Maximum value for size is 64 + b = append(b, byte(d.size)) + b = append(b, d.block[:]...) 
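+ // Maximum value for offset is BlockSize (128), which also fits in a byte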
+ b = append(b, byte(d.offset)) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("crypto/blake2b: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("crypto/blake2b: invalid hash state size") + } + b = b[len(magic):] + for i := 0; i < 8; i++ { + b, d.h[i] = consumeUint64(b) + } + b, d.c[0] = consumeUint64(b) + b, d.c[1] = consumeUint64(b) + d.size = int(b[0]) + b = b[1:] + copy(d.block[:], b[:BlockSize]) + b = b[BlockSize:] + d.offset = int(b[0]) + return nil +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) +} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint64(hash[8*i:], v) + } +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.BigEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func appendUint32(b []byte, x uint32) []byte { + var a [4]byte + binary.BigEndian.PutUint32(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := binary.BigEndian.Uint64(b) + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + x := binary.BigEndian.Uint32(b) + return b[4:], x +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 0000000000..4d31dd0fdc --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
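+// +// On Go 1.7+ this file selects the widest vector extension available at run +// time, preferring AVX2 over AVX over SSE4.1, with hashBlocksGeneric as the +// portable fallback.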
+ +// +build go1.7,amd64,!gccgo,!appengine + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useAVX2 = cpu.X86.HasAVX2 + useAVX = cpu.X86.HasAVX + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + switch { + case useAVX2: + hashBlocksAVX2(h, c, flag, blocks) + case useAVX: + hashBlocksAVX(h, c, flag, blocks) + case useSSE4: + hashBlocksSSE4(h, c, flag, blocks) + default: + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 0000000000..5593b1b3dc --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,750 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7,amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 +#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 +#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e +#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 +#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; 
BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 + +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y1_Y1; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y3_Y3; \ + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y3_Y3; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y1_Y1 + +#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E +#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 +#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E +#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 +#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E + +#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n +#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n +#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n +#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n +#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n + +#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 +#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 +#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 +#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 +#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 + +#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 + +#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 +#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 + +// load msg: Y12 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y12, Y12 + +// load msg: Y13 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ + VMOVQ_SI_X13(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X13(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y13, Y13 + +// load msg: Y14 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 
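+// (the parameterized VMOVQ_SI_X*/VPINSRQ_1_SI_X* macros hard-code a one-byte +// displacement, while offset zero needs the displacement-free addressing +// form provided by the dedicated *_0 byte sequences)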
+#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ + VMOVQ_SI_X14(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X14(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y14, Y14 + +// load msg: Y15 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ + VMOVQ_SI_X15(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X15(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X11(6*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ + LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ + LOAD_MSG_AVX2_Y15(9, 11, 13, 15) + +#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ + LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ + LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ + VMOVQ_SI_X11(11*8); \ + VPSHUFD $0x4E, 0*8(SI), X14; \ + VPINSRQ_1_SI_X11(5*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(12, 2, 7, 3) + +#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ + VMOVQ_SI_X11(5*8); \ + VMOVDQU 11*8(SI), X12; \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + VMOVQ_SI_X13(8*8); \ + VMOVQ_SI_X11(2*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X11(13*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ + LOAD_MSG_AVX2_Y15(14, 6, 1, 4) + +#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ + LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ + LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ + LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ + VMOVQ_SI_X15(6*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X15(10*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ + LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X13(7*8); \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ + LOAD_MSG_AVX2_Y15(1, 12, 8, 13) + +#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ + LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ + LOAD_MSG_AVX2_Y15(13, 5, 14, 9) + +#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ + LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ + LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ + VMOVQ_SI_X14_0; \ + VPSHUFD $0x4E, 8*8(SI), X11; \ + VPINSRQ_1_SI_X14(6*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(7, 3, 2, 11) + +#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ + LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ + LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ + LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ + VMOVQ_SI_X15_0; \ + VMOVQ_SI_X11(6*8); \ + VPINSRQ_1_SI_X15(4*8); \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ + VMOVQ_SI_X12(6*8); \ + VMOVQ_SI_X11(11*8); \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ + VMOVQ_SI_X11(1*8); \ + VMOVDQU 12*8(SI), X14; \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + VMOVQ_SI_X15(2*8); \ + VMOVDQU 4*8(SI), X11; \ + VPINSRQ_1_SI_X15(7*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ + LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ + VMOVQ_SI_X13(2*8); \ + VPSHUFD $0x4E, 5*8(SI), X11; \ + 
VPINSRQ_1_SI_X13(4*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ + VMOVQ_SI_X15(11*8); \ + VMOVQ_SI_X11(12*8); \ + VPINSRQ_1_SI_X15(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, DX + MOVQ SP, R9 + ADDQ $31, R9 + ANDQ $~31, R9 + MOVQ R9, SP + + MOVQ CX, 16(SP) + XORQ CX, CX + MOVQ CX, 24(SP) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(SP) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(SP) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(SP) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(SP), Y7, Y3 + + LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() + VMOVDQA Y12, 32(SP) + VMOVDQA Y13, 64(SP) + VMOVDQA Y14, 96(SP) + VMOVDQA Y15, 128(SP) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() + VMOVDQA Y12, 160(SP) + VMOVDQA Y13, 192(SP) + VMOVDQA Y14, 224(SP) + VMOVDQA Y15, 256(SP) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5) + ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VMOVDQU Y8, 0(AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + + MOVQ DX, SP + RET + +#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB +#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF +#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD +#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE + +#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF +#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; 
BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF + +#define SHUFFLE_AVX() \ + VMOVDQA X6, X13; \ + VMOVDQA X2, X14; \ + VMOVDQA X4, X6; \ + VPUNPCKLQDQ_X13_X13_X15; \ + VMOVDQA X5, X4; \ + VMOVDQA X6, X5; \ + VPUNPCKHQDQ_X15_X7_X6; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X13_X7; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VPUNPCKHQDQ_X15_X2_X2; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X3_X3; \ + +#define SHUFFLE_AVX_INV() \ + VMOVDQA X2, X13; \ + VMOVDQA X4, X14; \ + VPUNPCKLQDQ_X2_X2_X15; \ + VMOVDQA X5, X4; \ + VPUNPCKHQDQ_X15_X3_X2; \ + VMOVDQA X14, X5; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VMOVDQA X6, X14; \ + VPUNPCKHQDQ_X15_X13_X3; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X6_X6; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X7_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) +// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 +#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X13(i2*8); \ + VMOVQ_SI_X14(i4*8); \ + VMOVQ_SI_X15(i6*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X13(i3*8); \ + VPINSRQ_1_SI_X14(i5*8); \ + VPINSRQ_1_SI_X15(i7*8) + +// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) +#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(1*8); \ + VMOVQ_SI_X15(5*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X13(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(7*8) + +// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) +#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ + VPSHUFD $0x4E, 0*8(SI), X12; \ + VMOVQ_SI_X13(11*8); \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(7*8); \ + VPINSRQ_1_SI_X13(5*8); \ + VPINSRQ_1_SI_X14(2*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) +#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ + VMOVDQU 11*8(SI), X12; \ + VMOVQ_SI_X13(5*8); \ + VMOVQ_SI_X14(8*8); \ + VMOVQ_SI_X15(2*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14_0; \ + VPINSRQ_1_SI_X15(13*8) + +// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) +#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(6*8); \ + VMOVQ_SI_X15_0; \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) +#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ + VMOVQ_SI_X12(9*8); \ + VMOVQ_SI_X13(2*8); \ + VMOVQ_SI_X14_0; \ + VMOVQ_SI_X15(4*8); \ + 
VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VPINSRQ_1_SI_X15(15*8) + +// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) +#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(11*8); \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X13(8*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) +#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ + MOVQ 0*8(SI), X12; \ + VPSHUFD $0x4E, 8*8(SI), X13; \ + MOVQ 7*8(SI), X14; \ + MOVQ 2*8(SI), X15; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(11*8) + +// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) +#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ + MOVQ 6*8(SI), X12; \ + MOVQ 11*8(SI), X13; \ + MOVQ 15*8(SI), X14; \ + MOVQ 3*8(SI), X15; \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X14(9*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) +#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ + MOVQ 5*8(SI), X12; \ + MOVQ 8*8(SI), X13; \ + MOVQ 0*8(SI), X14; \ + MOVQ 6*8(SI), X15; \ + VPINSRQ_1_SI_X12(15*8); \ + VPINSRQ_1_SI_X13(2*8); \ + VPINSRQ_1_SI_X14(4*8); \ + VPINSRQ_1_SI_X15(10*8) + +// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) +#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ + VMOVDQU 12*8(SI), X12; \ + MOVQ 1*8(SI), X13; \ + MOVQ 2*8(SI), X14; \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VMOVDQU 4*8(SI), X15 + +// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) +#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ + MOVQ 15*8(SI), X12; \ + MOVQ 3*8(SI), X13; \ + MOVQ 11*8(SI), X14; \ + MOVQ 12*8(SI), X15; \ + VPINSRQ_1_SI_X12(9*8); \ + VPINSRQ_1_SI_X13(13*8); \ + VPINSRQ_1_SI_X14(14*8); \ + VPINSRQ_1_SI_X15_0 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + VMOVDQU ·AVX_c40<>(SB), X0 + VMOVDQU ·AVX_c48<>(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + VMOVQ_R8_X15 + VPINSRQ_1_R9_X15 + + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X15, X6, X6 + VMOVDQA 0(SP), X7 + + LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA X12, 16(SP) + VMOVDQA X13, 32(SP) + VMOVDQA X14, 48(SP) + VMOVDQA X15, 64(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X12, 80(SP) + VMOVDQA X13, 96(SP) + VMOVDQA X14, 112(SP) + VMOVDQA X15, 128(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X12, 144(SP) + VMOVDQA X13, 160(SP) + VMOVDQA X14, 176(SP) + VMOVDQA X15, 192(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, 
X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VMOVDQA X12, 208(SP) + VMOVDQA X13, 224(SP) + VMOVDQA X14, 240(SP) + VMOVDQA X15, 256(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_11_12_5_15_8_0_2_13() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_2_5_4_15_6_10_0_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_9_5_2_10_0_7_4_15() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_2_6_0_8_12_10_11_3() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_0_6_9_8_7_3_2_11() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_5_15_8_2_0_4_6_10() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_6_14_11_0_15_9_3_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_12_13_1_10_2_7_4_5() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_15_9_3_13_11_14_12_0() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X10, 0(AX) + VMOVDQU X11, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + VZEROUPPER + + MOVQ BP, SP + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go new file mode 100644 index 0000000000..30e2fcd581 --- /dev/null +++ 
b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7,amd64,!gccgo,!appengine + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 0000000000..578e947b3b --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,281 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), m0; \ + PINSRQ $1, i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ 
i4*8(src), m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(SP), X7 + + LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(SP) + MOVO X9, 32(SP) + MOVO X10, 48(SP) + MOVO X11, 64(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(SP) + MOVO X9, 96(SP) + MOVO X10, 112(SP) + MOVO X11, 128(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + MOVO X8, 144(SP) + MOVO X9, 160(SP) + MOVO X10, 176(SP) + MOVO X11, 192(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(SP) + MOVO X9, 224(SP) + MOVO X10, 240(SP) + MOVO X11, 256(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, 
X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + MOVQ BP, SP + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 0000000000..3168a8aa3c --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "math/bits" +) + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. 
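+// +// BLAKE2b uses 12 rounds but sigma defines only 10 distinct permutations, +// so rounds 10 and 11 repeat rounds 0 and 1; storing all 12 rows avoids a +// modulo-10 index reduction in the hot loop below.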
+var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -32) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -24) + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -32) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -24) + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -32) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -24) + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -32) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -24) + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -16) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -63) + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -16) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -63) + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -16) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -63) + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -16) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -63) + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -32) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -24) + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -32) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -24) + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -32) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -24) + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -32) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -24) + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -16) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -63) + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -16) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -63) + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -16) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -63) + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -16) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -63) + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ 
v10
+		h[3] ^= v3 ^ v11
+		h[4] ^= v4 ^ v12
+		h[5] ^= v5 ^ v13
+		h[6] ^= v6 ^ v14
+		h[7] ^= v7 ^ v15
+	}
+	c[0], c[1] = c0, c1
+}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
new file mode 100644
index 0000000000..da156a1ba6
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
@@ -0,0 +1,11 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine gccgo
+
+package blake2b
+
+func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
+	hashBlocksGeneric(h, c, flag, blocks)
+}
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go
new file mode 100644
index 0000000000..52c414db0e
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go
@@ -0,0 +1,177 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package blake2b
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// XOF defines the interface to hash functions that
+// support arbitrary-length output.
+type XOF interface {
+	// Write absorbs more data into the hash's state. It panics if called
+	// after Read.
+	io.Writer
+
+	// Read reads more output from the hash. It returns io.EOF if the limit
+	// has been reached.
+	io.Reader
+
+	// Clone returns a copy of the XOF in its current state.
+	Clone() XOF
+
+	// Reset resets the XOF to its initial state.
+	Reset()
+}
+
+// OutputLengthUnknown can be used as the size argument to NewXOF to indicate
+// the length of the output is not known in advance.
+const OutputLengthUnknown = 0
+
+// magicUnknownOutputLength is a magic value for the output size that indicates
+// an unknown number of output bytes.
+const magicUnknownOutputLength = (1 << 32) - 1
+
+// maxOutputLength is the absolute maximum number of bytes to produce when the
+// number of output bytes is unknown.
+const maxOutputLength = (1 << 32) * 64
+
+// NewXOF creates a new variable-output-length hash. The hash either produces a
+// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes
+// (size == OutputLengthUnknown). In the latter case, an absolute limit of
+// 256GiB applies.
+//
+// A non-nil key turns the hash into a MAC. The key must be between
+// zero and 32 bytes long.
+func NewXOF(size uint32, key []byte) (XOF, error) {
+	if len(key) > Size {
+		return nil, errKeySize
+	}
+	if size == magicUnknownOutputLength {
+		// 2^32-1 indicates an unknown number of bytes and thus isn't a
+		// valid length.
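+		// (BLAKE2X stores the XOF output length in a 32-bit field of the
+		// parameter block, so the all-ones value doubles as the "unknown
+		// length" sentinel and cannot be requested explicitly.)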
+ return nil, errors.New("blake2b: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint32 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2b: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length + x.cfg[17] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[1] ^= uint64(x.length) << 32 + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 0000000000..efd689af4b --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,32 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 0000000000..200617ea86 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,181 @@ +// Copyright 2012 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package publicsuffix provides a public suffix list based on data from +// https://publicsuffix.org/ +// +// A public suffix is one under which Internet users can directly register +// names. It is related to, but different from, a TLD (top level domain). +// +// "com" is a TLD (top level domain). Top level means it has no dots. +// +// "com" is also a public suffix. Amazon and Google have registered different +// siblings under that domain: "amazon.com" and "google.com". +// +// "au" is another TLD, again because it has no dots. But it's not "amazon.au". +// Instead, it's "amazon.com.au". +// +// "com.au" isn't an actual TLD, because it's not at the top level (it has +// dots). But it is an eTLD (effective TLD), because that's the branching point +// for domain name registrars. +// +// Another name for "an eTLD" is "a public suffix". Often, what's more of +// interest is the eTLD+1, or one more label than the public suffix. For +// example, browsers partition read/write access to HTTP cookies according to +// the eTLD+1. Web pages served from "amazon.com.au" can't read cookies from +// "google.com.au", but web pages served from "maps.google.com" can share +// cookies from "www.google.com", so you don't have to sign into Google Maps +// separately from signing into Google Web Search. Note that all four of those +// domains have 3 labels and 2 dots. The first two domains are each an eTLD+1, +// the last two are not (but share the same eTLD+1: "google.com"). +// +// All of these domains have the same eTLD+1: +// - "www.books.amazon.co.uk" +// - "books.amazon.co.uk" +// - "amazon.co.uk" +// Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk". +// +// There is no closed form algorithm to calculate the eTLD of a domain. +// Instead, the calculation is data driven. This package provides a +// pre-compiled snapshot of Mozilla's PSL (Public Suffix List) data at +// https://publicsuffix.org/ +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. + +import ( + "fmt" + "net/http/cookiejar" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. +var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. +// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is either a +// privately managed domain (and in practice, not a top level domain) or an +// unmanaged top level domain (and not explicitly mentioned in the +// publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN +// domains, "foo.dyndns.org" and "foo.blogspot.co.uk" are private domains and +// "cromulent" is an unmanaged top level domain. 
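+// For instance (an illustrative call, assuming the compiled table matches
+// the publicsuffix.org data): PublicSuffix("foo.blogspot.co.uk") returns
+// ("blogspot.co.uk", false), while PublicSuffix("foo.org") returns
+// ("org", true).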
+//
+// Use cases for distinguishing ICANN domains like "foo.com" from private
+// domains like "foo.appspot.com" can be found at
+// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases
+func PublicSuffix(domain string) (publicSuffix string, icann bool) {
+	lo, hi := uint32(0), uint32(numTLD)
+	s, suffix, icannNode, wildcard := domain, len(domain), false, false
+loop:
+	for {
+		dot := strings.LastIndex(s, ".")
+		if wildcard {
+			icann = icannNode
+			suffix = 1 + dot
+		}
+		if lo == hi {
+			break
+		}
+		f := find(s[1+dot:], lo, hi)
+		if f == notFound {
+			break
+		}
+
+		u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength)
+		icannNode = u&(1<<nodesBitsICANN-1) != 0
+		u >>= nodesBitsICANN
+		u = children[u&(1<<nodesBitsChildren-1)]
+		lo = u & (1<<childrenBitsLo - 1)
+		u >>= childrenBitsLo
+		hi = u & (1<<childrenBitsHi - 1)
+		u >>= childrenBitsHi
+		switch u & (1<<childrenBitsNodeType - 1) {
+		case nodeTypeNormal:
+			suffix = 1 + dot
+		case nodeTypeException:
+			suffix = 1 + len(s)
+			break loop
+		}
+		u >>= childrenBitsNodeType
+		wildcard = u&(1<<childrenBitsWildcard-1) != 0
+		if !wildcard {
+			icann = icannNode
+		}
+
+		if dot == -1 {
+			break
+		}
+		s = s[:dot]
+	}
+	if suffix == len(domain) {
+		// If no rules match, the prevailing rule is "*".
+		return domain[1+strings.LastIndex(domain, "."):], icann
+	}
+	return domain[suffix:], icann
+}
+
+const notFound uint32 = 1<<32 - 1
+
+// find returns the index of the node in the range [lo, hi) whose label equals
+// label, or notFound if there is no such node. The range is assumed to be in
+// strictly increasing node label order.
+func find(label string, lo, hi uint32) uint32 {
+	for lo < hi {
+		mid := lo + (hi-lo)/2
+		s := nodeLabel(mid)
+		if s < label {
+			lo = mid + 1
+		} else if s == label {
+			return mid
+		} else {
+			hi = mid
+		}
+	}
+	return notFound
+}
+
+// nodeLabel returns the label for the i'th node.
+func nodeLabel(i uint32) string {
+	x := nodes[i]
+	length := x & (1<<nodesBitsTextLength - 1)
+	x >>= nodesBitsTextLength
+	offset := x & (1<<nodesBitsTextOffset - 1)
+	return text[offset : offset+length]
+}
+#include <cpuid.h>
+#include <stdint.h>
+
+// Need to wrap __get_cpuid_count because it's declared as static.
+int
+gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
+	uint32_t *eax, uint32_t *ebx,
+	uint32_t *ecx, uint32_t *edx)
+{
+	return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
+}
+
+// xgetbv reads the contents of an XCR (Extended Control Register)
+// specified in the ECX register into registers EDX:EAX.
+// Currently, the only supported value for XCR is 0.
+//
+// TODO: Replace with a better alternative:
+//
+//	#include <immintrin.h>
+//
+//	#pragma GCC target("xsave")
+//
+//	void gccgoXgetbv(uint32_t *eax, uint32_t *edx) {
+//		unsigned long long x = _xgetbv(0);
+//		*eax = x & 0xffffffff;
+//		*edx = (x >> 32) & 0xffffffff;
+//	}
+//
+// Note that _xgetbv is defined starting with GCC 8.
+void
+gccgoXgetbv(uint32_t *eax, uint32_t *edx)
+{
+	__asm("  xorl %%ecx, %%ecx\n"
+	      "  xgetbv"
+	    : "=a"(*eax), "=d"(*edx));
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go
new file mode 100644
index 0000000000..ba49b91bd3
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go
@@ -0,0 +1,26 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64 amd64p32
+// +build gccgo
+
+package cpu
+
+//extern gccgoGetCpuidCount
+func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32)
+
+func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) {
+	var a, b, c, d uint32
+	gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d)
+	return a, b, c, d
+}
+
+//extern gccgoXgetbv
+func gccgoXgetbv(eax, edx *uint32)
+
+func xgetbv() (eax, edx uint32) {
+	var a, d uint32
+	gccgoXgetbv(&a, &d)
+	return a, d
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
new file mode 100644
index 0000000000..aa986f7782
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
@@ -0,0 +1,22 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+
+package cpu
+
+// haveAsmFunctions reports whether the other functions in this file can
+// be safely called.
+func haveAsmFunctions() bool { return false }
+
+// TODO(mundaym): the following feature detection functions are currently
+// stubs. See https://golang.org/cl/162887 for how to fix this.
+// They are likely to be expensive to call so the results should be cached.
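+// (Because haveAsmFunctions reports false here, doinit in
+// cpu_linux_s390x.go returns before calling any of these stubs, so the
+// panics below are unreachable and the optional crypto feature flags
+// simply remain false under gccgo.)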
+func stfle() facilityList { panic("not implemented for gccgo") } +func kmQuery() queryResult { panic("not implemented for gccgo") } +func kmcQuery() queryResult { panic("not implemented for gccgo") } +func kmctrQuery() queryResult { panic("not implemented for gccgo") } +func kmaQuery() queryResult { panic("not implemented for gccgo") } +func kimdQuery() queryResult { panic("not implemented for gccgo") } +func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 0000000000..76b5f507fa --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !amd64,!amd64p32,!386 + +package cpu + +import ( + "io/ioutil" +) + +const ( + _AT_HWCAP = 16 + _AT_HWCAP2 = 26 + + procAuxv = "/proc/self/auxv" + + uintSize = int(32 << (^uint(0) >> 63)) +) + +// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 +// These are initialized in cpu_$GOARCH.go +// and should not be changed after they are initialized. +var hwCap uint +var hwCap2 uint + +func init() { + buf, err := ioutil.ReadFile(procAuxv) + if err != nil { + // e.g. on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false + return + } + + bo := hostByteOrder() + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + doinit() + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 0000000000..fa7fb1bd7b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 64 + +// HWCAP/HWCAP2 bits. These are exposed by Linux. 
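+// (These mirror the HWCAP bit definitions the Linux kernel exposes for
+// arm64; the raw value is read from the AT_HWCAP auxv entry by the init
+// code in cpu_linux.go above.)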
+const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +func doinit() { + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 0000000000..6c8d975d40 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build ppc64 ppc64le + +package cpu + +const cacheLineSize = 128 + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 0000000000..d579eaef40 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,161 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +const cacheLineSize = 256 + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. +func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +// facility is a bit index for the named facility. +type facility uint8 + +const ( + // cryptography facilities + msa4 facility = 77 // message-security-assist extension 4 + msa8 facility = 146 // message-security-assist extension 8 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) 
+ } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 0000000000..f55e0c82c7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips64 mips64le + +package cpu + +const cacheLineSize = 32 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 0000000000..cda87b1a1b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips mipsle + +package cpu + +const cacheLineSize = 32 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go new file mode 100644 index 0000000000..dd1e76dc92 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux,arm64 + +package cpu + +const cacheLineSize = 64 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 0000000000..e5037d92e0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !gccgo + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go new file mode 100644 index 0000000000..bd9bbda0c0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -0,0 +1,15 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. + +const cacheLineSize = 0 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 0000000000..d70d317f5a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 + +package cpu + +const cacheLineSize = 64 + +func init() { + Initialized = true + + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) + X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + X86.HasRDRAND = isSet(30, ecx1) + + osSupportsAVX := false + // For XGETBV, OSXSAVE bit is required and sufficient. 
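+	// (XGETBV with ECX=0 returns XCR0; bit 1 covers SSE/XMM state and
+	// bit 2 covers AVX/YMM state, so both must be set before the AVX
+	// CPUID bits can be trusted.)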
+	if X86.HasOSXSAVE {
+		eax, _ := xgetbv()
+		// Check if XMM and YMM registers have OS support.
+		osSupportsAVX = isSet(1, eax) && isSet(2, eax)
+	}
+
+	X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
+
+	if maxID < 7 {
+		return
+	}
+
+	_, ebx7, _, _ := cpuid(7, 0)
+	X86.HasBMI1 = isSet(3, ebx7)
+	X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
+	X86.HasBMI2 = isSet(8, ebx7)
+	X86.HasERMS = isSet(9, ebx7)
+	X86.HasRDSEED = isSet(18, ebx7)
+	X86.HasADX = isSet(19, ebx7)
+}
+
+func isSet(bitpos uint, value uint32) bool {
+	return value&(1<<bitpos) != 0
+}
+
+- [Overview](#overview)
+- [Features](#features)
+- [User-visible changes between v1 and v2](#user-visible-changes-between-v1-and-v2)
+  - [Flags can be used at any point after their definition.](#flags-can-be-used-at-any-point-after-their-definition)
+  - [Short flags can be combined with their parameters](#short-flags-can-be-combined-with-their-parameters)
+- [API changes between v1 and v2](#api-changes-between-v1-and-v2)
+- [Versions](#versions)
+  - [V2 is the current stable version](#v2-is-the-current-stable-version)
+  - [V1 is the OLD stable version](#v1-is-the-old-stable-version)
+- [Change History](#change-history)
+- [Examples](#examples)
+  - [Simple Example](#simple-example)
+  - [Complex Example](#complex-example)
+- [Reference Documentation](#reference-documentation)
+  - [Displaying errors and usage information](#displaying-errors-and-usage-information)
+  - [Sub-commands](#sub-commands)
+  - [Custom Parsers](#custom-parsers)
+  - [Repeatable flags](#repeatable-flags)
+  - [Boolean Values](#boolean-values)
+  - [Default Values](#default-values)
+  - [Place-holders in Help](#place-holders-in-help)
+  - [Consuming all remaining arguments](#consuming-all-remaining-arguments)
+  - [Bash/ZSH Shell Completion](#bashzsh-shell-completion)
+  - [Supporting -h for help](#supporting--h-for-help)
+  - [Custom help](#custom-help)
+
+
+
+## Overview
+
+Kingpin is a [fluent-style](http://en.wikipedia.org/wiki/Fluent_interface),
+type-safe command-line parser. It supports flags, nested commands, and
+positional arguments.
+
+Install it with:
+
+    $ go get gopkg.in/alecthomas/kingpin.v2
+
+It looks like this:
+
+```go
+var (
+  verbose = kingpin.Flag("verbose", "Verbose mode.").Short('v').Bool()
+  name    = kingpin.Arg("name", "Name of user.").Required().String()
+)
+
+func main() {
+  kingpin.Parse()
+  fmt.Printf("%v, %s\n", *verbose, *name)
+}
+```
+
+More [examples](https://github.com/alecthomas/kingpin/tree/master/_examples) are available.
+
+Second to parsing, providing the user with useful help is probably the most
+important thing a command-line parser does. Kingpin tries to provide detailed
+contextual help if `--help` is encountered at any point in the command line
+(excluding after `--`).
+
+## Features
+
+- Help output that isn't as ugly as sin.
+- Fully [customisable help](#custom-help), via Go templates.
+- Parsed, type-safe flags (`kingpin.Flag("f", "help").Int()`)
+- Parsed, type-safe positional arguments (`kingpin.Arg("a", "help").Int()`).
+- Parsed, type-safe, arbitrarily deep commands (`kingpin.Command("c", "help")`).
+- Support for required flags and required positional arguments (`kingpin.Flag("f", "").Required().Int()`).
+- Support for arbitrarily nested default commands (`command.Default()`).
+- Callbacks per command, flag and argument (`kingpin.Command("c", "").Action(myAction)`).
+- POSIX-style short flag combining (`-a -b` -> `-ab`).
+- Short-flag+parameter combining (`-a parm` -> `-aparm`).
+- Read command-line from files (`@<file>`).
+- Automatically generate man pages (`--help-man`).
+
+## User-visible changes between v1 and v2
+
+### Flags can be used at any point after their definition.
+
+Flags can be specified at any point after their definition, not just
+*immediately after their associated command*. From the chat example below, the
+following used to be required:
+
+```
+$ chat --server=chat.server.com:8080 post --image=~/Downloads/owls.jpg pics
+```
+
+But the following will now work:
+
+```
+$ chat post --server=chat.server.com:8080 --image=~/Downloads/owls.jpg pics
+```
+
+### Short flags can be combined with their parameters
+
+Previously, if a short flag was used, any argument to that flag would have to
+be separated by a space. That is no longer the case.
+
+## API changes between v1 and v2
+
+- `ParseWithFileExpansion()` is gone. The new parser directly supports expanding `@<file>`.
+- Added `FatalUsage()` and `FatalUsageContext()` for displaying an error + usage and terminating.
+- `Dispatch()` renamed to `Action()`.
+- Added `ParseContext()` for parsing a command line into its intermediate context form without executing.
+- Added `Terminate()` function to override the termination function.
+- Added `UsageForContextWithTemplate()` for printing usage via a custom template.
+- Added `UsageTemplate()` for overriding the default template to use. Two templates are included:
+    1. `DefaultUsageTemplate` - default template.
+    2. `CompactUsageTemplate` - compact command template for larger applications.
+
+## Versions
+
+Kingpin uses [gopkg.in](https://gopkg.in/alecthomas/kingpin) for versioning.
+
+The current stable version is [gopkg.in/alecthomas/kingpin.v2](https://gopkg.in/alecthomas/kingpin.v2). The previous version, [gopkg.in/alecthomas/kingpin.v1](https://gopkg.in/alecthomas/kingpin.v1), is deprecated and in maintenance mode.
+
+### [V2](https://gopkg.in/alecthomas/kingpin.v2) is the current stable version
+
+Installation:
+
+```sh
+$ go get gopkg.in/alecthomas/kingpin.v2
+```
+
+### [V1](https://gopkg.in/alecthomas/kingpin.v1) is the OLD stable version
+
+Installation:
+
+```sh
+$ go get gopkg.in/alecthomas/kingpin.v1
+```
+
+## Change History
+
+- *2015-09-19* -- Stable v2.1.0 release.
+  - Added `command.Default()` to specify a default command to use if no other
+    command matches. This allows for convenient user shortcuts.
+  - Exposed `HelpFlag` and `VersionFlag` for further customisation.
+  - `Action()` and `PreAction()` added and both now support an arbitrary
+    number of callbacks.
+  - `kingpin.SeparateOptionalFlagsUsageTemplate`.
+  - `--help-long` and `--help-man` (hidden by default) flags.
+  - Flags are "interspersed" by default, but can be disabled with `app.Interspersed(false)`.
+  - Added flags for all simple builtin types (int8, uint16, etc.) and slice variants.
+  - Use `app.Writer(os.Writer)` to specify the default writer for all output functions.
+  - Dropped `os.Writer` prefix from all printf-like functions.
+
+- *2015-05-22* -- Stable v2.0.0 release.
+  - Initial stable release of v2.0.0.
+  - Fully supports interspersed flags, commands and arguments.
+  - Flags can be present at any point after their logical definition.
+  - Application.Parse() terminates if commands are present and a command is not parsed.
+  - Dispatch() -> Action().
+  - Actions are dispatched after all values are populated.
+  - Override termination function (defaults to os.Exit).
+  - Override output stream (defaults to os.Stderr).
+  - Templatised usage help, with default and compact templates.
+ - Make error/usage functions more consistent. + - Support argument expansion from files by default (with @). + - Fully public data model is available via .Model(). + - Parser has been completely refactored. + - Parsing and execution has been split into distinct stages. + - Use `go generate` to generate repeated flags. + - Support combined short-flag+argument: -fARG. + +- *2015-01-23* -- Stable v1.3.4 release. + - Support "--" for separating flags from positional arguments. + - Support loading flags from files (ParseWithFileExpansion()). Use @FILE as an argument. + - Add post-app and post-cmd validation hooks. This allows arbitrary validation to be added. + - A bunch of improvements to help usage and formatting. + - Support arbitrarily nested sub-commands. + +- *2014-07-08* -- Stable v1.2.0 release. + - Pass any value through to `Strings()` when final argument. + Allows for values that look like flags to be processed. + - Allow `--help` to be used with commands. + - Support `Hidden()` flags. + - Parser for [units.Base2Bytes](https://github.com/alecthomas/units) + type. Allows for flags like `--ram=512MB` or `--ram=1GB`. + - Add an `Enum()` value, allowing only one of a set of values + to be selected. eg. `Flag(...).Enum("debug", "info", "warning")`. + +- *2014-06-27* -- Stable v1.1.0 release. + - Bug fixes. + - Always return an error (rather than panicing) when misconfigured. + - `OpenFile(flag, perm)` value type added, for finer control over opening files. + - Significantly improved usage formatting. + +- *2014-06-19* -- Stable v1.0.0 release. + - Support [cumulative positional](#consuming-all-remaining-arguments) arguments. + - Return error rather than panic when there are fatal errors not caught by + the type system. eg. when a default value is invalid. + - Use gokpg.in. + +- *2014-06-10* -- Place-holder streamlining. + - Renamed `MetaVar` to `PlaceHolder`. + - Removed `MetaVarFromDefault`. Kingpin now uses [heuristics](#place-holders-in-help) + to determine what to display. + +## Examples + +### Simple Example + +Kingpin can be used for simple flag+arg applications like so: + +``` +$ ping --help +usage: ping [] [] + +Flags: + --debug Enable debug mode. + --help Show help. + -t, --timeout=5s Timeout waiting for ping. + +Args: + IP address to ping. + [] Number of packets to send +$ ping 1.2.3.4 5 +Would ping: 1.2.3.4 with timeout 5s and count 5 +``` + +From the following source: + +```go +package main + +import ( + "fmt" + + "gopkg.in/alecthomas/kingpin.v2" +) + +var ( + debug = kingpin.Flag("debug", "Enable debug mode.").Bool() + timeout = kingpin.Flag("timeout", "Timeout waiting for ping.").Default("5s").OverrideDefaultFromEnvar("PING_TIMEOUT").Short('t').Duration() + ip = kingpin.Arg("ip", "IP address to ping.").Required().IP() + count = kingpin.Arg("count", "Number of packets to send").Int() +) + +func main() { + kingpin.Version("0.0.1") + kingpin.Parse() + fmt.Printf("Would ping: %s with timeout %s and count %d\n", *ip, *timeout, *count) +} +``` + +### Complex Example + +Kingpin can also produce complex command-line applications with global flags, +subcommands, and per-subcommand flags, like this: + +``` +$ chat --help +usage: chat [] [] [ ...] + +A command-line chat application. + +Flags: + --help Show help. + --debug Enable debug mode. + --server=127.0.0.1 Server address. + +Commands: + help [] + Show help for a command. + + register + Register a new user. + + post [] [] + Post a message to a channel. + +$ chat help post +usage: chat [] post [] [] + +Post a message to a channel. 
+
+Flags:
+  --image=IMAGE  Image to post.
+
+Args:
+  <channel>  Channel to post to.
+  [<text>]   Text to post.
+
+$ chat post --image=~/Downloads/owls.jpg pics
+...
+```
+
+From this code:
+
+```go
+package main
+
+import (
+	"os"
+	"strings"
+	"gopkg.in/alecthomas/kingpin.v2"
+)
+
+var (
+	app      = kingpin.New("chat", "A command-line chat application.")
+	debug    = app.Flag("debug", "Enable debug mode.").Bool()
+	serverIP = app.Flag("server", "Server address.").Default("127.0.0.1").IP()
+
+	register     = app.Command("register", "Register a new user.")
+	registerNick = register.Arg("nick", "Nickname for user.").Required().String()
+	registerName = register.Arg("name", "Name of user.").Required().String()
+
+	post        = app.Command("post", "Post a message to a channel.")
+	postImage   = post.Flag("image", "Image to post.").File()
+	postChannel = post.Arg("channel", "Channel to post to.").Required().String()
+	postText    = post.Arg("text", "Text to post.").Strings()
+)
+
+func main() {
+	switch kingpin.MustParse(app.Parse(os.Args[1:])) {
+	// Register user
+	case register.FullCommand():
+		println(*registerNick)
+
+	// Post message
+	case post.FullCommand():
+		if *postImage != nil {
+		}
+		text := strings.Join(*postText, " ")
+		println("Post:", text)
+	}
+}
+```
+
+## Reference Documentation
+
+### Displaying errors and usage information
+
+Kingpin exports a set of functions to provide consistent errors and usage
+information to the user.
+
+Error messages look something like this:
+
+    <app>: error: <message>
+
+The functions on `Application` are:
+
+Function | Purpose
+---------|--------------
+`Errorf(format, args)` | Display a printf formatted error to the user.
+`Fatalf(format, args)` | As with Errorf, but also call the termination handler.
+`FatalUsage(format, args)` | As with Fatalf, but also print contextual usage information.
+`FatalUsageContext(context, format, args)` | As with Fatalf, but also print contextual usage information from a `ParseContext`.
+`FatalIfError(err, format, args)` | Conditionally print an error prefixed with format+args, then call the termination handler.
+
+There are equivalent global functions in the kingpin namespace for the default
+`kingpin.CommandLine` instance.
+
+### Sub-commands
+
+Kingpin supports nested sub-commands, with separate flag and positional
+arguments per sub-command. Note that positional arguments may only occur after
+sub-commands.
+
+For example:
+
+```go
+var (
+	deleteCommand      = kingpin.Command("delete", "Delete an object.")
+	deleteUserCommand  = deleteCommand.Command("user", "Delete a user.")
+	deleteUserUIDFlag  = deleteUserCommand.Flag("uid", "Delete user by UID rather than username.")
+	deleteUserUsername = deleteUserCommand.Arg("username", "Username to delete.")
+	deletePostCommand  = deleteCommand.Command("post", "Delete a post.")
+)
+
+func main() {
+	switch kingpin.Parse() {
+	case "delete user":
+	case "delete post":
+	}
+}
+```
+
+### Custom Parsers
+
+Kingpin supports both flag and positional argument parsers for converting to
+Go types. For example, some included parsers are `Int()`, `Float()`,
+`Duration()` and `ExistingFile()` (see [parsers.go](./parsers.go) for a complete list of included parsers).
+
+Parsers conform to Go's [`flag.Value`](http://godoc.org/flag#Value)
+interface, so any existing implementations will work.
+
+For example, a parser for accumulating HTTP header values might look like this:
+
+```go
+type HTTPHeaderValue http.Header
+
+func (h *HTTPHeaderValue) Set(value string) error {
+	parts := strings.SplitN(value, ":", 2)
+	if len(parts) != 2 {
+		return fmt.Errorf("expected HEADER:VALUE got '%s'", value)
+	}
+	(*http.Header)(h).Add(parts[0], parts[1])
+	return nil
+}
+
+func (h *HTTPHeaderValue) String() string {
+	return ""
+}
+```
+
+As a convenience, I would recommend something like this:
+
+```go
+func HTTPHeader(s Settings) (target *http.Header) {
+	target = &http.Header{}
+	s.SetValue((*HTTPHeaderValue)(target))
+	return
+}
+```
+
+You would use it like so:
+
+```go
+headers = HTTPHeader(kingpin.Flag("header", "Add a HTTP header to the request.").Short('H'))
+```
+
+### Repeatable flags
+
+Depending on the `Value` they hold, some flags may be repeated. The
+`IsCumulative() bool` function on `Value` tells if it's safe to call `Set()`
+multiple times or if an error should be raised if several values are passed.
+
+The built-in `Value`s returning slices and maps, as well as `Counter`, are
+examples of `Value`s that make a flag repeatable.
+
+### Boolean values
+
+Boolean values are uniquely managed by Kingpin. Each boolean flag will have a negative complement:
+`--<name>` and `--no-<name>`.
+
+### Default Values
+
+The default value is the zero value for a type. This can be overridden with
+the `Default(value...)` function on flags and arguments. This function accepts
+one or several strings, which are parsed by the value itself, so they *must*
+be compliant with the format expected.
+
+### Place-holders in Help
+
+The place-holder value for a flag is the value used in the help to describe
+the value of a non-boolean flag.
+
+The value provided to PlaceHolder() is used if provided, then the value
+provided by Default() if provided, then finally the capitalised flag name is
+used.
+
+Here are some examples of flags with various permutations:
+
+    --name=NAME       // Flag(...).String()
+    --name="Harry"    // Flag(...).Default("Harry").String()
+    --name=FULL-NAME  // Flag(...).PlaceHolder("FULL-NAME").Default("Harry").String()
+
+### Consuming all remaining arguments
+
+A common command-line idiom is to use all remaining arguments for some
+purpose. eg. The following command accepts an arbitrary number of
+IP addresses as positional arguments:
+
+    ./cmd ping 10.1.1.1 192.168.1.1
+
+Such arguments are similar to [repeatable flags](#repeatable-flags), but for
+arguments. Therefore they use the same `IsCumulative() bool` function on the
+underlying `Value`, so the built-in `Value`s for which the `Set()` function
+can be called several times will consume multiple arguments.
+
+To implement the above example with a custom `Value`, we might do something
+like this:
+
+```go
+type ipList []net.IP
+
+func (i *ipList) Set(value string) error {
+	if ip := net.ParseIP(value); ip == nil {
+		return fmt.Errorf("'%s' is not an IP address", value)
+	} else {
+		*i = append(*i, ip)
+		return nil
+	}
+}
+
+func (i *ipList) String() string {
+	return ""
+}
+
+func (i *ipList) IsCumulative() bool {
+	return true
+}
+
+func IPList(s Settings) (target *[]net.IP) {
+	target = new([]net.IP)
+	s.SetValue((*ipList)(target))
+	return
+}
+```
+
+And use it like so:
+
+```go
+ips := IPList(kingpin.Arg("ips", "IP addresses to ping."))
+```
+
+### Bash/ZSH Shell Completion
+
+By default, all flags and commands/subcommands generate completions
+internally.
+
+Out of the box, CLI tools using kingpin should be able to take advantage
+of completion hinting for flags and commands. By specifying
+`--completion-bash` as the first argument, your CLI tool will show
+possible subcommands. By ending your argv with `--`, hints for flags
+will be shown.
+
+To allow your end users to take advantage of this, you must package a
+`/etc/bash_completion.d` script with your distribution (or the equivalent
+for your target platform/shell). An alternative is to instruct your end
+user to source a script from their `bash_profile` (or equivalent).
+
+Fortunately Kingpin makes it easy to generate or source a script for use
+with end users' shells. `./yourtool --completion-script-bash` and
+`./yourtool --completion-script-zsh` will generate these scripts for you.
+
+**Installation by Package**
+
+For the best user experience, you should bundle your pre-created
+completion script with your CLI tool and install it inside
+`/etc/bash_completion.d` (or equivalent). A good suggestion is to add
+this as an automated step to your build pipeline, in case the
+implementation is improved or bugs are fixed.
+
+**Installation by `bash_profile`**
+
+Alternatively, instruct your users to add an additional statement to
+their `bash_profile` (or equivalent):
+
+```
+eval "$(your-cli-tool --completion-script-bash)"
+```
+
+Or for ZSH:
+
+```
+eval "$(your-cli-tool --completion-script-zsh)"
+```
+
+#### Additional API
+To provide more flexibility, a completion option API has been
+exposed for flags to allow user-defined completion options, extending
+completions beyond just `EnumVar`/`Enum`.
+
+**Provide Static Options**
+
+When using an `Enum` or `EnumVar`, users are limited to only the options
+given. Maybe we wish to hint possible options to the user, but also
+allow them to provide their own custom option. `HintOptions` gives
+this functionality to flags.
+
+```
+app := kingpin.New("completion", "My application with bash completion.")
+app.Flag("port", "Provide a port to connect to").
+    Required().
+    HintOptions("80", "443", "8080").
+    IntVar(&c.port)
+```
+
+**Provide Dynamic Options**
+Consider the case where you need to read a local database or a file to
+provide suggestions. You can dynamically generate the options:
+
+```
+func listHosts() []string {
+    // Provide a dynamic list of hosts from a hosts file or otherwise
+    // for bash completion. In this example we simply return a static slice.
+
+    // You could use this functionality to reach into a hosts file to provide
+    // completion for a list of known hosts.
+    return []string{"sshhost.example", "webhost.example", "ftphost.example"}
+}
+
+app := kingpin.New("completion", "My application with bash completion.")
+app.Flag("flag-1", "").HintAction(listHosts).String()
+```
+
+**EnumVar/Enum**
+When using `Enum` or `EnumVar`, any provided options will be automatically
+used for bash autocompletion. However, if you wish to provide a subset or
+different options, you can use `HintOptions` or `HintAction`, which will
+override the default completion options for `Enum`/`EnumVar`.
+
+**Examples**
+You can see an in-depth example of the completion API within
+`examples/completion/main.go`.
+
+### Supporting -h for help
+
+`kingpin.CommandLine.HelpFlag.Short('h')`
+
+### Custom help
+
+Kingpin v2 supports templatised help using the text/template library (actually, [a fork](https://github.com/alecthomas/template)).
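+For example, switching an application to the compact template (the template
+function and the bundled templates are described next) takes a single call:
+
+```go
+app := kingpin.New("myapp", "My application.")
+app.UsageTemplate(kingpin.CompactUsageTemplate)
+```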
+
+You can specify the template to use with the [Application.UsageTemplate()](http://godoc.org/gopkg.in/alecthomas/kingpin.v2#Application.UsageTemplate) function.
+
+There are four included templates: `kingpin.DefaultUsageTemplate` is the default,
+`kingpin.CompactUsageTemplate` provides a more compact representation for more complex command-line structures,
+`kingpin.SeparateOptionalFlagsUsageTemplate` looks like the default template, but splits required
+and optional command flags into separate lists, and `kingpin.ManPageTemplate` is used to generate man pages.
+
+See the above templates for examples of usage, and the [UsageForContextWithTemplate()](https://github.com/alecthomas/kingpin/blob/master/usage.go#L198) method for details on the context.
+
+#### Default help template
+
+```
+$ go run ./examples/curl/curl.go --help
+usage: curl [<flags>] <command> [<args> ...]
+
+An example implementation of curl.
+
+Flags:
+  --help            Show help.
+  -t, --timeout=5s  Set connection timeout.
+  -H, --headers=HEADER=VALUE
+                    Add HTTP headers to the request.
+
+Commands:
+  help [<command>...]
+    Show help.
+
+  get url <url>
+    Retrieve a URL.
+
+  get file <file>
+    Retrieve a file.
+
+  post [<flags>] <url>
+    POST a resource.
+```
+
+#### Compact help template
+
+```
+$ go run ./examples/curl/curl.go --help
+usage: curl [<flags>] <command> [<args> ...]
+
+An example implementation of curl.
+
+Flags:
+  --help            Show help.
+  -t, --timeout=5s  Set connection timeout.
+  -H, --headers=HEADER=VALUE
+                    Add HTTP headers to the request.
+
+Commands:
+  help [<command>...]
+  get [<flags>]
+    url <url>
+    file <file>
+  post [<flags>] <url>
+```
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/actions.go b/vendor/gopkg.in/alecthomas/kingpin.v2/actions.go
new file mode 100644
index 0000000000..72d6cbd408
--- /dev/null
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/actions.go
@@ -0,0 +1,42 @@
+package kingpin
+
+// Action callback executed at various stages after all values are populated.
+// The application, commands, arguments and flags all have corresponding
+// actions.
+type Action func(*ParseContext) error
+
+type actionMixin struct {
+	actions    []Action
+	preActions []Action
+}
+
+type actionApplier interface {
+	applyActions(*ParseContext) error
+	applyPreActions(*ParseContext) error
+}
+
+func (a *actionMixin) addAction(action Action) {
+	a.actions = append(a.actions, action)
+}
+
+func (a *actionMixin) addPreAction(action Action) {
+	a.preActions = append(a.preActions, action)
+}
+
+func (a *actionMixin) applyActions(context *ParseContext) error {
+	for _, action := range a.actions {
+		if err := action(context); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (a *actionMixin) applyPreActions(context *ParseContext) error {
+	for _, preAction := range a.preActions {
+		if err := preAction(context); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/app.go b/vendor/gopkg.in/alecthomas/kingpin.v2/app.go
new file mode 100644
index 0000000000..1a1a5effd0
--- /dev/null
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/app.go
@@ -0,0 +1,688 @@
+package kingpin
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"strings"
+)
+
+var (
+	ErrCommandNotSpecified = fmt.Errorf("command not specified")
+)
+
+var (
+	envarTransformRegexp = regexp.MustCompile(`[^a-zA-Z0-9_]+`)
+)
+
+type ApplicationValidator func(*Application) error
+
+// An Application contains the definitions of flags, arguments and commands
+// for an application.
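+// (An Application is typically constructed with New, populated with Flag,
+// Arg and Command definitions, and then run with Parse(os.Args[1:]) or the
+// package-level kingpin.MustParse, as in the README examples above.)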
+type Application struct {
+	cmdMixin
+	initialized bool
+
+	Name string
+	Help string
+
+	author         string
+	version        string
+	errorWriter    io.Writer // Destination for errors.
+	usageWriter    io.Writer // Destination for usage.
+	usageTemplate  string
+	validator      ApplicationValidator
+	terminate      func(status int) // See Terminate()
+	noInterspersed bool             // can flags be interspersed with args (or must they come first)
+	defaultEnvars  bool
+	completion     bool
+
+	// Help flag. Exposed for user customisation.
+	HelpFlag *FlagClause
+	// Help command. Exposed for user customisation. May be nil.
+	HelpCommand *CmdClause
+	// Version flag. Exposed for user customisation. May be nil.
+	VersionFlag *FlagClause
+}
+
+// New creates a new Kingpin application instance.
+func New(name, help string) *Application {
+	a := &Application{
+		Name:          name,
+		Help:          help,
+		errorWriter:   os.Stderr, // Left for backwards compatibility purposes.
+		usageWriter:   os.Stderr,
+		usageTemplate: DefaultUsageTemplate,
+		terminate:     os.Exit,
+	}
+	a.flagGroup = newFlagGroup()
+	a.argGroup = newArgGroup()
+	a.cmdGroup = newCmdGroup(a)
+	a.HelpFlag = a.Flag("help", "Show context-sensitive help (also try --help-long and --help-man).")
+	a.HelpFlag.Bool()
+	a.Flag("help-long", "Generate long help.").Hidden().PreAction(a.generateLongHelp).Bool()
+	a.Flag("help-man", "Generate a man page.").Hidden().PreAction(a.generateManPage).Bool()
+	a.Flag("completion-bash", "Output possible completions for the given args.").Hidden().BoolVar(&a.completion)
+	a.Flag("completion-script-bash", "Generate completion script for bash.").Hidden().PreAction(a.generateBashCompletionScript).Bool()
+	a.Flag("completion-script-zsh", "Generate completion script for ZSH.").Hidden().PreAction(a.generateZSHCompletionScript).Bool()
+
+	return a
+}
+
+func (a *Application) generateLongHelp(c *ParseContext) error {
+	a.Writer(os.Stdout)
+	if err := a.UsageForContextWithTemplate(c, 2, LongHelpTemplate); err != nil {
+		return err
+	}
+	a.terminate(0)
+	return nil
+}
+
+func (a *Application) generateManPage(c *ParseContext) error {
+	a.Writer(os.Stdout)
+	if err := a.UsageForContextWithTemplate(c, 2, ManPageTemplate); err != nil {
+		return err
+	}
+	a.terminate(0)
+	return nil
+}
+
+func (a *Application) generateBashCompletionScript(c *ParseContext) error {
+	a.Writer(os.Stdout)
+	if err := a.UsageForContextWithTemplate(c, 2, BashCompletionTemplate); err != nil {
+		return err
+	}
+	a.terminate(0)
+	return nil
+}
+
+func (a *Application) generateZSHCompletionScript(c *ParseContext) error {
+	a.Writer(os.Stdout)
+	if err := a.UsageForContextWithTemplate(c, 2, ZshCompletionTemplate); err != nil {
+		return err
+	}
+	a.terminate(0)
+	return nil
+}
+
+// DefaultEnvars configures all flags (that do not already have an associated
+// envar) to use a default environment variable in the form "<app>_<flag>".
+//
+// For example, if the application is named "foo" and a flag is named "bar-
+// waz", the environment variable will be "FOO_BAR_WAZ".
+func (a *Application) DefaultEnvars() *Application {
+	a.defaultEnvars = true
+	return a
+}
+
+// Terminate specifies the termination handler. Defaults to os.Exit(status).
+// If nil is passed, a no-op function will be used.
+func (a *Application) Terminate(terminate func(int)) *Application {
+	if terminate == nil {
+		terminate = func(int) {}
+	}
+	a.terminate = terminate
+	return a
+}
+
+// Writer specifies the writer to use for usage and errors. Defaults to os.Stderr.
+// DEPRECATED: See ErrorWriter and UsageWriter.
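+// (Writer sets both destinations at once; it is equivalent to calling
+// ErrorWriter(w) followed by UsageWriter(w).)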
+func (a *Application) Writer(w io.Writer) *Application { + a.errorWriter = w + a.usageWriter = w + return a +} + +// ErrorWriter sets the io.Writer to use for errors. +func (a *Application) ErrorWriter(w io.Writer) *Application { + a.errorWriter = w + return a +} + +// UsageWriter sets the io.Writer to use for errors. +func (a *Application) UsageWriter(w io.Writer) *Application { + a.usageWriter = w + return a +} + +// UsageTemplate specifies the text template to use when displaying usage +// information. The default is UsageTemplate. +func (a *Application) UsageTemplate(template string) *Application { + a.usageTemplate = template + return a +} + +// Validate sets a validation function to run when parsing. +func (a *Application) Validate(validator ApplicationValidator) *Application { + a.validator = validator + return a +} + +// ParseContext parses the given command line and returns the fully populated +// ParseContext. +func (a *Application) ParseContext(args []string) (*ParseContext, error) { + return a.parseContext(false, args) +} + +func (a *Application) parseContext(ignoreDefault bool, args []string) (*ParseContext, error) { + if err := a.init(); err != nil { + return nil, err + } + context := tokenize(args, ignoreDefault) + err := parse(context, a) + return context, err +} + +// Parse parses command-line arguments. It returns the selected command and an +// error. The selected command will be a space separated subcommand, if +// subcommands have been configured. +// +// This will populate all flag and argument values, call all callbacks, and so +// on. +func (a *Application) Parse(args []string) (command string, err error) { + + context, parseErr := a.ParseContext(args) + selected := []string{} + var setValuesErr error + + if context == nil { + // Since we do not throw error immediately, there could be a case + // where a context returns nil. Protect against that. + return "", parseErr + } + + if err = a.setDefaults(context); err != nil { + return "", err + } + + selected, setValuesErr = a.setValues(context) + + if err = a.applyPreActions(context, !a.completion); err != nil { + return "", err + } + + if a.completion { + a.generateBashCompletion(context) + a.terminate(0) + } else { + if parseErr != nil { + return "", parseErr + } + + a.maybeHelp(context) + if !context.EOL() { + return "", fmt.Errorf("unexpected argument '%s'", context.Peek()) + } + + if setValuesErr != nil { + return "", setValuesErr + } + + command, err = a.execute(context, selected) + if err == ErrCommandNotSpecified { + a.writeUsage(context, nil) + } + } + return command, err +} + +func (a *Application) writeUsage(context *ParseContext, err error) { + if err != nil { + a.Errorf("%s", err) + } + if err := a.UsageForContext(context); err != nil { + panic(err) + } + if err != nil { + a.terminate(1) + } else { + a.terminate(0) + } +} + +func (a *Application) maybeHelp(context *ParseContext) { + for _, element := range context.Elements { + if flag, ok := element.Clause.(*FlagClause); ok && flag == a.HelpFlag { + // Re-parse the command-line ignoring defaults, so that help works correctly. + context, _ = a.parseContext(true, context.rawArgs) + a.writeUsage(context, nil) + } + } +} + +// Version adds a --version flag for displaying the application version. 
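+//
+// For example (version string illustrative), after
+//
+//	app.Version("0.0.1")
+//
+// invoking the program with --version prints "0.0.1" to the usage writer
+// and terminates with status 0.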
+func (a *Application) Version(version string) *Application { + a.version = version + a.VersionFlag = a.Flag("version", "Show application version.").PreAction(func(*ParseContext) error { + fmt.Fprintln(a.usageWriter, version) + a.terminate(0) + return nil + }) + a.VersionFlag.Bool() + return a +} + +// Author sets the author output by some help templates. +func (a *Application) Author(author string) *Application { + a.author = author + return a +} + +// Action callback to call when all values are populated and parsing is +// complete, but before any command, flag or argument actions. +// +// All Action() callbacks are called in the order they are encountered on the +// command line. +func (a *Application) Action(action Action) *Application { + a.addAction(action) + return a +} + +// Action called after parsing completes but before validation and execution. +func (a *Application) PreAction(action Action) *Application { + a.addPreAction(action) + return a +} + +// Command adds a new top-level command. +func (a *Application) Command(name, help string) *CmdClause { + return a.addCommand(name, help) +} + +// Interspersed control if flags can be interspersed with positional arguments +// +// true (the default) means that they can, false means that all the flags must appear before the first positional arguments. +func (a *Application) Interspersed(interspersed bool) *Application { + a.noInterspersed = !interspersed + return a +} + +func (a *Application) defaultEnvarPrefix() string { + if a.defaultEnvars { + return a.Name + } + return "" +} + +func (a *Application) init() error { + if a.initialized { + return nil + } + if a.cmdGroup.have() && a.argGroup.have() { + return fmt.Errorf("can't mix top-level Arg()s with Command()s") + } + + // If we have subcommands, add a help command at the top-level. + if a.cmdGroup.have() { + var command []string + a.HelpCommand = a.Command("help", "Show help.").PreAction(func(context *ParseContext) error { + a.Usage(command) + a.terminate(0) + return nil + }) + a.HelpCommand.Arg("command", "Show help on command.").StringsVar(&command) + // Make help first command. + l := len(a.commandOrder) + a.commandOrder = append(a.commandOrder[l-1:l], a.commandOrder[:l-1]...) + } + + if err := a.flagGroup.init(a.defaultEnvarPrefix()); err != nil { + return err + } + if err := a.cmdGroup.init(); err != nil { + return err + } + if err := a.argGroup.init(); err != nil { + return err + } + for _, cmd := range a.commands { + if err := cmd.init(); err != nil { + return err + } + } + flagGroups := []*flagGroup{a.flagGroup} + for _, cmd := range a.commandOrder { + if err := checkDuplicateFlags(cmd, flagGroups); err != nil { + return err + } + } + a.initialized = true + return nil +} + +// Recursively check commands for duplicate flags. +func checkDuplicateFlags(current *CmdClause, flagGroups []*flagGroup) error { + // Check for duplicates. + for _, flags := range flagGroups { + for _, flag := range current.flagOrder { + if flag.shorthand != 0 { + if _, ok := flags.short[string(flag.shorthand)]; ok { + return fmt.Errorf("duplicate short flag -%c", flag.shorthand) + } + } + if _, ok := flags.long[flag.name]; ok { + return fmt.Errorf("duplicate long flag --%s", flag.name) + } + } + } + flagGroups = append(flagGroups, current.flagGroup) + // Check subcommands. 
+ for _, subcmd := range current.commandOrder { + if err := checkDuplicateFlags(subcmd, flagGroups); err != nil { + return err + } + } + return nil +} + +func (a *Application) execute(context *ParseContext, selected []string) (string, error) { + var err error + + if err = a.validateRequired(context); err != nil { + return "", err + } + + if err = a.applyValidators(context); err != nil { + return "", err + } + + if err = a.applyActions(context); err != nil { + return "", err + } + + command := strings.Join(selected, " ") + if command == "" && a.cmdGroup.have() { + return "", ErrCommandNotSpecified + } + return command, err +} + +func (a *Application) setDefaults(context *ParseContext) error { + flagElements := map[string]*ParseElement{} + for _, element := range context.Elements { + if flag, ok := element.Clause.(*FlagClause); ok { + if flag.name == "help" { + return nil + } + flagElements[flag.name] = element + } + } + + argElements := map[string]*ParseElement{} + for _, element := range context.Elements { + if arg, ok := element.Clause.(*ArgClause); ok { + argElements[arg.name] = element + } + } + + // Check required flags and set defaults. + for _, flag := range context.flags.long { + if flagElements[flag.name] == nil { + if err := flag.setDefault(); err != nil { + return err + } + } + } + + for _, arg := range context.arguments.args { + if argElements[arg.name] == nil { + if err := arg.setDefault(); err != nil { + return err + } + } + } + + return nil +} + +func (a *Application) validateRequired(context *ParseContext) error { + flagElements := map[string]*ParseElement{} + for _, element := range context.Elements { + if flag, ok := element.Clause.(*FlagClause); ok { + flagElements[flag.name] = element + } + } + + argElements := map[string]*ParseElement{} + for _, element := range context.Elements { + if arg, ok := element.Clause.(*ArgClause); ok { + argElements[arg.name] = element + } + } + + // Check required flags and set defaults. + for _, flag := range context.flags.long { + if flagElements[flag.name] == nil { + // Check required flags were provided. + if flag.needsValue() { + return fmt.Errorf("required flag --%s not provided", flag.name) + } + } + } + + for _, arg := range context.arguments.args { + if argElements[arg.name] == nil { + if arg.needsValue() { + return fmt.Errorf("required argument '%s' not provided", arg.name) + } + } + } + return nil +} + +func (a *Application) setValues(context *ParseContext) (selected []string, err error) { + // Set all arg and flag values. + var ( + lastCmd *CmdClause + flagSet = map[string]struct{}{} + ) + for _, element := range context.Elements { + switch clause := element.Clause.(type) { + case *FlagClause: + if _, ok := flagSet[clause.name]; ok { + if v, ok := clause.value.(repeatableFlag); !ok || !v.IsCumulative() { + return nil, fmt.Errorf("flag '%s' cannot be repeated", clause.name) + } + } + if err = clause.value.Set(*element.Value); err != nil { + return + } + flagSet[clause.name] = struct{}{} + + case *ArgClause: + if err = clause.value.Set(*element.Value); err != nil { + return + } + + case *CmdClause: + if clause.validator != nil { + if err = clause.validator(clause); err != nil { + return + } + } + selected = append(selected, clause.name) + lastCmd = clause + } + } + + if lastCmd != nil && len(lastCmd.commands) > 0 { + return nil, fmt.Errorf("must select a subcommand of '%s'", lastCmd.FullCommand()) + } + + return +} + +func (a *Application) applyValidators(context *ParseContext) (err error) { + // Call command validation functions. 
+ for _, element := range context.Elements { + if cmd, ok := element.Clause.(*CmdClause); ok && cmd.validator != nil { + if err = cmd.validator(cmd); err != nil { + return err + } + } + } + + if a.validator != nil { + err = a.validator(a) + } + return err +} + +func (a *Application) applyPreActions(context *ParseContext, dispatch bool) error { + if err := a.actionMixin.applyPreActions(context); err != nil { + return err + } + // Dispatch to actions. + if dispatch { + for _, element := range context.Elements { + if applier, ok := element.Clause.(actionApplier); ok { + if err := applier.applyPreActions(context); err != nil { + return err + } + } + } + } + + return nil +} + +func (a *Application) applyActions(context *ParseContext) error { + if err := a.actionMixin.applyActions(context); err != nil { + return err + } + // Dispatch to actions. + for _, element := range context.Elements { + if applier, ok := element.Clause.(actionApplier); ok { + if err := applier.applyActions(context); err != nil { + return err + } + } + } + return nil +} + +// Errorf prints an error message to w in the format ": error: ". +func (a *Application) Errorf(format string, args ...interface{}) { + fmt.Fprintf(a.errorWriter, a.Name+": error: "+format+"\n", args...) +} + +// Fatalf writes a formatted error to w then terminates with exit status 1. +func (a *Application) Fatalf(format string, args ...interface{}) { + a.Errorf(format, args...) + a.terminate(1) +} + +// FatalUsage prints an error message followed by usage information, then +// exits with a non-zero status. +func (a *Application) FatalUsage(format string, args ...interface{}) { + a.Errorf(format, args...) + // Force usage to go to error output. + a.usageWriter = a.errorWriter + a.Usage([]string{}) + a.terminate(1) +} + +// FatalUsageContext writes a printf formatted error message to w, then usage +// information for the given ParseContext, before exiting. +func (a *Application) FatalUsageContext(context *ParseContext, format string, args ...interface{}) { + a.Errorf(format, args...) + if err := a.UsageForContext(context); err != nil { + panic(err) + } + a.terminate(1) +} + +// FatalIfError prints an error and exits if err is not nil. The error is printed +// with the given formatted string, if any. +func (a *Application) FatalIfError(err error, format string, args ...interface{}) { + if err != nil { + prefix := "" + if format != "" { + prefix = fmt.Sprintf(format, args...) + ": " + } + a.Errorf(prefix+"%s", err) + a.terminate(1) + } +} + +func (a *Application) completionOptions(context *ParseContext) []string { + args := context.rawArgs + + var ( + currArg string + prevArg string + target cmdMixin + ) + + numArgs := len(args) + if numArgs > 1 { + args = args[1:] + currArg = args[len(args)-1] + } + if numArgs > 2 { + prevArg = args[len(args)-2] + } + + target = a.cmdMixin + if context.SelectedCommand != nil { + // A subcommand was in use. We will use it as the target + target = context.SelectedCommand.cmdMixin + } + + if (currArg != "" && strings.HasPrefix(currArg, "--")) || strings.HasPrefix(prevArg, "--") { + // Perform completion for A flag. 
The last/current argument started with "-" + var ( + flagName string // The name of a flag if given (could be half complete) + flagValue string // The value assigned to a flag (if given) (could be half complete) + ) + + if strings.HasPrefix(prevArg, "--") && !strings.HasPrefix(currArg, "--") { + // Matches: ./myApp --flag value + // Wont Match: ./myApp --flag -- + flagName = prevArg[2:] // Strip the "--" + flagValue = currArg + } else if strings.HasPrefix(currArg, "--") { + // Matches: ./myApp --flag -- + // Matches: ./myApp --flag somevalue -- + // Matches: ./myApp -- + flagName = currArg[2:] // Strip the "--" + } + + options, flagMatched, valueMatched := target.FlagCompletion(flagName, flagValue) + if valueMatched { + // Value Matched. Show cmdCompletions + return target.CmdCompletion(context) + } + + // Add top level flags if we're not at the top level and no match was found. + if context.SelectedCommand != nil && !flagMatched { + topOptions, topFlagMatched, topValueMatched := a.FlagCompletion(flagName, flagValue) + if topValueMatched { + // Value Matched. Back to cmdCompletions + return target.CmdCompletion(context) + } + + if topFlagMatched { + // Top level had a flag which matched the input. Return it's options. + options = topOptions + } else { + // Add top level flags + options = append(options, topOptions...) + } + } + return options + } + + // Perform completion for sub commands and arguments. + return target.CmdCompletion(context) +} + +func (a *Application) generateBashCompletion(context *ParseContext) { + options := a.completionOptions(context) + fmt.Printf("%s", strings.Join(options, "\n")) +} + +func envarTransform(name string) string { + return strings.ToUpper(envarTransformRegexp.ReplaceAllString(name, "_")) +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/args.go b/vendor/gopkg.in/alecthomas/kingpin.v2/args.go new file mode 100644 index 0000000000..340069476b --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/args.go @@ -0,0 +1,184 @@ +package kingpin + +import ( + "fmt" +) + +type argGroup struct { + args []*ArgClause +} + +func newArgGroup() *argGroup { + return &argGroup{} +} + +func (a *argGroup) have() bool { + return len(a.args) > 0 +} + +// GetArg gets an argument definition. +// +// This allows existing arguments to be modified after definition but before parsing. Useful for +// modular applications. 
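+//
+// A sketch of overriding a previously defined argument (the "path" name
+// is illustrative):
+//
+//	if arg := app.GetArg("path"); arg != nil {
+//		arg.Default("/tmp")
+//	}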
+func (a *argGroup) GetArg(name string) *ArgClause { + for _, arg := range a.args { + if arg.name == name { + return arg + } + } + return nil +} + +func (a *argGroup) Arg(name, help string) *ArgClause { + arg := newArg(name, help) + a.args = append(a.args, arg) + return arg +} + +func (a *argGroup) init() error { + required := 0 + seen := map[string]struct{}{} + previousArgMustBeLast := false + for i, arg := range a.args { + if previousArgMustBeLast { + return fmt.Errorf("Args() can't be followed by another argument '%s'", arg.name) + } + if arg.consumesRemainder() { + previousArgMustBeLast = true + } + if _, ok := seen[arg.name]; ok { + return fmt.Errorf("duplicate argument '%s'", arg.name) + } + seen[arg.name] = struct{}{} + if arg.required && required != i { + return fmt.Errorf("required arguments found after non-required") + } + if arg.required { + required++ + } + if err := arg.init(); err != nil { + return err + } + } + return nil +} + +type ArgClause struct { + actionMixin + parserMixin + completionsMixin + envarMixin + name string + help string + defaultValues []string + required bool +} + +func newArg(name, help string) *ArgClause { + a := &ArgClause{ + name: name, + help: help, + } + return a +} + +func (a *ArgClause) setDefault() error { + if a.HasEnvarValue() { + if v, ok := a.value.(remainderArg); !ok || !v.IsCumulative() { + // Use the value as-is + return a.value.Set(a.GetEnvarValue()) + } + for _, value := range a.GetSplitEnvarValue() { + if err := a.value.Set(value); err != nil { + return err + } + } + return nil + } + + if len(a.defaultValues) > 0 { + for _, defaultValue := range a.defaultValues { + if err := a.value.Set(defaultValue); err != nil { + return err + } + } + return nil + } + + return nil +} + +func (a *ArgClause) needsValue() bool { + haveDefault := len(a.defaultValues) > 0 + return a.required && !(haveDefault || a.HasEnvarValue()) +} + +func (a *ArgClause) consumesRemainder() bool { + if r, ok := a.value.(remainderArg); ok { + return r.IsCumulative() + } + return false +} + +// Required arguments must be input by the user. They can not have a Default() value provided. +func (a *ArgClause) Required() *ArgClause { + a.required = true + return a +} + +// Default values for this argument. They *must* be parseable by the value of the argument. +func (a *ArgClause) Default(values ...string) *ArgClause { + a.defaultValues = values + return a +} + +// Envar overrides the default value(s) for a flag from an environment variable, +// if it is set. Several default values can be provided by using new lines to +// separate them. +func (a *ArgClause) Envar(name string) *ArgClause { + a.envar = name + a.noEnvar = false + return a +} + +// NoEnvar forces environment variable defaults to be disabled for this flag. +// Most useful in conjunction with app.DefaultEnvars(). 
+func (a *ArgClause) NoEnvar() *ArgClause { + a.envar = "" + a.noEnvar = true + return a +} + +func (a *ArgClause) Action(action Action) *ArgClause { + a.addAction(action) + return a +} + +func (a *ArgClause) PreAction(action Action) *ArgClause { + a.addPreAction(action) + return a +} + +// HintAction registers a HintAction (function) for the arg to provide completions +func (a *ArgClause) HintAction(action HintAction) *ArgClause { + a.addHintAction(action) + return a +} + +// HintOptions registers any number of options for the flag to provide completions +func (a *ArgClause) HintOptions(options ...string) *ArgClause { + a.addHintAction(func() []string { + return options + }) + return a +} + +func (a *ArgClause) init() error { + if a.required && len(a.defaultValues) > 0 { + return fmt.Errorf("required argument '%s' with unusable default value", a.name) + } + if a.value == nil { + return fmt.Errorf("no parser defined for arg '%s'", a.name) + } + return nil +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/cmd.go b/vendor/gopkg.in/alecthomas/kingpin.v2/cmd.go new file mode 100644 index 0000000000..0473b871d5 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/cmd.go @@ -0,0 +1,274 @@ +package kingpin + +import ( + "fmt" + "strings" +) + +type cmdMixin struct { + *flagGroup + *argGroup + *cmdGroup + actionMixin +} + +// CmdCompletion returns completion options for arguments, if that's where +// parsing left off, or commands if there aren't any unsatisfied args. +func (c *cmdMixin) CmdCompletion(context *ParseContext) []string { + var options []string + + // Count args already satisfied - we won't complete those, and add any + // default commands' alternatives, since they weren't listed explicitly + // and the user may want to explicitly list something else. + argsSatisfied := 0 + for _, el := range context.Elements { + switch clause := el.Clause.(type) { + case *ArgClause: + if el.Value != nil && *el.Value != "" { + argsSatisfied++ + } + case *CmdClause: + options = append(options, clause.completionAlts...) + default: + } + } + + if argsSatisfied < len(c.argGroup.args) { + // Since not all args have been satisfied, show options for the current one + options = append(options, c.argGroup.args[argsSatisfied].resolveCompletions()...) + } else { + // If all args are satisfied, then go back to completing commands + for _, cmd := range c.cmdGroup.commandOrder { + if !cmd.hidden { + options = append(options, cmd.name) + } + } + } + + return options +} + +func (c *cmdMixin) FlagCompletion(flagName string, flagValue string) (choices []string, flagMatch bool, optionMatch bool) { + // Check if flagName matches a known flag. + // If it does, show the options for the flag + // Otherwise, show all flags + + options := []string{} + + for _, flag := range c.flagGroup.flagOrder { + // Loop through each flag and determine if a match exists + if flag.name == flagName { + // User typed entire flag. Need to look for flag options. + options = flag.resolveCompletions() + if len(options) == 0 { + // No Options to Choose From, Assume Match. 
+ return options, true, true + } + + // Loop options to find if the user specified value matches + isPrefix := false + matched := false + + for _, opt := range options { + if flagValue == opt { + matched = true + } else if strings.HasPrefix(opt, flagValue) { + isPrefix = true + } + } + + // Matched Flag Directly + // Flag Value Not Prefixed, and Matched Directly + return options, true, !isPrefix && matched + } + + if !flag.hidden { + options = append(options, "--"+flag.name) + } + } + // No Flag directly matched. + return options, false, false + +} + +type cmdGroup struct { + app *Application + parent *CmdClause + commands map[string]*CmdClause + commandOrder []*CmdClause +} + +func (c *cmdGroup) defaultSubcommand() *CmdClause { + for _, cmd := range c.commandOrder { + if cmd.isDefault { + return cmd + } + } + return nil +} + +func (c *cmdGroup) cmdNames() []string { + names := make([]string, 0, len(c.commandOrder)) + for _, cmd := range c.commandOrder { + names = append(names, cmd.name) + } + return names +} + +// GetArg gets a command definition. +// +// This allows existing commands to be modified after definition but before parsing. Useful for +// modular applications. +func (c *cmdGroup) GetCommand(name string) *CmdClause { + return c.commands[name] +} + +func newCmdGroup(app *Application) *cmdGroup { + return &cmdGroup{ + app: app, + commands: make(map[string]*CmdClause), + } +} + +func (c *cmdGroup) flattenedCommands() (out []*CmdClause) { + for _, cmd := range c.commandOrder { + if len(cmd.commands) == 0 { + out = append(out, cmd) + } + out = append(out, cmd.flattenedCommands()...) + } + return +} + +func (c *cmdGroup) addCommand(name, help string) *CmdClause { + cmd := newCommand(c.app, name, help) + c.commands[name] = cmd + c.commandOrder = append(c.commandOrder, cmd) + return cmd +} + +func (c *cmdGroup) init() error { + seen := map[string]bool{} + if c.defaultSubcommand() != nil && !c.have() { + return fmt.Errorf("default subcommand %q provided but no subcommands defined", c.defaultSubcommand().name) + } + defaults := []string{} + for _, cmd := range c.commandOrder { + if cmd.isDefault { + defaults = append(defaults, cmd.name) + } + if seen[cmd.name] { + return fmt.Errorf("duplicate command %q", cmd.name) + } + seen[cmd.name] = true + for _, alias := range cmd.aliases { + if seen[alias] { + return fmt.Errorf("alias duplicates existing command %q", alias) + } + c.commands[alias] = cmd + } + if err := cmd.init(); err != nil { + return err + } + } + if len(defaults) > 1 { + return fmt.Errorf("more than one default subcommand exists: %s", strings.Join(defaults, ", ")) + } + return nil +} + +func (c *cmdGroup) have() bool { + return len(c.commands) > 0 +} + +type CmdClauseValidator func(*CmdClause) error + +// A CmdClause is a single top-level command. It encapsulates a set of flags +// and either subcommands or positional arguments. +type CmdClause struct { + cmdMixin + app *Application + name string + aliases []string + help string + isDefault bool + validator CmdClauseValidator + hidden bool + completionAlts []string +} + +func newCommand(app *Application, name, help string) *CmdClause { + c := &CmdClause{ + app: app, + name: name, + help: help, + } + c.flagGroup = newFlagGroup() + c.argGroup = newArgGroup() + c.cmdGroup = newCmdGroup(app) + return c +} + +// Add an Alias for this command. +func (c *CmdClause) Alias(name string) *CmdClause { + c.aliases = append(c.aliases, name) + return c +} + +// Validate sets a validation function to run when parsing. 
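+//
+// A sketch of a validator (the cross-check is illustrative):
+//
+//	cmd.Validate(func(c *kingpin.CmdClause) error {
+//		// reject invalid flag/argument combinations here
+//		return nil
+//	})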
+func (c *CmdClause) Validate(validator CmdClauseValidator) *CmdClause {
+	c.validator = validator
+	return c
+}
+
+func (c *CmdClause) FullCommand() string {
+	out := []string{c.name}
+	for p := c.parent; p != nil; p = p.parent {
+		out = append([]string{p.name}, out...)
+	}
+	return strings.Join(out, " ")
+}
+
+// Command adds a new sub-command.
+func (c *CmdClause) Command(name, help string) *CmdClause {
+	cmd := c.addCommand(name, help)
+	cmd.parent = c
+	return cmd
+}
+
+// Default makes this command the default if commands don't match.
+func (c *CmdClause) Default() *CmdClause {
+	c.isDefault = true
+	return c
+}
+
+func (c *CmdClause) Action(action Action) *CmdClause {
+	c.addAction(action)
+	return c
+}
+
+func (c *CmdClause) PreAction(action Action) *CmdClause {
+	c.addPreAction(action)
+	return c
+}
+
+func (c *CmdClause) init() error {
+	if err := c.flagGroup.init(c.app.defaultEnvarPrefix()); err != nil {
+		return err
+	}
+	if c.argGroup.have() && c.cmdGroup.have() {
+		return fmt.Errorf("can't mix Arg()s with Command()s")
+	}
+	if err := c.argGroup.init(); err != nil {
+		return err
+	}
+	if err := c.cmdGroup.init(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *CmdClause) Hidden() *CmdClause {
+	c.hidden = true
+	return c
+}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/completions.go b/vendor/gopkg.in/alecthomas/kingpin.v2/completions.go
new file mode 100644
index 0000000000..6e7b409fe4
--- /dev/null
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/completions.go
@@ -0,0 +1,33 @@
+package kingpin
+
+// HintAction is a function type that is expected to return a slice of possible
+// command line arguments.
+type HintAction func() []string
+type completionsMixin struct {
+	hintActions        []HintAction
+	builtinHintActions []HintAction
+}
+
+func (a *completionsMixin) addHintAction(action HintAction) {
+	a.hintActions = append(a.hintActions, action)
+}
+
+// Allow adding of HintActions which are added internally, e.g. by EnumVar.
+func (a *completionsMixin) addHintActionBuiltin(action HintAction) {
+	a.builtinHintActions = append(a.builtinHintActions, action)
+}
+
+func (a *completionsMixin) resolveCompletions() []string {
+	var hints []string
+
+	options := a.builtinHintActions
+	if len(a.hintActions) > 0 {
+		// User specified their own hintActions. Use those instead.
+		options = a.hintActions
+	}
+
+	for _, hintAction := range options {
+		hints = append(hints, hintAction()...)
+	}
+	return hints
+}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/doc.go b/vendor/gopkg.in/alecthomas/kingpin.v2/doc.go
new file mode 100644
index 0000000000..cb951a8045
--- /dev/null
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/doc.go
@@ -0,0 +1,68 @@
+// Package kingpin provides command line interfaces like this:
+//
+//     $ chat
+//     usage: chat [<flags>] <command> [<flags>] [<args> ...]
+//
+//     Flags:
+//       --debug              enable debug mode
+//       --help               Show help.
+//       --server=127.0.0.1   server address
+//
+//     Commands:
+//       help <command>
+//         Show help for a command.
+//
+//       post [<flags>] <channel>
+//         Post a message to a channel.
+//
+//       register <nick> <name>
+//         Register a new user.
+//
+//     $ chat help post
+//     usage: chat [<flags>] post [<flags>] <channel> [<text>]
+//
+//     Post a message to a channel.
+//
+//     Flags:
+//       --image=IMAGE  image to post
+//
+//     Args:
+//       <channel>  channel to post to
+//       [<text>]   text to post
+//     $ chat post --image=~/Downloads/owls.jpg pics
+//
+// From code like this:
+//
+//     package main
+//
+//     import "gopkg.in/alecthomas/kingpin.v2"
+//
+//     var (
+//       debug    = kingpin.Flag("debug", "enable debug mode").Default("false").Bool()
+//       serverIP = kingpin.Flag("server", "server address").Default("127.0.0.1").IP()
+//
+//       register     = kingpin.Command("register", "Register a new user.")
+//       registerNick = register.Arg("nick", "nickname for user").Required().String()
+//       registerName = register.Arg("name", "name of user").Required().String()
+//
+//       post        = kingpin.Command("post", "Post a message to a channel.")
+//       postImage   = post.Flag("image", "image to post").ExistingFile()
+//       postChannel = post.Arg("channel", "channel to post to").Required().String()
+//       postText    = post.Arg("text", "text to post").String()
+//     )
+//
+//     func main() {
+//       switch kingpin.Parse() {
+//       // Register user
+//       case "register":
+//         println(*registerNick)
+//
+//       // Post message
+//       case "post":
+//         if *postImage != "" {
+//         }
+//         if *postText != "" {
+//         }
+//       }
+//     }
package kingpin
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/envar.go b/vendor/gopkg.in/alecthomas/kingpin.v2/envar.go
new file mode 100644
index 0000000000..c01a27df80
--- /dev/null
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/envar.go
@@ -0,0 +1,45 @@
+package kingpin
+
+import (
+	"os"
+	"regexp"
+)
+
+var (
+	envVarValuesSeparator = "\r?\n"
+	envVarValuesTrimmer   = regexp.MustCompile(envVarValuesSeparator + "$")
+	envVarValuesSplitter  = regexp.MustCompile(envVarValuesSeparator)
+)
+
+type envarMixin struct {
+	envar   string
+	noEnvar bool
+}
+
+func (e *envarMixin) HasEnvarValue() bool {
+	return e.GetEnvarValue() != ""
+}
+
+func (e *envarMixin) GetEnvarValue() string {
+	if e.noEnvar || e.envar == "" {
+		return ""
+	}
+	return os.Getenv(e.envar)
+}
+
+func (e *envarMixin) GetSplitEnvarValue() []string {
+	values := make([]string, 0)
+
+	envarValue := e.GetEnvarValue()
+	if envarValue == "" {
+		return values
+	}
+
+	// Split by new line to extract multiple values, if any.
+	trimmed := envVarValuesTrimmer.ReplaceAllString(envarValue, "")
+	for _, value := range envVarValuesSplitter.Split(trimmed, -1) {
+		values = append(values, value)
+	}
+
+	return values
+}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/flags.go b/vendor/gopkg.in/alecthomas/kingpin.v2/flags.go
new file mode 100644
index 0000000000..8f33721fc9
--- /dev/null
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/flags.go
@@ -0,0 +1,308 @@
+package kingpin
+
+import (
+	"fmt"
+	"strings"
+)
+
+type flagGroup struct {
+	short     map[string]*FlagClause
+	long      map[string]*FlagClause
+	flagOrder []*FlagClause
+}
+
+func newFlagGroup() *flagGroup {
+	return &flagGroup{
+		short: map[string]*FlagClause{},
+		long:  map[string]*FlagClause{},
+	}
+}
+
+// GetFlag gets a flag definition.
+//
+// This allows existing flags to be modified after definition but before parsing. Useful for
+// modular applications.
+func (f *flagGroup) GetFlag(name string) *FlagClause {
+	return f.long[name]
+}
+
+// Flag defines a new flag with the given long name and help.
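+//
+// A sketch of typical usage (flag name illustrative):
+//
+//	verbose := app.Flag("verbose", "Enable verbose output.").Short('v').Bool()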
+func (f *flagGroup) Flag(name, help string) *FlagClause { + flag := newFlag(name, help) + f.long[name] = flag + f.flagOrder = append(f.flagOrder, flag) + return flag +} + +func (f *flagGroup) init(defaultEnvarPrefix string) error { + if err := f.checkDuplicates(); err != nil { + return err + } + for _, flag := range f.long { + if defaultEnvarPrefix != "" && !flag.noEnvar && flag.envar == "" { + flag.envar = envarTransform(defaultEnvarPrefix + "_" + flag.name) + } + if err := flag.init(); err != nil { + return err + } + if flag.shorthand != 0 { + f.short[string(flag.shorthand)] = flag + } + } + return nil +} + +func (f *flagGroup) checkDuplicates() error { + seenShort := map[rune]bool{} + seenLong := map[string]bool{} + for _, flag := range f.flagOrder { + if flag.shorthand != 0 { + if _, ok := seenShort[flag.shorthand]; ok { + return fmt.Errorf("duplicate short flag -%c", flag.shorthand) + } + seenShort[flag.shorthand] = true + } + if _, ok := seenLong[flag.name]; ok { + return fmt.Errorf("duplicate long flag --%s", flag.name) + } + seenLong[flag.name] = true + } + return nil +} + +func (f *flagGroup) parse(context *ParseContext) (*FlagClause, error) { + var token *Token + +loop: + for { + token = context.Peek() + switch token.Type { + case TokenEOL: + break loop + + case TokenLong, TokenShort: + flagToken := token + defaultValue := "" + var flag *FlagClause + var ok bool + invert := false + + name := token.Value + if token.Type == TokenLong { + flag, ok = f.long[name] + if !ok { + if strings.HasPrefix(name, "no-") { + name = name[3:] + invert = true + } + flag, ok = f.long[name] + } + if !ok { + return nil, fmt.Errorf("unknown long flag '%s'", flagToken) + } + } else { + flag, ok = f.short[name] + if !ok { + return nil, fmt.Errorf("unknown short flag '%s'", flagToken) + } + } + + context.Next() + + fb, ok := flag.value.(boolFlag) + if ok && fb.IsBoolFlag() { + if invert { + defaultValue = "false" + } else { + defaultValue = "true" + } + } else { + if invert { + context.Push(token) + return nil, fmt.Errorf("unknown long flag '%s'", flagToken) + } + token = context.Peek() + if token.Type != TokenArg { + context.Push(token) + return nil, fmt.Errorf("expected argument for flag '%s'", flagToken) + } + context.Next() + defaultValue = token.Value + } + + context.matchedFlag(flag, defaultValue) + return flag, nil + + default: + break loop + } + } + return nil, nil +} + +// FlagClause is a fluid interface used to build flags. 
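+//
+// A sketch of the fluent style (names and envar illustrative):
+//
+//	timeout := app.Flag("timeout", "Request timeout.").
+//		Default("5s").
+//		Envar("APP_TIMEOUT").
+//		Duration()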
+type FlagClause struct { + parserMixin + actionMixin + completionsMixin + envarMixin + name string + shorthand rune + help string + defaultValues []string + placeholder string + hidden bool +} + +func newFlag(name, help string) *FlagClause { + f := &FlagClause{ + name: name, + help: help, + } + return f +} + +func (f *FlagClause) setDefault() error { + if f.HasEnvarValue() { + if v, ok := f.value.(repeatableFlag); !ok || !v.IsCumulative() { + // Use the value as-is + return f.value.Set(f.GetEnvarValue()) + } else { + for _, value := range f.GetSplitEnvarValue() { + if err := f.value.Set(value); err != nil { + return err + } + } + return nil + } + } + + if len(f.defaultValues) > 0 { + for _, defaultValue := range f.defaultValues { + if err := f.value.Set(defaultValue); err != nil { + return err + } + } + return nil + } + + return nil +} + +func (f *FlagClause) needsValue() bool { + haveDefault := len(f.defaultValues) > 0 + return f.required && !(haveDefault || f.HasEnvarValue()) +} + +func (f *FlagClause) init() error { + if f.required && len(f.defaultValues) > 0 { + return fmt.Errorf("required flag '--%s' with default value that will never be used", f.name) + } + if f.value == nil { + return fmt.Errorf("no type defined for --%s (eg. .String())", f.name) + } + if v, ok := f.value.(repeatableFlag); (!ok || !v.IsCumulative()) && len(f.defaultValues) > 1 { + return fmt.Errorf("invalid default for '--%s', expecting single value", f.name) + } + return nil +} + +// Dispatch to the given function after the flag is parsed and validated. +func (f *FlagClause) Action(action Action) *FlagClause { + f.addAction(action) + return f +} + +func (f *FlagClause) PreAction(action Action) *FlagClause { + f.addPreAction(action) + return f +} + +// HintAction registers a HintAction (function) for the flag to provide completions +func (a *FlagClause) HintAction(action HintAction) *FlagClause { + a.addHintAction(action) + return a +} + +// HintOptions registers any number of options for the flag to provide completions +func (a *FlagClause) HintOptions(options ...string) *FlagClause { + a.addHintAction(func() []string { + return options + }) + return a +} + +func (a *FlagClause) EnumVar(target *string, options ...string) { + a.parserMixin.EnumVar(target, options...) + a.addHintActionBuiltin(func() []string { + return options + }) +} + +func (a *FlagClause) Enum(options ...string) (target *string) { + a.addHintActionBuiltin(func() []string { + return options + }) + return a.parserMixin.Enum(options...) +} + +// Default values for this flag. They *must* be parseable by the value of the flag. +func (f *FlagClause) Default(values ...string) *FlagClause { + f.defaultValues = values + return f +} + +// DEPRECATED: Use Envar(name) instead. +func (f *FlagClause) OverrideDefaultFromEnvar(envar string) *FlagClause { + return f.Envar(envar) +} + +// Envar overrides the default value(s) for a flag from an environment variable, +// if it is set. Several default values can be provided by using new lines to +// separate them. +func (f *FlagClause) Envar(name string) *FlagClause { + f.envar = name + f.noEnvar = false + return f +} + +// NoEnvar forces environment variable defaults to be disabled for this flag. +// Most useful in conjunction with app.DefaultEnvars(). +func (f *FlagClause) NoEnvar() *FlagClause { + f.envar = "" + f.noEnvar = true + return f +} + +// PlaceHolder sets the place-holder string used for flag values in the help. 
The +// default behaviour is to use the value provided by Default() if provided, +// then fall back on the capitalized flag name. +func (f *FlagClause) PlaceHolder(placeholder string) *FlagClause { + f.placeholder = placeholder + return f +} + +// Hidden hides a flag from usage but still allows it to be used. +func (f *FlagClause) Hidden() *FlagClause { + f.hidden = true + return f +} + +// Required makes the flag required. You can not provide a Default() value to a Required() flag. +func (f *FlagClause) Required() *FlagClause { + f.required = true + return f +} + +// Short sets the short flag name. +func (f *FlagClause) Short(name rune) *FlagClause { + f.shorthand = name + return f +} + +// Bool makes this flag a boolean flag. +func (f *FlagClause) Bool() (target *bool) { + target = new(bool) + f.SetValue(newBoolValue(target)) + return +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/global.go b/vendor/gopkg.in/alecthomas/kingpin.v2/global.go new file mode 100644 index 0000000000..10a29137ce --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/global.go @@ -0,0 +1,94 @@ +package kingpin + +import ( + "os" + "path/filepath" +) + +var ( + // CommandLine is the default Kingpin parser. + CommandLine = New(filepath.Base(os.Args[0]), "") + // Global help flag. Exposed for user customisation. + HelpFlag = CommandLine.HelpFlag + // Top-level help command. Exposed for user customisation. May be nil. + HelpCommand = CommandLine.HelpCommand + // Global version flag. Exposed for user customisation. May be nil. + VersionFlag = CommandLine.VersionFlag +) + +// Command adds a new command to the default parser. +func Command(name, help string) *CmdClause { + return CommandLine.Command(name, help) +} + +// Flag adds a new flag to the default parser. +func Flag(name, help string) *FlagClause { + return CommandLine.Flag(name, help) +} + +// Arg adds a new argument to the top-level of the default parser. +func Arg(name, help string) *ArgClause { + return CommandLine.Arg(name, help) +} + +// Parse and return the selected command. Will call the termination handler if +// an error is encountered. +func Parse() string { + selected := MustParse(CommandLine.Parse(os.Args[1:])) + if selected == "" && CommandLine.cmdGroup.have() { + Usage() + CommandLine.terminate(0) + } + return selected +} + +// Errorf prints an error message to stderr. +func Errorf(format string, args ...interface{}) { + CommandLine.Errorf(format, args...) +} + +// Fatalf prints an error message to stderr and exits. +func Fatalf(format string, args ...interface{}) { + CommandLine.Fatalf(format, args...) +} + +// FatalIfError prints an error and exits if err is not nil. The error is printed +// with the given prefix. +func FatalIfError(err error, format string, args ...interface{}) { + CommandLine.FatalIfError(err, format, args...) +} + +// FatalUsage prints an error message followed by usage information, then +// exits with a non-zero status. +func FatalUsage(format string, args ...interface{}) { + CommandLine.FatalUsage(format, args...) +} + +// FatalUsageContext writes a printf formatted error message to stderr, then +// usage information for the given ParseContext, before exiting. +func FatalUsageContext(context *ParseContext, format string, args ...interface{}) { + CommandLine.FatalUsageContext(context, format, args...) +} + +// Usage prints usage to stderr. +func Usage() { + CommandLine.Usage(os.Args[1:]) +} + +// Set global usage template to use (defaults to DefaultUsageTemplate). 
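+//
+// For example, to render help with the compact template:
+//
+//	kingpin.UsageTemplate(kingpin.CompactUsageTemplate)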
+func UsageTemplate(template string) *Application { + return CommandLine.UsageTemplate(template) +} + +// MustParse can be used with app.Parse(args) to exit with an error if parsing fails. +func MustParse(command string, err error) string { + if err != nil { + Fatalf("%s, try --help", err) + } + return command +} + +// Version adds a flag for displaying the application version number. +func Version(version string) *Application { + return CommandLine.Version(version) +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth.go b/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth.go new file mode 100644 index 0000000000..a269531c86 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth.go @@ -0,0 +1,9 @@ +// +build appengine !linux,!freebsd,!darwin,!dragonfly,!netbsd,!openbsd + +package kingpin + +import "io" + +func guessWidth(w io.Writer) int { + return 80 +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth_unix.go b/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth_unix.go new file mode 100644 index 0000000000..ad8163f555 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth_unix.go @@ -0,0 +1,38 @@ +// +build !appengine,linux freebsd darwin dragonfly netbsd openbsd + +package kingpin + +import ( + "io" + "os" + "strconv" + "syscall" + "unsafe" +) + +func guessWidth(w io.Writer) int { + // check if COLUMNS env is set to comply with + // http://pubs.opengroup.org/onlinepubs/009604499/basedefs/xbd_chap08.html + colsStr := os.Getenv("COLUMNS") + if colsStr != "" { + if cols, err := strconv.Atoi(colsStr); err == nil { + return cols + } + } + + if t, ok := w.(*os.File); ok { + fd := t.Fd() + var dimensions [4]uint16 + + if _, _, err := syscall.Syscall6( + syscall.SYS_IOCTL, + uintptr(fd), + uintptr(syscall.TIOCGWINSZ), + uintptr(unsafe.Pointer(&dimensions)), + 0, 0, 0, + ); err == 0 { + return int(dimensions[1]) + } + } + return 80 +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/model.go b/vendor/gopkg.in/alecthomas/kingpin.v2/model.go new file mode 100644 index 0000000000..a4ee83b421 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/model.go @@ -0,0 +1,227 @@ +package kingpin + +import ( + "fmt" + "strconv" + "strings" +) + +// Data model for Kingpin command-line structure. + +type FlagGroupModel struct { + Flags []*FlagModel +} + +func (f *FlagGroupModel) FlagSummary() string { + out := []string{} + count := 0 + for _, flag := range f.Flags { + if flag.Name != "help" { + count++ + } + if flag.Required { + if flag.IsBoolFlag() { + out = append(out, fmt.Sprintf("--[no-]%s", flag.Name)) + } else { + out = append(out, fmt.Sprintf("--%s=%s", flag.Name, flag.FormatPlaceHolder())) + } + } + } + if count != len(out) { + out = append(out, "[]") + } + return strings.Join(out, " ") +} + +type FlagModel struct { + Name string + Help string + Short rune + Default []string + Envar string + PlaceHolder string + Required bool + Hidden bool + Value Value +} + +func (f *FlagModel) String() string { + return f.Value.String() +} + +func (f *FlagModel) IsBoolFlag() bool { + if fl, ok := f.Value.(boolFlag); ok { + return fl.IsBoolFlag() + } + return false +} + +func (f *FlagModel) FormatPlaceHolder() string { + if f.PlaceHolder != "" { + return f.PlaceHolder + } + if len(f.Default) > 0 { + ellipsis := "" + if len(f.Default) > 1 { + ellipsis = "..." 
+ } + if _, ok := f.Value.(*stringValue); ok { + return strconv.Quote(f.Default[0]) + ellipsis + } + return f.Default[0] + ellipsis + } + return strings.ToUpper(f.Name) +} + +type ArgGroupModel struct { + Args []*ArgModel +} + +func (a *ArgGroupModel) ArgSummary() string { + depth := 0 + out := []string{} + for _, arg := range a.Args { + h := "<" + arg.Name + ">" + if !arg.Required { + h = "[" + h + depth++ + } + out = append(out, h) + } + out[len(out)-1] = out[len(out)-1] + strings.Repeat("]", depth) + return strings.Join(out, " ") +} + +type ArgModel struct { + Name string + Help string + Default []string + Envar string + Required bool + Value Value +} + +func (a *ArgModel) String() string { + return a.Value.String() +} + +type CmdGroupModel struct { + Commands []*CmdModel +} + +func (c *CmdGroupModel) FlattenedCommands() (out []*CmdModel) { + for _, cmd := range c.Commands { + if len(cmd.Commands) == 0 { + out = append(out, cmd) + } + out = append(out, cmd.FlattenedCommands()...) + } + return +} + +type CmdModel struct { + Name string + Aliases []string + Help string + FullCommand string + Depth int + Hidden bool + Default bool + *FlagGroupModel + *ArgGroupModel + *CmdGroupModel +} + +func (c *CmdModel) String() string { + return c.FullCommand +} + +type ApplicationModel struct { + Name string + Help string + Version string + Author string + *ArgGroupModel + *CmdGroupModel + *FlagGroupModel +} + +func (a *Application) Model() *ApplicationModel { + return &ApplicationModel{ + Name: a.Name, + Help: a.Help, + Version: a.version, + Author: a.author, + FlagGroupModel: a.flagGroup.Model(), + ArgGroupModel: a.argGroup.Model(), + CmdGroupModel: a.cmdGroup.Model(), + } +} + +func (a *argGroup) Model() *ArgGroupModel { + m := &ArgGroupModel{} + for _, arg := range a.args { + m.Args = append(m.Args, arg.Model()) + } + return m +} + +func (a *ArgClause) Model() *ArgModel { + return &ArgModel{ + Name: a.name, + Help: a.help, + Default: a.defaultValues, + Envar: a.envar, + Required: a.required, + Value: a.value, + } +} + +func (f *flagGroup) Model() *FlagGroupModel { + m := &FlagGroupModel{} + for _, fl := range f.flagOrder { + m.Flags = append(m.Flags, fl.Model()) + } + return m +} + +func (f *FlagClause) Model() *FlagModel { + return &FlagModel{ + Name: f.name, + Help: f.help, + Short: rune(f.shorthand), + Default: f.defaultValues, + Envar: f.envar, + PlaceHolder: f.placeholder, + Required: f.required, + Hidden: f.hidden, + Value: f.value, + } +} + +func (c *cmdGroup) Model() *CmdGroupModel { + m := &CmdGroupModel{} + for _, cm := range c.commandOrder { + m.Commands = append(m.Commands, cm.Model()) + } + return m +} + +func (c *CmdClause) Model() *CmdModel { + depth := 0 + for i := c; i != nil; i = i.parent { + depth++ + } + return &CmdModel{ + Name: c.name, + Aliases: c.aliases, + Help: c.help, + Depth: depth, + Hidden: c.hidden, + Default: c.isDefault, + FullCommand: c.FullCommand(), + FlagGroupModel: c.flagGroup.Model(), + ArgGroupModel: c.argGroup.Model(), + CmdGroupModel: c.cmdGroup.Model(), + } +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/parser.go b/vendor/gopkg.in/alecthomas/kingpin.v2/parser.go new file mode 100644 index 0000000000..2a18351928 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/parser.go @@ -0,0 +1,396 @@ +package kingpin + +import ( + "bufio" + "fmt" + "os" + "strings" + "unicode/utf8" +) + +type TokenType int + +// Token types. 
+const ( + TokenShort TokenType = iota + TokenLong + TokenArg + TokenError + TokenEOL +) + +func (t TokenType) String() string { + switch t { + case TokenShort: + return "short flag" + case TokenLong: + return "long flag" + case TokenArg: + return "argument" + case TokenError: + return "error" + case TokenEOL: + return "" + } + return "?" +} + +var ( + TokenEOLMarker = Token{-1, TokenEOL, ""} +) + +type Token struct { + Index int + Type TokenType + Value string +} + +func (t *Token) Equal(o *Token) bool { + return t.Index == o.Index +} + +func (t *Token) IsFlag() bool { + return t.Type == TokenShort || t.Type == TokenLong +} + +func (t *Token) IsEOF() bool { + return t.Type == TokenEOL +} + +func (t *Token) String() string { + switch t.Type { + case TokenShort: + return "-" + t.Value + case TokenLong: + return "--" + t.Value + case TokenArg: + return t.Value + case TokenError: + return "error: " + t.Value + case TokenEOL: + return "" + default: + panic("unhandled type") + } +} + +// A union of possible elements in a parse stack. +type ParseElement struct { + // Clause is either *CmdClause, *ArgClause or *FlagClause. + Clause interface{} + // Value is corresponding value for an ArgClause or FlagClause (if any). + Value *string +} + +// ParseContext holds the current context of the parser. When passed to +// Action() callbacks Elements will be fully populated with *FlagClause, +// *ArgClause and *CmdClause values and their corresponding arguments (if +// any). +type ParseContext struct { + SelectedCommand *CmdClause + ignoreDefault bool + argsOnly bool + peek []*Token + argi int // Index of current command-line arg we're processing. + args []string + rawArgs []string + flags *flagGroup + arguments *argGroup + argumenti int // Cursor into arguments + // Flags, arguments and commands encountered and collected during parse. + Elements []*ParseElement +} + +func (p *ParseContext) nextArg() *ArgClause { + if p.argumenti >= len(p.arguments.args) { + return nil + } + arg := p.arguments.args[p.argumenti] + if !arg.consumesRemainder() { + p.argumenti++ + } + return arg +} + +func (p *ParseContext) next() { + p.argi++ + p.args = p.args[1:] +} + +// HasTrailingArgs returns true if there are unparsed command-line arguments. +// This can occur if the parser can not match remaining arguments. +func (p *ParseContext) HasTrailingArgs() bool { + return len(p.args) > 0 +} + +func tokenize(args []string, ignoreDefault bool) *ParseContext { + return &ParseContext{ + ignoreDefault: ignoreDefault, + args: args, + rawArgs: args, + flags: newFlagGroup(), + arguments: newArgGroup(), + } +} + +func (p *ParseContext) mergeFlags(flags *flagGroup) { + for _, flag := range flags.flagOrder { + if flag.shorthand != 0 { + p.flags.short[string(flag.shorthand)] = flag + } + p.flags.long[flag.name] = flag + p.flags.flagOrder = append(p.flags.flagOrder, flag) + } +} + +func (p *ParseContext) mergeArgs(args *argGroup) { + for _, arg := range args.args { + p.arguments.args = append(p.arguments.args, arg) + } +} + +func (p *ParseContext) EOL() bool { + return p.Peek().Type == TokenEOL +} + +func (p *ParseContext) Error() bool { + return p.Peek().Type == TokenError +} + +// Next token in the parse context. +func (p *ParseContext) Next() *Token { + if len(p.peek) > 0 { + return p.pop() + } + + // End of tokens. + if len(p.args) == 0 { + return &Token{Index: p.argi, Type: TokenEOL} + } + + arg := p.args[0] + p.next() + + if p.argsOnly { + return &Token{p.argi, TokenArg, arg} + } + + // All remaining args are passed directly. 
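+	// For example, in "app -- --not-a-flag" everything after "--" is
+	// treated as a positional argument rather than a flag.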
+ if arg == "--" { + p.argsOnly = true + return p.Next() + } + + if strings.HasPrefix(arg, "--") { + parts := strings.SplitN(arg[2:], "=", 2) + token := &Token{p.argi, TokenLong, parts[0]} + if len(parts) == 2 { + p.Push(&Token{p.argi, TokenArg, parts[1]}) + } + return token + } + + if strings.HasPrefix(arg, "-") { + if len(arg) == 1 { + return &Token{Index: p.argi, Type: TokenShort} + } + shortRune, size := utf8.DecodeRuneInString(arg[1:]) + short := string(shortRune) + flag, ok := p.flags.short[short] + // Not a known short flag, we'll just return it anyway. + if !ok { + } else if fb, ok := flag.value.(boolFlag); ok && fb.IsBoolFlag() { + // Bool short flag. + } else { + // Short flag with combined argument: -fARG + token := &Token{p.argi, TokenShort, short} + if len(arg) > size+1 { + p.Push(&Token{p.argi, TokenArg, arg[size+1:]}) + } + return token + } + + if len(arg) > size+1 { + p.args = append([]string{"-" + arg[size+1:]}, p.args...) + } + return &Token{p.argi, TokenShort, short} + } else if strings.HasPrefix(arg, "@") { + expanded, err := ExpandArgsFromFile(arg[1:]) + if err != nil { + return &Token{p.argi, TokenError, err.Error()} + } + if len(p.args) == 0 { + p.args = expanded + } else { + p.args = append(expanded, p.args...) + } + return p.Next() + } + + return &Token{p.argi, TokenArg, arg} +} + +func (p *ParseContext) Peek() *Token { + if len(p.peek) == 0 { + return p.Push(p.Next()) + } + return p.peek[len(p.peek)-1] +} + +func (p *ParseContext) Push(token *Token) *Token { + p.peek = append(p.peek, token) + return token +} + +func (p *ParseContext) pop() *Token { + end := len(p.peek) - 1 + token := p.peek[end] + p.peek = p.peek[0:end] + return token +} + +func (p *ParseContext) String() string { + return p.SelectedCommand.FullCommand() +} + +func (p *ParseContext) matchedFlag(flag *FlagClause, value string) { + p.Elements = append(p.Elements, &ParseElement{Clause: flag, Value: &value}) +} + +func (p *ParseContext) matchedArg(arg *ArgClause, value string) { + p.Elements = append(p.Elements, &ParseElement{Clause: arg, Value: &value}) +} + +func (p *ParseContext) matchedCmd(cmd *CmdClause) { + p.Elements = append(p.Elements, &ParseElement{Clause: cmd}) + p.mergeFlags(cmd.flagGroup) + p.mergeArgs(cmd.argGroup) + p.SelectedCommand = cmd +} + +// Expand arguments from a file. Lines starting with # will be treated as comments. 
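+// For example, given args.txt containing the lines "--debug" and "post",
+// invoking "app @args.txt" behaves like "app --debug post" (file name
+// illustrative).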
+func ExpandArgsFromFile(filename string) (out []string, err error) { + if filename == "" { + return nil, fmt.Errorf("expected @ file to expand arguments from") + } + r, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("failed to open arguments file %q: %s", filename, err) + } + defer r.Close() + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "#") { + continue + } + out = append(out, line) + } + err = scanner.Err() + if err != nil { + return nil, fmt.Errorf("failed to read arguments from %q: %s", filename, err) + } + return +} + +func parse(context *ParseContext, app *Application) (err error) { + context.mergeFlags(app.flagGroup) + context.mergeArgs(app.argGroup) + + cmds := app.cmdGroup + ignoreDefault := context.ignoreDefault + +loop: + for !context.EOL() && !context.Error() { + token := context.Peek() + + switch token.Type { + case TokenLong, TokenShort: + if flag, err := context.flags.parse(context); err != nil { + if !ignoreDefault { + if cmd := cmds.defaultSubcommand(); cmd != nil { + cmd.completionAlts = cmds.cmdNames() + context.matchedCmd(cmd) + cmds = cmd.cmdGroup + break + } + } + return err + } else if flag == HelpFlag { + ignoreDefault = true + } + + case TokenArg: + if cmds.have() { + selectedDefault := false + cmd, ok := cmds.commands[token.String()] + if !ok { + if !ignoreDefault { + if cmd = cmds.defaultSubcommand(); cmd != nil { + cmd.completionAlts = cmds.cmdNames() + selectedDefault = true + } + } + if cmd == nil { + return fmt.Errorf("expected command but got %q", token) + } + } + if cmd == HelpCommand { + ignoreDefault = true + } + cmd.completionAlts = nil + context.matchedCmd(cmd) + cmds = cmd.cmdGroup + if !selectedDefault { + context.Next() + } + } else if context.arguments.have() { + if app.noInterspersed { + // no more flags + context.argsOnly = true + } + arg := context.nextArg() + if arg == nil { + break loop + } + context.matchedArg(arg, token.String()) + context.Next() + } else { + break loop + } + + case TokenEOL: + break loop + } + } + + // Move to innermost default command. + for !ignoreDefault { + if cmd := cmds.defaultSubcommand(); cmd != nil { + cmd.completionAlts = cmds.cmdNames() + context.matchedCmd(cmd) + cmds = cmd.cmdGroup + } else { + break + } + } + + if context.Error() { + return fmt.Errorf("%s", context.Peek().Value) + } + + if !context.EOL() { + return fmt.Errorf("unexpected %s", context.Peek()) + } + + // Set defaults for all remaining args. + for arg := context.nextArg(); arg != nil && !arg.consumesRemainder(); arg = context.nextArg() { + for _, defaultValue := range arg.defaultValues { + if err := arg.value.Set(defaultValue); err != nil { + return fmt.Errorf("invalid default value '%s' for argument '%s'", defaultValue, arg.name) + } + } + } + + return +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/parsers.go b/vendor/gopkg.in/alecthomas/kingpin.v2/parsers.go new file mode 100644 index 0000000000..d9ad57e5cf --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/parsers.go @@ -0,0 +1,212 @@ +package kingpin + +import ( + "net" + "net/url" + "os" + "time" + + "github.com/alecthomas/units" +) + +type Settings interface { + SetValue(value Value) +} + +type parserMixin struct { + value Value + required bool +} + +func (p *parserMixin) SetValue(value Value) { + p.value = value +} + +// StringMap provides key=value parsing into a map. 
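+//
+// For example (flag name illustrative), with
+//
+//	headers := app.Flag("header", "HTTP headers.").StringMap()
+//
+// the invocation "--header Accept=text/html --header Host=example.com"
+// yields a map containing both entries.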
+func (p *parserMixin) StringMap() (target *map[string]string) { + target = &(map[string]string{}) + p.StringMapVar(target) + return +} + +// Duration sets the parser to a time.Duration parser. +func (p *parserMixin) Duration() (target *time.Duration) { + target = new(time.Duration) + p.DurationVar(target) + return +} + +// Bytes parses numeric byte units. eg. 1.5KB +func (p *parserMixin) Bytes() (target *units.Base2Bytes) { + target = new(units.Base2Bytes) + p.BytesVar(target) + return +} + +// IP sets the parser to a net.IP parser. +func (p *parserMixin) IP() (target *net.IP) { + target = new(net.IP) + p.IPVar(target) + return +} + +// TCP (host:port) address. +func (p *parserMixin) TCP() (target **net.TCPAddr) { + target = new(*net.TCPAddr) + p.TCPVar(target) + return +} + +// TCPVar (host:port) address. +func (p *parserMixin) TCPVar(target **net.TCPAddr) { + p.SetValue(newTCPAddrValue(target)) +} + +// ExistingFile sets the parser to one that requires and returns an existing file. +func (p *parserMixin) ExistingFile() (target *string) { + target = new(string) + p.ExistingFileVar(target) + return +} + +// ExistingDir sets the parser to one that requires and returns an existing directory. +func (p *parserMixin) ExistingDir() (target *string) { + target = new(string) + p.ExistingDirVar(target) + return +} + +// ExistingFileOrDir sets the parser to one that requires and returns an existing file OR directory. +func (p *parserMixin) ExistingFileOrDir() (target *string) { + target = new(string) + p.ExistingFileOrDirVar(target) + return +} + +// File returns an os.File against an existing file. +func (p *parserMixin) File() (target **os.File) { + target = new(*os.File) + p.FileVar(target) + return +} + +// File attempts to open a File with os.OpenFile(flag, perm). +func (p *parserMixin) OpenFile(flag int, perm os.FileMode) (target **os.File) { + target = new(*os.File) + p.OpenFileVar(target, flag, perm) + return +} + +// URL provides a valid, parsed url.URL. +func (p *parserMixin) URL() (target **url.URL) { + target = new(*url.URL) + p.URLVar(target) + return +} + +// StringMap provides key=value parsing into a map. +func (p *parserMixin) StringMapVar(target *map[string]string) { + p.SetValue(newStringMapValue(target)) +} + +// Float sets the parser to a float64 parser. +func (p *parserMixin) Float() (target *float64) { + return p.Float64() +} + +// Float sets the parser to a float64 parser. +func (p *parserMixin) FloatVar(target *float64) { + p.Float64Var(target) +} + +// Duration sets the parser to a time.Duration parser. +func (p *parserMixin) DurationVar(target *time.Duration) { + p.SetValue(newDurationValue(target)) +} + +// BytesVar parses numeric byte units. eg. 1.5KB +func (p *parserMixin) BytesVar(target *units.Base2Bytes) { + p.SetValue(newBytesValue(target)) +} + +// IP sets the parser to a net.IP parser. +func (p *parserMixin) IPVar(target *net.IP) { + p.SetValue(newIPValue(target)) +} + +// ExistingFile sets the parser to one that requires and returns an existing file. +func (p *parserMixin) ExistingFileVar(target *string) { + p.SetValue(newExistingFileValue(target)) +} + +// ExistingDir sets the parser to one that requires and returns an existing directory. +func (p *parserMixin) ExistingDirVar(target *string) { + p.SetValue(newExistingDirValue(target)) +} + +// ExistingDir sets the parser to one that requires and returns an existing directory. 
+func (p *parserMixin) ExistingFileOrDirVar(target *string) { + p.SetValue(newExistingFileOrDirValue(target)) +} + +// FileVar opens an existing file. +func (p *parserMixin) FileVar(target **os.File) { + p.SetValue(newFileValue(target, os.O_RDONLY, 0)) +} + +// OpenFileVar calls os.OpenFile(flag, perm) +func (p *parserMixin) OpenFileVar(target **os.File, flag int, perm os.FileMode) { + p.SetValue(newFileValue(target, flag, perm)) +} + +// URL provides a valid, parsed url.URL. +func (p *parserMixin) URLVar(target **url.URL) { + p.SetValue(newURLValue(target)) +} + +// URLList provides a parsed list of url.URL values. +func (p *parserMixin) URLList() (target *[]*url.URL) { + target = new([]*url.URL) + p.URLListVar(target) + return +} + +// URLListVar provides a parsed list of url.URL values. +func (p *parserMixin) URLListVar(target *[]*url.URL) { + p.SetValue(newURLListValue(target)) +} + +// Enum allows a value from a set of options. +func (p *parserMixin) Enum(options ...string) (target *string) { + target = new(string) + p.EnumVar(target, options...) + return +} + +// EnumVar allows a value from a set of options. +func (p *parserMixin) EnumVar(target *string, options ...string) { + p.SetValue(newEnumFlag(target, options...)) +} + +// Enums allows a set of values from a set of options. +func (p *parserMixin) Enums(options ...string) (target *[]string) { + target = new([]string) + p.EnumsVar(target, options...) + return +} + +// EnumVar allows a value from a set of options. +func (p *parserMixin) EnumsVar(target *[]string, options ...string) { + p.SetValue(newEnumsFlag(target, options...)) +} + +// A Counter increments a number each time it is encountered. +func (p *parserMixin) Counter() (target *int) { + target = new(int) + p.CounterVar(target) + return +} + +func (p *parserMixin) CounterVar(target *int) { + p.SetValue(newCounterValue(target)) +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/templates.go b/vendor/gopkg.in/alecthomas/kingpin.v2/templates.go new file mode 100644 index 0000000000..97b5c9fcb5 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/templates.go @@ -0,0 +1,262 @@ +package kingpin + +// Default usage template. 
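
templates.go defines several alternative usage templates; an application selects one with `Application.UsageTemplate`. A hedged sketch with illustrative flags, showing the variant that separates required from optional flags:

```go
package main

import (
	"os"

	"gopkg.in/alecthomas/kingpin.v2"
)

func main() {
	app := kingpin.New("demo", "Usage template example.")
	// Swap the default template; any exported template below works here.
	app.UsageTemplate(kingpin.SeparateOptionalFlagsUsageTemplate)
	app.Flag("config", "Path to config.").Required().String()
	app.Flag("verbose", "Verbose output.").Bool()
	kingpin.MustParse(app.Parse(os.Args[1:]))
}
```
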
+var DefaultUsageTemplate = `{{define "FormatCommand"}}\ +{{if .FlagSummary}} {{.FlagSummary}}{{end}}\ +{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\ +{{end}}\ + +{{define "FormatCommands"}}\ +{{range .FlattenedCommands}}\ +{{if not .Hidden}}\ + {{.FullCommand}}{{if .Default}}*{{end}}{{template "FormatCommand" .}} +{{.Help|Wrap 4}} +{{end}}\ +{{end}}\ +{{end}}\ + +{{define "FormatUsage"}}\ +{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}} +{{if .Help}} +{{.Help|Wrap 0}}\ +{{end}}\ + +{{end}}\ + +{{if .Context.SelectedCommand}}\ +usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}} +{{else}}\ +usage: {{.App.Name}}{{template "FormatUsage" .App}} +{{end}}\ +{{if .Context.Flags}}\ +Flags: +{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.Args}}\ +Args: +{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.SelectedCommand}}\ +{{if len .Context.SelectedCommand.Commands}}\ +Subcommands: +{{template "FormatCommands" .Context.SelectedCommand}} +{{end}}\ +{{else if .App.Commands}}\ +Commands: +{{template "FormatCommands" .App}} +{{end}}\ +` + +// Usage template where command's optional flags are listed separately +var SeparateOptionalFlagsUsageTemplate = `{{define "FormatCommand"}}\ +{{if .FlagSummary}} {{.FlagSummary}}{{end}}\ +{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\ +{{end}}\ + +{{define "FormatCommands"}}\ +{{range .FlattenedCommands}}\ +{{if not .Hidden}}\ + {{.FullCommand}}{{if .Default}}*{{end}}{{template "FormatCommand" .}} +{{.Help|Wrap 4}} +{{end}}\ +{{end}}\ +{{end}}\ + +{{define "FormatUsage"}}\ +{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}} +{{if .Help}} +{{.Help|Wrap 0}}\ +{{end}}\ + +{{end}}\ +{{if .Context.SelectedCommand}}\ +usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}} +{{else}}\ +usage: {{.App.Name}}{{template "FormatUsage" .App}} +{{end}}\ + +{{if .Context.Flags|RequiredFlags}}\ +Required flags: +{{.Context.Flags|RequiredFlags|FlagsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.Flags|OptionalFlags}}\ +Optional flags: +{{.Context.Flags|OptionalFlags|FlagsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.Args}}\ +Args: +{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.SelectedCommand}}\ +Subcommands: +{{if .Context.SelectedCommand.Commands}}\ +{{template "FormatCommands" .Context.SelectedCommand}} +{{end}}\ +{{else if .App.Commands}}\ +Commands: +{{template "FormatCommands" .App}} +{{end}}\ +` + +// Usage template with compactly formatted commands. 
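
Besides the compact template defined next, `ManPageTemplate` below renders roff output, so an app can emit its own man page. A sketch under the assumption that `UsageWriter`, `Version`, and `Author` are available on this vendored Application (names illustrative):

```go
package main

import (
	"os"

	"gopkg.in/alecthomas/kingpin.v2"
)

func main() {
	app := kingpin.New("demo", "Man page example.")
	app.Version("1.0").Author("example")
	app.Flag("verbose", "Verbose output.").Bool()
	// Render usage as a man page to stdout. Usage parses the given args
	// only to choose a help context, so an empty slice is fine.
	app.UsageTemplate(kingpin.ManPageTemplate)
	app.UsageWriter(os.Stdout)
	app.Usage([]string{})
}
```
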
+var CompactUsageTemplate = `{{define "FormatCommand"}}\ +{{if .FlagSummary}} {{.FlagSummary}}{{end}}\ +{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\ +{{end}}\ + +{{define "FormatCommandList"}}\ +{{range .}}\ +{{if not .Hidden}}\ +{{.Depth|Indent}}{{.Name}}{{if .Default}}*{{end}}{{template "FormatCommand" .}} +{{end}}\ +{{template "FormatCommandList" .Commands}}\ +{{end}}\ +{{end}}\ + +{{define "FormatUsage"}}\ +{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}} +{{if .Help}} +{{.Help|Wrap 0}}\ +{{end}}\ + +{{end}}\ + +{{if .Context.SelectedCommand}}\ +usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}} +{{else}}\ +usage: {{.App.Name}}{{template "FormatUsage" .App}} +{{end}}\ +{{if .Context.Flags}}\ +Flags: +{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.Args}}\ +Args: +{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.SelectedCommand}}\ +{{if .Context.SelectedCommand.Commands}}\ +Commands: + {{.Context.SelectedCommand}} +{{template "FormatCommandList" .Context.SelectedCommand.Commands}} +{{end}}\ +{{else if .App.Commands}}\ +Commands: +{{template "FormatCommandList" .App.Commands}} +{{end}}\ +` + +var ManPageTemplate = `{{define "FormatFlags"}}\ +{{range .Flags}}\ +{{if not .Hidden}}\ +.TP +\fB{{if .Short}}-{{.Short|Char}}, {{end}}--{{.Name}}{{if not .IsBoolFlag}}={{.FormatPlaceHolder}}{{end}}\\fR +{{.Help}} +{{end}}\ +{{end}}\ +{{end}}\ + +{{define "FormatCommand"}}\ +{{if .FlagSummary}} {{.FlagSummary}}{{end}}\ +{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}{{if .Default}}*{{end}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\ +{{end}}\ + +{{define "FormatCommands"}}\ +{{range .FlattenedCommands}}\ +{{if not .Hidden}}\ +.SS +\fB{{.FullCommand}}{{template "FormatCommand" .}}\\fR +.PP +{{.Help}} +{{template "FormatFlags" .}}\ +{{end}}\ +{{end}}\ +{{end}}\ + +{{define "FormatUsage"}}\ +{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}\\fR +{{end}}\ + +.TH {{.App.Name}} 1 {{.App.Version}} "{{.App.Author}}" +.SH "NAME" +{{.App.Name}} +.SH "SYNOPSIS" +.TP +\fB{{.App.Name}}{{template "FormatUsage" .App}} +.SH "DESCRIPTION" +{{.App.Help}} +.SH "OPTIONS" +{{template "FormatFlags" .App}}\ +{{if .App.Commands}}\ +.SH "COMMANDS" +{{template "FormatCommands" .App}}\ +{{end}}\ +` + +// Default usage template. +var LongHelpTemplate = `{{define "FormatCommand"}}\ +{{if .FlagSummary}} {{.FlagSummary}}{{end}}\ +{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\ +{{end}}\ + +{{define "FormatCommands"}}\ +{{range .FlattenedCommands}}\ +{{if not .Hidden}}\ + {{.FullCommand}}{{template "FormatCommand" .}} +{{.Help|Wrap 4}} +{{with .Flags|FlagsToTwoColumns}}{{FormatTwoColumnsWithIndent . 
4 2}}{{end}} +{{end}}\ +{{end}}\ +{{end}}\ + +{{define "FormatUsage"}}\ +{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}} +{{if .Help}} +{{.Help|Wrap 0}}\ +{{end}}\ + +{{end}}\ + +usage: {{.App.Name}}{{template "FormatUsage" .App}} +{{if .Context.Flags}}\ +Flags: +{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .Context.Args}}\ +Args: +{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}} +{{end}}\ +{{if .App.Commands}}\ +Commands: +{{template "FormatCommands" .App}} +{{end}}\ +` + +var BashCompletionTemplate = ` +_{{.App.Name}}_bash_autocomplete() { + local cur prev opts base + COMPREPLY=() + cur="${COMP_WORDS[COMP_CWORD]}" + opts=$( ${COMP_WORDS[0]} --completion-bash ${COMP_WORDS[@]:1:$COMP_CWORD} ) + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 +} +complete -F _{{.App.Name}}_bash_autocomplete {{.App.Name}} + +` + +var ZshCompletionTemplate = ` +#compdef {{.App.Name}} +autoload -U compinit && compinit +autoload -U bashcompinit && bashcompinit + +_{{.App.Name}}_bash_autocomplete() { + local cur prev opts base + COMPREPLY=() + cur="${COMP_WORDS[COMP_CWORD]}" + opts=$( ${COMP_WORDS[0]} --completion-bash ${COMP_WORDS[@]:1:$COMP_CWORD} ) + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 +} +complete -F _{{.App.Name}}_bash_autocomplete {{.App.Name}} +` diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/usage.go b/vendor/gopkg.in/alecthomas/kingpin.v2/usage.go new file mode 100644 index 0000000000..44af6f6572 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/usage.go @@ -0,0 +1,211 @@ +package kingpin + +import ( + "bytes" + "fmt" + "go/doc" + "io" + "strings" + + "github.com/alecthomas/template" +) + +var ( + preIndent = " " +) + +func formatTwoColumns(w io.Writer, indent, padding, width int, rows [][2]string) { + // Find size of first column. + s := 0 + for _, row := range rows { + if c := len(row[0]); c > s && c < 30 { + s = c + } + } + + indentStr := strings.Repeat(" ", indent) + offsetStr := strings.Repeat(" ", s+padding) + + for _, row := range rows { + buf := bytes.NewBuffer(nil) + doc.ToText(buf, row[1], "", preIndent, width-s-padding-indent) + lines := strings.Split(strings.TrimRight(buf.String(), "\n"), "\n") + fmt.Fprintf(w, "%s%-*s%*s", indentStr, s, row[0], padding, "") + if len(row[0]) >= 30 { + fmt.Fprintf(w, "\n%s%s", indentStr, offsetStr) + } + fmt.Fprintf(w, "%s\n", lines[0]) + for _, line := range lines[1:] { + fmt.Fprintf(w, "%s%s%s\n", indentStr, offsetStr, line) + } + } +} + +// Usage writes application usage to w. It parses args to determine +// appropriate help context, such as which command to show help for. 
+func (a *Application) Usage(args []string) { + context, err := a.parseContext(true, args) + a.FatalIfError(err, "") + if err := a.UsageForContextWithTemplate(context, 2, a.usageTemplate); err != nil { + panic(err) + } +} + +func formatAppUsage(app *ApplicationModel) string { + s := []string{app.Name} + if len(app.Flags) > 0 { + s = append(s, app.FlagSummary()) + } + if len(app.Args) > 0 { + s = append(s, app.ArgSummary()) + } + return strings.Join(s, " ") +} + +func formatCmdUsage(app *ApplicationModel, cmd *CmdModel) string { + s := []string{app.Name, cmd.String()} + if len(app.Flags) > 0 { + s = append(s, app.FlagSummary()) + } + if len(app.Args) > 0 { + s = append(s, app.ArgSummary()) + } + return strings.Join(s, " ") +} + +func formatFlag(haveShort bool, flag *FlagModel) string { + flagString := "" + if flag.Short != 0 { + flagString += fmt.Sprintf("-%c, --%s", flag.Short, flag.Name) + } else { + if haveShort { + flagString += fmt.Sprintf(" --%s", flag.Name) + } else { + flagString += fmt.Sprintf("--%s", flag.Name) + } + } + if !flag.IsBoolFlag() { + flagString += fmt.Sprintf("=%s", flag.FormatPlaceHolder()) + } + if v, ok := flag.Value.(repeatableFlag); ok && v.IsCumulative() { + flagString += " ..." + } + return flagString +} + +type templateParseContext struct { + SelectedCommand *CmdModel + *FlagGroupModel + *ArgGroupModel +} + +type templateContext struct { + App *ApplicationModel + Width int + Context *templateParseContext +} + +// UsageForContext displays usage information from a ParseContext (obtained from +// Application.ParseContext() or Action(f) callbacks). +func (a *Application) UsageForContext(context *ParseContext) error { + return a.UsageForContextWithTemplate(context, 2, a.usageTemplate) +} + +// UsageForContextWithTemplate is the base usage function. You generally don't need to use this. 
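
`UsageForContext` above (and the WithTemplate variant defined next) is handy inside Action callbacks, where a *ParseContext is already in hand. An illustrative sketch, assuming the usual kingpin Action signature:

```go
package main

import (
	"os"

	"gopkg.in/alecthomas/kingpin.v2"
)

func main() {
	app := kingpin.New("demo", "UsageForContext example.")
	app.Command("usage", "Show usage.").Action(func(c *kingpin.ParseContext) error {
		// Reuse the parse context to render context-sensitive help.
		return app.UsageForContext(c)
	})
	kingpin.MustParse(app.Parse(os.Args[1:]))
}
```
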
+func (a *Application) UsageForContextWithTemplate(context *ParseContext, indent int, tmpl string) error { + width := guessWidth(a.usageWriter) + funcs := template.FuncMap{ + "Indent": func(level int) string { + return strings.Repeat(" ", level*indent) + }, + "Wrap": func(indent int, s string) string { + buf := bytes.NewBuffer(nil) + indentText := strings.Repeat(" ", indent) + doc.ToText(buf, s, indentText, " "+indentText, width-indent) + return buf.String() + }, + "FormatFlag": formatFlag, + "FlagsToTwoColumns": func(f []*FlagModel) [][2]string { + rows := [][2]string{} + haveShort := false + for _, flag := range f { + if flag.Short != 0 { + haveShort = true + break + } + } + for _, flag := range f { + if !flag.Hidden { + rows = append(rows, [2]string{formatFlag(haveShort, flag), flag.Help}) + } + } + return rows + }, + "RequiredFlags": func(f []*FlagModel) []*FlagModel { + requiredFlags := []*FlagModel{} + for _, flag := range f { + if flag.Required { + requiredFlags = append(requiredFlags, flag) + } + } + return requiredFlags + }, + "OptionalFlags": func(f []*FlagModel) []*FlagModel { + optionalFlags := []*FlagModel{} + for _, flag := range f { + if !flag.Required { + optionalFlags = append(optionalFlags, flag) + } + } + return optionalFlags + }, + "ArgsToTwoColumns": func(a []*ArgModel) [][2]string { + rows := [][2]string{} + for _, arg := range a { + s := "<" + arg.Name + ">" + if !arg.Required { + s = "[" + s + "]" + } + rows = append(rows, [2]string{s, arg.Help}) + } + return rows + }, + "FormatTwoColumns": func(rows [][2]string) string { + buf := bytes.NewBuffer(nil) + formatTwoColumns(buf, indent, indent, width, rows) + return buf.String() + }, + "FormatTwoColumnsWithIndent": func(rows [][2]string, indent, padding int) string { + buf := bytes.NewBuffer(nil) + formatTwoColumns(buf, indent, padding, width, rows) + return buf.String() + }, + "FormatAppUsage": formatAppUsage, + "FormatCommandUsage": formatCmdUsage, + "IsCumulative": func(value Value) bool { + r, ok := value.(remainderArg) + return ok && r.IsCumulative() + }, + "Char": func(c rune) string { + return string(c) + }, + } + t, err := template.New("usage").Funcs(funcs).Parse(tmpl) + if err != nil { + return err + } + var selectedCommand *CmdModel + if context.SelectedCommand != nil { + selectedCommand = context.SelectedCommand.Model() + } + ctx := templateContext{ + App: a.Model(), + Width: width, + Context: &templateParseContext{ + SelectedCommand: selectedCommand, + FlagGroupModel: context.flags.Model(), + ArgGroupModel: context.arguments.Model(), + }, + } + return t.Execute(a.usageWriter, ctx) +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/values.go b/vendor/gopkg.in/alecthomas/kingpin.v2/values.go new file mode 100644 index 0000000000..7ee9a3b3e1 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/values.go @@ -0,0 +1,470 @@ +package kingpin + +//go:generate go run ./cmd/genvalues/main.go + +import ( + "fmt" + "net" + "net/url" + "os" + "reflect" + "regexp" + "strings" + "time" + + "github.com/alecthomas/units" +) + +// NOTE: Most of the base type values were lifted from: +// http://golang.org/src/pkg/flag/flag.go?s=20146:20222 + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +// +// If a Value has an IsBoolFlag() bool method returning true, the command-line +// parser makes --name equivalent to -name=true rather than using the next +// command-line argument, and adds a --no-name counterpart for negating the +// flag. 
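
Any type implementing this two-method Value interface can back a flag, wired in through the Settings/SetValue plumbing in parsers.go. A minimal hypothetical implementation (csvValue is invented for illustration):

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"gopkg.in/alecthomas/kingpin.v2"
)

// csvValue is a hypothetical Value that splits comma-separated input.
type csvValue []string

func (c *csvValue) Set(s string) error {
	*c = append(*c, strings.Split(s, ",")...)
	return nil
}

func (c *csvValue) String() string { return strings.Join(*c, ",") }

// IsCumulative marks the flag repeatable (the repeatableFlag interface below).
func (c *csvValue) IsCumulative() bool { return true }

func main() {
	app := kingpin.New("demo", "Custom Value example.")
	var items csvValue
	app.Flag("items", "Comma-separated items; may be repeated.").SetValue(&items)
	kingpin.MustParse(app.Parse(os.Args[1:]))
	fmt.Println(items)
}
```
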
+type Value interface { + String() string + Set(string) error +} + +// Getter is an interface that allows the contents of a Value to be retrieved. +// It wraps the Value interface, rather than being part of it, because it +// appeared after Go 1 and its compatibility rules. All Value types provided +// by this package satisfy the Getter interface. +type Getter interface { + Value + Get() interface{} +} + +// Optional interface to indicate boolean flags that don't accept a value, and +// implicitly have a --no- negation counterpart. +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// Optional interface for arguments that cumulatively consume all remaining +// input. +type remainderArg interface { + Value + IsCumulative() bool +} + +// Optional interface for flags that can be repeated. +type repeatableFlag interface { + Value + IsCumulative() bool +} + +type accumulator struct { + element func(value interface{}) Value + typ reflect.Type + slice reflect.Value +} + +// Use reflection to accumulate values into a slice. +// +// target := []string{} +// newAccumulator(&target, func (value interface{}) Value { +// return newStringValue(value.(*string)) +// }) +func newAccumulator(slice interface{}, element func(value interface{}) Value) *accumulator { + typ := reflect.TypeOf(slice) + if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Slice { + panic("expected a pointer to a slice") + } + return &accumulator{ + element: element, + typ: typ.Elem().Elem(), + slice: reflect.ValueOf(slice), + } +} + +func (a *accumulator) String() string { + out := []string{} + s := a.slice.Elem() + for i := 0; i < s.Len(); i++ { + out = append(out, a.element(s.Index(i).Addr().Interface()).String()) + } + return strings.Join(out, ",") +} + +func (a *accumulator) Set(value string) error { + e := reflect.New(a.typ) + if err := a.element(e.Interface()).Set(value); err != nil { + return err + } + slice := reflect.Append(a.slice.Elem(), e.Elem()) + a.slice.Elem().Set(slice) + return nil +} + +func (a *accumulator) Get() interface{} { + return a.slice.Interface() +} + +func (a *accumulator) IsCumulative() bool { + return true +} + +func (b *boolValue) IsBoolFlag() bool { return true } + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(p *time.Duration) *durationValue { + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Get() interface{} { return time.Duration(*d) } + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +// -- map[string]string Value +type stringMapValue map[string]string + +func newStringMapValue(p *map[string]string) *stringMapValue { + return (*stringMapValue)(p) +} + +var stringMapRegex = regexp.MustCompile("[:=]") + +func (s *stringMapValue) Set(value string) error { + parts := stringMapRegex.Split(value, 2) + if len(parts) != 2 { + return fmt.Errorf("expected KEY=VALUE got '%s'", value) + } + (*s)[parts[0]] = parts[1] + return nil +} + +func (s *stringMapValue) Get() interface{} { + return (map[string]string)(*s) +} + +func (s *stringMapValue) String() string { + return fmt.Sprintf("%s", map[string]string(*s)) +} + +func (s *stringMapValue) IsCumulative() bool { + return true +} + +// -- net.IP Value +type ipValue net.IP + +func newIPValue(p *net.IP) *ipValue { + return (*ipValue)(p) +} + +func (i *ipValue) Set(value string) error { + if ip := net.ParseIP(value); ip == nil { + return 
fmt.Errorf("'%s' is not an IP address", value) + } else { + *i = *(*ipValue)(&ip) + return nil + } +} + +func (i *ipValue) Get() interface{} { + return (net.IP)(*i) +} + +func (i *ipValue) String() string { + return (*net.IP)(i).String() +} + +// -- *net.TCPAddr Value +type tcpAddrValue struct { + addr **net.TCPAddr +} + +func newTCPAddrValue(p **net.TCPAddr) *tcpAddrValue { + return &tcpAddrValue{p} +} + +func (i *tcpAddrValue) Set(value string) error { + if addr, err := net.ResolveTCPAddr("tcp", value); err != nil { + return fmt.Errorf("'%s' is not a valid TCP address: %s", value, err) + } else { + *i.addr = addr + return nil + } +} + +func (t *tcpAddrValue) Get() interface{} { + return (*net.TCPAddr)(*t.addr) +} + +func (i *tcpAddrValue) String() string { + return (*i.addr).String() +} + +// -- existingFile Value + +type fileStatValue struct { + path *string + predicate func(os.FileInfo) error +} + +func newFileStatValue(p *string, predicate func(os.FileInfo) error) *fileStatValue { + return &fileStatValue{ + path: p, + predicate: predicate, + } +} + +func (e *fileStatValue) Set(value string) error { + if s, err := os.Stat(value); os.IsNotExist(err) { + return fmt.Errorf("path '%s' does not exist", value) + } else if err != nil { + return err + } else if err := e.predicate(s); err != nil { + return err + } + *e.path = value + return nil +} + +func (f *fileStatValue) Get() interface{} { + return (string)(*f.path) +} + +func (e *fileStatValue) String() string { + return *e.path +} + +// -- os.File value + +type fileValue struct { + f **os.File + flag int + perm os.FileMode +} + +func newFileValue(p **os.File, flag int, perm os.FileMode) *fileValue { + return &fileValue{p, flag, perm} +} + +func (f *fileValue) Set(value string) error { + if fd, err := os.OpenFile(value, f.flag, f.perm); err != nil { + return err + } else { + *f.f = fd + return nil + } +} + +func (f *fileValue) Get() interface{} { + return (*os.File)(*f.f) +} + +func (f *fileValue) String() string { + if *f.f == nil { + return "" + } + return (*f.f).Name() +} + +// -- url.URL Value +type urlValue struct { + u **url.URL +} + +func newURLValue(p **url.URL) *urlValue { + return &urlValue{p} +} + +func (u *urlValue) Set(value string) error { + if url, err := url.Parse(value); err != nil { + return fmt.Errorf("invalid URL: %s", err) + } else { + *u.u = url + return nil + } +} + +func (u *urlValue) Get() interface{} { + return (*url.URL)(*u.u) +} + +func (u *urlValue) String() string { + if *u.u == nil { + return "" + } + return (*u.u).String() +} + +// -- []*url.URL Value +type urlListValue []*url.URL + +func newURLListValue(p *[]*url.URL) *urlListValue { + return (*urlListValue)(p) +} + +func (u *urlListValue) Set(value string) error { + if url, err := url.Parse(value); err != nil { + return fmt.Errorf("invalid URL: %s", err) + } else { + *u = append(*u, url) + return nil + } +} + +func (u *urlListValue) Get() interface{} { + return ([]*url.URL)(*u) +} + +func (u *urlListValue) String() string { + out := []string{} + for _, url := range *u { + out = append(out, url.String()) + } + return strings.Join(out, ",") +} + +func (u *urlListValue) IsCumulative() bool { + return true +} + +// A flag whose value must be in a set of options. 
+type enumValue struct { + value *string + options []string +} + +func newEnumFlag(target *string, options ...string) *enumValue { + return &enumValue{ + value: target, + options: options, + } +} + +func (a *enumValue) String() string { + return *a.value +} + +func (a *enumValue) Set(value string) error { + for _, v := range a.options { + if v == value { + *a.value = value + return nil + } + } + return fmt.Errorf("enum value must be one of %s, got '%s'", strings.Join(a.options, ","), value) +} + +func (e *enumValue) Get() interface{} { + return (string)(*e.value) +} + +// -- []string Enum Value +type enumsValue struct { + value *[]string + options []string +} + +func newEnumsFlag(target *[]string, options ...string) *enumsValue { + return &enumsValue{ + value: target, + options: options, + } +} + +func (s *enumsValue) Set(value string) error { + for _, v := range s.options { + if v == value { + *s.value = append(*s.value, value) + return nil + } + } + return fmt.Errorf("enum value must be one of %s, got '%s'", strings.Join(s.options, ","), value) +} + +func (e *enumsValue) Get() interface{} { + return ([]string)(*e.value) +} + +func (s *enumsValue) String() string { + return strings.Join(*s.value, ",") +} + +func (s *enumsValue) IsCumulative() bool { + return true +} + +// -- units.Base2Bytes Value +type bytesValue units.Base2Bytes + +func newBytesValue(p *units.Base2Bytes) *bytesValue { + return (*bytesValue)(p) +} + +func (d *bytesValue) Set(s string) error { + v, err := units.ParseBase2Bytes(s) + *d = bytesValue(v) + return err +} + +func (d *bytesValue) Get() interface{} { return units.Base2Bytes(*d) } + +func (d *bytesValue) String() string { return (*units.Base2Bytes)(d).String() } + +func newExistingFileValue(target *string) *fileStatValue { + return newFileStatValue(target, func(s os.FileInfo) error { + if s.IsDir() { + return fmt.Errorf("'%s' is a directory", s.Name()) + } + return nil + }) +} + +func newExistingDirValue(target *string) *fileStatValue { + return newFileStatValue(target, func(s os.FileInfo) error { + if !s.IsDir() { + return fmt.Errorf("'%s' is a file", s.Name()) + } + return nil + }) +} + +func newExistingFileOrDirValue(target *string) *fileStatValue { + return newFileStatValue(target, func(s os.FileInfo) error { return nil }) +} + +type counterValue int + +func newCounterValue(n *int) *counterValue { + return (*counterValue)(n) +} + +func (c *counterValue) Set(s string) error { + *c++ + return nil +} + +func (c *counterValue) Get() interface{} { return (int)(*c) } +func (c *counterValue) IsBoolFlag() bool { return true } +func (c *counterValue) String() string { return fmt.Sprintf("%d", *c) } +func (c *counterValue) IsCumulative() bool { return true } + +func resolveHost(value string) (net.IP, error) { + if ip := net.ParseIP(value); ip != nil { + return ip, nil + } else { + if addr, err := net.ResolveIPAddr("ip", value); err != nil { + return nil, err + } else { + return addr.IP, nil + } + } +} diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/values.json b/vendor/gopkg.in/alecthomas/kingpin.v2/values.json new file mode 100644 index 0000000000..23c67448e5 --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/values.json @@ -0,0 +1,25 @@ +[ + {"type": "bool", "parser": "strconv.ParseBool(s)"}, + {"type": "string", "parser": "s, error(nil)", "format": "string(*f.v)", "plural": "Strings"}, + {"type": "uint", "parser": "strconv.ParseUint(s, 0, 64)", "plural": "Uints"}, + {"type": "uint8", "parser": "strconv.ParseUint(s, 0, 8)"}, + {"type": "uint16", "parser": 
"strconv.ParseUint(s, 0, 16)"}, + {"type": "uint32", "parser": "strconv.ParseUint(s, 0, 32)"}, + {"type": "uint64", "parser": "strconv.ParseUint(s, 0, 64)"}, + {"type": "int", "parser": "strconv.ParseFloat(s, 64)", "plural": "Ints"}, + {"type": "int8", "parser": "strconv.ParseInt(s, 0, 8)"}, + {"type": "int16", "parser": "strconv.ParseInt(s, 0, 16)"}, + {"type": "int32", "parser": "strconv.ParseInt(s, 0, 32)"}, + {"type": "int64", "parser": "strconv.ParseInt(s, 0, 64)"}, + {"type": "float64", "parser": "strconv.ParseFloat(s, 64)"}, + {"type": "float32", "parser": "strconv.ParseFloat(s, 32)"}, + {"name": "Duration", "type": "time.Duration", "no_value_parser": true}, + {"name": "IP", "type": "net.IP", "no_value_parser": true}, + {"name": "TCPAddr", "Type": "*net.TCPAddr", "plural": "TCPList", "no_value_parser": true}, + {"name": "ExistingFile", "Type": "string", "plural": "ExistingFiles", "no_value_parser": true}, + {"name": "ExistingDir", "Type": "string", "plural": "ExistingDirs", "no_value_parser": true}, + {"name": "ExistingFileOrDir", "Type": "string", "plural": "ExistingFilesOrDirs", "no_value_parser": true}, + {"name": "Regexp", "Type": "*regexp.Regexp", "parser": "regexp.Compile(s)"}, + {"name": "ResolvedIP", "Type": "net.IP", "parser": "resolveHost(s)", "help": "Resolve a hostname or IP to an IP."}, + {"name": "HexBytes", "Type": "[]byte", "parser": "hex.DecodeString(s)", "help": "Bytes as a hex string."} +] diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/values_generated.go b/vendor/gopkg.in/alecthomas/kingpin.v2/values_generated.go new file mode 100644 index 0000000000..8d492bf9ce --- /dev/null +++ b/vendor/gopkg.in/alecthomas/kingpin.v2/values_generated.go @@ -0,0 +1,821 @@ +package kingpin + +import ( + "encoding/hex" + "fmt" + "net" + "regexp" + "strconv" + "time" +) + +// This file is autogenerated by "go generate .". Do not modify. + +// -- bool Value +type boolValue struct{ v *bool } + +func newBoolValue(p *bool) *boolValue { + return &boolValue{p} +} + +func (f *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + if err == nil { + *f.v = (bool)(v) + } + return err +} + +func (f *boolValue) Get() interface{} { return (bool)(*f.v) } + +func (f *boolValue) String() string { return fmt.Sprintf("%v", *f.v) } + +// Bool parses the next command-line value as bool. +func (p *parserMixin) Bool() (target *bool) { + target = new(bool) + p.BoolVar(target) + return +} + +func (p *parserMixin) BoolVar(target *bool) { + p.SetValue(newBoolValue(target)) +} + +// BoolList accumulates bool values into a slice. +func (p *parserMixin) BoolList() (target *[]bool) { + target = new([]bool) + p.BoolListVar(target) + return +} + +func (p *parserMixin) BoolListVar(target *[]bool) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newBoolValue(v.(*bool)) + })) +} + +// -- string Value +type stringValue struct{ v *string } + +func newStringValue(p *string) *stringValue { + return &stringValue{p} +} + +func (f *stringValue) Set(s string) error { + v, err := s, error(nil) + if err == nil { + *f.v = (string)(v) + } + return err +} + +func (f *stringValue) Get() interface{} { return (string)(*f.v) } + +func (f *stringValue) String() string { return string(*f.v) } + +// String parses the next command-line value as string. 
+func (p *parserMixin) String() (target *string) { + target = new(string) + p.StringVar(target) + return +} + +func (p *parserMixin) StringVar(target *string) { + p.SetValue(newStringValue(target)) +} + +// Strings accumulates string values into a slice. +func (p *parserMixin) Strings() (target *[]string) { + target = new([]string) + p.StringsVar(target) + return +} + +func (p *parserMixin) StringsVar(target *[]string) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newStringValue(v.(*string)) + })) +} + +// -- uint Value +type uintValue struct{ v *uint } + +func newUintValue(p *uint) *uintValue { + return &uintValue{p} +} + +func (f *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + if err == nil { + *f.v = (uint)(v) + } + return err +} + +func (f *uintValue) Get() interface{} { return (uint)(*f.v) } + +func (f *uintValue) String() string { return fmt.Sprintf("%v", *f.v) } + +// Uint parses the next command-line value as uint. +func (p *parserMixin) Uint() (target *uint) { + target = new(uint) + p.UintVar(target) + return +} + +func (p *parserMixin) UintVar(target *uint) { + p.SetValue(newUintValue(target)) +} + +// Uints accumulates uint values into a slice. +func (p *parserMixin) Uints() (target *[]uint) { + target = new([]uint) + p.UintsVar(target) + return +} + +func (p *parserMixin) UintsVar(target *[]uint) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newUintValue(v.(*uint)) + })) +} + +// -- uint8 Value +type uint8Value struct{ v *uint8 } + +func newUint8Value(p *uint8) *uint8Value { + return &uint8Value{p} +} + +func (f *uint8Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 8) + if err == nil { + *f.v = (uint8)(v) + } + return err +} + +func (f *uint8Value) Get() interface{} { return (uint8)(*f.v) } + +func (f *uint8Value) String() string { return fmt.Sprintf("%v", *f.v) } + +// Uint8 parses the next command-line value as uint8. +func (p *parserMixin) Uint8() (target *uint8) { + target = new(uint8) + p.Uint8Var(target) + return +} + +func (p *parserMixin) Uint8Var(target *uint8) { + p.SetValue(newUint8Value(target)) +} + +// Uint8List accumulates uint8 values into a slice. +func (p *parserMixin) Uint8List() (target *[]uint8) { + target = new([]uint8) + p.Uint8ListVar(target) + return +} + +func (p *parserMixin) Uint8ListVar(target *[]uint8) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newUint8Value(v.(*uint8)) + })) +} + +// -- uint16 Value +type uint16Value struct{ v *uint16 } + +func newUint16Value(p *uint16) *uint16Value { + return &uint16Value{p} +} + +func (f *uint16Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 16) + if err == nil { + *f.v = (uint16)(v) + } + return err +} + +func (f *uint16Value) Get() interface{} { return (uint16)(*f.v) } + +func (f *uint16Value) String() string { return fmt.Sprintf("%v", *f.v) } + +// Uint16 parses the next command-line value as uint16. +func (p *parserMixin) Uint16() (target *uint16) { + target = new(uint16) + p.Uint16Var(target) + return +} + +func (p *parserMixin) Uint16Var(target *uint16) { + p.SetValue(newUint16Value(target)) +} + +// Uint16List accumulates uint16 values into a slice. 
+func (p *parserMixin) Uint16List() (target *[]uint16) { + target = new([]uint16) + p.Uint16ListVar(target) + return +} + +func (p *parserMixin) Uint16ListVar(target *[]uint16) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newUint16Value(v.(*uint16)) + })) +} + +// -- uint32 Value +type uint32Value struct{ v *uint32 } + +func newUint32Value(p *uint32) *uint32Value { + return &uint32Value{p} +} + +func (f *uint32Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 32) + if err == nil { + *f.v = (uint32)(v) + } + return err +} + +func (f *uint32Value) Get() interface{} { return (uint32)(*f.v) } + +func (f *uint32Value) String() string { return fmt.Sprintf("%v", *f.v) } + +// Uint32 parses the next command-line value as uint32. +func (p *parserMixin) Uint32() (target *uint32) { + target = new(uint32) + p.Uint32Var(target) + return +} + +func (p *parserMixin) Uint32Var(target *uint32) { + p.SetValue(newUint32Value(target)) +} + +// Uint32List accumulates uint32 values into a slice. +func (p *parserMixin) Uint32List() (target *[]uint32) { + target = new([]uint32) + p.Uint32ListVar(target) + return +} + +func (p *parserMixin) Uint32ListVar(target *[]uint32) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newUint32Value(v.(*uint32)) + })) +} + +// -- uint64 Value +type uint64Value struct{ v *uint64 } + +func newUint64Value(p *uint64) *uint64Value { + return &uint64Value{p} +} + +func (f *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + if err == nil { + *f.v = (uint64)(v) + } + return err +} + +func (f *uint64Value) Get() interface{} { return (uint64)(*f.v) } + +func (f *uint64Value) String() string { return fmt.Sprintf("%v", *f.v) } + +// Uint64 parses the next command-line value as uint64. +func (p *parserMixin) Uint64() (target *uint64) { + target = new(uint64) + p.Uint64Var(target) + return +} + +func (p *parserMixin) Uint64Var(target *uint64) { + p.SetValue(newUint64Value(target)) +} + +// Uint64List accumulates uint64 values into a slice. +func (p *parserMixin) Uint64List() (target *[]uint64) { + target = new([]uint64) + p.Uint64ListVar(target) + return +} + +func (p *parserMixin) Uint64ListVar(target *[]uint64) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newUint64Value(v.(*uint64)) + })) +} + +// -- int Value +type intValue struct{ v *int } + +func newIntValue(p *int) *intValue { + return &intValue{p} +} + +func (f *intValue) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + if err == nil { + *f.v = (int)(v) + } + return err +} + +func (f *intValue) Get() interface{} { return (int)(*f.v) } + +func (f *intValue) String() string { return fmt.Sprintf("%v", *f.v) } + +// Int parses the next command-line value as int. +func (p *parserMixin) Int() (target *int) { + target = new(int) + p.IntVar(target) + return +} + +func (p *parserMixin) IntVar(target *int) { + p.SetValue(newIntValue(target)) +} + +// Ints accumulates int values into a slice. 
+func (p *parserMixin) Ints() (target *[]int) { + target = new([]int) + p.IntsVar(target) + return +} + +func (p *parserMixin) IntsVar(target *[]int) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newIntValue(v.(*int)) + })) +} + +// -- int8 Value +type int8Value struct{ v *int8 } + +func newInt8Value(p *int8) *int8Value { + return &int8Value{p} +} + +func (f *int8Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 8) + if err == nil { + *f.v = (int8)(v) + } + return err +} + +func (f *int8Value) Get() interface{} { return (int8)(*f.v) } + +func (f *int8Value) String() string { return fmt.Sprintf("%v", *f.v) } + +// Int8 parses the next command-line value as int8. +func (p *parserMixin) Int8() (target *int8) { + target = new(int8) + p.Int8Var(target) + return +} + +func (p *parserMixin) Int8Var(target *int8) { + p.SetValue(newInt8Value(target)) +} + +// Int8List accumulates int8 values into a slice. +func (p *parserMixin) Int8List() (target *[]int8) { + target = new([]int8) + p.Int8ListVar(target) + return +} + +func (p *parserMixin) Int8ListVar(target *[]int8) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newInt8Value(v.(*int8)) + })) +} + +// -- int16 Value +type int16Value struct{ v *int16 } + +func newInt16Value(p *int16) *int16Value { + return &int16Value{p} +} + +func (f *int16Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 16) + if err == nil { + *f.v = (int16)(v) + } + return err +} + +func (f *int16Value) Get() interface{} { return (int16)(*f.v) } + +func (f *int16Value) String() string { return fmt.Sprintf("%v", *f.v) } + +// Int16 parses the next command-line value as int16. +func (p *parserMixin) Int16() (target *int16) { + target = new(int16) + p.Int16Var(target) + return +} + +func (p *parserMixin) Int16Var(target *int16) { + p.SetValue(newInt16Value(target)) +} + +// Int16List accumulates int16 values into a slice. +func (p *parserMixin) Int16List() (target *[]int16) { + target = new([]int16) + p.Int16ListVar(target) + return +} + +func (p *parserMixin) Int16ListVar(target *[]int16) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newInt16Value(v.(*int16)) + })) +} + +// -- int32 Value +type int32Value struct{ v *int32 } + +func newInt32Value(p *int32) *int32Value { + return &int32Value{p} +} + +func (f *int32Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 32) + if err == nil { + *f.v = (int32)(v) + } + return err +} + +func (f *int32Value) Get() interface{} { return (int32)(*f.v) } + +func (f *int32Value) String() string { return fmt.Sprintf("%v", *f.v) } + +// Int32 parses the next command-line value as int32. +func (p *parserMixin) Int32() (target *int32) { + target = new(int32) + p.Int32Var(target) + return +} + +func (p *parserMixin) Int32Var(target *int32) { + p.SetValue(newInt32Value(target)) +} + +// Int32List accumulates int32 values into a slice. 
+func (p *parserMixin) Int32List() (target *[]int32) { + target = new([]int32) + p.Int32ListVar(target) + return +} + +func (p *parserMixin) Int32ListVar(target *[]int32) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newInt32Value(v.(*int32)) + })) +} + +// -- int64 Value +type int64Value struct{ v *int64 } + +func newInt64Value(p *int64) *int64Value { + return &int64Value{p} +} + +func (f *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + if err == nil { + *f.v = (int64)(v) + } + return err +} + +func (f *int64Value) Get() interface{} { return (int64)(*f.v) } + +func (f *int64Value) String() string { return fmt.Sprintf("%v", *f.v) } + +// Int64 parses the next command-line value as int64. +func (p *parserMixin) Int64() (target *int64) { + target = new(int64) + p.Int64Var(target) + return +} + +func (p *parserMixin) Int64Var(target *int64) { + p.SetValue(newInt64Value(target)) +} + +// Int64List accumulates int64 values into a slice. +func (p *parserMixin) Int64List() (target *[]int64) { + target = new([]int64) + p.Int64ListVar(target) + return +} + +func (p *parserMixin) Int64ListVar(target *[]int64) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newInt64Value(v.(*int64)) + })) +} + +// -- float64 Value +type float64Value struct{ v *float64 } + +func newFloat64Value(p *float64) *float64Value { + return &float64Value{p} +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + if err == nil { + *f.v = (float64)(v) + } + return err +} + +func (f *float64Value) Get() interface{} { return (float64)(*f.v) } + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f.v) } + +// Float64 parses the next command-line value as float64. +func (p *parserMixin) Float64() (target *float64) { + target = new(float64) + p.Float64Var(target) + return +} + +func (p *parserMixin) Float64Var(target *float64) { + p.SetValue(newFloat64Value(target)) +} + +// Float64List accumulates float64 values into a slice. +func (p *parserMixin) Float64List() (target *[]float64) { + target = new([]float64) + p.Float64ListVar(target) + return +} + +func (p *parserMixin) Float64ListVar(target *[]float64) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newFloat64Value(v.(*float64)) + })) +} + +// -- float32 Value +type float32Value struct{ v *float32 } + +func newFloat32Value(p *float32) *float32Value { + return &float32Value{p} +} + +func (f *float32Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + if err == nil { + *f.v = (float32)(v) + } + return err +} + +func (f *float32Value) Get() interface{} { return (float32)(*f.v) } + +func (f *float32Value) String() string { return fmt.Sprintf("%v", *f.v) } + +// Float32 parses the next command-line value as float32. +func (p *parserMixin) Float32() (target *float32) { + target = new(float32) + p.Float32Var(target) + return +} + +func (p *parserMixin) Float32Var(target *float32) { + p.SetValue(newFloat32Value(target)) +} + +// Float32List accumulates float32 values into a slice. +func (p *parserMixin) Float32List() (target *[]float32) { + target = new([]float32) + p.Float32ListVar(target) + return +} + +func (p *parserMixin) Float32ListVar(target *[]float32) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newFloat32Value(v.(*float32)) + })) +} + +// DurationList accumulates time.Duration values into a slice. 
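
Each scalar type also gets a cumulative `...List`/`...ListVar` pair built on the reflection-based accumulator in values.go; repeating the flag appends to the slice. A sketch using the DurationList helper defined next:

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/alecthomas/kingpin.v2"
)

func main() {
	app := kingpin.New("demo", "Cumulative list example.")
	intervals := app.Flag("interval", "May be repeated.").DurationList()
	kingpin.MustParse(app.Parse(os.Args[1:]))
	// "--interval=250ms --interval=1s" yields [250ms 1s].
	fmt.Println(*intervals)
}
```
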
+func (p *parserMixin) DurationList() (target *[]time.Duration) { + target = new([]time.Duration) + p.DurationListVar(target) + return +} + +func (p *parserMixin) DurationListVar(target *[]time.Duration) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newDurationValue(v.(*time.Duration)) + })) +} + +// IPList accumulates net.IP values into a slice. +func (p *parserMixin) IPList() (target *[]net.IP) { + target = new([]net.IP) + p.IPListVar(target) + return +} + +func (p *parserMixin) IPListVar(target *[]net.IP) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newIPValue(v.(*net.IP)) + })) +} + +// TCPList accumulates *net.TCPAddr values into a slice. +func (p *parserMixin) TCPList() (target *[]*net.TCPAddr) { + target = new([]*net.TCPAddr) + p.TCPListVar(target) + return +} + +func (p *parserMixin) TCPListVar(target *[]*net.TCPAddr) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newTCPAddrValue(v.(**net.TCPAddr)) + })) +} + +// ExistingFiles accumulates string values into a slice. +func (p *parserMixin) ExistingFiles() (target *[]string) { + target = new([]string) + p.ExistingFilesVar(target) + return +} + +func (p *parserMixin) ExistingFilesVar(target *[]string) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newExistingFileValue(v.(*string)) + })) +} + +// ExistingDirs accumulates string values into a slice. +func (p *parserMixin) ExistingDirs() (target *[]string) { + target = new([]string) + p.ExistingDirsVar(target) + return +} + +func (p *parserMixin) ExistingDirsVar(target *[]string) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newExistingDirValue(v.(*string)) + })) +} + +// ExistingFilesOrDirs accumulates string values into a slice. +func (p *parserMixin) ExistingFilesOrDirs() (target *[]string) { + target = new([]string) + p.ExistingFilesOrDirsVar(target) + return +} + +func (p *parserMixin) ExistingFilesOrDirsVar(target *[]string) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newExistingFileOrDirValue(v.(*string)) + })) +} + +// -- *regexp.Regexp Value +type regexpValue struct{ v **regexp.Regexp } + +func newRegexpValue(p **regexp.Regexp) *regexpValue { + return ®expValue{p} +} + +func (f *regexpValue) Set(s string) error { + v, err := regexp.Compile(s) + if err == nil { + *f.v = (*regexp.Regexp)(v) + } + return err +} + +func (f *regexpValue) Get() interface{} { return (*regexp.Regexp)(*f.v) } + +func (f *regexpValue) String() string { return fmt.Sprintf("%v", *f.v) } + +// Regexp parses the next command-line value as *regexp.Regexp. +func (p *parserMixin) Regexp() (target **regexp.Regexp) { + target = new(*regexp.Regexp) + p.RegexpVar(target) + return +} + +func (p *parserMixin) RegexpVar(target **regexp.Regexp) { + p.SetValue(newRegexpValue(target)) +} + +// RegexpList accumulates *regexp.Regexp values into a slice. 
+func (p *parserMixin) RegexpList() (target *[]*regexp.Regexp) { + target = new([]*regexp.Regexp) + p.RegexpListVar(target) + return +} + +func (p *parserMixin) RegexpListVar(target *[]*regexp.Regexp) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newRegexpValue(v.(**regexp.Regexp)) + })) +} + +// -- net.IP Value +type resolvedIPValue struct{ v *net.IP } + +func newResolvedIPValue(p *net.IP) *resolvedIPValue { + return &resolvedIPValue{p} +} + +func (f *resolvedIPValue) Set(s string) error { + v, err := resolveHost(s) + if err == nil { + *f.v = (net.IP)(v) + } + return err +} + +func (f *resolvedIPValue) Get() interface{} { return (net.IP)(*f.v) } + +func (f *resolvedIPValue) String() string { return fmt.Sprintf("%v", *f.v) } + +// Resolve a hostname or IP to an IP. +func (p *parserMixin) ResolvedIP() (target *net.IP) { + target = new(net.IP) + p.ResolvedIPVar(target) + return +} + +func (p *parserMixin) ResolvedIPVar(target *net.IP) { + p.SetValue(newResolvedIPValue(target)) +} + +// ResolvedIPList accumulates net.IP values into a slice. +func (p *parserMixin) ResolvedIPList() (target *[]net.IP) { + target = new([]net.IP) + p.ResolvedIPListVar(target) + return +} + +func (p *parserMixin) ResolvedIPListVar(target *[]net.IP) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newResolvedIPValue(v.(*net.IP)) + })) +} + +// -- []byte Value +type hexBytesValue struct{ v *[]byte } + +func newHexBytesValue(p *[]byte) *hexBytesValue { + return &hexBytesValue{p} +} + +func (f *hexBytesValue) Set(s string) error { + v, err := hex.DecodeString(s) + if err == nil { + *f.v = ([]byte)(v) + } + return err +} + +func (f *hexBytesValue) Get() interface{} { return ([]byte)(*f.v) } + +func (f *hexBytesValue) String() string { return fmt.Sprintf("%v", *f.v) } + +// Bytes as a hex string. +func (p *parserMixin) HexBytes() (target *[]byte) { + target = new([]byte) + p.HexBytesVar(target) + return +} + +func (p *parserMixin) HexBytesVar(target *[]byte) { + p.SetValue(newHexBytesValue(target)) +} + +// HexBytesList accumulates []byte values into a slice. 
+func (p *parserMixin) HexBytesList() (target *[][]byte) { + target = new([][]byte) + p.HexBytesListVar(target) + return +} + +func (p *parserMixin) HexBytesListVar(target *[][]byte) { + p.SetValue(newAccumulator(target, func(v interface{}) Value { + return newHexBytesValue(v.(*[]byte)) + })) +} diff --git a/vendor/gopkg.in/ini.v1/.gitignore b/vendor/gopkg.in/ini.v1/.gitignore new file mode 100644 index 0000000000..12411127b3 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.gitignore @@ -0,0 +1,6 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini +.idea +/.vscode diff --git a/vendor/gopkg.in/ini.v1/.travis.yml b/vendor/gopkg.in/ini.v1/.travis.yml new file mode 100644 index 0000000000..c8ea49ccc6 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.travis.yml @@ -0,0 +1,17 @@ +sudo: false +language: go +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + +script: + - go get golang.org/x/tools/cmd/cover + - go get github.com/smartystreets/goconvey + - mkdir -p $HOME/gopath/src/gopkg.in + - ln -s $HOME/gopath/src/github.com/go-ini/ini $HOME/gopath/src/gopkg.in/ini.v1 + - cd $HOME/gopath/src/gopkg.in/ini.v1 + - go test -v -cover -race diff --git a/vendor/gopkg.in/ini.v1/LICENSE b/vendor/gopkg.in/ini.v1/LICENSE new file mode 100644 index 0000000000..d361bbcdf5 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/gopkg.in/ini.v1/Makefile b/vendor/gopkg.in/ini.v1/Makefile new file mode 100644 index 0000000000..af27ff0768 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/Makefile @@ -0,0 +1,15 @@ +.PHONY: build test bench vet coverage + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -race -test.bench=. -test.benchmem + +vet: + go vet + +coverage: + go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/vendor/gopkg.in/ini.v1/README.md b/vendor/gopkg.in/ini.v1/README.md new file mode 100644 index 0000000000..ae4dfc3a5a --- /dev/null +++ b/vendor/gopkg.in/ini.v1/README.md @@ -0,0 +1,46 @@ +INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg)](https://sourcegraph.com/github.com/go-ini/ini) +=== + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. 
+
+## Features
+
+- Load from multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+The minimum required version of Go is **1.6**.
+
+To use a tagged revision:
+
+```sh
+$ go get gopkg.in/ini.v1
+```
+
+To use the latest changes:
+
+```sh
+$ go get github.com/go-ini/ini
+```
+
+Add the `-u` flag to update to the latest version in the future.
+
+## Getting Help
+
+- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+
+## License
+
+This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/gopkg.in/ini.v1/error.go b/vendor/gopkg.in/ini.v1/error.go
new file mode 100644
index 0000000000..80afe74315
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/error.go
@@ -0,0 +1,32 @@
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "fmt"
+)
+
+// ErrDelimiterNotFound is returned when no key-value delimiter can be found
+// in a line that is expected to contain one.
+type ErrDelimiterNotFound struct {
+ Line string
+}
+
+// IsErrDelimiterNotFound reports whether err is an ErrDelimiterNotFound.
+func IsErrDelimiterNotFound(err error) bool {
+ _, ok := err.(ErrDelimiterNotFound)
+ return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+ return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
diff --git a/vendor/gopkg.in/ini.v1/file.go b/vendor/gopkg.in/ini.v1/file.go
new file mode 100644
index 0000000000..0ed0eafd02
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/file.go
@@ -0,0 +1,418 @@
+// Copyright 2017 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "sync"
+)
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+ options LoadOptions
+ dataSources []dataSource
+
+ // BlockMode guards access with a read/write lock. This makes concurrent
+ // use safe, but can be switched off when that does not matter.
+ BlockMode bool
+ lock sync.RWMutex
+
+ // To keep data in order.
+ sectionList []string
+ // Actual data is stored here.
+ sections map[string]*Section
+
+ NameMapper
+ ValueMapper
+}
+
+// newFile initializes a File object with the given data sources.
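+//
+// An illustrative sketch (not upstream code): callers normally reach this
+// through the public Load API shown later in this package, roughly:
+//
+//	cfg, err := ini.Load([]byte("[server]\nport = 8080"))
+//	if err != nil {
+//		// handle error
+//	}
+//	port := cfg.Section("server").Key("port").MustInt(9090)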
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
+ if len(opts.KeyValueDelimiters) == 0 {
+ opts.KeyValueDelimiters = "=:"
+ }
+ return &File{
+ BlockMode: true,
+ dataSources: dataSources,
+ sections: make(map[string]*Section),
+ sectionList: make([]string, 0, 10),
+ options: opts,
+ }
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+ // Ignore error here, we are sure our data is good.
+ f, _ := Load([]byte(""))
+ return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new section: empty section name")
+ } else if f.options.Insensitive && name != DEFAULT_SECTION {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if inSlice(name, f.sectionList) {
+ return f.sections[name], nil
+ }
+
+ f.sectionList = append(f.sectionList, name)
+ f.sections[name] = newSection(f, name)
+ return f.sections[name], nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+ section, err := f.NewSection(name)
+ if err != nil {
+ return nil, err
+ }
+
+ section.isRawSection = true
+ section.rawBody = body
+ return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+ for _, name := range names {
+ if _, err = f.NewSection(name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSection returns a section by the given name.
+func (f *File) GetSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+ if f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sec := f.sections[name]
+ if sec == nil {
+ return nil, fmt.Errorf("section '%s' does not exist", name)
+ }
+ return sec, nil
+}
+
+// Section assumes the named section exists and returns a zero-value section when it does not.
+func (f *File) Section(name string) *Section {
+ sec, err := f.GetSection(name)
+ if err != nil {
+ // Note: it's safe to ignore the error here because the only possible
+ // error from NewSection is an empty section name, and an empty name is
+ // resolved to the default section by GetSection above, so it never
+ // reaches this point.
+ sec, _ = f.NewSection(name)
+ return sec
+ }
+ return sec
+}
+
+// Sections returns a list of all sections.
+func (f *File) Sections() []*Section {
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sections := make([]*Section, len(f.sectionList))
+ for i, name := range f.sectionList {
+ sections[i] = f.sections[name]
+ }
+ return sections
+}
+
+// ChildSections returns a list of child sections of the given section name.
+func (f *File) ChildSections(name string) []*Section {
+ return f.Section(name).ChildSections()
+}
+
+// SectionStrings returns a list of section names.
+func (f *File) SectionStrings() []string {
+ list := make([]string, len(f.sectionList))
+ copy(list, f.sectionList)
+ return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+
+ for i, s := range f.sectionList {
+ if s == name {
+ f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+ delete(f.sections, name) + return + } + } +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { + equalSign := DefaultFormatLeft + "=" + DefaultFormatRight + + if PrettyFormat || PrettyEqual { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + // Support multiline comments + lines := strings.Split(sec.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + lines[i] + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if i > 0 || DefaultHeader { + if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return nil, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + if sec.isRawSection { + if _, err := buf.WriteString(sec.rawBody); err != nil { + return nil, err + } + + if PrettySection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modifed if they contain certain characters so + // we need to take that into account in our calculation. 
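+ // For example, a key named a=b is written back as `a=b` (two extra
+ // characters), while a key containing a backtick is written as
+ // """key""" (six extra), per the switch further below.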
+ alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KEY_LIST: + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + // Support multiline comments + lines := strings.Split(key.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + strings.TrimSpace(lines[i]) + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + for _, val := range key.ValueWithShadows() { + if _, err := buf.WriteString(kname); err != nil { + return nil, err + } + + if key.isBooleanType { + if kname != sec.keyList[len(sec.keyList)-1] { + buf.WriteString(LineBreak) + } + continue KEY_LIST + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { + return nil, err + } + } + + for _, val := range key.nestedValues { + if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { + return nil, err + } + } + } + + if PrettySection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + } + + return buf, nil +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { + buf, err := f.writeToBuffer(indent) + if err != nil { + return 0, err + } + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + buf, err := f.writeToBuffer(indent) + if err != nil { + return err + } + + return ioutil.WriteFile(filename, buf.Bytes(), 0666) +} + +// SaveTo writes content to file system. 
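+//
+// An illustrative sketch (the file name is hypothetical):
+//
+//	if err := cfg.SaveTo("app.ini"); err != nil {
+//		// handle error
+//	}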
+func (f *File) SaveTo(filename string) error {
+ return f.SaveToIndent(filename, "")
+}
diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go
new file mode 100644
index 0000000000..f827a1ef99
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/ini.go
@@ -0,0 +1,219 @@
+// +build go1.6
+
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "regexp"
+ "runtime"
+)
+
+const (
+ // DEFAULT_SECTION is the name of the default section. You can use this
+ // constant or the string literal. In most cases, an empty string is all
+ // you need to access the section.
+ DEFAULT_SECTION = "DEFAULT"
+
+ // Maximum allowed depth when recursively substituting variable names.
+ _DEPTH_VALUES = 99
+ _VERSION = "1.42.0"
+)
+
+// Version returns the current package version literal.
+func Version() string {
+ return _VERSION
+}
+
+var (
+ // LineBreak is the delimiter used to determine or compose a new line.
+ // It is changed to "\r\n" automatically on Windows at package init time.
+ LineBreak = "\n"
+
+ // DefaultFormatLeft and DefaultFormatRight place custom spacing around
+ // the "=" sign when PrettyFormat and PrettyEqual are both disabled.
+ DefaultFormatLeft = ""
+ DefaultFormatRight = ""
+
+ // Variable regexp pattern: %(variable)s
+ varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+ // PrettyFormat indicates whether to align "=" signs with spaces to produce
+ // pretty output, or to reduce all possible spaces for a compact format.
+ PrettyFormat = true
+
+ // PrettyEqual places spaces around the "=" sign even when PrettyFormat is false.
+ PrettyEqual = false
+
+ // DefaultHeader indicates whether to explicitly write the DEFAULT section header.
+ DefaultHeader = false
+
+ // PrettySection indicates whether to put a blank line between sections.
+ PrettySection = true
+)
+
+func init() {
+ if runtime.GOOS == "windows" {
+ LineBreak = "\r\n"
+ }
+}
+
+func inSlice(str string, s []string) bool {
+ for _, v := range s {
+ if str == v {
+ return true
+ }
+ }
+ return false
+}
+
+// dataSource is an interface that returns an object which can be read and closed.
+type dataSource interface {
+ ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+ name string
+}
+
+func (s sourceFile) ReadCloser() (io.ReadCloser, error) {
+ return os.Open(s.name)
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+ data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with a Close method.
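+//
+// Together with sourceFile and sourceData above, this covers the three input
+// kinds accepted by parseDataSource below: a file name (string), raw bytes
+// ([]byte), and an open stream (io.ReadCloser).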
+type sourceReadCloser struct {
+ reader io.ReadCloser
+}
+
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+ return s.reader, nil
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+ switch s := source.(type) {
+ case string:
+ return sourceFile{s}, nil
+ case []byte:
+ return &sourceData{s}, nil
+ case io.ReadCloser:
+ return &sourceReadCloser{s}, nil
+ default:
+ return nil, fmt.Errorf("error parsing data source: unknown type %T", s)
+ }
+}
+
+// LoadOptions contains options that change the default parsing behavior.
+type LoadOptions struct {
+ // Loose indicates whether the parser should ignore nonexistent files or return an error.
+ Loose bool
+ // Insensitive indicates whether the parser forces all section and key names to lowercase.
+ Insensitive bool
+ // IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+ IgnoreContinuation bool
+ // IgnoreInlineComment indicates whether to ignore comments at the end of a value and treat them as part of the value.
+ IgnoreInlineComment bool
+ // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
+ SkipUnrecognizableLines bool
+ // AllowBooleanKeys indicates whether to allow boolean-type keys (a bare key name with
+ // no value), or to treat such lines as errors. Keys of this kind are mostly used in my.cnf.
+ AllowBooleanKeys bool
+ // AllowShadows indicates whether to keep track of keys with the same name under the same section.
+ AllowShadows bool
+ // AllowNestedValues indicates whether to allow AWS-like nested values.
+ // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
+ AllowNestedValues bool
+ // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
+ // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
+ // Relevant quote: Values can also span multiple lines, as long as they are indented deeper
+ // than the first line of the value.
+ AllowPythonMultilineValues bool
+ // SpaceBeforeInlineComment indicates whether an inline comment symbol (# or ;) must be
+ // preceded by whitespace to be recognized as starting a comment.
+ // Docs: https://docs.python.org/2/library/configparser.html
+ // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
+ // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
+ SpaceBeforeInlineComment bool
+ // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside a value to regular format
+ // when the value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
+ UnescapeValueDoubleQuotes bool
+ // UnescapeValueCommentSymbols indicates whether to unescape comment symbols (\# and \;) inside a value to regular format
+ // when the value is NOT surrounded by any quotes.
+ // Note: UNSTABLE, behavior might change to only unescape inside double quotes, though that may not be necessary at all.
+ UnescapeValueCommentSymbols bool
+ // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
+ // conform to key/value pairs. Specify the names of those blocks here.
+ UnparseableSections []string
+ // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
+ KeyValueDelimiters string
+ // PreserveSurroundedQuote indicates whether to preserve surrounding quotes (single and double) on values.
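+ // For example, with this enabled a line `key = "value"` keeps the value
+ // as `"value"` instead of trimming it to `value`.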
+ PreserveSurroundedQuote bool
+}
+
+// LoadSources allows the caller to apply customized options for loading from data source(s).
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+ sources := make([]dataSource, len(others)+1)
+ sources[0], err = parseDataSource(source)
+ if err != nil {
+ return nil, err
+ }
+ for i := range others {
+ sources[i+1], err = parseDataSource(others[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ f := newFile(sources, opts)
+ if err = f.Reload(); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be a mix of file names (string), raw data ([]byte),
+// or streams (io.ReadCloser).
+// It will return an error if the list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly the same functionality as Load,
+// except it ignores nonexistent files instead of returning an error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly the same functionality as Load,
+// except it forces all section and key names to be lowercased.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly the same functionality as Load,
+// except it allows shadow keys.
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
diff --git a/vendor/gopkg.in/ini.v1/key.go b/vendor/gopkg.in/ini.v1/key.go
new file mode 100644
index 0000000000..0fee0dc7e4
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/key.go
@@ -0,0 +1,752 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+ s *Section
+ Comment string
+ name string
+ value string
+ isAutoIncrement bool
+ isBooleanType bool
+
+ isShadow bool
+ shadows []*Key
+
+ nestedValues []string
+}
+
+// newKey simply returns a key object with the given values.
+func newKey(s *Section, name, val string) *Key {
+ return &Key{
+ s: s,
+ name: name,
+ value: val,
+ }
+}
+
+func (k *Key) addShadow(val string) error {
+ if k.isShadow {
+ return errors.New("cannot add shadow to another shadow key")
+ } else if k.isAutoIncrement || k.isBooleanType {
+ return errors.New("cannot add shadow to auto-increment or boolean key")
+ }
+
+ shadow := newKey(k.s, k.name, val)
+ shadow.isShadow = true
+ k.shadows = append(k.shadows, shadow)
+ return nil
+}
+
+// AddShadow adds a new shadow key to itself.
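+//
+// Shadow keys are only permitted when the file was loaded with
+// LoadOptions{AllowShadows: true} (see ShadowLoad). An illustrative sketch:
+//
+//	cfg, _ := ini.ShadowLoad([]byte("k = v1\nk = v2"))
+//	vals := cfg.Section("").Key("k").ValueWithShadows() // []string{"v1", "v2"}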
+func (k *Key) AddShadow(val string) error { + if !k.s.f.options.AllowShadows { + return errors.New("shadow key is not allowed") + } + return k.addShadow(val) +} + +func (k *Key) addNestedValue(val string) error { + if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add nested value to auto-increment or boolean key") + } + + k.nestedValues = append(k.nestedValues, val) + return nil +} + +func (k *Key) AddNestedValue(val string) error { + if !k.s.f.options.AllowNestedValues { + return errors.New("nested value is not allowed") + } + return k.addNestedValue(val) +} + +// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv +type ValueMapper func(string) string + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// ValueWithShadows returns raw values of key and its shadows if any. +func (k *Key) ValueWithShadows() []string { + if len(k.shadows) == 0 { + return []string{k.value} + } + vals := make([]string, len(k.shadows)+1) + vals[0] = k.value + for i := range k.shadows { + vals[i+1] = k.shadows[i].value + } + return vals +} + +// NestedValues returns nested values stored in the key. +// It is possible returned value is nil if no nested values stored in the key. +func (k *Key) NestedValues() []string { + return k.nestedValues +} + +// transformValue takes a raw value and transforms to its final string. +func (k *Key) transformValue(val string) string { + if k.s.f.ValueMapper != nil { + val = k.s.f.ValueMapper(val) + } + + // Fail-fast if no indicate char found for recursive value + if !strings.Contains(val, "%") { + return val + } + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := vr[2 : len(vr)-2] + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil || k == nk { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// String returns string representation of value. +func (k *Key) String() string { + return k.transformValue(k.value) +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. 
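+// Parsing uses base 0, so prefixed literals such as "0x1A" are accepted in
+// addition to plain decimals.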
+func (k *Key) Int() (int, error) { + v, err := strconv.ParseInt(k.String(), 0, 64) + return int(v), err +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 0, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 0, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 0, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. 
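+//
+// Like the other Must* helpers above, the default value is both returned and
+// written back as the stored key value when parsing fails, e.g. (illustrative):
+//
+//	t := cfg.Section("").Key("born").MustTimeFormat("2006-01-02", time.Now())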
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. 
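+//
+// An illustrative sketch: k.RangeInt(5000, 1024, 65535) returns the parsed
+// value only when it lies within [1024, 65535], and 5000 otherwise.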
+func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + runes := []rune(str) + vals := make([]string, 0, 2) + var buf bytes.Buffer + escape := false + idx := 0 + for { + if escape { + escape = false + if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { + buf.WriteRune('\\') + } + buf.WriteRune(runes[idx]) + } else { + if runes[idx] == '\\' { + escape = true + } else if strings.HasPrefix(string(runes[idx:]), delim) { + idx += len(delim) - 1 + vals = append(vals, strings.TrimSpace(buf.String())) + buf.Reset() + } else { + buf.WriteRune(runes[idx]) + } + } + idx += 1 + if idx == len(runes) { + break + } + } + + if buf.Len() > 0 { + vals = append(vals, strings.TrimSpace(buf.String())) + } + + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. 
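+// For example, a value of "1,2,x,4" with delim "," yields [1 2 0 4], since
+// "x" fails to parse and is kept as the zero value.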
+func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.parseFloat64s(k.Strings(delim), false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.parseInts(k.Strings(delim), false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.parseInt64s(k.Strings(delim), false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. 
+func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.parseUints(k.Strings(delim), false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.parseUint64s(k.Strings(delim), false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.parseTimesFormat(format, k.Strings(delim), false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// parseFloat64s transforms strings to float64s. +func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInts transforms strings to ints. +func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + for _, str := range strs { + valInt64, err := strconv.ParseInt(str, 0, 64) + val := int(valInt64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 0, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseUints transforms strings to uints. +func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 0, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// parseUint64s transforms strings to uint64s. +func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 0, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseTimesFormat transforms strings to times in given format. +func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. 
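+//
+// An illustrative sketch (section and key names are hypothetical):
+//
+//	cfg.Section("server").Key("port").SetValue("9090")
+//	_ = cfg.SaveTo("app.ini") // persist the change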
+func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go new file mode 100644 index 0000000000..f20073d1b4 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/parser.go @@ -0,0 +1,488 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "unicode" +) + +var pythonMultiline = regexp.MustCompile("^(\\s+)([^\n]+)") + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + p.buf.Read(mask) + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + p.buf.Read(mask) + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(delimiters string, in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
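+ // Quoted names allow delimiter characters inside keys, mirroring the
+ // quoting rules in writeToBuffer: e.g. a name containing a delimiter can
+ // be written as `my=key`, and one containing a backtick as """my`key""".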
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], delimiters) + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, delimiters) + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. 
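+//
+// For example, hasSurroundedQuote(`"abc"`, '"') is true, while
+// hasSurroundedQuote(`"a"c"`, '"') is false because of the inner quote.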
+func hasSurroundedQuote(in string, quote byte) bool { + return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, + parserBufferSize int, + ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes, unescapeValueCommentSymbols, allowPythonMultilines, spaceBeforeInlineComment, preserveSurroundedQuote bool) (string, error) { + + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } else if unescapeValueDoubleQuotes && line[0] == '"' { + valQuote = `"` + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + if unescapeValueDoubleQuotes && valQuote == `"` { + return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil + } + return line[startIdx : pos+startIdx], nil + } + + lastChar := line[len(line)-1] + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + trimmedLastChar := line[len(line)-1] + + // Check continuation lines when desired + if !ignoreContinuation && trimmedLastChar == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !ignoreInlineComment { + var i int + if spaceBeforeInlineComment { + i = strings.Index(line, " #") + if i == -1 { + i = strings.Index(line, " ;") + } + + } else { + i = strings.IndexAny(line, "#;") + } + + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + } + + // Trim single and double quotes + if (hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"')) && !preserveSurroundedQuote { + line = line[1 : len(line)-1] + } else if len(valQuote) == 0 && unescapeValueCommentSymbols { + if strings.Contains(line, `\;`) { + line = strings.Replace(line, `\;`, ";", -1) + } + if strings.Contains(line, `\#`) { + line = strings.Replace(line, `\#`, "#", -1) + } + } else if allowPythonMultilines && lastChar == '\n' { + parserBufferPeekResult, _ := p.buf.Peek(parserBufferSize) + peekBuffer := bytes.NewBuffer(parserBufferPeekResult) + + val := line + + for { + peekData, peekErr := peekBuffer.ReadBytes('\n') + if peekErr != nil { + if peekErr == io.EOF { + return val, nil + } + return "", peekErr + } + + peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) + if len(peekMatches) != 3 { + return val, nil + } + + // NOTE: Return if not a python-ini multi-line value. + currentIdentSize := len(peekMatches[1]) + if currentIdentSize <= 0 { + return val, nil + } + + // NOTE: Just advance the parser reader (buffer) in-sync with the peek buffer. + _, err := p.readUntil('\n') + if err != nil { + return "", err + } + + val += fmt.Sprintf("\n%s", peekMatches[2]) + } + } + + return line, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. 
+ name := DEFAULT_SECTION + if f.options.Insensitive { + name = strings.ToLower(DEFAULT_SECTION) + } + section, _ := f.NewSection(name) + + // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key + var isLastValueEmpty bool + var lastRegularKey *Key + + var line []byte + var inUnparseableSection bool + + // NOTE: Iterate and increase `currentPeekSize` until + // the size of the parser buffer is found. + // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. + parserBufferSize := 0 + // NOTE: Peek 1kb at a time. + currentPeekSize := 1024 + + if f.options.AllowPythonMultilineValues { + for { + peekBytes, _ := p.buf.Peek(currentPeekSize) + peekBytesLength := len(peekBytes) + + if parserBufferSize >= peekBytesLength { + break + } + + currentPeekSize *= 2 + parserBufferSize = peekBytesLength + } + } + + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + if f.options.AllowNestedValues && + isLastValueEmpty && len(line) > 0 { + if line[0] == ' ' || line[0] == '\t' { + lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) + continue + } + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + closeIdx := bytes.LastIndexByte(line, ']') + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset aotu-counter and comments + p.comment.Reset() + p.count = 1 + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue + } + + kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) + if err != nil { + // Treat as boolean key when desired, and whole line is key name. + if IsErrDelimiterNotFound(err) { + switch { + case f.options.AllowBooleanKeys: + kname, err := p.readValue(line, + parserBufferSize, + f.options.IgnoreContinuation, + f.options.IgnoreInlineComment, + f.options.UnescapeValueDoubleQuotes, + f.options.UnescapeValueCommentSymbols, + f.options.AllowPythonMultilineValues, + f.options.SpaceBeforeInlineComment, + f.options.PreserveSurroundedQuote) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + + case f.options.SkipUnrecognizableLines: + continue + } + } + return err + } + + // Auto increment. 
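+ // For example, key lines written as "- = first" and "- = second" are
+ // assigned the names "#1" and "#2" in order of appearance.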
+		isAutoIncr := false
+		if kname == "-" {
+			isAutoIncr = true
+			kname = "#" + strconv.Itoa(p.count)
+			p.count++
+		}
+
+		value, err := p.readValue(line[offset:],
+			parserBufferSize,
+			f.options.IgnoreContinuation,
+			f.options.IgnoreInlineComment,
+			f.options.UnescapeValueDoubleQuotes,
+			f.options.UnescapeValueCommentSymbols,
+			f.options.AllowPythonMultilineValues,
+			f.options.SpaceBeforeInlineComment,
+			f.options.PreserveSurroundedQuote)
+		if err != nil {
+			return err
+		}
+		isLastValueEmpty = len(value) == 0
+
+		key, err := section.NewKey(kname, value)
+		if err != nil {
+			return err
+		}
+		key.isAutoIncrement = isAutoIncr
+		key.Comment = strings.TrimSpace(p.comment.String())
+		p.comment.Reset()
+		lastRegularKey = key
+	}
+	return nil
+}
diff --git a/vendor/gopkg.in/ini.v1/section.go b/vendor/gopkg.in/ini.v1/section.go
new file mode 100644
index 0000000000..bc32c620d6
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/section.go
@@ -0,0 +1,259 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+// Section represents a config section.
+type Section struct {
+	f        *File
+	Comment  string
+	name     string
+	keys     map[string]*Key
+	keyList  []string
+	keysHash map[string]string
+
+	isRawSection bool
+	rawBody      string
+}
+
+func newSection(f *File, name string) *Section {
+	return &Section{
+		f:        f,
+		name:     name,
+		keys:     make(map[string]*Key),
+		keyList:  make([]string, 0, 10),
+		keysHash: make(map[string]string),
+	}
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+	return s.name
+}
+
+// Body returns rawBody of Section if the section was marked as unparseable.
+// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
+func (s *Section) Body() string {
+	return strings.TrimSpace(s.rawBody)
+}
+
+// SetBody updates body content only if section is raw.
+func (s *Section) SetBody(body string) {
+	if !s.isRawSection {
+		return
+	}
+	s.rawBody = body
+}
+
+// NewKey creates a new key in the given section.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+	if len(name) == 0 {
+		return nil, errors.New("error creating new key: empty key name")
+	} else if s.f.options.Insensitive {
+		name = strings.ToLower(name)
+	}
+
+	if s.f.BlockMode {
+		s.f.lock.Lock()
+		defer s.f.lock.Unlock()
+	}
+
+	if inSlice(name, s.keyList) {
+		if s.f.options.AllowShadows {
+			if err := s.keys[name].addShadow(val); err != nil {
+				return nil, err
+			}
+		} else {
+			s.keys[name].value = val
+			s.keysHash[name] = val
+		}
+		return s.keys[name], nil
+	}
+
+	s.keyList = append(s.keyList, name)
+	s.keys[name] = newKey(s, name, val)
+	s.keysHash[name] = val
+	return s.keys[name], nil
+}
+
+// NewBooleanKey creates a new boolean-type key in the given section.
+func (s *Section) NewBooleanKey(name string) (*Key, error) {
+	key, err := s.NewKey(name, "true")
+	if err != nil {
+		return nil, err
+	}
+
+	key.isBooleanType = true
+	return key, nil
+}
+
+// GetKey returns key in section by given name.
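+// If the key is absent and the section is a child section (e.g. "a.b"),
+// its parent sections are searched as well. Illustrative usage, assuming
+// cfg is a *File previously returned by Load:
+//
+//	key, err := cfg.Section("server").GetKey("protocol")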
+func (s *Section) GetKey(name string) (*Key, error) {
+	// FIXME: change to section level lock?
+	if s.f.BlockMode {
+		s.f.lock.RLock()
+	}
+	if s.f.options.Insensitive {
+		name = strings.ToLower(name)
+	}
+	key := s.keys[name]
+	if s.f.BlockMode {
+		s.f.lock.RUnlock()
+	}
+
+	if key == nil {
+		// Check if it is a child-section.
+		sname := s.name
+		for {
+			if i := strings.LastIndex(sname, "."); i > -1 {
+				sname = sname[:i]
+				sec, err := s.f.GetSection(sname)
+				if err != nil {
+					continue
+				}
+				return sec.GetKey(name)
+			} else {
+				break
+			}
+		}
+		return nil, fmt.Errorf("error when getting key of section '%s': key '%s' does not exist", s.name, name)
+	}
+	return key, nil
+}
+
+// HasKey returns true if section contains a key with given name.
+func (s *Section) HasKey(name string) bool {
+	key, _ := s.GetKey(name)
+	return key != nil
+}
+
+// Haskey is a backwards-compatible name for HasKey.
+// TODO: delete me in v2
+func (s *Section) Haskey(name string) bool {
+	return s.HasKey(name)
+}
+
+// HasValue returns true if section contains given raw value.
+func (s *Section) HasValue(value string) bool {
+	if s.f.BlockMode {
+		s.f.lock.RLock()
+		defer s.f.lock.RUnlock()
+	}
+
+	for _, k := range s.keys {
+		if value == k.value {
+			return true
+		}
+	}
+	return false
+}
+
+// Key assumes the named key exists in the section and returns a zero-value key when it does not.
+func (s *Section) Key(name string) *Key {
+	key, err := s.GetKey(name)
+	if err != nil {
+		// Ignoring the error is OK here: the only possible error is an
+		// empty key name, and an empty name never reaches this point.
+		key, _ = s.NewKey(name, "")
+		return key
+	}
+	return key
+}
+
+// Keys returns list of keys of section.
+func (s *Section) Keys() []*Key {
+	keys := make([]*Key, len(s.keyList))
+	for i := range s.keyList {
+		keys[i] = s.Key(s.keyList[i])
+	}
+	return keys
+}
+
+// ParentKeys returns list of keys of parent sections.
+func (s *Section) ParentKeys() []*Key {
+	var parentKeys []*Key
+	sname := s.name
+	for {
+		if i := strings.LastIndex(sname, "."); i > -1 {
+			sname = sname[:i]
+			sec, err := s.f.GetSection(sname)
+			if err != nil {
+				continue
+			}
+			parentKeys = append(parentKeys, sec.Keys()...)
+		} else {
+			break
+		}
+	}
+	return parentKeys
+}
+
+// KeyStrings returns list of key names of section.
+func (s *Section) KeyStrings() []string {
+	list := make([]string, len(s.keyList))
+	copy(list, s.keyList)
+	return list
+}
+
+// KeysHash returns a hash of key names to values.
+func (s *Section) KeysHash() map[string]string {
+	if s.f.BlockMode {
+		s.f.lock.RLock()
+		defer s.f.lock.RUnlock()
+	}
+
+	hash := map[string]string{}
+	for key, value := range s.keysHash {
+		hash[key] = value
+	}
+	return hash
+}
+
+// DeleteKey deletes a key from section.
+func (s *Section) DeleteKey(name string) {
+	if s.f.BlockMode {
+		s.f.lock.Lock()
+		defer s.f.lock.Unlock()
+	}
+
+	for i, k := range s.keyList {
+		if k == name {
+			s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+			delete(s.keys, name)
+			delete(s.keysHash, name)
+			return
+		}
+	}
+}
+
+// ChildSections returns a list of child sections of current section.
+// For example, "[parent.child1]" and "[parent.child12]" are child sections
+// of section "[parent]".
+func (s *Section) ChildSections() []*Section {
+	prefix := s.name + "."
+	children := make([]*Section, 0, 3)
+	for _, name := range s.f.sectionList {
+		if strings.HasPrefix(name, prefix) {
+			children = append(children, s.f.sections[name])
+		}
+	}
+	return children
+}
diff --git a/vendor/gopkg.in/ini.v1/struct.go b/vendor/gopkg.in/ini.v1/struct.go
new file mode 100644
index 0000000000..a9dfed078a
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/struct.go
@@ -0,0 +1,512 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+	"unicode"
+)
+
+// NameMapper represents an ini tag name mapper.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+	// AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+	AllCapsUnderscore NameMapper = func(raw string) string {
+		newstr := make([]rune, 0, len(raw))
+		for i, chr := range raw {
+			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+				if i > 0 {
+					newstr = append(newstr, '_')
+				}
+			}
+			newstr = append(newstr, unicode.ToUpper(chr))
+		}
+		return string(newstr)
+	}
+	// TitleUnderscore converts to format title_underscore.
+	TitleUnderscore NameMapper = func(raw string) string {
+		newstr := make([]rune, 0, len(raw))
+		for i, chr := range raw {
+			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+				if i > 0 {
+					newstr = append(newstr, '_')
+				}
+				chr -= ('A' - 'a')
+			}
+			newstr = append(newstr, chr)
+		}
+		return string(newstr)
+	}
+)
+
+func (s *Section) parseFieldName(raw, actual string) string {
+	if len(actual) > 0 {
+		return actual
+	}
+	if s.f.NameMapper != nil {
+		return s.f.NameMapper(raw)
+	}
+	return raw
+}
+
+func parseDelim(actual string) string {
+	if len(actual) > 0 {
+		return actual
+	}
+	return ","
+}
+
+// reflectTime is the reflect.Kind of time.Time (i.e. reflect.Struct);
+// callers special-case it when switching on a field's kind.
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setSliceWithProperType sets proper values to slice based on its type.
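+// For example (illustrative), a []int field whose key holds "1,2,3" with the
+// default "," delimiter is parsed element by element and then assigned.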
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+	var strs []string
+	if allowShadow {
+		strs = key.StringsWithShadows(delim)
+	} else {
+		strs = key.Strings(delim)
+	}
+
+	numVals := len(strs)
+	if numVals == 0 {
+		return nil
+	}
+
+	var vals interface{}
+	var err error
+
+	sliceOf := field.Type().Elem().Kind()
+	switch sliceOf {
+	case reflect.String:
+		vals = strs
+	case reflect.Int:
+		vals, err = key.parseInts(strs, true, false)
+	case reflect.Int64:
+		vals, err = key.parseInt64s(strs, true, false)
+	case reflect.Uint:
+		vals, err = key.parseUints(strs, true, false)
+	case reflect.Uint64:
+		vals, err = key.parseUint64s(strs, true, false)
+	case reflect.Float64:
+		vals, err = key.parseFloat64s(strs, true, false)
+	case reflectTime:
+		vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
+	default:
+		return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+	}
+	if err != nil && isStrict {
+		return err
+	}
+
+	slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+	for i := 0; i < numVals; i++ {
+		switch sliceOf {
+		case reflect.String:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
+		case reflect.Int:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
+		case reflect.Int64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
+		case reflect.Uint:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
+		case reflect.Uint64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
+		case reflect.Float64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
+		case reflectTime:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
+		}
+	}
+	field.Set(slice)
+	return nil
+}
+
+func wrapStrictError(err error, isStrict bool) error {
+	if isStrict {
+		return err
+	}
+	return nil
+}
+
+// setWithProperType sets a proper value to the field based on its type,
+// but in non-strict mode it does not return an error on failed parsing,
+// because we want to keep the default value that is already assigned to the struct.
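+// Note that for signed and unsigned integer fields a time.Duration parse is
+// attempted first and wins when it yields a non-zero value; plain integer
+// parsing is used otherwise.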
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int64(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && uint64(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetUint(uintVal) + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + return setSliceWithProperType(key, field, delim, allowShadow, isStrict) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { + opts := strings.SplitN(tag, ",", 3) + rawName = opts[0] + if len(opts) > 1 { + omitEmpty = opts[1] == "omitempty" + } + if len(opts) > 2 { + allowShadow = opts[2] == "allowshadow" + } + return rawName, omitEmpty, allowShadow +} + +func (s *Section) mapTo(val reflect.Value, isStrict bool) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, _, allowShadow := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field, isStrict); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. 
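+// Illustrative usage (type and tag names assumed, not part of this file):
+//
+//	type ServerConfig struct {
+//		Protocol string `ini:"protocol"`
+//	}
+//	var sc ServerConfig
+//	err := cfg.Section("server").MapTo(&sc)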
+func (s *Section) MapTo(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot map to non-pointer struct")
+	}
+
+	return s.mapTo(val, false)
+}
+
+// StrictMapTo maps section to given struct in strict mode,
+// which returns all possible errors, including value parsing errors.
+func (s *Section) StrictMapTo(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot map to non-pointer struct")
+	}
+
+	return s.mapTo(val, true)
+}
+
+// MapTo maps file to given struct.
+func (f *File) MapTo(v interface{}) error {
+	return f.Section("").MapTo(v)
+}
+
+// StrictMapTo maps file to given struct in strict mode,
+// which returns all possible errors, including value parsing errors.
+func (f *File) StrictMapTo(v interface{}) error {
+	return f.Section("").StrictMapTo(v)
+}
+
+// MapToWithMapper maps data sources to given struct with name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+	cfg, err := Load(source, others...)
+	if err != nil {
+		return err
+	}
+	cfg.NameMapper = mapper
+	return cfg.MapTo(v)
+}
+
+// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
+// which returns all possible errors, including value parsing errors.
+func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+	cfg, err := Load(source, others...)
+	if err != nil {
+		return err
+	}
+	cfg.NameMapper = mapper
+	return cfg.StrictMapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+	return MapToWithMapper(v, nil, source, others...)
+}
+
+// StrictMapTo maps data sources to given struct in strict mode,
+// which returns all possible errors, including value parsing errors.
+func StrictMapTo(v, source interface{}, others ...interface{}) error {
+	return StrictMapToWithMapper(v, nil, source, others...)
+}
+
+// reflectSliceWithProperType does the opposite of setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
+	slice := field.Slice(0, field.Len())
+	if field.Len() == 0 {
+		return nil
+	}
+
+	var buf bytes.Buffer
+	sliceOf := field.Type().Elem().Kind()
+	for i := 0; i < field.Len(); i++ {
+		switch sliceOf {
+		case reflect.String:
+			buf.WriteString(slice.Index(i).String())
+		case reflect.Int, reflect.Int64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+		case reflect.Uint, reflect.Uint64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+		case reflect.Float64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+		case reflectTime:
+			buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+		default:
+			return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+		}
+		buf.WriteString(delim)
+	}
+	key.SetValue(buf.String()[:buf.Len()-1])
+	return nil
+}
+
+// reflectWithProperType does the opposite of setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+	switch t.Kind() {
+	case reflect.String:
+		key.SetValue(field.String())
+	case reflect.Bool:
+		key.SetValue(fmt.Sprint(field.Bool()))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		key.SetValue(fmt.Sprint(field.Int()))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		key.SetValue(fmt.Sprint(field.Uint()))
+	case reflect.Float32, reflect.Float64:
+		key.SetValue(fmt.Sprint(field.Float()))
+	case reflectTime:
+		key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
+	case reflect.Slice:
+		return reflectSliceWithProperType(key, field, delim)
+	default:
+		return fmt.Errorf("unsupported type '%s'", t)
+	}
+	return nil
+}
+
+// isEmptyValue is copied from encoding/json/encode.go, with modifications for time.Time support.
+// TODO: add more test coverage.
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflectTime:
+		t, ok := v.Interface().(time.Time)
+		return ok && t.IsZero()
+	}
+	return false
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			continue
+		}
+
+		opts := strings.SplitN(tag, ",", 2)
+		if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
+			continue
+		}
+
+		fieldName := s.parseFieldName(tpField.Name, opts[0])
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+			(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+			// Note: The only possible error here is that the section doesn't exist.
+			sec, err := s.f.GetSection(fieldName)
+			if err != nil {
+				// Note: fieldName can never be empty here, ignore error.
+				sec, _ = s.f.NewSection(fieldName)
+			}
+
+			// Add comment from comment tag
+			if len(sec.Comment) == 0 {
+				sec.Comment = tpField.Tag.Get("comment")
+			}
+
+			if err = sec.reflectFrom(field); err != nil {
+				return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+			}
+			continue
+		}
+
+		// Note: Same reason as for the section above.
+		key, err := s.GetKey(fieldName)
+		if err != nil {
+			key, _ = s.NewKey(fieldName, "")
+		}
+
+		// Add comment from comment tag
+		if len(key.Comment) == 0 {
+			key.Comment = tpField.Tag.Get("comment")
+		}
+
+		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+			return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+		}
+	}
+	return nil
+}
+
+// ReflectFrom reflects section from given struct.
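+// It is the inverse of MapTo, writing struct fields back into keys.
+// Illustrative usage (names assumed):
+//
+//	err := cfg.Section("server").ReflectFrom(&sc)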
+func (s *Section) ReflectFrom(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot reflect from non-pointer struct")
+	}
+
+	return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+	return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+	cfg.NameMapper = mapper
+	return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+	return ReflectFromWithMapper(cfg, v, nil)
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ab7f699d5c..d200961eb4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -34,6 +34,9 @@ github.com/NYTimes/gziphandler
 github.com/PuerkitoBio/purell
 # github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578
 github.com/PuerkitoBio/urlesc
+# github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751
+github.com/alecthomas/template
+github.com/alecthomas/template/parse
 # github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4
 github.com/alecthomas/units
 # github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878
@@ -235,6 +238,9 @@ github.com/gorilla/mux
 github.com/gorilla/websocket
 # github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4
 github.com/grpc-ecosystem/go-grpc-middleware
+github.com/grpc-ecosystem/go-grpc-middleware/tags
+github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing
+github.com/grpc-ecosystem/go-grpc-middleware/util/metautils
 # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 github.com/grpc-ecosystem/go-grpc-prometheus
 # github.com/grpc-ecosystem/grpc-gateway v1.9.5
@@ -304,6 +310,13 @@ github.com/mattes/migrate/pipe
 github.com/matttproud/golang_protobuf_extensions/pbutil
 # github.com/miekg/dns v1.1.15
 github.com/miekg/dns
+# github.com/minio/minio-go/v6 v6.0.27-0.20190529152532-de69c0e465ed
+github.com/minio/minio-go/v6
+github.com/minio/minio-go/v6/pkg/credentials
+github.com/minio/minio-go/v6/pkg/encrypt
+github.com/minio/minio-go/v6/pkg/s3signer
+github.com/minio/minio-go/v6/pkg/s3utils
+github.com/minio/minio-go/v6/pkg/set
 # github.com/mitchellh/go-homedir v1.1.0
 github.com/mitchellh/go-homedir
 # github.com/mitchellh/mapstructure v1.1.2
@@ -472,6 +485,23 @@ github.com/spf13/pflag
 # github.com/stretchr/testify v1.4.0
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/require
+# github.com/thanos-io/thanos v0.7.0
+github.com/thanos-io/thanos/pkg/block
+github.com/thanos-io/thanos/pkg/block/metadata
+github.com/thanos-io/thanos/pkg/compact/downsample
+github.com/thanos-io/thanos/pkg/component
+github.com/thanos-io/thanos/pkg/extprom
+github.com/thanos-io/thanos/pkg/model
+github.com/thanos-io/thanos/pkg/objstore
+github.com/thanos-io/thanos/pkg/objstore/s3
+github.com/thanos-io/thanos/pkg/pool
+github.com/thanos-io/thanos/pkg/runutil
+github.com/thanos-io/thanos/pkg/shipper
+github.com/thanos-io/thanos/pkg/store
+github.com/thanos-io/thanos/pkg/store/cache
+github.com/thanos-io/thanos/pkg/store/storepb
+github.com/thanos-io/thanos/pkg/strutil
+github.com/thanos-io/thanos/pkg/tracing
 # github.com/tinylib/msgp v0.0.0-20161221055906-38a6f61a768d
 github.com/tinylib/msgp/msgp
 # github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5
@@ -619,7 +649,7 @@ go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/propagation
 go.opencensus.io/trace/tracestate
-# go.uber.org/atomic v1.3.2
+# go.uber.org/atomic v1.4.0
 go.uber.org/atomic
 # go.uber.org/multierr v1.1.0
 go.uber.org/multierr
@@ -631,7 +661,9 @@ go.uber.org/zap/internal/color
 go.uber.org/zap/internal/exit
 go.uber.org/zap/zapcore
 # golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
+golang.org/x/crypto/argon2
 golang.org/x/crypto/bcrypt
+golang.org/x/crypto/blake2b
 golang.org/x/crypto/blowfish
 golang.org/x/crypto/ed25519
 golang.org/x/crypto/ed25519/internal/edwards25519
@@ -650,6 +682,7 @@ golang.org/x/net/internal/timeseries
 golang.org/x/net/ipv4
 golang.org/x/net/ipv6
 golang.org/x/net/netutil
+golang.org/x/net/publicsuffix
 golang.org/x/net/trace
 # golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
 golang.org/x/oauth2
@@ -661,6 +694,7 @@ golang.org/x/oauth2/jwt
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
 # golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456
+golang.org/x/sys/cpu
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 # golang.org/x/text v0.3.2
@@ -759,10 +793,14 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
+# gopkg.in/alecthomas/kingpin.v2 v2.2.6
+gopkg.in/alecthomas/kingpin.v2
 # gopkg.in/fsnotify/fsnotify.v1 v1.4.7
 gopkg.in/fsnotify/fsnotify.v1
 # gopkg.in/inf.v0 v0.9.1
 gopkg.in/inf.v0
+# gopkg.in/ini.v1 v1.42.0
+gopkg.in/ini.v1
 # gopkg.in/yaml.v2 v2.2.2
 gopkg.in/yaml.v2
 # k8s.io/api v0.0.0-20190813020757-36bff7324fb7