diff --git a/go.mod b/go.mod index bf09914ae83c3..dc919d144b3c3 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/containerd/containerd v1.3.2 // indirect github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e - github.com/cortexproject/cortex v0.6.1-0.20200219140319-baae166e3335 + github.com/cortexproject/cortex v0.7.0-rc.0 github.com/davecgh/go-spew v1.1.1 github.com/docker/distribution v2.7.1+incompatible // indirect github.com/docker/docker v0.7.3-0.20190817195342-4760db040282 @@ -39,24 +39,23 @@ require ( github.com/opentracing/opentracing-go v1.1.1-0.20200124165624-2876d2018785 github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible github.com/pkg/errors v0.8.1 - github.com/prometheus/client_golang v1.2.1 - github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 - github.com/prometheus/common v0.7.0 - github.com/prometheus/prometheus v1.8.2-0.20200107122003-4708915ac6ef + github.com/prometheus/client_golang v1.5.0 + github.com/prometheus/client_model v0.2.0 + github.com/prometheus/common v0.9.1 + github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd github.com/stretchr/testify v1.5.1 github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 github.com/uber/jaeger-client-go v2.20.1+incompatible github.com/ugorji/go v1.1.7 // indirect - github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1 + github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a go.etcd.io/etcd v0.0.0-20190815204525-8f85f0dc2607 // indirect golang.org/x/net v0.0.0-20191112182307-2180aed22343 - golang.org/x/sys v0.0.0-20191218084908-4a24b4065292 // indirect google.golang.org/grpc v1.25.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/fsnotify.v1 v1.4.7 - gopkg.in/yaml.v2 v2.2.5 + gopkg.in/yaml.v2 v2.2.7 k8s.io/klog v1.0.0 ) diff --git a/go.sum b/go.sum index f5b272d8b82f0..bab80c7aafb80 100644 --- a/go.sum +++ b/go.sum @@ -60,7 +60,6 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5/go.mod h1:xnKTFzjGUiZtiOagBsfnvomW+nJg2usB1ZpordQWqNM= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= @@ -71,8 +70,6 @@ github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cq github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI= -github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/OneOfOne/xxhash v1.2.6 
h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA= github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -96,8 +93,6 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= -github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -107,8 +102,6 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.22.4 h1:Mcq67g9mZEBvBuj/x7mF9KCyw5M8/4I/cjQPkdCsq0I= -github.com/aws/aws-sdk-go v1.22.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= @@ -165,8 +158,8 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cortexproject/cortex v0.6.1-0.20200219140319-baae166e3335 h1:sM8LmsJItJdliH2ZaOghwa9CQ8kRcy+90Te/s14c5RA= -github.com/cortexproject/cortex v0.6.1-0.20200219140319-baae166e3335/go.mod h1:xArTQk2WBKDQ8YrIUMLfIuecuV0dKWyVVyTgRF/+a1E= +github.com/cortexproject/cortex v0.7.0-rc.0 h1:oa/RzR9E09/5AkmTPGk97ObbhZmB5TycFzL59inProQ= +github.com/cortexproject/cortex v0.7.0-rc.0/go.mod h1:aiDfjSBZGE+q213mWACqjawNVN9CqFG4F+20TkeChA0= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= @@ -233,8 +226,6 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c h1:QwbffUs/+ptC4kTFPEN9Ej2latTq3bZJ5HO/OwPXYMs= 
github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c/go.mod h1:WQX+afhrekY9rGK+WT4xvKSlzmia9gDoLYu4GGYGASQ= -github.com/fluent/fluent-logger-golang v1.2.1 h1:CMA+mw2zMiOGEOarZtaqM3GBWT1IVLNncNi0nKELtmU= -github.com/fluent/fluent-logger-golang v1.2.1/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= @@ -282,7 +273,7 @@ github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.3/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= @@ -298,7 +289,7 @@ github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/ github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= @@ -319,6 +310,7 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 h1:X+zN6RZXsvnrSJaAIQhZezPfAfvsqihKKR8oiLHid34= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/status v1.0.3 h1:WkVBY59mw7qUNTr/bLwO7J2vesJ0rQ2C3tMXrTd3w5M= @@ -355,6 +347,8 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp 
v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= @@ -378,8 +372,6 @@ github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0= -github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= @@ -467,8 +459,6 @@ github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8 github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.8.3 h1:MWYcmct5EtKz0efYooPcL0yNkem+7kWxqXDi/UIh+8k= -github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= github.com/hashicorp/serf v0.8.5 h1:ZynDUIQiA8usmRgPdGPHFdPnb1wgGI9tK3mO9hcAJjc= github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -486,8 +476,6 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME= -github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -564,7 +552,7 @@ github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.22 h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc= github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/minio/minio-go/v6 v6.0.44/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= +github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= 
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= @@ -642,8 +630,6 @@ github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0Mw github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/philhofer/fwd v0.0.0-20160129035939-98c11a7a6ec8 h1:jkUFVqrKRttbdDqkTrvOmHxfqIsJK0Oe2WGi1ACAE+M= -github.com/philhofer/fwd v0.0.0-20160129035939-98c11a7a6ec8/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible h1:5isCJDRADbeSlWx6KVXAYwrcihyCGVXr7GNCdLEVDr8= github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -655,31 +641,34 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE= -github.com/prometheus/alertmanager v0.19.0/go.mod h1:Eyp94Yi/T+kdeb2qvq66E3RGuph5T/jm/RBVh4yz1xo= +github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.2.0/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= +github.com/prometheus/client_golang v1.5.0 h1:Ctq0iGpCmr3jeP77kbF2UxgvRwzWWz+4Bh9/vJTyg1A= +github.com/prometheus/client_golang v1.5.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model 
v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.8.0/go.mod h1:PC/OgXc+UN7B4ALwvn1yzVZmVwvhXp5JsbBv6wSv6i0= +github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -691,11 +680,12 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.6 h1:0qbH+Yqu/cj1ViVLvEWCP6qMQ4efWUj6bQqOEA0V0U4= github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= -github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f h1:7C9G4yUogM8QP85pmf11vlBPuV6u2mPbqvbjPVKcNis= -github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f/go.mod h1:rMTlmxGCvukf2KMu3fClMDKLLoJ5hl61MhcJ7xKakf0= -github.com/prometheus/prometheus v1.8.2-0.20200107122003-4708915ac6ef h1:pYYKXo/zGx25kyViw+Gdbxd0ItIg+vkVKpwgWUEyIc4= -github.com/prometheus/prometheus v1.8.2-0.20200107122003-4708915ac6ef/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= +github.com/prometheus/prometheus v1.8.2-0.20200110114423-1e64d757f711/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= +github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33 h1:HBYrMJj5iosUjUkAK9L5GO+5eEQXbcrzdjkqY9HV5W4= +github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33/go.mod h1:fkIPPkuZnkXyopYHmXPxf9rgiPkVgZCN8w9o8+UgBlY= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1 h1:+kGqA4dNN5hn7WwvKdzHl0rdN5AEkbNZd0VjRltAiZg= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod 
h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -758,11 +748,9 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/thanos-io/thanos v0.8.1-0.20200109203923-552ffa4c1a0d/go.mod h1:usT/TxtJQ7DzinTt+G9kinDQmRS5sxwu0unVKZ9vdcw= +github.com/thanos-io/thanos v0.11.0/go.mod h1:N/Yes7J68KqvmY+xM6J5CJqEvWIvKSR5sqGtmuD6wDc= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tinylib/msgp v0.0.0-20161221055906-38a6f61a768d h1:Ninez2SUm08xpmnw7kVxCeOc3DahF6IuMuRMCdM4wTQ= -github.com/tinylib/msgp v0.0.0-20161221055906-38a6f61a768d/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -782,10 +770,8 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1 h1:qi+YkNiB7T3Ikw1DoDIFhdAPbDU7fUPDsKrUoZdupnQ= -github.com/weaveworks/billing-client v0.0.0-20171006123215-be0d55e547b1/go.mod h1:7gGdEUJaCrSlWi/mjd68CZv0sfqektYPDcro9cE+M9k= -github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1 h1:nhoCrldzSm1le34sZfSNyTELYxIDaAmDw6PPVoEj5Mw= -github.com/weaveworks/common v0.0.0-20200201141823-27e183090ab1/go.mod h1:KLGX4H1D0lPVfHS/hqO9yrKvDzaT0bqYftIW43iLaOc= +github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a h1:4Sm4LnEnP1yQ2NeNgGqLTuN2xrTvcBOU+EsljpB8Ed0= +github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= @@ -805,7 +791,6 @@ go.etcd.io/etcd v0.0.0-20190709142735-eb7dd97135a5/go.mod h1:N0RPWo9FXJYZQI4BTkD go.etcd.io/etcd v0.0.0-20190815204525-8f85f0dc2607 h1:TA51XPJi/dOGnzp82lfN1wh8ijEz3BZEiKphiurSzLU= go.etcd.io/etcd v0.0.0-20190815204525-8f85f0dc2607/go.mod h1:tQYIqsNuGzkF9ncfEtoEX0qkoBhzw6ih5N1xcdGnvek= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.0.4/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= 
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -838,8 +823,6 @@ golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -904,7 +887,6 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= @@ -916,8 +898,8 @@ golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191218084908-4a24b4065292 h1:Y8q0zsdcgAd+JU8VUA8p8Qv2YhuY9zevDG2ORt5qBUI= -golang.org/x/sys v0.0.0-20191218084908-4a24b4065292/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -965,6 +947,8 @@ golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= 
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -1042,6 +1026,9 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1074,13 +1061,9 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= -k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6 h1:s9IxTKe9GwDH0S/WaX62nFYr0or32DsTWex9AileL7U= -k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6/go.mod h1:RZvgC8MSN6DjiMV6oIfEE9pDL9CYXokkfaCKZeHm3nc= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= -k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a h1:uy5HAgt4Ha5rEMbhZA+aM1j2cq5LmR6LQ71EYC2sVH4= -k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6 h1:p0Ai3qVtkbCG/Af26dBmU0E1W58NID3hSSh7cMyylpM= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 28c0e9752032d..47495336c60c5 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -12,6 +12,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring" cortex_util "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/limiter" + "github.com/cortexproject/cortex/pkg/util/services" + "github.com/pkg/errors" "github.com/go-kit/kit/log/level" "github.com/opentracing/opentracing-go" @@ -111,7 +113,10 @@ func New(cfg Config, clientCfg client.Config, ingestersRing ring.ReadRing, overr return nil, err } - distributorsRing.Start() + err = services.StartAndAwaitRunning(context.Background(), distributorsRing) + if err != nil { + return nil, err + } ingestionRateStrategy = newGlobalIngestionRateStrategy(overrides, distributorsRing) } else { @@ -128,13 +133,18 @@ func New(cfg Config, 
clientCfg client.Config, ingestersRing ring.ReadRing, overr ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), } + if err := services.StartAndAwaitRunning(context.Background(), d.pool); err != nil { + return nil, errors.Wrap(err, "starting client pool") + } + return &d, nil } func (d *Distributor) Stop() { if d.distributorsRing != nil { - d.distributorsRing.Shutdown() + _ = services.StopAndAwaitTerminated(context.Background(), d.distributorsRing) } + _ = services.StopAndAwaitTerminated(context.Background(), d.pool) } // TODO taken from Cortex, see if we can refactor out an usable interface. diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go index 13c31afbe6889..d1a99bdff2fe1 100644 --- a/pkg/ingester/flush_test.go +++ b/pkg/ingester/flush_test.go @@ -10,7 +10,6 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/grafana/loki/pkg/chunkenc" @@ -177,7 +176,7 @@ func newTestStore(t require.TestingT, cfg Config) (*testStore, *Ingester) { // nolint func defaultIngesterTestConfig(t *testing.T) Config { - kvClient, err := kv.NewClient(kv.Config{Store: "inmemory"}, codec.NewProtoCodec("foo", ring.ProtoDescFactory)) + kvClient, err := kv.NewClient(kv.Config{Store: "inmemory"}, ring.GetCodec()) require.NoError(t, err) cfg := Config{} diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index c1bdd893b1e6e..db96d44b0cc95 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -18,6 +18,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/services" "github.com/grafana/loki/pkg/chunkenc" "github.com/grafana/loki/pkg/ingester/client" @@ -147,7 +148,10 @@ func New(cfg Config, clientConfig client.Config, store ChunkStore, limits *valid return nil, err } - i.lifecycler.Start() + err = services.StartAndAwaitRunning(context.Background(), i.lifecycler) + if err != nil { + return nil, err + } // Now that the lifecycler has been created, we can create the limiter // which depends on it. @@ -181,7 +185,12 @@ func (i *Ingester) Shutdown() { close(i.quit) i.done.Wait() - i.lifecycler.Shutdown() + i.stopIncomingRequests() + + err := services.StopAndAwaitTerminated(context.Background(), i.lifecycler) + if err != nil { + level.Error(util.Logger).Log("msg", "lifecycler failed", "err", err) + } } // Stopping helps cleaning up resources before actual shutdown diff --git a/pkg/ingester/transfer.go b/pkg/ingester/transfer.go index 36f3e3dd01ff7..ebe7ffc7ff060 100644 --- a/pkg/ingester/transfer.go +++ b/pkg/ingester/transfer.go @@ -159,8 +159,8 @@ func (i *Ingester) checkFromIngesterIsInLeavingState(ctx context.Context, fromIn return nil } -// StopIncomingRequests implements ring.Lifecycler. 
-func (i *Ingester) StopIncomingRequests() {
+// stopIncomingRequests is called when the ingester is stopping.
+func (i *Ingester) stopIncomingRequests() {
 	i.shutdownMtx.Lock()
 	defer i.shutdownMtx.Unlock()
diff --git a/pkg/ingester/transfer_test.go b/pkg/ingester/transfer_test.go
index 20cbb0489a9f5..e06572f6d657b 100644
--- a/pkg/ingester/transfer_test.go
+++ b/pkg/ingester/transfer_test.go
@@ -8,17 +8,17 @@ import (
 	"testing"
 	"time"
 
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/health/grpc_health_v1"
-
 	"github.com/cortexproject/cortex/pkg/ring"
 	"github.com/cortexproject/cortex/pkg/ring/kv"
-	"github.com/cortexproject/cortex/pkg/ring/kv/codec"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/cortexproject/cortex/pkg/util/services"
+	"github.com/go-kit/kit/log/level"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/weaveworks/common/user"
-	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/health/grpc_health_v1"
 
 	"github.com/grafana/loki/pkg/ingester/client"
 	"github.com/grafana/loki/pkg/logproto"
@@ -120,7 +120,7 @@ type testIngesterFactory struct {
 }
 
 func newTestIngesterFactory(t *testing.T) *testIngesterFactory {
-	kvClient, err := kv.NewClient(kv.Config{Store: "inmemory"}, codec.NewProtoCodec("foo", ring.ProtoDescFactory))
+	kvClient, err := kv.NewClient(kv.Config{Store: "inmemory"}, ring.GetCodec())
 	require.NoError(t, err)
 
 	return &testIngesterFactory{
@@ -196,7 +196,11 @@ func (c *testIngesterClient) TransferChunks(context.Context, ...grpc.CallOption)
 	// unhealthy state, permanently stuck in the handler for claiming tokens.
 	go func() {
 		time.Sleep(time.Millisecond * 50)
-		c.i.lifecycler.Shutdown()
+		c.i.stopIncomingRequests() // used to be called from the lifecycler; now it must be called *before* stopping the lifecycler (the ingester does this on shutdown).
+		err := services.StopAndAwaitTerminated(context.Background(), c.i.lifecycler)
+		if err != nil {
+			level.Error(util.Logger).Log("msg", "lifecycler failed", "err", err)
+		}
 	}()
 
 	go func() {
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 8932877c4d37b..c678aeacbd345 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -7,6 +7,7 @@ import (
 	"github.com/cortexproject/cortex/pkg/chunk"
 	"github.com/cortexproject/cortex/pkg/querier/frontend"
 	"github.com/cortexproject/cortex/pkg/ring"
+	"github.com/cortexproject/cortex/pkg/ring/kv/memberlist"
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/runtimeconfig"
 
@@ -45,6 +46,7 @@ type Config struct {
 	Frontend      frontend.Config             `yaml:"frontend,omitempty"`
 	QueryRange    queryrange.Config           `yaml:"query_range,omitempty"`
 	RuntimeConfig runtimeconfig.ManagerConfig `yaml:"runtime_config,omitempty"`
+	MemberlistKV  memberlist.KVConfig         `yaml:"memberlist"`
 }
 
 // RegisterFlags registers flag.
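The changes to distributor.go, ingester.go, and transfer_test.go above all follow one migration: component-specific Start()/Shutdown() methods are replaced by the services model that ships with Cortex v0.7. A minimal sketch of that pattern, not part of this patch, assuming only a value that implements services.Service (the lifecycler, the distributors ring, and the client pools all do after this upgrade):

package sketch

import (
	"context"

	"github.com/cortexproject/cortex/pkg/util/services"
)

// runService walks a Cortex service through its lifecycle. Unlike the old
// void Shutdown() methods, both transitions return errors, which is why the
// hunks above can now propagate or log startup and shutdown failures.
func runService(svc services.Service) error {
	// Block until the service reaches Running, or fail with its startup error.
	if err := services.StartAndAwaitRunning(context.Background(), svc); err != nil {
		return err
	}

	// ... use the service ...

	// Block until the service reaches Terminated, surfacing shutdown errors.
	return services.StopAndAwaitTerminated(context.Background(), svc)
}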
@@ -87,6 +89,7 @@ type Loki struct { frontend *frontend.Frontend stopper queryrange.Stopper runtimeConfig *runtimeconfig.Manager + memberlistKV *memberlist.KVInit httpAuthMiddleware middleware.Interface } diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 1802b46852459..c950256f76424 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -1,6 +1,7 @@ package loki import ( + "context" "fmt" "net/http" "os" @@ -11,8 +12,11 @@ import ( "github.com/cortexproject/cortex/pkg/chunk/storage" "github.com/cortexproject/cortex/pkg/querier/frontend" "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/ring/kv/codec" + "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" + "github.com/cortexproject/cortex/pkg/util/services" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" @@ -46,6 +50,7 @@ const ( QueryFrontend Store TableManager + MemberlistKV All ) @@ -80,6 +85,8 @@ func (m moduleName) String() string { return "query-frontend" case TableManager: return "table-manager" + case MemberlistKV: + return "memberlist-kv" case All: return "all" default: @@ -134,7 +141,11 @@ func (t *Loki) initServer() (err error) { func (t *Loki) initRing() (err error) { t.cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) + t.cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV t.ring, err = ring.New(t.cfg.Ingester.LifecyclerConfig.RingConfig, "ingester", ring.IngesterRingKey) + if err == nil { + err = services.StartAndAwaitRunning(context.Background(), t.ring) + } if err != nil { return } @@ -143,6 +154,10 @@ func (t *Loki) initRing() (err error) { return } +func (t *Loki) stopRing() (err error) { + return services.StopAndAwaitTerminated(context.Background(), t.ring) +} + func (t *Loki) initRuntimeConfig() (err error) { if t.cfg.RuntimeConfig.LoadPath == "" { t.cfg.RuntimeConfig.LoadPath = t.cfg.LimitsConfig.PerTenantOverrideConfig @@ -154,12 +169,14 @@ func (t *Loki) initRuntimeConfig() (err error) { validation.SetDefaultLimitsForYAMLUnmarshalling(t.cfg.LimitsConfig) t.runtimeConfig, err = runtimeconfig.NewRuntimeConfigManager(t.cfg.RuntimeConfig, prometheus.DefaultRegisterer) + if err == nil { + err = services.StartAndAwaitRunning(context.Background(), t.runtimeConfig) + } return err } func (t *Loki) stopRuntimeConfig() (err error) { - t.runtimeConfig.Stop() - return nil + return services.StopAndAwaitTerminated(context.Background(), t.runtimeConfig) } func (t *Loki) initOverrides() (err error) { @@ -168,6 +185,8 @@ func (t *Loki) initOverrides() (err error) { } func (t *Loki) initDistributor() (err error) { + t.cfg.Distributor.DistributorRing.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) + t.cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV t.distributor, err = distributor.New(t.cfg.Distributor, t.cfg.IngesterClient, t.ring, t.overrides) if err != nil { return @@ -226,6 +245,7 @@ func (t *Loki) initQuerier() (err error) { func (t *Loki) initIngester() (err error) { t.cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) + t.cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV t.cfg.Ingester.LifecyclerConfig.ListenPort = &t.cfg.Server.GRPCListenPort 
t.ingester, err = ingester.New(t.cfg.Ingester, t.cfg.IngesterClient, t.store, t.overrides) if err != nil { @@ -281,18 +301,15 @@ func (t *Loki) initTableManager() error { bucketClient, err := storage.NewBucketClient(t.cfg.StorageConfig.Config) util.CheckFatal("initializing bucket client", err) - t.tableManager, err = chunk.NewTableManager(t.cfg.TableManager, t.cfg.SchemaConfig, maxChunkAgeForTableManager, tableClient, bucketClient) + t.tableManager, err = chunk.NewTableManager(t.cfg.TableManager, t.cfg.SchemaConfig, maxChunkAgeForTableManager, tableClient, bucketClient, prometheus.DefaultRegisterer) if err != nil { return err } - - t.tableManager.Start() - return nil + return services.StartAndAwaitRunning(context.Background(), t.tableManager) } func (t *Loki) stopTableManager() error { - t.tableManager.Stop() - return nil + return services.StopAndAwaitTerminated(context.Background(), t.tableManager) } func (t *Loki) initStore() (err error) { @@ -307,7 +324,7 @@ func (t *Loki) stopStore() error { func (t *Loki) initQueryFrontend() (err error) { level.Debug(util.Logger).Log("msg", "initializing query frontend", "config", fmt.Sprintf("%+v", t.cfg.Frontend)) - t.frontend, err = frontend.New(t.cfg.Frontend, util.Logger) + t.frontend, err = frontend.New(t.cfg.Frontend, util.Logger, prometheus.DefaultRegisterer) if err != nil { return } @@ -315,7 +332,7 @@ func (t *Loki) initQueryFrontend() (err error) { "config", fmt.Sprintf("%+v", t.cfg.QueryRange), "limits", fmt.Sprintf("%+v", t.cfg.LimitsConfig), ) - tripperware, stopper, err := queryrange.NewTripperware(t.cfg.QueryRange, util.Logger, t.overrides) + tripperware, stopper, err := queryrange.NewTripperware(t.cfg.QueryRange, util.Logger, t.overrides, prometheus.DefaultRegisterer) if err != nil { return err } @@ -347,6 +364,20 @@ func (t *Loki) stopQueryFrontend() error { return nil } +func (t *Loki) initMemberlistKV() error { + t.cfg.MemberlistKV.MetricsRegisterer = prometheus.DefaultRegisterer + t.cfg.MemberlistKV.Codecs = []codec.Codec{ + ring.GetCodec(), + } + t.memberlistKV = memberlist.NewKVInit(&t.cfg.MemberlistKV) + return nil +} + +func (t *Loki) stopMemberlistKV() error { + t.memberlistKV.Stop() + return nil +} + // listDeps recursively gets a list of dependencies for a passed moduleName func listDeps(m moduleName) []moduleName { deps := modules[m].deps @@ -422,9 +453,15 @@ var modules = map[moduleName]module{ stop: (*Loki).stopRuntimeConfig, }, + MemberlistKV: { + init: (*Loki).initMemberlistKV, + stop: (*Loki).stopMemberlistKV, + }, + Ring: { - deps: []moduleName{RuntimeConfig, Server}, + deps: []moduleName{RuntimeConfig, Server, MemberlistKV}, init: (*Loki).initRing, + stop: (*Loki).stopRing, }, Overrides: { @@ -445,7 +482,7 @@ var modules = map[moduleName]module{ }, Ingester: { - deps: []moduleName{Store, Server}, + deps: []moduleName{Store, Server, MemberlistKV}, init: (*Loki).initIngester, stop: (*Loki).stopIngester, stopping: (*Loki).stoppingIngester, diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 39e647fa84f38..8b9a04a656b24 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -6,7 +6,9 @@ import ( "net/http" "time" + "github.com/cortexproject/cortex/pkg/util/services" "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/user" @@ -84,6 +86,10 @@ func newQuerier(cfg Config, clientCfg client.Config, clientFactory cortex_client limits: limits, } querier.engine = 
logql.NewEngine(cfg.Engine, &querier) + err := services.StartAndAwaitRunning(context.Background(), querier.pool) + if err != nil { + return nil, errors.Wrap(err, "querier pool") + } return &querier, nil } diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go index 21ed94a1538e0..5c68d5db4b957 100644 --- a/pkg/querier/querier_mock_test.go +++ b/pkg/querier/querier_mock_test.go @@ -235,6 +235,14 @@ func (s *storeMock) LabelNamesForMetricName(ctx context.Context, userID string, return args.Get(0).([]string), args.Error(1) } +func (s *storeMock) DeleteChunk(ctx context.Context, from, through model.Time, userID, chunkID string, metric labels.Labels, partiallyDeletedInterval *model.Interval) error { + panic("don't call me please") +} + +func (s *storeMock) DeleteSeriesIDs(ctx context.Context, from, through model.Time, userID string, metric labels.Labels) error { + panic("don't call me please") +} + func (s *storeMock) Stop() { } diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index 42ee689002599..89b0af7e9a875 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -46,6 +46,12 @@ func (r *LokiRequest) WithStartEnd(s int64, e int64) queryrange.Request { return &new } +func (r *LokiRequest) WithQuery(query string) queryrange.Request { + new := *r + new.Query = query + return &new +} + func (codec) DecodeRequest(_ context.Context, r *http.Request) (queryrange.Request, error) { if err := r.ParseForm(); err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 95689f332295b..8f8904a36185b 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -10,6 +10,7 @@ import ( "github.com/cortexproject/cortex/pkg/querier/queryrange" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/pkg/labels" "github.com/weaveworks/common/httpgrpc" @@ -32,21 +33,23 @@ type Stopper interface { } // NewTripperware returns a Tripperware configured with middlewares to align, split and cache requests. -func NewTripperware(cfg Config, log log.Logger, limits Limits) (frontend.Tripperware, Stopper, error) { +func NewTripperware(cfg Config, log log.Logger, limits Limits, registerer prometheus.Registerer) (frontend.Tripperware, Stopper, error) { // Ensure that QuerySplitDuration uses configuration defaults. // This avoids divide by zero errors when determining cache keys where user specific overrides don't exist. 
limits = WithDefaultLimits(limits, cfg.Config) - metricsTripperware, cache, err := NewMetricTripperware(cfg, log, limits, lokiCodec, prometheusResponseExtractor) + instrumentMetrics := queryrange.NewInstrumentMiddlewareMetrics(registerer) + retryMetrics := queryrange.NewRetryMiddlewareMetrics(registerer) + metricsTripperware, cache, err := NewMetricTripperware(cfg, log, limits, lokiCodec, prometheusResponseExtractor, instrumentMetrics, retryMetrics) if err != nil { return nil, nil, err } - logFilterTripperware, err := NewLogFilterTripperware(cfg, log, limits, lokiCodec) + logFilterTripperware, err := NewLogFilterTripperware(cfg, log, limits, lokiCodec, instrumentMetrics, retryMetrics) if err != nil { return nil, nil, err } - return frontend.Tripperware(func(next http.RoundTripper) http.RoundTripper { + return func(next http.RoundTripper) http.RoundTripper { metricRT := metricsTripperware(next) logFilterRT := logFilterTripperware(next) return frontend.RoundTripFunc(func(req *http.Request) (*http.Response, error) { @@ -81,7 +84,7 @@ func NewTripperware(cfg Config, log log.Logger, limits Limits) (frontend.Tripper } return next.RoundTrip(req) }) - }), cache, nil + }, cache, nil } // NewLogFilterTripperware creates a new frontend tripperware responsible for handling log requests with regex. @@ -90,20 +93,23 @@ func NewLogFilterTripperware( log log.Logger, limits Limits, codec queryrange.Codec, + instrumentMetrics *queryrange.InstrumentMiddlewareMetrics, + retryMiddlewareMetrics *queryrange.RetryMiddlewareMetrics, ) (frontend.Tripperware, error) { queryRangeMiddleware := []queryrange.Middleware{StatsCollectorMiddleware(), queryrange.LimitsMiddleware(limits)} if cfg.SplitQueriesByInterval != 0 { - queryRangeMiddleware = append(queryRangeMiddleware, queryrange.InstrumentMiddleware("split_by_interval"), SplitByIntervalMiddleware(limits, codec)) + queryRangeMiddleware = append(queryRangeMiddleware, queryrange.InstrumentMiddleware("split_by_interval", instrumentMetrics), SplitByIntervalMiddleware(limits, codec)) } if cfg.MaxRetries > 0 { - queryRangeMiddleware = append(queryRangeMiddleware, queryrange.InstrumentMiddleware("retry"), queryrange.NewRetryMiddleware(log, cfg.MaxRetries)) + queryRangeMiddleware = append(queryRangeMiddleware, queryrange.InstrumentMiddleware("retry", instrumentMetrics), queryrange.NewRetryMiddleware(log, cfg.MaxRetries, retryMiddlewareMetrics)) } - return frontend.Tripperware(func(next http.RoundTripper) http.RoundTripper { + + return func(next http.RoundTripper) http.RoundTripper { if len(queryRangeMiddleware) > 0 { return queryrange.NewRoundTripper(next, codec, queryRangeMiddleware...) 
} return next - }), nil + }, nil } // NewMetricTripperware creates a new frontend tripperware responsible for handling metric queries @@ -113,13 +119,14 @@ func NewMetricTripperware( limits Limits, codec queryrange.Codec, extractor queryrange.Extractor, + instrumentMetrics *queryrange.InstrumentMiddlewareMetrics, + retryMiddlewareMetrics *queryrange.RetryMiddlewareMetrics, ) (frontend.Tripperware, Stopper, error) { - queryRangeMiddleware := []queryrange.Middleware{StatsCollectorMiddleware(), queryrange.LimitsMiddleware(limits)} if cfg.AlignQueriesWithStep { queryRangeMiddleware = append( queryRangeMiddleware, - queryrange.InstrumentMiddleware("step_align"), + queryrange.InstrumentMiddleware("step_align", instrumentMetrics), queryrange.StepAlignMiddleware, ) } @@ -131,7 +138,7 @@ func NewMetricTripperware( queryRangeMiddleware = append( queryRangeMiddleware, - queryrange.InstrumentMiddleware("split_by_interval"), + queryrange.InstrumentMiddleware("split_by_interval", instrumentMetrics), SplitByIntervalMiddleware(limits, codec), ) @@ -151,7 +158,7 @@ func NewMetricTripperware( c = cache queryRangeMiddleware = append( queryRangeMiddleware, - queryrange.InstrumentMiddleware("results_cache"), + queryrange.InstrumentMiddleware("results_cache", instrumentMetrics), queryCacheMiddleware, ) } @@ -159,12 +166,12 @@ func NewMetricTripperware( if cfg.MaxRetries > 0 { queryRangeMiddleware = append( queryRangeMiddleware, - queryrange.InstrumentMiddleware("retry"), - queryrange.NewRetryMiddleware(log, cfg.MaxRetries), + queryrange.InstrumentMiddleware("retry", instrumentMetrics), + queryrange.NewRetryMiddleware(log, cfg.MaxRetries, retryMiddlewareMetrics), ) } - return frontend.Tripperware(func(next http.RoundTripper) http.RoundTripper { + return func(next http.RoundTripper) http.RoundTripper { // Finally, if the user selected any query range middleware, stitch it in. if len(queryRangeMiddleware) > 0 { rt := queryrange.NewRoundTripper(next, codec, queryRangeMiddleware...) @@ -176,5 +183,5 @@ func NewMetricTripperware( }) } return next - }), c, nil + }, c, nil } diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index 7bd69f262ad38..15a957bbddd08 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -75,7 +75,7 @@ var ( // those tests are mostly for testing the glue between all component and make sure they activate correctly. 
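Both tripperware constructors above now take a prometheus.Registerer: NewTripperware builds the instrument and retry middleware metrics once and shares them between the metric and log-filter tripperwares, rather than having each InstrumentMiddleware call register its own collectors. The tests below simply pass nil. A hedged sketch of a caller that keeps these metrics in a private registry; the helper name is hypothetical, while the types and signatures match this file and its existing imports (frontend, go-kit log, prometheus):

// newIsolatedTripperware is a hypothetical wrapper: building against a fresh
// registry means repeated construction (as tests do) cannot cause duplicate
// registration panics on prometheus.DefaultRegisterer.
func newIsolatedTripperware(cfg Config, log log.Logger, limits Limits) (frontend.Tripperware, Stopper, *prometheus.Registry, error) {
	reg := prometheus.NewRegistry()
	tpw, stopper, err := NewTripperware(cfg, log, limits, reg)
	return tpw, stopper, reg, err
}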
 func TestMetricsTripperware(t *testing.T) {
-	tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{})
+	tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, nil)
 	if stopper != nil {
 		defer stopper.Stop()
 	}
@@ -139,7 +139,7 @@ func TestMetricsTripperware(t *testing.T) {
 
 func TestLogFilterTripperware(t *testing.T) {
-	tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{})
+	tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, nil)
 	if stopper != nil {
 		defer stopper.Stop()
 	}
@@ -186,7 +186,7 @@ func TestLogFilterTripperware(t *testing.T) {
 }
 
 func TestLogNoRegex(t *testing.T) {
-	tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{})
+	tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, nil)
 	if stopper != nil {
 		defer stopper.Stop()
 	}
@@ -220,7 +220,7 @@ func TestLogNoRegex(t *testing.T) {
 }
 
 func TestUnhandledPath(t *testing.T) {
-	tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{})
+	tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, nil)
 	if stopper != nil {
 		defer stopper.Stop()
 	}
@@ -244,7 +244,7 @@ func TestUnhandledPath(t *testing.T) {
 }
 
 func TestRegexpParamsSupport(t *testing.T) {
-	tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{})
+	tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, nil)
 	if stopper != nil {
 		defer stopper.Stop()
 	}
diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go
index 0619148e1a1e1..70849ac9d064a 100644
--- a/pkg/storage/util_test.go
+++ b/pkg/storage/util_test.go
@@ -104,14 +104,20 @@ func newQuery(query string, start, end time.Time, direction logproto.Direction)
 type mockChunkStore struct {
 	chunks []chunk.Chunk
+	client *mockChunkStoreClient
 }
 
+// mockChunkStore cannot implement both chunk.Store and chunk.Client,
+// since their DeleteChunk method signatures conflict.
+var _ chunk.Store = &mockChunkStore{} +var _ chunk.Client = &mockChunkStoreClient{} + func newMockChunkStore(streams []*logproto.Stream) *mockChunkStore { chunks := make([]chunk.Chunk, 0, len(streams)) for _, s := range streams { chunks = append(chunks, newChunk(*s)) } - return &mockChunkStore{chunks: chunks} + return &mockChunkStore{chunks: chunks, client: &mockChunkStoreClient{chunks: chunks}} } func (m *mockChunkStore) Put(ctx context.Context, chunks []chunk.Chunk) error { return nil } func (m *mockChunkStore) PutOne(ctx context.Context, from, through model.Time, chunk chunk.Chunk) error { @@ -123,28 +129,19 @@ func (m *mockChunkStore) LabelValuesForMetricName(ctx context.Context, userID st func (m *mockChunkStore) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) { return nil, nil } + +func (m *mockChunkStore) DeleteChunk(ctx context.Context, from, through model.Time, userID, chunkID string, metric labels.Labels, partiallyDeletedInterval *model.Interval) error { + return nil +} + +func (m *mockChunkStore) DeleteSeriesIDs(ctx context.Context, from, through model.Time, userID string, metric labels.Labels) error { + return nil +} func (m *mockChunkStore) Stop() {} func (m *mockChunkStore) Get(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]chunk.Chunk, error) { return nil, nil } -// PutChunks implements ObjectClient from Fetcher -func (m *mockChunkStore) PutChunks(ctx context.Context, chunks []chunk.Chunk) error { return nil } - -// GetChunks implements ObjectClient from Fetcher -func (m *mockChunkStore) GetChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error) { - var res []chunk.Chunk - for _, c := range chunks { - for _, sc := range m.chunks { - // only returns chunks requested using the external key - if c.ExternalKey() == sc.ExternalKey() { - res = append(res, sc) - } - } - } - return res, nil -} - func (m *mockChunkStore) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*chunk.Fetcher, error) { refs := make([]chunk.Chunk, 0, len(m.chunks)) // transform real chunks into ref chunks. 
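The mock split above exists because, after this Cortex upgrade, chunk.Store and chunk.Client both declare DeleteChunk with incompatible signatures, so a single type can no longer satisfy both interfaces; the var _ assertions turn that constraint into a compile-time check, with the client half defined in the next hunk. The same guard pattern in isolation, using toy names rather than anything from this codebase:

package sketch

// Store stands in for an interface such as chunk.Store.
type Store interface {
	Get(id string) ([]byte, error)
}

// memStore is a toy in-memory implementation.
type memStore struct {
	data map[string][]byte
}

func (s *memStore) Get(id string) ([]byte, error) {
	return s.data[id], nil
}

// Compile-time assertion: if *memStore ever drifts from Store, for example
// after the interface gains a method the type lacks, the build fails here
// instead of at a distant call site.
var _ Store = (*memStore)(nil)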
@@ -155,13 +152,42 @@ func (m *mockChunkStore) GetChunkRefs(ctx context.Context, userID string, from, } refs = append(refs, r) } - f, err := chunk.NewChunkFetcher(cache.Config{}, false, m) + f, err := chunk.NewChunkFetcher(cache.Config{}, false, m.client) if err != nil { panic(err) } return [][]chunk.Chunk{refs}, []*chunk.Fetcher{f}, nil } +type mockChunkStoreClient struct { + chunks []chunk.Chunk +} + +func (m mockChunkStoreClient) Stop() { + panic("implement me") +} + +func (m mockChunkStoreClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) error { + return nil +} + +func (m mockChunkStoreClient) GetChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error) { + var res []chunk.Chunk + for _, c := range chunks { + for _, sc := range m.chunks { + // only returns chunks requested using the external key + if c.ExternalKey() == sc.ExternalKey() { + res = append(res, sc) + } + } + } + return res, nil +} + +func (m mockChunkStoreClient) DeleteChunk(ctx context.Context, chunkID string) error { + return nil +} + var streamsFixture = []*logproto.Stream{ { Labels: "{foo=\"bar\"}", diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go index 05ad48570e551..88d71bd84e328 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go @@ -245,7 +245,7 @@ func (a dynamoDBStorageClient) BatchWrite(ctx context.Context, input chunk.Write if awsErr, ok := err.(awserr.Error); ok && ((awsErr.Code() == dynamodb.ErrCodeProvisionedThroughputExceededException) || request.Retryable()) { logWriteRetry(ctx, requests) unprocessed.TakeReqs(requests, -1) - a.writeThrottle.WaitN(ctx, len(requests)) + _ = a.writeThrottle.WaitN(ctx, len(requests)) backoff.Wait() continue } else if ok && awsErr.Code() == validationException { @@ -269,7 +269,7 @@ func (a dynamoDBStorageClient) BatchWrite(ctx context.Context, input chunk.Write unprocessedItems := dynamoDBWriteBatch(resp.UnprocessedItems) if len(unprocessedItems) > 0 { logWriteRetry(ctx, unprocessedItems) - a.writeThrottle.WaitN(ctx, unprocessedItems.Len()) + _ = a.writeThrottle.WaitN(ctx, unprocessedItems.Len()) unprocessed.TakeReqs(unprocessedItems, -1) } @@ -598,6 +598,11 @@ func (a dynamoDBStorageClient) getDynamoDBChunks(ctx context.Context, chunks []c return result, nil } +func (a dynamoDBStorageClient) DeleteChunk(ctx context.Context, chunkID string) error { + // ToDo: implement this to support deleting chunks from DynamoDB + return chunk.ErrMethodNotImplemented +} + func processChunkResponse(response *dynamodb.BatchGetItemOutput, chunksByKey map[string]chunk.Chunk) ([]chunk.Chunk, error) { result := []chunk.Chunk{} decodeContext := chunk.NewDecodeContext() @@ -749,6 +754,11 @@ func (b dynamoDBWriteBatch) Add(tableName, hashValue string, rangeValue []byte, }) } +func (b dynamoDBWriteBatch) Delete(tableName, hashValue string, rangeValue []byte) { + // ToDo: implement this to support deleting index entries from DynamoDB + panic("DynamoDB does not support Deleting index entries yet") +} + // Fill 'b' with WriteRequests from 'from' until 'b' has at most max requests. Remove those requests from 'from'. 
func (b dynamoDBWriteBatch) TakeReqs(from dynamoDBWriteBatch, max int) { outLen, inLen := b.Len(), from.Len() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go index 8a41ae4f70590..84ae8c3c22990 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go @@ -80,7 +80,7 @@ func (d dynamoTableClient) backoffAndRetry(ctx context.Context, fn func(context. func (d callManager) backoffAndRetry(ctx context.Context, fn func(context.Context) error) error { if d.limiter != nil { // Tests will have a nil limiter. - d.limiter.Wait(ctx) + _ = d.limiter.Wait(ctx) } backoff := util.NewBackoff(ctx, d.backoffConfig) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go index 5b33748a0a5d1..84fdad28eccd0 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go @@ -56,6 +56,7 @@ func (a dynamoDBStorageClient) setErrorParameters(provisionedErr, errAfter int) } } +//nolint:unused //Leaving this around in the case we need to create a table via mock this is useful. func (m *mockDynamoDBClient) createTable(name string) { m.mtx.Lock() defer m.mtx.Unlock() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go index 6d12f2b0d3f09..339945cf35d47 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go @@ -90,6 +90,11 @@ func NewS3ObjectClient(cfg S3Config) (*S3ObjectClient, error) { // Stop fulfills the chunk.ObjectClient interface func (a *S3ObjectClient) Stop() {} +func (a *S3ObjectClient) DeleteObject(ctx context.Context, chunkID string) error { + // ToDo: implement this to support deleting chunks from S3 + return chunk.ErrMethodNotImplemented +} + // bucketFromKey maps a key to a bucket name func (a *S3ObjectClient) bucketFromKey(key string) string { if len(a.bucketNames) == 0 { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go index 428474cf04c4e..df4a337351656 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go @@ -13,6 +13,7 @@ import ( "github.com/Azure/azure-storage-blob-go/azblob" "github.com/cortexproject/cortex/pkg/chunk" + "github.com/cortexproject/cortex/pkg/util/flagext" ) const blobURLFmt = "https://%s.blob.core.windows.net/%s/%s" @@ -20,23 +21,23 @@ const containerURLFmt = "https://%s.blob.core.windows.net/%s" // BlobStorageConfig defines the configurable flags that can be defined when using azure blob storage. 
type BlobStorageConfig struct { - ContainerName string `yaml:"container_name"` - AccountName string `yaml:"account_name"` - AccountKey string `yaml:"account_key"` - DownloadBufferSize int `yaml:"download_buffer_size"` - UploadBufferSize int `yaml:"upload_buffer_size"` - UploadBufferCount int `yaml:"upload_buffer_count"` - RequestTimeout time.Duration `yaml:"request_timeout"` - MaxRetries int `yaml:"max_retries"` - MinRetryDelay time.Duration `yaml:"min_retry_delay"` - MaxRetryDelay time.Duration `yaml:"max_retry_delay"` + ContainerName string `yaml:"container_name"` + AccountName string `yaml:"account_name"` + AccountKey flagext.Secret `yaml:"account_key"` + DownloadBufferSize int `yaml:"download_buffer_size"` + UploadBufferSize int `yaml:"upload_buffer_size"` + UploadBufferCount int `yaml:"upload_buffer_count"` + RequestTimeout time.Duration `yaml:"request_timeout"` + MaxRetries int `yaml:"max_retries"` + MinRetryDelay time.Duration `yaml:"min_retry_delay"` + MaxRetryDelay time.Duration `yaml:"max_retry_delay"` } // RegisterFlags adds the flags required to config this to the given FlagSet func (c *BlobStorageConfig) RegisterFlags(f *flag.FlagSet) { f.StringVar(&c.ContainerName, "azure.container-name", "cortex", "Name of the blob container used to store chunks. Defaults to `cortex`. This container must be created before running cortex.") f.StringVar(&c.AccountName, "azure.account-name", "", "The Microsoft Azure account name to be used") - f.StringVar(&c.AccountKey, "azure.account-key", "", "The Microsoft Azure account key to use.") + f.Var(&c.AccountKey, "azure.account-key", "The Microsoft Azure account key to use.") f.DurationVar(&c.RequestTimeout, "azure.request-timeout", 30*time.Second, "Timeout for requests made against azure blob storage. Defaults to 30 seconds.") f.IntVar(&c.DownloadBufferSize, "azure.download-buffer-size", 512000, "Preallocated buffer size for downloads (default is 512KB)") f.IntVar(&c.UploadBufferSize, "azure.upload-buffer-size", 256000, "Preallocated buffer size for uploads (default is 256KB)") @@ -139,7 +140,7 @@ func (b *BlobStorage) buildContainerURL() (azblob.ContainerURL, error) { } func (b *BlobStorage) newPipeline() (pipeline.Pipeline, error) { - credential, err := azblob.NewSharedKeyCredential(b.cfg.AccountName, b.cfg.AccountKey) + credential, err := azblob.NewSharedKeyCredential(b.cfg.AccountName, b.cfg.AccountKey.Value) if err != nil { return nil, err } @@ -182,3 +183,8 @@ func (b *BlobStorage) List(ctx context.Context, prefix string) ([]chunk.StorageO return storageObjects, nil } + +func (b *BlobStorage) DeleteObject(ctx context.Context, chunkID string) error { + // ToDo: implement this to support deleting chunks from Azure BlobStorage + return chunk.ErrMethodNotImplemented +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go index 1cbdfdde9388b..861d7e8160430 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go @@ -33,7 +33,7 @@ type BackgroundConfig struct { // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet func (cfg *BackgroundConfig) RegisterFlagsWithPrefix(prefix string, description string, f *flag.FlagSet) { f.IntVar(&cfg.WriteBackGoroutines, prefix+"memcache.write-back-goroutines", 10, description+"How many goroutines to use to write back to memcache.") - f.IntVar(&cfg.WriteBackBuffer,
prefix+"memcache.write-back-buffer", 10000, description+"How many chunks to buffer for background write back.") + f.IntVar(&cfg.WriteBackBuffer, prefix+"memcache.write-back-buffer", 10000, description+"How many key batches to buffer for background write-back.") } type backgroundCache struct { @@ -80,21 +80,33 @@ func (c *backgroundCache) Stop() { c.Cache.Stop() } +const keysPerBatch = 100 + // Store writes keys for the cache in the background. func (c *backgroundCache) Store(ctx context.Context, keys []string, bufs [][]byte) { - bgWrite := backgroundWrite{ - keys: keys, - bufs: bufs, - } - select { - case c.bgWrites <- bgWrite: - c.queueLength.Add(float64(len(keys))) - default: - c.droppedWriteBack.Add(float64(len(keys))) - sp := opentracing.SpanFromContext(ctx) - if sp != nil { - sp.LogFields(otlog.Int("dropped", len(keys))) + for len(keys) > 0 { + num := keysPerBatch + if num > len(keys) { + num = len(keys) + } + + bgWrite := backgroundWrite{ + keys: keys[:num], + bufs: bufs[:num], + } + select { + case c.bgWrites <- bgWrite: + c.queueLength.Add(float64(len(keys))) + default: + c.droppedWriteBack.Add(float64(len(keys))) + sp := opentracing.SpanFromContext(ctx) + if sp != nil { + sp.LogFields(otlog.Int("dropped", len(keys))) + } + return // queue is full; give up } + keys = keys[num:] + bufs = bufs[num:] } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go index 164d6c645b3ef..0b8a6b30407cc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go @@ -228,7 +228,7 @@ func (c *FifoCache) Get(ctx context.Context, key string) (interface{}, bool) { index, ok := c.index[key] if ok { updated := c.entries[index].updated - if c.validity == 0 || time.Now().Sub(updated) < c.validity { + if c.validity == 0 || time.Since(updated) < c.validity { return c.entries[index].value, true } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go index f425cf21f94c1..c5c43b21cec18 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go @@ -75,7 +75,7 @@ func (i *instrumentedCache) Store(ctx context.Context, keys []string, bufs [][]b } method := i.name + ".store" - instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error { + _ = instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error { sp := ot.SpanFromContext(ctx) sp.LogFields(otlog.Int("keys", len(keys))) i.Cache.Store(ctx, keys, bufs) @@ -91,7 +91,7 @@ func (i *instrumentedCache) Fetch(ctx context.Context, keys []string) ([]string, method = i.name + ".fetch" ) - instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error { + _ = instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error { sp := ot.SpanFromContext(ctx) sp.LogFields(otlog.Int("keys requested", len(keys))) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go index c8a4868f76eb3..b56a9206c1bdc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go +++ 
b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go @@ -36,7 +36,7 @@ type observableVecCollector struct { func (observableVecCollector) Register() {} func (observableVecCollector) Before(method string, start time.Time) {} func (o observableVecCollector) After(method, statusCode string, start time.Time) { - o.v.WithLabelValues(method, statusCode).Observe(time.Now().Sub(start).Seconds()) + o.v.WithLabelValues(method, statusCode).Observe(time.Since(start).Seconds()) } // MemcachedConfig is config to make a Memcached @@ -135,7 +135,7 @@ func memcacheStatusCode(err error) string { // Fetch gets keys from the cache. The keys that are found must be in the order of the keys requested. func (c *Memcached) Fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) { - instr.CollectedRequest(ctx, "Memcache.Get", c.requestDuration, memcacheStatusCode, func(ctx context.Context) error { + _ = instr.CollectedRequest(ctx, "Memcache.Get", c.requestDuration, memcacheStatusCode, func(ctx context.Context) error { if c.cfg.BatchSize == 0 { found, bufs, missed = c.fetch(ctx, keys) return nil @@ -149,7 +149,7 @@ func (c *Memcached) Fetch(ctx context.Context, keys []string) (found []string, b func (c *Memcached) fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) { var items map[string]*memcache.Item - instr.CollectedRequest(ctx, "Memcache.GetMulti", c.requestDuration, memcacheStatusCode, func(_ context.Context) error { + err := instr.CollectedRequest(ctx, "Memcache.GetMulti", c.requestDuration, memcacheStatusCode, func(_ context.Context) error { sp := opentracing.SpanFromContext(ctx) sp.LogFields(otlog.Int("keys requested", len(keys))) @@ -166,6 +166,10 @@ func (c *Memcached) fetch(ctx context.Context, keys []string) (found []string, b return err }) + if err != nil { + return found, bufs, keys + } + for _, key := range keys { item, ok := items[key] if ok { @@ -248,7 +252,7 @@ func (c *Memcached) Stop() { // HashKey hashes key into something you can store in memcached. func HashKey(key string) string { hasher := fnv.New64a() - hasher.Write([]byte(key)) // This'll never error. + _, _ = hasher.Write([]byte(key)) // This'll never error. // Hex because memcache errors for the bytes produced by the hash. return hex.EncodeToString(hasher.Sum(nil)) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go index ff21399b0d3d4..7ab48d2c67d76 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go @@ -9,6 +9,7 @@ import ( "github.com/gomodule/redigo/redis" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" ) // RedisCache type caches chunks in redis @@ -21,13 +22,13 @@ type RedisCache struct { // RedisConfig defines how a RedisCache should be constructed. 
type RedisConfig struct { - Endpoint string `yaml:"endpoint,omitempty"` - Timeout time.Duration `yaml:"timeout,omitempty"` - Expiration time.Duration `yaml:"expiration,omitempty"` - MaxIdleConns int `yaml:"max_idle_conns,omitempty"` - MaxActiveConns int `yaml:"max_active_conns,omitempty"` - Password string `yaml:"password"` - EnableTLS bool `yaml:"enable_tls"` + Endpoint string `yaml:"endpoint,omitempty"` + Timeout time.Duration `yaml:"timeout,omitempty"` + Expiration time.Duration `yaml:"expiration,omitempty"` + MaxIdleConns int `yaml:"max_idle_conns,omitempty"` + MaxActiveConns int `yaml:"max_active_conns,omitempty"` + Password flagext.Secret `yaml:"password"` + EnableTLS bool `yaml:"enable_tls"` } // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet @@ -37,7 +38,7 @@ func (cfg *RedisConfig) RegisterFlagsWithPrefix(prefix, description string, f *f f.DurationVar(&cfg.Expiration, prefix+"redis.expiration", 0, description+"How long keys stay in the redis.") f.IntVar(&cfg.MaxIdleConns, prefix+"redis.max-idle-conns", 80, description+"Maximum number of idle connections in pool.") f.IntVar(&cfg.MaxActiveConns, prefix+"redis.max-active-conns", 0, description+"Maximum number of active connections in pool.") - f.StringVar(&cfg.Password, prefix+"redis.password", "", description+"Password to use when connecting to redis.") + f.Var(&cfg.Password, prefix+"redis.password", description+"Password to use when connecting to redis.") f.BoolVar(&cfg.EnableTLS, prefix+"redis.enable-tls", false, description+"Enables connecting to redis with TLS.") } @@ -53,8 +54,8 @@ func NewRedisCache(cfg RedisConfig, name string, pool *redis.Pool) *RedisCache { if cfg.EnableTLS { options = append(options, redis.DialUseTLS(true)) } - if cfg.Password != "" { - options = append(options, redis.DialPassword(cfg.Password)) + if cfg.Password.Value != "" { + options = append(options, redis.DialPassword(cfg.Password.Value)) } c, err := redis.Dial("tcp", cfg.Endpoint, options...) 
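
The same swap recurs for each backend credential in this diff (Azure's account key above, the Redis password here, Cassandra's password next): the config field becomes a flagext.Secret registered via f.Var instead of f.StringVar, and call sites read cfg.X.Value. A minimal stand-in for the idea, not the real flagext.Secret (the Cortex type additionally controls YAML (un)marshalling):

	package main

	import (
		"flag"
		"fmt"
	)

	// secret is an illustrative flag.Value in the spirit of flagext.Secret;
	// only the Value field is relied on by the code above.
	type secret struct {
		Value string
	}

	// Set and String satisfy flag.Value, so the type can be registered
	// with f.Var the way the configs in this diff do.
	func (s *secret) Set(v string) error { s.Value = v; return nil }
	func (s *secret) String() string     { return s.Value }

	func main() {
		var password secret
		fs := flag.NewFlagSet("example", flag.ContinueOnError)
		fs.Var(&password, "redis.password", "Password to use when connecting to redis.")
		_ = fs.Parse([]string{"-redis.password", "hunter2"})
		fmt.Println(password.Value) // "hunter2"
	}

Because f.Var hands parsing to the type itself, the secret gets a single place to decide how it is parsed and rendered, which plain f.StringVar cannot offer.
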
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go index c653dc724095b..f3aac2f86fd8e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go @@ -30,7 +30,7 @@ type Config struct { CAPath string `yaml:"CA_path,omitempty"` Auth bool `yaml:"auth,omitempty"` Username string `yaml:"username,omitempty"` - Password string `yaml:"password,omitempty"` + Password flagext.Secret `yaml:"password,omitempty"` PasswordFile string `yaml:"password_file,omitempty"` CustomAuthenticators flagext.StringSlice `yaml:"custom_authenticators"` Timeout time.Duration `yaml:"timeout,omitempty"` @@ -53,7 +53,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.CAPath, "cassandra.ca-path", "", "Path to certificate file to verify the peer.") f.BoolVar(&cfg.Auth, "cassandra.auth", false, "Enable password authentication when connecting to cassandra.") f.StringVar(&cfg.Username, "cassandra.username", "", "Username to use when connecting to cassandra.") - f.StringVar(&cfg.Password, "cassandra.password", "", "Password to use when connecting to cassandra.") + f.Var(&cfg.Password, "cassandra.password", "Password to use when connecting to cassandra.") f.StringVar(&cfg.PasswordFile, "cassandra.password-file", "", "File containing password to use when connecting to cassandra.") f.Var(&cfg.CustomAuthenticators, "cassandra.custom-authenticator", "If set, when authenticating with cassandra a custom authenticator will be expected during the handshake. This flag can be set multiple times.") f.DurationVar(&cfg.Timeout, "cassandra.timeout", 2*time.Second, "Timeout when connecting to cassandra.") @@ -64,7 +64,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { } func (cfg *Config) Validate() error { - if cfg.Password != "" && cfg.PasswordFile != "" { + if cfg.Password.Value != "" && cfg.PasswordFile != "" { return errors.Errorf("The password and password_file config options are mutually exclusive.") } return nil @@ -123,7 +123,7 @@ func (cfg *Config) setClusterConfig(cluster *gocql.ClusterConfig) error { } } if cfg.Auth { - password := cfg.Password + password := cfg.Password.Value if cfg.PasswordFile != "" { passwordBytes, err := ioutil.ReadFile(cfg.PasswordFile) if err != nil { @@ -223,6 +223,11 @@ func (b *writeBatch) Add(tableName, hashValue string, rangeValue []byte, value [ }) } +func (b *writeBatch) Delete(tableName, hashValue string, rangeValue []byte) { + // ToDo: implement this to support deleting index entries from Cassandra + panic("Cassandra does not support Deleting index entries yet") +} + // BatchWrite implement chunk.IndexClient. func (s *StorageClient) BatchWrite(ctx context.Context, batch chunk.WriteBatch) error { b := batch.(*writeBatch) @@ -289,7 +294,6 @@ func (s *StorageClient) query(ctx context.Context, query chunk.IndexQuery, callb // readBatch represents a batch of rows read from Cassandra. 
type readBatch struct { - consumed bool rangeValue []byte value []byte } @@ -364,3 +368,8 @@ func (s *StorageClient) getChunk(ctx context.Context, decodeContext *chunk.Decod err = input.Decode(decodeContext, buf) return input, err } + +func (s *StorageClient) DeleteChunk(ctx context.Context, chunkID string) error { + // ToDo: implement this to support deleting chunks from Cassandra + return chunk.ErrMethodNotImplemented +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go index 417b60a6ada25..d52acf4bc4dee 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go @@ -22,10 +22,13 @@ import ( // Errors that decode can return const ( - ErrInvalidChecksum = errs.Error("invalid chunk checksum") - ErrWrongMetadata = errs.Error("wrong chunk metadata") - ErrMetadataLength = errs.Error("chunk metadata wrong length") - ErrDataLength = errs.Error("chunk data wrong length") + ErrInvalidChecksum = errs.Error("invalid chunk checksum") + ErrWrongMetadata = errs.Error("wrong chunk metadata") + ErrMetadataLength = errs.Error("chunk metadata wrong length") + ErrDataLength = errs.Error("chunk data wrong length") + ErrSliceOutOfRange = errs.Error("chunk can't be sliced out of its data range") + ErrSliceNoDataInRange = errs.Error("chunk has no data for given range to slice") + ErrSliceChunkOverflow = errs.Error("slicing should not overflow a chunk") ) var castagnoliTable = crc32.MakeTable(crc32.Castagnoli) @@ -327,3 +330,55 @@ func (c *Chunk) Samples(from, through model.Time) ([]model.SamplePair, error) { interval := metric.Interval{OldestInclusive: from, NewestInclusive: through} return prom_chunk.RangeValues(it, interval) } + +// Slice builds a new smaller chunk with data only from given time range (inclusive) +func (c *Chunk) Slice(from, through model.Time) (*Chunk, error) { + // there should be at least some overlap between chunk interval and slice interval + if from > c.Through || through < c.From { + return nil, ErrSliceOutOfRange + } + + itr := c.Data.NewIterator(nil) + if !itr.FindAtOrAfter(from) { + return nil, ErrSliceNoDataInRange + } + + pc, err := prom_chunk.NewForEncoding(c.Data.Encoding()) + if err != nil { + return nil, err + } + + for !itr.Value().Timestamp.After(through) { + oc, err := pc.Add(itr.Value()) + if err != nil { + return nil, err + } + + if oc != nil { + return nil, ErrSliceChunkOverflow + } + if !itr.Scan() { + break + } + } + + err = itr.Err() + if err != nil { + return nil, err + } + + if pc.Len() == 0 { + return nil, ErrSliceNoDataInRange + } + + nc := NewChunk(c.UserID, c.Fingerprint, c.Metric, pc, from, through) + return &nc, nil +} + +func intervalsOverlap(interval1, interval2 model.Interval) bool { + if interval1.Start > interval2.End || interval2.Start > interval1.End { + return false + } + + return true +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go index a3dd5fcad190a..2f9b95e78aecc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go @@ -2,7 +2,6 @@ package chunk import ( "context" - "errors" "flag" "fmt" "net/http" @@ -11,6 +10,7 @@ import ( "time" "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto",
"github.com/prometheus/common/model" @@ -26,6 +26,11 @@ import ( "github.com/cortexproject/cortex/pkg/util/validation" ) +var ( + ErrMetricNameLabelMissing = errors.New("metric name label missing") + ErrParialDeleteChunkNoOverlap = errors.New("interval for partial deletion has not overlap with chunk interval") +) + var ( indexEntriesPerChunk = promauto.NewHistogram(prometheus.HistogramOpts{ Namespace: "cortex", @@ -113,6 +118,7 @@ func (c *store) Put(ctx context.Context, chunks []Chunk) error { // PutOne implements ChunkStore func (c *store) PutOne(ctx context.Context, from, through model.Time, chunk Chunk) error { + log, ctx := spanlogger.New(ctx, "ChunkStore.PutOne") chunks := []Chunk{chunk} err := c.storage.PutChunks(ctx, chunks) @@ -120,7 +126,9 @@ func (c *store) PutOne(ctx context.Context, from, through model.Time, chunk Chun return err } - c.writeBackCache(ctx, chunks) + if cacheErr := c.writeBackCache(ctx, chunks); cacheErr != nil { + level.Warn(log).Log("msg", "could not store chunks in chunk cache", "err", cacheErr) + } writeReqs, err := c.calculateIndexEntries(chunk.UserID, from, through, chunk) if err != nil { @@ -136,7 +144,7 @@ func (c *store) calculateIndexEntries(userID string, from, through model.Time, c metricName := chunk.Metric.Get(labels.MetricName) if metricName == "" { - return nil, fmt.Errorf("no MetricNameLabel for chunk") + return nil, ErrMetricNameLabelMissing } entries, err := c.schema.GetWriteEntries(from, through, userID, metricName, chunk.Metric, chunk.ExternalKey()) @@ -247,6 +255,7 @@ func (c *store) LabelNamesForMetricName(ctx context.Context, userID string, from } func (c *store) validateQueryTimeRange(ctx context.Context, userID string, from *model.Time, through *model.Time) (bool, error) { + //nolint:ineffassign,staticcheck //Leaving ctx even though we don't currently use it, we want to make it available for when we might need it and hopefully will ensure us using the correct context at that time log, ctx := spanlogger.New(ctx, "store.validateQueryTimeRange") defer log.Span.Finish() @@ -429,6 +438,9 @@ func (c *store) lookupChunksByMetricName(ctx context.Context, userID string, fro } func (c *store) lookupEntriesByQueries(ctx context.Context, queries []IndexQuery) ([]IndexEntry, error) { + log, ctx := spanlogger.New(ctx, "store.lookupEntriesByQueries") + defer log.Span.Finish() + var lock sync.Mutex var entries []IndexEntry err := c.index.QueryPages(ctx, queries, func(query IndexQuery, resp ReadBatch) bool { @@ -482,3 +494,126 @@ func (c *store) convertChunkIDsToChunks(ctx context.Context, userID string, chun return chunkSet, nil } + +func (c *store) DeleteChunk(ctx context.Context, from, through model.Time, userID, chunkID string, metric labels.Labels, partiallyDeletedInterval *model.Interval) error { + metricName := metric.Get(model.MetricNameLabel) + if metricName == "" { + return ErrMetricNameLabelMissing + } + + chunkWriteEntries, err := c.schema.GetWriteEntries(from, through, userID, string(metricName), metric, chunkID) + if err != nil { + return errors.Wrapf(err, "when getting index entries to delete for chunkID=%s", chunkID) + } + + return c.deleteChunk(ctx, userID, chunkID, metric, chunkWriteEntries, partiallyDeletedInterval, func(chunk Chunk) error { + return c.PutOne(ctx, chunk.From, chunk.Through, chunk) + }) +} + +func (c *store) deleteChunk(ctx context.Context, + userID string, + chunkID string, + metric labels.Labels, + chunkWriteEntries []IndexEntry, + partiallyDeletedInterval *model.Interval, + putChunkFunc func(chunk Chunk) error) 
error { + + metricName := metric.Get(model.MetricNameLabel) + if metricName == "" { + return ErrMetricNameLabelMissing + } + + // if chunk is partially deleted, fetch it, slice non-deleted portion and put it to store before deleting original chunk + if partiallyDeletedInterval != nil { + err := c.reboundChunk(ctx, userID, chunkID, *partiallyDeletedInterval, putChunkFunc) + if err != nil { + return errors.Wrapf(err, "chunkID=%s", chunkID) + } + } + + batch := c.index.NewWriteBatch() + for i := range chunkWriteEntries { + batch.Delete(chunkWriteEntries[i].TableName, chunkWriteEntries[i].HashValue, chunkWriteEntries[i].RangeValue) + } + + err := c.index.BatchWrite(ctx, batch) + if err != nil { + return errors.Wrapf(err, "when deleting index entries for chunkID=%s", chunkID) + } + + err = c.chunks.DeleteChunk(ctx, chunkID) + if err != nil { + if err == ErrStorageObjectNotFound { + return nil + } + return errors.Wrapf(err, "when deleting chunk from storage with chunkID=%s", chunkID) + } + + return nil +} + +func (c *store) reboundChunk(ctx context.Context, userID, chunkID string, partiallyDeletedInterval model.Interval, putChunkFunc func(chunk Chunk) error) error { + chunk, err := ParseExternalKey(userID, chunkID) + if err != nil { + return errors.Wrap(err, "when parsing external key") + } + + if !intervalsOverlap(model.Interval{Start: chunk.From, End: chunk.Through}, partiallyDeletedInterval) { + return ErrParialDeleteChunkNoOverlap + } + + chunks, err := c.Fetcher.FetchChunks(ctx, []Chunk{chunk}, []string{chunkID}) + if err != nil { + if err == ErrStorageObjectNotFound { + return nil + } + return errors.Wrap(err, "when fetching chunk from storage for slicing") + } + + if len(chunks) != 1 { + return fmt.Errorf("expected to get 1 chunk from storage got %d instead", len(chunks)) + } + + chunk = chunks[0] + var newChunks []*Chunk + if partiallyDeletedInterval.Start > chunk.From { + newChunk, err := chunk.Slice(chunk.From, partiallyDeletedInterval.Start-1) + if err != nil && err != ErrSliceNoDataInRange { + return errors.Wrapf(err, "when slicing chunk for interval %d - %d", chunk.From, partiallyDeletedInterval.Start-1) + } + + if newChunk != nil { + newChunks = append(newChunks, newChunk) + } + } + + if partiallyDeletedInterval.End < chunk.Through { + newChunk, err := chunk.Slice(partiallyDeletedInterval.End+1, chunk.Through) + if err != nil && err != ErrSliceNoDataInRange { + return errors.Wrapf(err, "when slicing chunk for interval %d - %d", partiallyDeletedInterval.End+1, chunk.Through) + } + + if newChunk != nil { + newChunks = append(newChunks, newChunk) + } + } + + for _, newChunk := range newChunks { + if err := newChunk.Encode(); err != nil { + return errors.Wrapf(err, "when encoding new chunk formed after slicing for interval %d - %d", newChunk.From, newChunk.Through) + } + + err = putChunkFunc(*newChunk) + if err != nil { + return errors.Wrapf(err, "when putting new chunk formed after slicing for interval %d - %d", newChunk.From, newChunk.Through) + } + } + + return nil +} + +func (c *store) DeleteSeriesIDs(ctx context.Context, from, through model.Time, userID string, metric labels.Labels) error { + // SeriesID is something which is only used in SeriesStore so we need not do anything here + return nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go index eb6ced986d181..27a5a84fe97cb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go +++ 
b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go @@ -146,13 +146,13 @@ func (c *Fetcher) worker() { // FetchChunks fetches a set of chunks from cache and store. Note that the keys passed in must be // lexicographically sorted, while the returned chunks are not in the same order as the passed in chunks. func (c *Fetcher) FetchChunks(ctx context.Context, chunks []Chunk, keys []string) ([]Chunk, error) { - log, ctx := spanlogger.New(ctx, "ChunkStore.fetchChunks") + log, ctx := spanlogger.New(ctx, "ChunkStore.FetchChunks") defer log.Span.Finish() // Now fetch the actual chunk data from Memcache / S3 cacheHits, cacheBufs, _ := c.cache.Fetch(ctx, keys) - fromCache, missing, err := c.processCacheResponse(chunks, cacheHits, cacheBufs) + fromCache, missing, err := c.processCacheResponse(ctx, chunks, cacheHits, cacheBufs) if err != nil { level.Warn(log).Log("msg", "error fetching from cache", "err", err) } @@ -199,12 +199,14 @@ func (c *Fetcher) writeBackCache(ctx context.Context, chunks []Chunk) error { // processCacheResponse decodes the chunks coming back from the cache, separating // hits and misses. -func (c *Fetcher) processCacheResponse(chunks []Chunk, keys []string, bufs [][]byte) ([]Chunk, []Chunk, error) { +func (c *Fetcher) processCacheResponse(ctx context.Context, chunks []Chunk, keys []string, bufs [][]byte) ([]Chunk, []Chunk, error) { var ( requests = make([]decodeRequest, 0, len(keys)) responses = make(chan decodeResponse) missing []Chunk ) + log, _ := spanlogger.New(ctx, "Fetcher.processCacheResponse") + defer log.Span.Finish() i, j := 0, 0 for i < len(chunks) && j < len(keys) { @@ -229,6 +231,7 @@ func (c *Fetcher) processCacheResponse(chunks []Chunk, keys []string, bufs [][]b for ; i < len(chunks); i++ { missing = append(missing, chunks[i]) } + level.Debug(log).Log("chunks", len(chunks), "decodeRequests", len(requests), "missing", len(missing)) go func() { for _, request := range requests { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go index 4f735afcb7fb1..fdb30d339fa32 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go @@ -25,6 +25,12 @@ type Store interface { GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string) ([]string, error) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) + + // DeleteChunk deletes a chunk's index entries and then deletes the actual chunk from chunk storage. + // It takes care of partially deleted chunks by creating and inserting a new chunk first and then deleting the original chunk + DeleteChunk(ctx context.Context, from, through model.Time, userID, chunkID string, metric labels.Labels, partiallyDeletedInterval *model.Interval) error + // DeleteSeriesIDs is only relevant for SeriesStore.
+ DeleteSeriesIDs(ctx context.Context, from, through model.Time, userID string, metric labels.Labels) error Stop() } @@ -142,6 +148,21 @@ func (c compositeStore) GetChunkRefs(ctx context.Context, userID string, from, t return chunkIDs, fetchers, err } +// DeleteSeriesIDs deletes series IDs from index in series store +func (c CompositeStore) DeleteSeriesIDs(ctx context.Context, from, through model.Time, userID string, metric labels.Labels) error { + return c.forStores(from, through, func(from, through model.Time, store Store) error { + return store.DeleteSeriesIDs(ctx, from, through, userID, metric) + }) +} + +// DeleteChunk deletes a chunk's index entries and then deletes the actual chunk from chunk storage. +// It takes care of partially deleted chunks by creating and inserting a new chunk first and then deleting the original chunk +func (c CompositeStore) DeleteChunk(ctx context.Context, from, through model.Time, userID, chunkID string, metric labels.Labels, partiallyDeletedInterval *model.Interval) error { + return c.forStores(from, through, func(from, through model.Time, store Store) error { + return store.DeleteChunk(ctx, from, through, userID, chunkID, metric, partiallyDeletedInterval) + }) +} + func (c compositeStore) Stop() { for _, store := range c.stores { store.Stop() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/delete_requests_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/delete_requests_store.go new file mode 100644 index 0000000000000..963f1d29d088d --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/delete_requests_store.go @@ -0,0 +1,283 @@ +package chunk + +import ( + "context" + "encoding/binary" + "encoding/hex" + "errors" + "flag" + "fmt" + "hash/fnv" + "strconv" + "strings" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" +) + +type DeleteRequestStatus string + +const ( + StatusReceived DeleteRequestStatus = "received" + StatusBuildingPlan DeleteRequestStatus = "buildingPlan" + StatusDeleting DeleteRequestStatus = "deleting" + StatusProcessed DeleteRequestStatus = "processed" + + separator = "\000" // separator for series selectors in delete requests +) + +var ( + pendingDeleteRequestStatuses = []DeleteRequestStatus{StatusReceived, StatusBuildingPlan, StatusDeleting} + + ErrDeleteRequestNotFound = errors.New("could not find matching delete request") +) + +// DeleteRequest holds all the details about a delete request +type DeleteRequest struct { + RequestID string `json:"request_id"` + UserID string `json:"-"` + StartTime model.Time `json:"start_time"` + EndTime model.Time `json:"end_time"` + Selectors []string `json:"selectors"` + Status DeleteRequestStatus `json:"status"` + Matchers [][]*labels.Matcher `json:"-"` + CreatedAt model.Time `json:"created_at"` +} + +// DeleteStore provides all the methods required to manage the lifecycle of delete requests and things related to them +type DeleteStore struct { + cfg DeleteStoreConfig + indexClient IndexClient +} + +// DeleteStoreConfig holds configuration for delete store +type DeleteStoreConfig struct { + Store string `yaml:"store"` + RequestsTableName string `yaml:"requests_table_name"` +} + +// RegisterFlags adds the flags required to configure this flag set.
+func (cfg *DeleteStoreConfig) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&cfg.Store, "deletes.store", "", "Store for keeping delete request") + f.StringVar(&cfg.RequestsTableName, "deletes.requests-table-name", "delete_requests", "Name of the table which stores delete requests") +} + +// NewDeleteStore creates a store for managing delete requests +func NewDeleteStore(cfg DeleteStoreConfig, indexClient IndexClient) (*DeleteStore, error) { + ds := DeleteStore{ + cfg: cfg, + indexClient: indexClient, + } + + return &ds, nil +} + +// Add creates entries for a new delete request +func (ds *DeleteStore) AddDeleteRequest(ctx context.Context, userID string, startTime, endTime model.Time, selectors []string) error { + requestID := generateUniqueID(userID, selectors) + + for { + _, err := ds.GetDeleteRequest(ctx, userID, string(requestID)) + if err != nil { + if err == ErrDeleteRequestNotFound { + break + } + return err + } + + // we have a collision here, lets recreate a new requestID and check for collision + time.Sleep(time.Millisecond) + requestID = generateUniqueID(userID, selectors) + } + + // userID, requestID + userIDAndRequestID := fmt.Sprintf("%s:%s", userID, requestID) + + // Add an entry with userID, requestID as range key and status as value to make it easy to manage and lookup status + // We don't want to set anything in hash key here since we would want to find delete requests by just status + writeBatch := ds.indexClient.NewWriteBatch() + writeBatch.Add(ds.cfg.RequestsTableName, "", []byte(userIDAndRequestID), []byte(StatusReceived)) + + // Add another entry with additional details like creation time, time range of delete request and selectors in value + rangeValue := fmt.Sprintf("%x:%x:%x", int64(model.Now()), int64(startTime), int64(endTime)) + writeBatch.Add(ds.cfg.RequestsTableName, userIDAndRequestID, []byte(rangeValue), []byte(strings.Join(selectors, separator))) + + return ds.indexClient.BatchWrite(ctx, writeBatch) +} + +// GetDeleteRequestsByStatus returns all delete requests for given status +func (ds *DeleteStore) GetDeleteRequestsByStatus(ctx context.Context, status DeleteRequestStatus) ([]DeleteRequest, error) { + return ds.queryDeleteRequests(ctx, []IndexQuery{{TableName: ds.cfg.RequestsTableName, ValueEqual: []byte(status)}}) +} + +// GetDeleteRequestsForUserByStatus returns all delete requests for a user with given status +func (ds *DeleteStore) GetDeleteRequestsForUserByStatus(ctx context.Context, userID string, status DeleteRequestStatus) ([]DeleteRequest, error) { + return ds.queryDeleteRequests(ctx, []IndexQuery{ + {TableName: ds.cfg.RequestsTableName, RangeValuePrefix: []byte(userID), ValueEqual: []byte(status)}, + }) +} + +// GetAllDeleteRequestsForUser returns all delete requests for a user +func (ds *DeleteStore) GetAllDeleteRequestsForUser(ctx context.Context, userID string) ([]DeleteRequest, error) { + return ds.queryDeleteRequests(ctx, []IndexQuery{ + {TableName: ds.cfg.RequestsTableName, RangeValuePrefix: []byte(userID)}, + }) +} + +// UpdateStatus updates status of a delete request +func (ds *DeleteStore) UpdateStatus(ctx context.Context, userID, requestID string, newStatus DeleteRequestStatus) error { + userIDAndRequestID := fmt.Sprintf("%s:%s", userID, requestID) + + writeBatch := ds.indexClient.NewWriteBatch() + writeBatch.Add(ds.cfg.RequestsTableName, "", []byte(userIDAndRequestID), []byte(newStatus)) + + return ds.indexClient.BatchWrite(ctx, writeBatch) +} + +// GetDeleteRequest returns delete request with given requestID +func (ds *DeleteStore) 
GetDeleteRequest(ctx context.Context, userID, requestID string) (*DeleteRequest, error) { + userIDAndRequestID := fmt.Sprintf("%s:%s", userID, requestID) + + deleteRequests, err := ds.queryDeleteRequests(ctx, []IndexQuery{ + {TableName: ds.cfg.RequestsTableName, RangeValuePrefix: []byte(userIDAndRequestID)}, + }) + + if err != nil { + return nil, err + } + + if len(deleteRequests) == 0 { + return nil, ErrDeleteRequestNotFound + } + + return &deleteRequests[0], nil +} + +// GetPendingDeleteRequestsForUser returns all delete requests for a user which are not processed +func (ds *DeleteStore) GetPendingDeleteRequestsForUser(ctx context.Context, userID string) ([]DeleteRequest, error) { + pendingDeleteRequests := []DeleteRequest{} + for _, status := range pendingDeleteRequestStatuses { + deleteRequests, err := ds.GetDeleteRequestsForUserByStatus(ctx, userID, status) + if err != nil { + return nil, err + } + + pendingDeleteRequests = append(pendingDeleteRequests, deleteRequests...) + } + + return pendingDeleteRequests, nil +} + +func (ds *DeleteStore) queryDeleteRequests(ctx context.Context, deleteQuery []IndexQuery) ([]DeleteRequest, error) { + deleteRequests := []DeleteRequest{} + err := ds.indexClient.QueryPages(ctx, deleteQuery, func(query IndexQuery, batch ReadBatch) (shouldContinue bool) { + itr := batch.Iterator() + for itr.Next() { + userID, requestID := splitUserIDAndRequestID(string(itr.RangeValue())) + + deleteRequests = append(deleteRequests, DeleteRequest{ + UserID: userID, + RequestID: requestID, + Status: DeleteRequestStatus(itr.Value()), + }) + } + return true + }) + if err != nil { + return nil, err + } + + for i, deleteRequest := range deleteRequests { + deleteRequestQuery := []IndexQuery{{TableName: ds.cfg.RequestsTableName, HashValue: fmt.Sprintf("%s:%s", deleteRequest.UserID, deleteRequest.RequestID)}} + + var parseError error + err := ds.indexClient.QueryPages(ctx, deleteRequestQuery, func(query IndexQuery, batch ReadBatch) (shouldContinue bool) { + itr := batch.Iterator() + itr.Next() + + deleteRequest, err = parseDeleteRequestTimestamps(itr.RangeValue(), deleteRequest) + if err != nil { + parseError = err + return false + } + + deleteRequest.Selectors = strings.Split(string(itr.Value()), separator) + deleteRequests[i] = deleteRequest + + return true + }) + + if err != nil { + return nil, err + } + + if parseError != nil { + return nil, parseError + } + } + + return deleteRequests, nil +} + +func parseDeleteRequestTimestamps(rangeValue []byte, deleteRequest DeleteRequest) (DeleteRequest, error) { + hexParts := strings.Split(string(rangeValue), ":") + if len(hexParts) != 3 { + return deleteRequest, errors.New("invalid key in parsing delete request lookup response") + } + + createdAt, err := strconv.ParseInt(hexParts[0], 16, 64) + if err != nil { + return deleteRequest, err + } + + from, err := strconv.ParseInt(hexParts[1], 16, 64) + if err != nil { + return deleteRequest, err + + } + through, err := strconv.ParseInt(hexParts[2], 16, 64) + if err != nil { + return deleteRequest, err + + } + + deleteRequest.CreatedAt = model.Time(createdAt) + deleteRequest.StartTime = model.Time(from) + deleteRequest.EndTime = model.Time(through) + + return deleteRequest, nil +} + +// An id is useful in managing delete requests +func generateUniqueID(orgID string, selectors []string) []byte { + uniqueID := fnv.New32() + _, _ = uniqueID.Write([]byte(orgID)) + + timeNow := make([]byte, 8) + binary.LittleEndian.PutUint64(timeNow, uint64(time.Now().UnixNano())) + _, _ = uniqueID.Write(timeNow) + 
+ for _, selector := range selectors { + _, _ = uniqueID.Write([]byte(selector)) + } + + return encodeUniqueID(uniqueID.Sum32()) +} + +func encodeUniqueID(t uint32) []byte { + throughBytes := make([]byte, 4) + binary.BigEndian.PutUint32(throughBytes, t) + encodedThroughBytes := make([]byte, 8) + hex.Encode(encodedThroughBytes, throughBytes) + return encodedThroughBytes +} + +func splitUserIDAndRequestID(rangeValue string) (userID, requestID string) { + lastIndex := strings.LastIndex(rangeValue, ":") + + userID = rangeValue[:lastIndex] + requestID = rangeValue[lastIndex+1:] + + return +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go index 5ac314d9d0fde..95f2a61ccdc18 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go @@ -15,7 +15,7 @@ type Config struct{} var ( // DefaultEncoding exported for use in unit tests elsewhere - DefaultEncoding = DoubleDelta + DefaultEncoding = Bigchunk alwaysMarshalFullsizeChunks = true bigchunkSizeCapBytes = 0 ) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit.go index 2df8abc482716..c9580214d2cdd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit.go @@ -13,7 +13,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - +//nolint //Since this was copied from Prometheus leave it as is package encoding import ( diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit_helpers.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit_helpers.go index 9fe9c09feaf17..31f13b1647883 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit_helpers.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit_helpers.go @@ -13,7 +13,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - +//nolint //Since this was copied from Prometheus leave it as is package encoding import "github.com/prometheus/common/model" diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go index ec305577b8e8c..c163e529f1c71 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go @@ -60,8 +60,6 @@ type storageClientColumnKey struct { schemaCfg chunk.SchemaConfig client *bigtable.Client keysFn keysFn - - distributeKeys bool } // storageClientV1 implements chunk.storageClient for GCP. 
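
Before moving on to the Bigtable changes, a note on the delete store's encodings above: generateUniqueID hashes the org ID, the current time, and the selectors with fnv32 and hex-encodes the 4-byte sum into 8 bytes, while AddDeleteRequest packs three timestamps into the range value as `%x:%x:%x`. A small, self-contained round-trip of that range-value encoding (the timestamp values are made up):

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	func main() {
		// Encode the way AddDeleteRequest does: createdAt:startTime:endTime in hex.
		createdAt, start, end := int64(1583000000000), int64(1582000000000), int64(1582500000000)
		rangeValue := fmt.Sprintf("%x:%x:%x", createdAt, start, end)

		// Decode the way parseDeleteRequestTimestamps does: base-16 ParseInt per part.
		parts := strings.Split(rangeValue, ":")
		gotStart, err := strconv.ParseInt(parts[1], 16, 64)
		if err != nil {
			panic(err)
		}
		fmt.Println(rangeValue, gotStart == start) // prints the hex triple and "true"
	}

The ':' separator is unambiguous here because hex digits never contain it, which is what parseDeleteRequestTimestamps relies on when it insists on exactly three parts.
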
@@ -169,6 +167,11 @@ func (b bigtableWriteBatch) Add(tableName, hashValue string, rangeValue []byte, mutation.Set(columnFamily, columnKey, 0, value) } +func (b bigtableWriteBatch) Delete(tableName, hashValue string, rangeValue []byte) { + // ToDo: implement this to support deleting index entries from Bigtable + panic("Bigtable does not support Deleting index entries yet") +} + func (s *storageClientColumnKey) BatchWrite(ctx context.Context, batch chunk.WriteBatch) error { bigtableBatch := batch.(bigtableWriteBatch) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go index 31b6670d8f404..46fbe2c2da9b0 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go @@ -160,3 +160,8 @@ func (s *bigtableObjectClient) GetChunks(ctx context.Context, input []chunk.Chun return output, nil } + +func (s *bigtableObjectClient) DeleteChunk(ctx context.Context, chunkID string) error { + // ToDo: implement this to support deleting chunks from Bigtable + return chunk.ErrMethodNotImplemented +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go index 040dd3effcd5a..4785e1951a54f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go @@ -127,3 +127,8 @@ func (s *GCSObjectClient) List(ctx context.Context, prefix string) ([]chunk.Stor return storageObjects, nil } + +func (s *GCSObjectClient) DeleteObject(ctx context.Context, chunkID string) error { + // ToDo: implement this to support deleting chunks from GCS + return chunk.ErrMethodNotImplemented +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go index 9456d13649a00..80c17678b928c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go @@ -4,6 +4,8 @@ import ( "bytes" "context" "fmt" + "io" + "io/ioutil" "sort" "sync" @@ -135,9 +137,9 @@ func (m *MockStorage) BatchWrite(ctx context.Context, batch WriteBatch) error { mockBatch := *batch.(*mockWriteBatch) seenWrites := map[string]bool{} - m.numWrites += len(mockBatch) + m.numWrites += len(mockBatch.inserts) - for _, req := range mockBatch { + for _, req := range mockBatch.inserts { table, ok := m.tables[req.tableName] if !ok { return fmt.Errorf("table not found") @@ -162,15 +164,9 @@ func (m *MockStorage) BatchWrite(ctx context.Context, batch WriteBatch) error { items = append(items, mockItem{}) copy(items[i+1:], items[i:]) } else { - // Return error if duplicate write and not metric name entry or series entry - itemComponents := decodeRangeKey(items[i].rangeValue) - keyType := itemComponents[3][0] - if keyType != metricNameRangeKeyV1 && - keyType != seriesRangeKeyV1 && - keyType != labelNamesRangeKeyV1 && - keyType != labelSeriesRangeKeyV1 { - return fmt.Errorf("Dupe write") - } + // if duplicate write then just update the value + items[i].value = req.value + continue } items[i] = mockItem{ rangeValue: req.rangeValue, @@ -179,6 +175,31 @@ func (m *MockStorage) BatchWrite(ctx context.Context, batch WriteBatch) error { 
value: req.value, }
table.items[req.hashValue] = items } + + for _, req := range mockBatch.deletes { + table, ok := m.tables[req.tableName] + if !ok { + return fmt.Errorf("table not found") + } + + items := table.items[req.hashValue] + + i := sort.Search(len(items), func(i int) bool { + return bytes.Compare(items[i].rangeValue, req.rangeValue) >= 0 + }) + + if i >= len(items) || !bytes.Equal(items[i].rangeValue, req.rangeValue) { + continue + } + + if len(items) == 1 { + items = nil + } else { + items = items[:i+copy(items[i:], items[i+1:])] + } + + table.items[req.hashValue] = items + } return nil } @@ -301,7 +322,7 @@ func (m *MockStorage) GetChunks(ctx context.Context, chunkSet []Chunk) ([]Chunk, key := chunk.ExternalKey() buf, ok := m.objects[key] if !ok { - return nil, fmt.Errorf("%v not found", key) + return nil, ErrStorageObjectNotFound } if err := chunk.Decode(decodeContext, buf); err != nil { return nil, err @@ -311,14 +332,82 @@ func (m *MockStorage) GetChunks(ctx context.Context, chunkSet []Chunk) ([]Chunk, return result, nil } -type mockWriteBatch []struct { - tableName, hashValue string - rangeValue []byte - value []byte +// DeleteChunk implements StorageClient. +func (m *MockStorage) DeleteChunk(ctx context.Context, chunkID string) error { + return m.DeleteObject(ctx, chunkID) +} + +func (m *MockStorage) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, error) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + buf, ok := m.objects[objectKey] + if !ok { + return nil, ErrStorageObjectNotFound + } + + return ioutil.NopCloser(bytes.NewReader(buf)), nil +} + +func (m *MockStorage) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error { + buf, err := ioutil.ReadAll(object) + if err != nil { + return err + } + + m.mtx.Lock() + defer m.mtx.Unlock() + + m.objects[objectKey] = buf + return nil +} + +func (m *MockStorage) DeleteObject(ctx context.Context, objectKey string) error { + m.mtx.Lock() + defer m.mtx.Unlock() + + if _, ok := m.objects[objectKey]; !ok { + return ErrStorageObjectNotFound + } + + delete(m.objects, objectKey) + return nil +} + +func (m *MockStorage) List(ctx context.Context, prefix string) ([]StorageObject, error) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + storageObjects := make([]StorageObject, 0, len(m.objects)) + for key := range m.objects { + // ToDo: Store mtime when we have mtime based use-cases for storage objects + storageObjects = append(storageObjects, StorageObject{Key: key}) + } + + return storageObjects, nil +} + +type mockWriteBatch struct { + inserts []struct { + tableName, hashValue string + rangeValue []byte + value []byte + } + deletes []struct { + tableName, hashValue string + rangeValue []byte + } +} + +func (b *mockWriteBatch) Delete(tableName, hashValue string, rangeValue []byte) { + b.deletes = append(b.deletes, struct { + tableName, hashValue string + rangeValue []byte + }{tableName: tableName, hashValue: hashValue, rangeValue: rangeValue}) } func (b *mockWriteBatch) Add(tableName, hashValue string, rangeValue []byte, value []byte) { - *b = append(*b, struct { + b.inserts = append(b.inserts, struct { tableName, hashValue string rangeValue []byte value []byte diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go index 4bad3d2f9e594..b9aadd338219c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go +++ 
b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go @@ -5,6 +5,7 @@ import ( "context" "errors" "flag" + "fmt" "os" "path" "sync" @@ -127,7 +128,8 @@ func (b *BoltIndexClient) Stop() { func (b *BoltIndexClient) NewWriteBatch() chunk.WriteBatch { return &boltWriteBatch{ - tables: map[string]map[string][]byte{}, + puts: map[string]map[string][]byte{}, + deletes: map[string]map[string]struct{}{}, } } @@ -170,7 +172,8 @@ func (b *BoltIndexClient) GetDB(name string, operation int) (*bbolt.DB, error) { } func (b *BoltIndexClient) BatchWrite(ctx context.Context, batch chunk.WriteBatch) error { - for table, kvps := range batch.(*boltWriteBatch).tables { + // ToDo: too much code duplication, refactor this + for table, kvps := range batch.(*boltWriteBatch).puts { db, err := b.GetDB(table, DBOperationWrite) if err != nil { return err @@ -193,6 +196,31 @@ func (b *BoltIndexClient) BatchWrite(ctx context.Context, batch chunk.WriteBatch return err } } + + for table, kvps := range batch.(*boltWriteBatch).deletes { + db, err := b.GetDB(table, DBOperationWrite) + if err != nil { + return err + } + + if err := db.Update(func(tx *bbolt.Tx) error { + b := tx.Bucket(bucketName) + if b == nil { + return fmt.Errorf("Bucket %s not found in table %s", bucketName, table) + } + + for key := range kvps { + if err := b.Delete([]byte(key)); err != nil { + return err + } + } + + return nil + }); err != nil { + return err + } + } + return nil } @@ -258,14 +286,26 @@ func (b *BoltIndexClient) QueryDB(ctx context.Context, db *bbolt.DB, query chunk } type boltWriteBatch struct { - tables map[string]map[string][]byte + puts map[string]map[string][]byte + deletes map[string]map[string]struct{} +} + +func (b *boltWriteBatch) Delete(tableName, hashValue string, rangeValue []byte) { + table, ok := b.deletes[tableName] + if !ok { + table = map[string]struct{}{} + b.deletes[tableName] = table + } + + key := hashValue + separator + string(rangeValue) + table[key] = struct{}{} } func (b *boltWriteBatch) Add(tableName, hashValue string, rangeValue []byte, value []byte) { - table, ok := b.tables[tableName] + table, ok := b.puts[tableName] if !ok { table = map[string][]byte{} - b.tables[tableName] = table + b.puts[tableName] = table } key := hashValue + separator + string(rangeValue) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go index 0f4e33443ff0a..a10bd297648ab 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go @@ -53,7 +53,12 @@ func (FSObjectClient) Stop() {} // GetObject from the store func (f *FSObjectClient) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, error) { - return os.Open(path.Join(f.cfg.Directory, objectKey)) + fl, err := os.Open(path.Join(f.cfg.Directory, objectKey)) + if err != nil && os.IsNotExist(err) { + return nil, chunk.ErrStorageObjectNotFound + } + + return fl, err } // PutObject into the store @@ -106,6 +111,15 @@ func (f *FSObjectClient) List(ctx context.Context, prefix string) ([]chunk.Stora return storageObjects, nil } +func (f *FSObjectClient) DeleteObject(ctx context.Context, objectKey string) error { + err := os.Remove(path.Join(f.cfg.Directory, objectKey)) + if err != nil && os.IsNotExist(err) { + return chunk.ErrStorageObjectNotFound + } + + return err +} + // DeleteChunksBefore implements BucketClient func (f 
*FSObjectClient) DeleteChunksBefore(ctx context.Context, ts time.Time) error { return filepath.Walk(f.cfg.Directory, func(path string, info os.FileInfo, err error) error { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/objectclient/client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/objectclient/client.go index 318ff29d6807a..70b2ff1128bde 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/objectclient/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/objectclient/client.go @@ -108,3 +108,8 @@ func (o *Client) getChunk(ctx context.Context, decodeContext *chunk.DecodeContex } return c, nil } + +// DeleteChunk deletes the specified chunk from the configured backend +func (o *Client) DeleteChunk(ctx context.Context, chunkID string) error { + return o.store.DeleteObject(ctx, chunkID) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go index e52a5eed11530..c7af79a2763ca 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go @@ -5,11 +5,16 @@ import ( "encoding/hex" "errors" "fmt" + "strconv" "strings" + "github.com/go-kit/kit/log/level" jsoniter "github.com/json-iterator/go" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" + + "github.com/cortexproject/cortex/pkg/querier/astmapper" + "github.com/cortexproject/cortex/pkg/util" ) const ( @@ -33,6 +38,8 @@ var ( ErrNotSupported = errors.New("not supported") ) +type hasChunksForIntervalFunc func(userID, seriesID string, from, through model.Time) (bool, error) + // Schema interface defines methods to calculate the hash and range keys needed // to write or read chunks from the external index. type Schema interface { @@ -48,11 +55,18 @@ type Schema interface { GetReadQueriesForMetric(from, through model.Time, userID string, metricName string) ([]IndexQuery, error) GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName string, labelName string) ([]IndexQuery, error) GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName string, labelName string, labelValue string) ([]IndexQuery, error) + FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery // If the query resulted in series IDs, use this method to find chunks. GetChunksForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) // Returns queries to retrieve all label names of multiple series by id. GetLabelNamesForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) + + // GetSeriesDeleteEntries returns IndexEntries for deleting SeriesIDs from the SeriesStore. + // Since SeriesIDs are created per bucket, it makes sure we don't include series entries which are still in use, by verifying with hasChunksForIntervalFunc, i.e. + // it checks the first and last buckets covered by the time interval to see if a SeriesID still has chunks in the store; + // if so, it doesn't include that bucket's IndexEntries for deletion.
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go
index e52a5eed11530..c7af79a2763ca 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go
@@ -5,11 +5,16 @@ import (
 	"encoding/hex"
 	"errors"
 	"fmt"
+	"strconv"
 	"strings"
 
+	"github.com/go-kit/kit/log/level"
 	jsoniter "github.com/json-iterator/go"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
+
+	"github.com/cortexproject/cortex/pkg/querier/astmapper"
+	"github.com/cortexproject/cortex/pkg/util"
 )
 
 const (
@@ -33,6 +38,8 @@ var (
 	ErrNotSupported = errors.New("not supported")
 )
 
+type hasChunksForIntervalFunc func(userID, seriesID string, from, through model.Time) (bool, error)
+
 // Schema interface defines methods to calculate the hash and range keys needed
 // to write or read chunks from the external index.
 type Schema interface {
@@ -48,11 +55,18 @@ type Schema interface {
 	GetReadQueriesForMetric(from, through model.Time, userID string, metricName string) ([]IndexQuery, error)
 	GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName string, labelName string) ([]IndexQuery, error)
 	GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName string, labelName string, labelValue string) ([]IndexQuery, error)
+	FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery
 
 	// If the query resulted in series IDs, use this method to find chunks.
 	GetChunksForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error)
 	// Returns queries to retrieve all label names of multiple series by id.
 	GetLabelNamesForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error)
+
+	// GetSeriesDeleteEntries returns IndexEntry's for deleting SeriesIDs from the SeriesStore.
+	// Since SeriesIDs are created per bucket, it uses hasChunksForIntervalFunc to make sure no in-use
+	// series entries are included: it checks the first and last buckets covered by the time interval
+	// to see whether a SeriesID still has chunks in the store, and if so skips that bucket's entries.
+	GetSeriesDeleteEntries(from, through model.Time, userID string, metric labels.Labels, hasChunksForIntervalFunc hasChunksForIntervalFunc) ([]IndexEntry, error)
 }
 
 // IndexQuery describes a query for entries
@@ -204,6 +218,78 @@ func (s schema) GetChunksForSeries(from, through model.Time, userID string, seri
 	return result, nil
 }
 
+// GetSeriesDeleteEntries returns IndexEntry's for deleting SeriesIDs from the SeriesStore.
+// Since SeriesIDs are created per bucket, it uses hasChunksForIntervalFunc to make sure no in-use
+// series entries are included: it checks the first and last buckets covered by the time interval
+// to see whether a SeriesID still has chunks in the store, and if so skips that bucket's entries.
+func (s schema) GetSeriesDeleteEntries(from, through model.Time, userID string, metric labels.Labels, hasChunksForIntervalFunc hasChunksForIntervalFunc) ([]IndexEntry, error) {
+	metricName := metric.Get(model.MetricNameLabel)
+	if metricName == "" {
+		return nil, ErrMetricNameLabelMissing
+	}
+
+	buckets := s.buckets(from, through, userID)
+	if len(buckets) == 0 {
+		return nil, nil
+	}
+
+	seriesID := string(labelsSeriesID(metric))
+
+	// Only the first and last buckets need to be checked for in-use series IDs, and
+	// only when they are partially covered by the delete interval; a bucket that is
+	// fully covered is considered for deletion wholesale anyway.
+
+	// Bucket times are relative to the bucket, i.e. for a per-day bucket
+	// bucket.from would be the number of milliseconds elapsed since the start of that day.
+	// If bucket.from is not 0, the from param doesn't align with the start of the bucket.
+	if buckets[0].from != 0 {
+		bucketStartTime := from - model.Time(buckets[0].from)
+		hasChunks, err := hasChunksForIntervalFunc(userID, seriesID, bucketStartTime, bucketStartTime+model.Time(buckets[0].bucketSize)-1)
+		if err != nil {
+			return nil, err
+		}
+
+		if hasChunks {
+			buckets = buckets[1:]
+			if len(buckets) == 0 {
+				return nil, nil
+			}
+		}
+	}
+
+	lastBucket := buckets[len(buckets)-1]
+
+	// Like bucket.from, bucket.through is also relative, i.e. for a per-day bucket
+	// it would be the number of milliseconds elapsed since the start of that day.
+	// If bucket.through is not equal to the bucket's max size, the through param doesn't align with the end of the bucket.
+	if lastBucket.through != lastBucket.bucketSize {
+		bucketStartTime := through - model.Time(lastBucket.through)
+		hasChunks, err := hasChunksForIntervalFunc(userID, seriesID, bucketStartTime, bucketStartTime+model.Time(lastBucket.bucketSize)-1)
+		if err != nil {
+			return nil, err
+		}
+
+		if hasChunks {
+			buckets = buckets[:len(buckets)-1]
+			if len(buckets) == 0 {
+				return nil, nil
+			}
+		}
+	}
+
+	var result []IndexEntry
+
+	for _, bucket := range buckets {
+		entries, err := s.entries.GetLabelWriteEntries(bucket, metricName, metric, "")
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, entries...)
+	}
+
+	return result, nil
+}
+
 func (s schema) GetLabelNamesForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) {
 	var result []IndexQuery
 
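To make the relative-time bookkeeping above concrete, here is a worked example under the assumption of daily (24h) buckets, with all values illustrative: a delete starting at 10:00 UTC covers its first bucket only partially, so hasChunksForIntervalFunc is probed with the full day before that bucket's series entries are deleted.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	const dayMs = 24 * 60 * 60 * 1000

	from := model.Time(1582192800000)        // 2020-02-20 10:00:00 UTC
	bucketFrom := int64(10 * 60 * 60 * 1000) // bucket-relative: 10h elapsed since start of the day

	// Same arithmetic as in GetSeriesDeleteEntries: walk back to the bucket
	// start, then probe the whole bucket [start, start+size-1] for chunks.
	bucketStartTime := from - model.Time(bucketFrom)
	checkThrough := bucketStartTime + model.Time(dayMs) - 1

	fmt.Println(bucketStartTime.Time().UTC()) // 2020-02-20 00:00:00 +0000 UTC
	fmt.Println(checkThrough.Time().UTC())    // 2020-02-20 23:59:59.999 +0000 UTC
}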
@@ -218,6 +304,10 @@ func (s schema) GetLabelNamesForSeries(from, through model.Time, userID string,
 	return result, nil
 }
 
+func (s schema) FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery {
+	return s.entries.FilterReadQueries(queries, shard)
+}
+
 type entries interface {
 	GetWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error)
 	GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error)
@@ -228,6 +318,7 @@ type entries interface {
 	GetReadMetricLabelValueQueries(bucket Bucket, metricName string, labelName string, labelValue string) ([]IndexQuery, error)
 	GetChunksForSeries(bucket Bucket, seriesID []byte) ([]IndexQuery, error)
 	GetLabelNamesForSeries(bucket Bucket, seriesID []byte) ([]IndexQuery, error)
+	FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery
 }
 
 // original entries:
@@ -303,6 +394,10 @@ func (originalEntries) GetLabelNamesForSeries(_ Bucket, _ []byte) ([]IndexQuery,
 	return nil, ErrNotSupported
 }
 
+func (originalEntries) FilterReadQueries(queries []IndexQuery, shard *astmapper.ShardAnnotation) []IndexQuery {
+	return queries
+}
+
 // v3Schema went to base64 encoded label values & a version ID
 // - range key:
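The new FilterReadQueries hook lets sharding-aware schemas drop index queries that cannot match a given query shard, while originalEntries (and other pre-sharding schemas) pass queries through untouched. A sketch of the intended call pattern, assuming the ShardAnnotation struct from pkg/querier/astmapper carries Shard and Of fields; the function and metric name are hypothetical:

package example

import (
	"github.com/prometheus/common/model"

	"github.com/cortexproject/cortex/pkg/chunk"
	"github.com/cortexproject/cortex/pkg/querier/astmapper"
)

// shardedQueries fetches the read queries for a metric, then narrows them to
// shard 3 of 16 before they are executed against the index.
func shardedQueries(s chunk.Schema, from, through model.Time, userID string) ([]chunk.IndexQuery, error) {
	queries, err := s.GetReadQueriesForMetric(from, through, userID, "http_requests_total")
	if err != nil {
		return nil, err
	}
	return s.FilterReadQueries(queries, &astmapper.ShardAnnotation{Shard: 3, Of: 16}), nil
}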