From c15594a03c459fccbb295ae02c1feb33e00eab37 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 23 Sep 2021 16:55:58 +0100 Subject: [PATCH 01/33] Query: Put cache name on tracing spans (#4696) * Put cache name on tracing spans With multiple caches, this change lets you see clearly which spans relate to which cache. Signed-off-by: Bryan Boreham * Fix up mock cache with new Name() method Signed-off-by: Bryan Boreham --- CHANGELOG.md | 1 + pkg/cache/cache.go | 2 ++ pkg/cache/inmemory.go | 6 ++++++ pkg/cache/memcached.go | 6 ++++++ pkg/cache/tracing_cache.go | 5 +++++ pkg/store/cache/caching_bucket_test.go | 4 ++++ 6 files changed, 24 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f071c41be3..dde5d56aa38 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re ### Added - [#4680](https://github.com/thanos-io/thanos/pull/4680) Query: add `exemplar.partial-response` flag to control partial response. +- [#4696](https://github.com/thanos-io/thanos/pull/4696) Query: add cache name to tracing spans. ## v0.23.0 - In Progress diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index acaa0e159d3..24e52aac00e 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -18,4 +18,6 @@ type Cache interface { // Fetch multiple keys from cache. Returns map of input keys to data. // If key isn't in the map, data for given key was not found. Fetch(ctx context.Context, keys []string) map[string][]byte + + Name() string } diff --git a/pkg/cache/inmemory.go b/pkg/cache/inmemory.go index 6a6036a03a8..8ddfe9f0b93 100644 --- a/pkg/cache/inmemory.go +++ b/pkg/cache/inmemory.go @@ -41,6 +41,7 @@ type InMemoryCache struct { logger log.Logger maxSizeBytes uint64 maxItemSizeBytes uint64 + name string mtx sync.Mutex curSize uint64 @@ -100,6 +101,7 @@ func NewInMemoryCacheWithConfig(name string, logger log.Logger, reg prometheus.R logger: logger, maxSizeBytes: uint64(config.MaxSize), maxItemSizeBytes: uint64(config.MaxItemSize), + name: name, } c.evicted = promauto.With(reg).NewCounter(prometheus.CounterOpts{ @@ -303,3 +305,7 @@ func (c *InMemoryCache) Fetch(ctx context.Context, keys []string) map[string][]b } return results } + +func (c *InMemoryCache) Name() string { + return c.name +} diff --git a/pkg/cache/memcached.go b/pkg/cache/memcached.go index 04c249576e2..695b78e6cd8 100644 --- a/pkg/cache/memcached.go +++ b/pkg/cache/memcached.go @@ -19,6 +19,7 @@ import ( type MemcachedCache struct { logger log.Logger memcached cacheutil.MemcachedClient + name string // Metrics. 
requests prometheus.Counter @@ -30,6 +31,7 @@ func NewMemcachedCache(name string, logger log.Logger, memcached cacheutil.Memca c := &MemcachedCache{ logger: logger, memcached: memcached, + name: name, } c.requests = promauto.With(reg).NewCounter(prometheus.CounterOpts{ @@ -81,3 +83,7 @@ func (c *MemcachedCache) Fetch(ctx context.Context, keys []string) map[string][] c.hits.Add(float64(len(results))) return results } + +func (c *MemcachedCache) Name() string { + return c.name +} diff --git a/pkg/cache/tracing_cache.go b/pkg/cache/tracing_cache.go index ed440c43ad5..81fb5bef8e2 100644 --- a/pkg/cache/tracing_cache.go +++ b/pkg/cache/tracing_cache.go @@ -27,6 +27,7 @@ func (t TracingCache) Store(ctx context.Context, data map[string][]byte, ttl tim func (t TracingCache) Fetch(ctx context.Context, keys []string) (result map[string][]byte) { tracing.DoWithSpan(ctx, "cache_fetch", func(spanCtx context.Context, span opentracing.Span) { + span.SetTag("name", t.Name()) span.LogKV("requested keys", len(keys)) result = t.c.Fetch(spanCtx, keys) @@ -39,3 +40,7 @@ func (t TracingCache) Fetch(ctx context.Context, keys []string) (result map[stri }) return } + +func (t TracingCache) Name() string { + return t.c.Name() +} diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index 9bf0bddcbd9..902be08f37f 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -295,6 +295,10 @@ func (m *mockCache) Fetch(_ context.Context, keys []string) map[string][]byte { return found } +func (m *mockCache) Name() string { + return "mockCache" +} + func (m *mockCache) flush() { m.cache = map[string]cacheItem{} } From 8229b4575a4c339c451ccfed17e45a9f8186b14c Mon Sep 17 00:00:00 2001 From: Aditi Ahuja <48997495+metonymic-smokey@users.noreply.github.com> Date: Thu, 23 Sep 2021 23:30:19 +0530 Subject: [PATCH 02/33] Added experimental features flag (#4679) * added enable features flag Signed-off-by: metonymic-smokey * changelog fixes Signed-off-by: metonymic-smokey * doc changes Signed-off-by: metonymic-smokey * review suggestion added Signed-off-by: metonymic-smokey * updated docs Signed-off-by: metonymic-smokey --- CHANGELOG.md | 1 + cmd/thanos/query.go | 24 ++++++++++++++++++++++++ docs/components/query.md | 3 +++ 3 files changed, 28 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dde5d56aa38..3da3bf96754 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re ### Added - [#4680](https://github.com/thanos-io/thanos/pull/4680) Query: add `exemplar.partial-response` flag to control partial response. +- [#4679](https://github.com/thanos-io/thanos/pull/4679) Added `enable-feature` flag to enable negative offsets and @ modifier, similar to Prometheus. - [#4696](https://github.com/thanos-io/thanos/pull/4696) Query: add cache name to tracing spans. ## v0.23.0 - In Progress diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 4d893f212ed..50884810e4a 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -52,6 +52,11 @@ import ( "github.com/thanos-io/thanos/pkg/ui" ) +const ( + promqlNegativeOffset = "promql-negative-offset" + promqlAtModifier = "promql-at-modifier" +) + // registerQuery registers a query command. 
func registerQuery(app *extkingpin.App) {
 	comp := component.Query
@@ -146,6 +151,8 @@ func registerQuery(app *extkingpin.App) {
 	enableMetricMetadataPartialResponse := cmd.Flag("metric-metadata.partial-response", "Enable partial response for metric metadata endpoint. --no-metric-metadata.partial-response for disabling.").
 		Hidden().Default("true").Bool()
 
+	featureList := cmd.Flag("enable-feature", "Comma separated experimental feature names to enable. The current list of features is "+promqlNegativeOffset+" and "+promqlAtModifier+".").Default("").Strings()
+
 	enableExemplarPartialResponse := cmd.Flag("exemplar.partial-response", "Enable partial response for exemplar endpoint. --no-exemplar.partial-response for disabling.").
 		Hidden().Default("true").Bool()
 
@@ -163,6 +170,16 @@ func registerQuery(app *extkingpin.App) {
 		return errors.Wrap(err, "parse federation labels")
 	}
 
+	var enableNegativeOffset, enableAtModifier bool
+	for _, feature := range *featureList {
+		if feature == promqlNegativeOffset {
+			enableNegativeOffset = true
+		}
+		if feature == promqlAtModifier {
+			enableAtModifier = true
+		}
+	}
+
 	if dup := firstDuplicate(*stores); dup != "" {
 		return errors.Errorf("Address %s is duplicated for --store flag.", dup)
 	}
@@ -266,6 +283,8 @@ func registerQuery(app *extkingpin.App) {
 			*defaultMetadataTimeRange,
 			*strictStores,
 			*webDisableCORS,
+			enableAtModifier,
+			enableNegativeOffset,
 			component.Query,
 		)
 	})
@@ -329,6 +348,8 @@ func runQuery(
 	defaultMetadataTimeRange time.Duration,
 	strictStores []string,
 	disableCORS bool,
+	enableAtModifier bool,
+	enableNegativeOffset bool,
 	comp component.Component,
 ) error {
 	// TODO(bplotka in PR #513 review): Move arguments into struct.
@@ -456,6 +477,9 @@ func runQuery(
 		cancelRun()
 	})
 
+	engineOpts.EnableAtModifier = enableAtModifier
+	engineOpts.EnableNegativeOffset = enableNegativeOffset
+
 	ctxUpdate, cancelUpdate := context.WithCancel(context.Background())
 	g.Add(func() error {
 		for {
diff --git a/docs/components/query.md b/docs/components/query.md
index 63f7dd0a2cf..41e2cccf896 100644
--- a/docs/components/query.md
+++ b/docs/components/query.md
@@ -252,6 +252,9 @@ Query node exposing PromQL enabled Query API with data retrieved from multiple
 store nodes.
 
 Flags:
+      --enable-feature= ...  Comma separated experimental feature names to
+                             enable. The current list of features is
+                             promql-negative-offset and promql-at-modifier.
       --grpc-address="0.0.0.0:10901"
                              Listen ip:port address for gRPC endpoints
                              (StoreAPI).
Make sure this address is routable From 0d524a91ff19320ce183d404254efcd03eb8643f Mon Sep 17 00:00:00 2001 From: Augustin Husson Date: Fri, 24 Sep 2021 04:10:32 +0200 Subject: [PATCH 03/33] UI: upgrade typescript to v4 and fix some eslint warnings (#4692) Signed-off-by: Augustin Husson --- pkg/ui/react-app/package-lock.json | 14 +++--- pkg/ui/react-app/package.json | 2 +- .../src/pages/graph/ExpressionInput.tsx | 20 ++++---- pkg/ui/react-app/src/pages/graph/Graph.tsx | 18 ++++---- .../react-app/src/pages/graph/GraphHelpers.ts | 9 ++-- pkg/ui/react-app/src/pages/graph/Legend.tsx | 46 ++++++++++--------- pkg/ui/react-app/src/pages/graph/Panel.tsx | 16 +++---- .../react-app/src/pages/graph/TimeInput.tsx | 10 ++-- pkg/ui/react-app/src/utils/index.ts | 22 +++++---- pkg/ui/react-app/tsconfig.json | 2 +- 10 files changed, 83 insertions(+), 76 deletions(-) diff --git a/pkg/ui/react-app/package-lock.json b/pkg/ui/react-app/package-lock.json index bea0b169cd7..9b41e52944b 100644 --- a/pkg/ui/react-app/package-lock.json +++ b/pkg/ui/react-app/package-lock.json @@ -82,7 +82,7 @@ "prettier": "^2.3.2", "react-scripts": "^4.0.3", "sinon": "^9.2.4", - "typescript": "3.9.9" + "typescript": "^4.4.3" }, "optionalDependencies": { "fsevents": "^2.3.2" @@ -23153,9 +23153,9 @@ } }, "node_modules/typescript": { - "version": "3.9.9", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.9.9.tgz", - "integrity": "sha512-kdMjTiekY+z/ubJCATUPlRDl39vXYiMV9iyeMuEuXZh2we6zz80uovNN2WlAxmmdE/Z/YQe+EbOEXB5RHEED3w==", + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.4.3.tgz", + "integrity": "sha512-4xfscpisVgqqDfPaJo5vkd+Qd/ItkoagnHpufr+i2QCHBsNYp+G7UAoyFl8aPtx879u38wPV65rZ8qbGZijalA==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -43794,9 +43794,9 @@ } }, "typescript": { - "version": "3.9.9", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.9.9.tgz", - "integrity": "sha512-kdMjTiekY+z/ubJCATUPlRDl39vXYiMV9iyeMuEuXZh2we6zz80uovNN2WlAxmmdE/Z/YQe+EbOEXB5RHEED3w==", + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.4.3.tgz", + "integrity": "sha512-4xfscpisVgqqDfPaJo5vkd+Qd/ItkoagnHpufr+i2QCHBsNYp+G7UAoyFl8aPtx879u38wPV65rZ8qbGZijalA==", "dev": true }, "unbox-primitive": { diff --git a/pkg/ui/react-app/package.json b/pkg/ui/react-app/package.json index e418507b374..50c40136e36 100644 --- a/pkg/ui/react-app/package.json +++ b/pkg/ui/react-app/package.json @@ -97,7 +97,7 @@ "prettier": "^2.3.2", "react-scripts": "^4.0.3", "sinon": "^9.2.4", - "typescript": "3.9.9" + "typescript": "^4.4.3" }, "proxy": "http://localhost:10902", "jest": { diff --git a/pkg/ui/react-app/src/pages/graph/ExpressionInput.tsx b/pkg/ui/react-app/src/pages/graph/ExpressionInput.tsx index ade2a9e5f5a..e1b5a20a9b5 100644 --- a/pkg/ui/react-app/src/pages/graph/ExpressionInput.tsx +++ b/pkg/ui/react-app/src/pages/graph/ExpressionInput.tsx @@ -33,34 +33,34 @@ class ExpressionInput extends Component { + setHeight = (): void => { const { offsetHeight, clientHeight, scrollHeight } = this.exprInputRef.current!; const offset = offsetHeight - clientHeight; // Needed in order for the height to be more accurate. 
this.setState({ height: scrollHeight + offset }); }; - handleInput = () => { + handleInput = (): void => { this.setValue(this.exprInputRef.current!.value); }; - setValue = (value: string | null) => { + setValue = (value: string | null): void => { const { onExpressionChange } = this.props; onExpressionChange(value as string); this.setState({ height: 'auto' }, this.setHeight); }; - componentDidUpdate(prevProps: ExpressionInputProps) { + componentDidUpdate(prevProps: ExpressionInputProps): void { const { value } = this.props; if (value !== prevProps.value) { this.setValue(value); } } - handleKeyPress = (event: React.KeyboardEvent) => { + handleKeyPress = (event: React.KeyboardEvent): void => { const { executeQuery } = this.props; if (event.key === 'Enter' && !event.shiftKey) { executeQuery(); @@ -68,18 +68,18 @@ class ExpressionInput extends Component { + getSearchMatches = (input: string, expressions: string[]): FuzzyResult[] => { return fuz.filter(input.replace(/ /g, ''), expressions); }; - createAutocompleteSection = (downshift: ControllerStateAndHelpers) => { + createAutocompleteSection = (downshift: ControllerStateAndHelpers): JSX.Element | null => { const { inputValue = '', closeMenu, highlightedIndex } = downshift; const { autocompleteSections } = this.props; let index = 0; const sections = - inputValue!.length && this.props.enableAutocomplete + inputValue?.length && this.props.enableAutocomplete ? Object.entries(autocompleteSections).reduce((acc, [title, items]) => { - const matches = this.getSearchMatches(inputValue!, items); + const matches = this.getSearchMatches(inputValue, items); return !matches.length ? acc : [ diff --git a/pkg/ui/react-app/src/pages/graph/Graph.tsx b/pkg/ui/react-app/src/pages/graph/Graph.tsx index 09b15c8252d..1620bee1cd0 100644 --- a/pkg/ui/react-app/src/pages/graph/Graph.tsx +++ b/pkg/ui/react-app/src/pages/graph/Graph.tsx @@ -44,7 +44,7 @@ class Graph extends PureComponent { chartData: normalizeData(this.props), }; - componentDidUpdate(prevProps: GraphProps) { + componentDidUpdate(prevProps: GraphProps): void { const { data, stacked, useLocalTime } = this.props; if (prevProps.data !== data) { this.selectedSeriesIndexes = []; @@ -64,11 +64,11 @@ class Graph extends PureComponent { } } - componentDidMount() { + componentDidMount(): void { this.plot(); } - componentWillUnmount() { + componentWillUnmount(): void { this.destroyPlot(); } @@ -81,20 +81,20 @@ class Graph extends PureComponent { this.$chart = $.plot($(this.chartRef.current), data, getOptions(this.props.stacked, this.props.useLocalTime)); }; - destroyPlot = () => { + destroyPlot = (): void => { if (isPresent(this.$chart)) { this.$chart.destroy(); } }; - plotSetAndDraw(data: GraphSeries[] = this.state.chartData) { + plotSetAndDraw(data: GraphSeries[] = this.state.chartData): void { if (isPresent(this.$chart)) { this.$chart.setData(data); this.$chart.draw(); } } - handleSeriesSelect = (selected: number[], selectedIndex: number) => { + handleSeriesSelect = (selected: number[], selectedIndex: number): void => { const { chartData } = this.state; this.plot( this.selectedSeriesIndexes.length === 1 && this.selectedSeriesIndexes.includes(selectedIndex) @@ -113,18 +113,18 @@ class Graph extends PureComponent { }); }; - handleLegendMouseOut = () => { + handleLegendMouseOut = (): void => { cancelAnimationFrame(this.rafID); this.plotSetAndDraw(); }; - handleResize = () => { + handleResize = (): void => { if (isPresent(this.$chart)) { this.plot(this.$chart.getData() as GraphSeries[]); } }; - render() { + 
render(): JSX.Element { const { chartData } = this.state; return (
diff --git a/pkg/ui/react-app/src/pages/graph/GraphHelpers.ts b/pkg/ui/react-app/src/pages/graph/GraphHelpers.ts index b8357622cf0..228dcb1cbcb 100644 --- a/pkg/ui/react-app/src/pages/graph/GraphHelpers.ts +++ b/pkg/ui/react-app/src/pages/graph/GraphHelpers.ts @@ -53,7 +53,7 @@ export const formatValue = (y: number | null): string => { throw Error("couldn't format a value, this is a bug"); }; -export const getHoverColor = (color: string, opacity: number, stacked: boolean) => { +export const getHoverColor = (color: string, opacity: number, stacked: boolean): string => { const { r, g, b } = $.color.parse(color); if (!stacked) { return `rgba(${r}, ${g}, ${b}, ${opacity})`; @@ -137,7 +137,10 @@ export const getOptions = (stacked: boolean, useLocalTime: boolean): jquery.flot }; // This was adapted from Flot's color generation code. -export const getColors = (data: { resultType: string; result: Array<{ metric: Metric; values: [number, string][] }> }) => { +export const getColors = (data: { + resultType: string; + result: Array<{ metric: Metric; values: [number, string][] }>; +}): Color[] => { const colorPool = ['#edc240', '#afd8f8', '#cb4b4b', '#4da74d', '#9440ed']; const colorPoolSize = colorPool.length; let variation = 0; @@ -189,7 +192,7 @@ export const normalizeData = ({ queryParams, data }: GraphProps): GraphSeries[] }); }; -export const parseValue = (value: string) => { +export const parseValue = (value: string): null | number => { const val = parseFloat(value); // "+Inf", "-Inf", "+Inf" will be parsed into NaN by parseFloat(). They // can't be graphed, so show them as gaps (null). diff --git a/pkg/ui/react-app/src/pages/graph/Legend.tsx b/pkg/ui/react-app/src/pages/graph/Legend.tsx index 153f82f98a4..5e98904eea4 100644 --- a/pkg/ui/react-app/src/pages/graph/Legend.tsx +++ b/pkg/ui/react-app/src/pages/graph/Legend.tsx @@ -18,36 +18,38 @@ export class Legend extends PureComponent { state = { selectedIndexes: [] as number[], }; - componentDidUpdate(prevProps: LegendProps) { + componentDidUpdate(prevProps: LegendProps): void { if (this.props.shouldReset && prevProps.shouldReset !== this.props.shouldReset) { this.setState({ selectedIndexes: [] }); } } - handleSeriesSelect = (index: number) => (ev: React.MouseEvent) => { - // TODO: add proper event type - const { selectedIndexes } = this.state; + handleSeriesSelect = + (index: number) => + (ev: React.MouseEvent): void => { + // TODO: add proper event type + const { selectedIndexes } = this.state; - let selected = [index]; - if (ev.ctrlKey || ev.metaKey) { - const { chartData } = this.props; - if (selectedIndexes.includes(index)) { - selected = selectedIndexes.filter((idx) => idx !== index); - } else { - selected = - // Flip the logic - In case none is selected ctrl + click should deselect clicked series. - selectedIndexes.length === 0 - ? chartData.reduce((acc, _, i) => (i === index ? acc : [...acc, i]), []) - : [...selectedIndexes, index]; // Select multiple. + let selected = [index]; + if (ev.ctrlKey || ev.metaKey) { + const { chartData } = this.props; + if (selectedIndexes.includes(index)) { + selected = selectedIndexes.filter((idx) => idx !== index); + } else { + selected = + // Flip the logic - In case none is selected ctrl + click should deselect clicked series. + selectedIndexes.length === 0 + ? chartData.reduce((acc, _, i) => (i === index ? acc : [...acc, i]), []) + : [...selectedIndexes, index]; // Select multiple. 
+ } + } else if (selectedIndexes.length === 1 && selectedIndexes.includes(index)) { + selected = []; } - } else if (selectedIndexes.length === 1 && selectedIndexes.includes(index)) { - selected = []; - } - this.setState({ selectedIndexes: selected }); - this.props.onSeriesToggle(selected, index); - }; + this.setState({ selectedIndexes: selected }); + this.props.onSeriesToggle(selected, index); + }; - render() { + render(): JSX.Element { const { chartData, onLegendMouseOut, onHover } = this.props; const { selectedIndexes } = this.state; const canUseHover = chartData.length > 1 && selectedIndexes.length === 0; diff --git a/pkg/ui/react-app/src/pages/graph/Panel.tsx b/pkg/ui/react-app/src/pages/graph/Panel.tsx index 35a23963065..0434f57a5b6 100644 --- a/pkg/ui/react-app/src/pages/graph/Panel.tsx +++ b/pkg/ui/react-app/src/pages/graph/Panel.tsx @@ -95,7 +95,7 @@ class Panel extends Component { this.handleStoreMatchChange = this.handleStoreMatchChange.bind(this); } - componentDidUpdate({ options: prevOpts }: PanelProps) { + componentDidUpdate({ options: prevOpts }: PanelProps): void { const { endTime, range, @@ -120,7 +120,7 @@ class Panel extends Component { } } - componentDidMount() { + componentDidMount(): void { this.executeQuery(); } @@ -249,24 +249,24 @@ class Panel extends Component { return this.props.options.endTime; }; - handleChangeEndTime = (endTime: number | null) => { + handleChangeEndTime = (endTime: number | null): void => { this.setOptions({ endTime: endTime }); }; - handleChangeResolution = (resolution: number | null) => { + handleChangeResolution = (resolution: number | null): void => { this.setOptions({ resolution: resolution }); }; - handleChangeMaxSourceResolution = (maxSourceResolution: string) => { + handleChangeMaxSourceResolution = (maxSourceResolution: string): void => { this.setOptions({ maxSourceResolution }); }; - handleChangeType = (type: PanelType) => { + handleChangeType = (type: PanelType): void => { this.setState({ data: null }); this.setOptions({ type: type }); }; - handleChangeStacking = (stacked: boolean) => { + handleChangeStacking = (stacked: boolean): void => { this.setOptions({ stacked: stacked }); }; @@ -286,7 +286,7 @@ class Panel extends Component { this.setState({ error: null }); }; - render() { + render(): JSX.Element { const { pastQueries, metricNames, options, id, stores } = this.props; return (
diff --git a/pkg/ui/react-app/src/pages/graph/TimeInput.tsx b/pkg/ui/react-app/src/pages/graph/TimeInput.tsx index e7b111c609e..d9382805890 100644 --- a/pkg/ui/react-app/src/pages/graph/TimeInput.tsx +++ b/pkg/ui/react-app/src/pages/graph/TimeInput.tsx @@ -39,7 +39,7 @@ class TimeInput extends Component { return this.props.time || moment().valueOf(); }; - calcShiftRange = () => this.props.range / 2; + calcShiftRange = (): number => this.props.range / 2; increaseTime = (): void => { const time = this.getBaseTime() + this.calcShiftRange(); @@ -59,7 +59,7 @@ class TimeInput extends Component { return this.props.useLocalTime ? moment.tz.guess() : 'UTC'; }; - componentDidMount() { + componentDidMount(): void { this.$time = $(this.timeInputRef.current!); this.$time.datetimepicker({ @@ -85,11 +85,11 @@ class TimeInput extends Component { }); } - componentWillUnmount() { + componentWillUnmount(): void { this.$time.datetimepicker('destroy'); } - componentDidUpdate(prevProps: TimeInputProps) { + componentDidUpdate(prevProps: TimeInputProps): void { const { time, useLocalTime } = this.props; if (prevProps.time !== time) { this.$time.datetimepicker('date', time ? moment(time) : null); @@ -99,7 +99,7 @@ class TimeInput extends Component { } } - render() { + render(): JSX.Element { return ( diff --git a/pkg/ui/react-app/src/utils/index.ts b/pkg/ui/react-app/src/utils/index.ts index fce9feabaa9..fe02722d5d7 100644 --- a/pkg/ui/react-app/src/utils/index.ts +++ b/pkg/ui/react-app/src/utils/index.ts @@ -4,11 +4,11 @@ import { PanelOptions, PanelType, PanelDefaultOptions } from '../pages/graph/Pan import { PanelMeta } from '../pages/graph/PanelList'; import { queryURL } from '../thanos/config'; -export const generateID = () => { +export const generateID = (): string => { return `_${Math.random().toString(36).substr(2, 9)}`; }; -export const byEmptyString = (p: string) => p.length > 0; +export const byEmptyString = (p: string): boolean => p.length > 0; export const isPresent = (obj: T): obj is NonNullable => obj !== null && obj !== undefined; @@ -27,7 +27,7 @@ export const escapeHTML = (str: string): string => { }); }; -export const metricToSeriesName = (labels: { [key: string]: string }) => { +export const metricToSeriesName = (labels: { [key: string]: string }): string => { if (labels === null) { return 'scalar'; } @@ -226,11 +226,13 @@ export const parseOption = (param: string): Partial => { return {}; }; -export const formatParam = (key: string) => (paramName: string, value: number | string | boolean) => { - return `g${key}.${paramName}=${encodeURIComponent(value)}`; -}; +export const formatParam = + (key: string) => + (paramName: string, value: number | string | boolean): string => { + return `g${key}.${paramName}=${encodeURIComponent(value)}`; + }; -export const toQueryString = ({ key, options }: PanelMeta) => { +export const toQueryString = ({ key, options }: PanelMeta): string => { const formatWithKey = formatParam(key); const { expr, @@ -260,15 +262,15 @@ export const toQueryString = ({ key, options }: PanelMeta) => { return urlParams.filter(byEmptyString).join('&'); }; -export const encodePanelOptionsToQueryString = (panels: PanelMeta[]) => { +export const encodePanelOptionsToQueryString = (panels: PanelMeta[]): string => { return `?${panels.map(toQueryString).join('&')}`; }; -export const createExpressionLink = (expr: string) => { +export const createExpressionLink = (expr: string): string => { return `../graph?g0.expr=${encodeURIComponent(expr)}&g0.tab=1&g0.stacked=0&g0.range_input=1h`; }; -export 
const createExternalExpressionLink = (expr: string) => {
+export const createExternalExpressionLink = (expr: string): string => {
   const expLink = createExpressionLink(expr);
   return `${queryURL}${expLink.replace(/^\.\./, '')}`;
 };
diff --git a/pkg/ui/react-app/tsconfig.json b/pkg/ui/react-app/tsconfig.json
index b8a3b865632..c062f25ccd2 100644
--- a/pkg/ui/react-app/tsconfig.json
+++ b/pkg/ui/react-app/tsconfig.json
@@ -17,7 +17,7 @@
     "resolveJsonModule": true,
     "isolatedModules": true,
     "noEmit": true,
-    "jsx": "react",
+    "jsx": "react-jsx",
     "noFallthroughCasesInSwitch": true
   },
   "include": [

From d5351b042845e6dc5a385dbe6498af125ce57bdc Mon Sep 17 00:00:00 2001
From: Arunprasad Rajkumar
Date: Fri, 24 Sep 2021 21:51:37 +0530
Subject: [PATCH 04/33] Adjust and rename `ThanosSidecarUnhealthy` to
 `ThanosSidecarNoConnectionToStartedPrometheus`; Remove
 `ThanosSidecarPrometheusDown` alert; Remove unused
 `thanos_sidecar_last_heartbeat_success_time_seconds` metrics (#4508)

* Refactor sidecar alerts

Prior to this fix, ThanosSidecarUnhealthy would fire even when Prometheus is busy with WAL replay. This would trigger a false positive alert. This PR considers the prometheus_tsdb_data_replay_duration_seconds metric from Prometheus for the ThanosSidecarUnhealthy alert. In order to correlate Thanos and Prometheus metrics we need to specify common label(s), which can be configured through the thanosPrometheusCommonDimensions jsonnet variable. This PR also removes ThanosSidecarPrometheusDown as it would fire at the same time as ThanosSidecarUnhealthy.

Fixes https://github.com/thanos-io/thanos/issues/3915.

Co-authored-by: Bartlomiej Plotka

Signed-off-by: Arunprasad Rajkumar

* Rename ThanosSidecarUnhealthy to ThanosSidecarNoConnectionToStartedPrometheus

Signed-off-by: Arunprasad Rajkumar

* Simplify ThanosSidecarNoConnectionToStartedPrometheus using thanos_sidecar_prometheus_up

Signed-off-by: Arunprasad Rajkumar

* Remove unused implementation of thanos_sidecar_last_heartbeat_success_time_seconds metric

Signed-off-by: Arunprasad Rajkumar

Co-authored-by: Bartlomiej Plotka
---
 CHANGELOG.md                   |   4 +
 cmd/thanos/sidecar.go          |   6 --
 examples/alerts/alerts.md      |  24 ++----
 examples/alerts/alerts.yaml    |  24 ++----
 examples/alerts/tests.yaml     | 133 ++++++++++-----------------------
 mixin/README.md                |   1 +
 mixin/alerts/sidecar.libsonnet |  25 ++-----
 mixin/config.libsonnet         |   1 +
 mixin/runbook.md               |   3 +-
 pkg/rules/rules_test.go        |   2 +-
 10 files changed, 71 insertions(+), 152 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3da3bf96754..29fbfe848f0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,10 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
 - [#4679](https://github.com/thanos-io/thanos/pull/4679) Added `enable-feature` flag to enable negative offsets and @ modifier, similar to Prometheus.
 - [#4696](https://github.com/thanos-io/thanos/pull/4696) Query: add cache name to tracing spans.
 
+### Fixed
+
+- [#4508](https://github.com/thanos-io/thanos/pull/4508) Adjust and rename `ThanosSidecarUnhealthy` to `ThanosSidecarNoConnectionToStartedPrometheus`; Remove `ThanosSidecarPrometheusDown` alert; Remove unused `thanos_sidecar_last_heartbeat_success_time_seconds` metrics.
+ ## v0.23.0 - In Progress ### Added diff --git a/cmd/thanos/sidecar.go b/cmd/thanos/sidecar.go index fee36d65374..fbed45f4fa9 100644 --- a/cmd/thanos/sidecar.go +++ b/cmd/thanos/sidecar.go @@ -138,10 +138,6 @@ func runSidecar( Name: "thanos_sidecar_prometheus_up", Help: "Boolean indicator whether the sidecar can reach its Prometheus peer.", }) - lastHeartbeat := promauto.With(reg).NewGauge(prometheus.GaugeOpts{ - Name: "thanos_sidecar_last_heartbeat_success_time_seconds", - Help: "Timestamp of the last successful heartbeat in seconds.", - }) ctx, cancel := context.WithCancel(context.Background()) g.Add(func() error { @@ -191,7 +187,6 @@ func runSidecar( ) promUp.Set(1) statusProber.Ready() - lastHeartbeat.SetToCurrentTime() return nil }) if err != nil { @@ -213,7 +208,6 @@ func runSidecar( promUp.Set(0) } else { promUp.Set(1) - lastHeartbeat.SetToCurrentTime() } return nil diff --git a/examples/alerts/alerts.md b/examples/alerts/alerts.md index 0d54adb4215..b274f4579f7 100644 --- a/examples/alerts/alerts.md +++ b/examples/alerts/alerts.md @@ -296,16 +296,6 @@ rules: ```yaml mdox-exec="cat examples/tmp/thanos-sidecar.yaml" name: thanos-sidecar rules: -- alert: ThanosSidecarPrometheusDown - annotations: - description: Thanos Sidecar {{$labels.instance}} cannot connect to Prometheus. - runbook_url: https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarprometheusdown - summary: Thanos Sidecar cannot connect to Prometheus - expr: | - thanos_sidecar_prometheus_up{job=~".*thanos-sidecar.*"} == 0 - for: 5m - labels: - severity: critical - alert: ThanosSidecarBucketOperationsFailed annotations: description: Thanos Sidecar {{$labels.instance}} bucket operations are failing @@ -316,14 +306,16 @@ rules: for: 5m labels: severity: critical -- alert: ThanosSidecarUnhealthy +- alert: ThanosSidecarNoConnectionToStartedPrometheus annotations: - description: Thanos Sidecar {{$labels.instance}} is unhealthy for more than {{$value}} - seconds. - runbook_url: https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarunhealthy - summary: Thanos Sidecar is unhealthy. + description: Thanos Sidecar {{$labels.instance}} is unhealthy. + runbook_url: https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarnoconnectiontostartedprometheus + summary: Thanos Sidecar cannot access Prometheus, even though Prometheus seems + healthy and has reloaded WAL. expr: | - time() - max by (job, instance) (thanos_sidecar_last_heartbeat_success_time_seconds{job=~".*thanos-sidecar.*"}) >= 240 + thanos_sidecar_prometheus_up{job=~".*thanos-sidecar.*"} == 0 + AND on (namespace, pod) + prometheus_tsdb_data_replay_duration_seconds != 0 for: 5m labels: severity: critical diff --git a/examples/alerts/alerts.yaml b/examples/alerts/alerts.yaml index 8c0d7d7340d..867a88984b5 100644 --- a/examples/alerts/alerts.yaml +++ b/examples/alerts/alerts.yaml @@ -301,16 +301,6 @@ groups: severity: warning - name: thanos-sidecar rules: - - alert: ThanosSidecarPrometheusDown - annotations: - description: Thanos Sidecar {{$labels.instance}} cannot connect to Prometheus. 
- runbook_url: https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarprometheusdown - summary: Thanos Sidecar cannot connect to Prometheus - expr: | - thanos_sidecar_prometheus_up{job=~".*thanos-sidecar.*"} == 0 - for: 5m - labels: - severity: critical - alert: ThanosSidecarBucketOperationsFailed annotations: description: Thanos Sidecar {{$labels.instance}} bucket operations are failing @@ -321,14 +311,16 @@ groups: for: 5m labels: severity: critical - - alert: ThanosSidecarUnhealthy + - alert: ThanosSidecarNoConnectionToStartedPrometheus annotations: - description: Thanos Sidecar {{$labels.instance}} is unhealthy for more than - {{$value}} seconds. - runbook_url: https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarunhealthy - summary: Thanos Sidecar is unhealthy. + description: Thanos Sidecar {{$labels.instance}} is unhealthy. + runbook_url: https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarnoconnectiontostartedprometheus + summary: Thanos Sidecar cannot access Prometheus, even though Prometheus seems + healthy and has reloaded WAL. expr: | - time() - max by (job, instance) (thanos_sidecar_last_heartbeat_success_time_seconds{job=~".*thanos-sidecar.*"}) >= 240 + thanos_sidecar_prometheus_up{job=~".*thanos-sidecar.*"} == 0 + AND on (namespace, pod) + prometheus_tsdb_data_replay_duration_seconds != 0 for: 5m labels: severity: critical diff --git a/examples/alerts/tests.yaml b/examples/alerts/tests.yaml index 64207c46f79..d04662243bc 100644 --- a/examples/alerts/tests.yaml +++ b/examples/alerts/tests.yaml @@ -7,127 +7,74 @@ evaluation_interval: 1m tests: - interval: 1m input_series: - - series: 'thanos_sidecar_last_heartbeat_success_time_seconds{namespace="production", job="thanos-sidecar", instance="thanos-sidecar-0"}' - values: '5 10 43 17 11 0 0 0' - - series: 'thanos_sidecar_last_heartbeat_success_time_seconds{namespace="production", job="thanos-sidecar", instance="thanos-sidecar-1"}' - values: '4 9 42 15 10 0 0 0' - promql_expr_test: - - expr: time() - eval_time: 1m - exp_samples: - - labels: '{}' - value: 60 - - expr: time() - eval_time: 2m - exp_samples: - - labels: '{}' - value: 120 - - expr: max(thanos_sidecar_last_heartbeat_success_time_seconds{job="thanos-sidecar"}) by (job, instance) - eval_time: 2m - exp_samples: - - labels: '{job="thanos-sidecar", instance="thanos-sidecar-0"}' - value: 43 - - labels: '{job="thanos-sidecar", instance="thanos-sidecar-1"}' - value: 42 - - expr: max(thanos_sidecar_last_heartbeat_success_time_seconds{job="thanos-sidecar"}) by (job, instance) - eval_time: 10m - exp_samples: - - labels: '{job="thanos-sidecar", instance="thanos-sidecar-0"}' - value: 0 - - labels: '{job="thanos-sidecar", instance="thanos-sidecar-1"}' - value: 0 - - expr: max(thanos_sidecar_last_heartbeat_success_time_seconds{job="thanos-sidecar"}) by (job, instance) - eval_time: 11m - exp_samples: - - labels: '{job="thanos-sidecar", instance="thanos-sidecar-0"}' - value: 0 - - labels: '{job="thanos-sidecar", instance="thanos-sidecar-1"}' - value: 0 - - expr: time() - max(thanos_sidecar_last_heartbeat_success_time_seconds{job="thanos-sidecar"}) by (job, instance) - eval_time: 10m - exp_samples: - - labels: '{job="thanos-sidecar", instance="thanos-sidecar-0"}' - value: 600 - - labels: '{job="thanos-sidecar", instance="thanos-sidecar-1"}' - value: 600 - - expr: time() - max(thanos_sidecar_last_heartbeat_success_time_seconds{job="thanos-sidecar"}) by (job, instance) - eval_time: 11m - 
exp_samples: - - labels: '{job="thanos-sidecar", instance="thanos-sidecar-0"}' - value: 660 - - labels: '{job="thanos-sidecar", instance="thanos-sidecar-1"}' - value: 660 - - expr: time() - max(thanos_sidecar_last_heartbeat_success_time_seconds{job="thanos-sidecar"}) by (job, instance) >= 600 - eval_time: 12m - exp_samples: - - labels: '{job="thanos-sidecar", instance="thanos-sidecar-0"}' - value: 720 - - labels: '{job="thanos-sidecar", instance="thanos-sidecar-1"}' - value: 720 + - series: 'thanos_sidecar_prometheus_up{namespace="production", job="thanos-sidecar", instance="thanos-sidecar-0", pod="prometheus-0"}' + values: '1x5 0x15' + - series: 'thanos_sidecar_prometheus_up{namespace="production", job="thanos-sidecar", instance="thanos-sidecar-1", pod="prometheus-1"}' + values: '1x4 0x15' + - series: 'prometheus_tsdb_data_replay_duration_seconds{namespace="production", job="prometheus-k8s", instance="prometheus-k8s-0", pod="prometheus-0"}' + values: '4x5 0x5 5x15' + - series: 'prometheus_tsdb_data_replay_duration_seconds{namespace="production", job="prometheus-k8s", instance="prometheus-k8s-1", pod="prometheus-1"}' + values: '10x14 0x6' alert_rule_test: - eval_time: 1m - alertname: ThanosSidecarUnhealthy + alertname: ThanosSidecarNoConnectionToStartedPrometheus - eval_time: 2m - alertname: ThanosSidecarUnhealthy + alertname: ThanosSidecarNoConnectionToStartedPrometheus - eval_time: 3m - alertname: ThanosSidecarUnhealthy + alertname: ThanosSidecarNoConnectionToStartedPrometheus - eval_time: 10m - alertname: ThanosSidecarUnhealthy + alertname: ThanosSidecarNoConnectionToStartedPrometheus exp_alerts: - - exp_labels: - severity: critical - job: thanos-sidecar - instance: thanos-sidecar-0 - exp_annotations: - description: 'Thanos Sidecar thanos-sidecar-0 is unhealthy for more than 600 seconds.' - runbook_url: 'https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarunhealthy' - summary: 'Thanos Sidecar is unhealthy.' - exp_labels: severity: critical job: thanos-sidecar instance: thanos-sidecar-1 + namespace: production + pod: prometheus-1 exp_annotations: - description: 'Thanos Sidecar thanos-sidecar-1 is unhealthy for more than 600 seconds.' - runbook_url: 'https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarunhealthy' - summary: 'Thanos Sidecar is unhealthy.' + description: 'Thanos Sidecar thanos-sidecar-1 is unhealthy.' + runbook_url: 'https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarnoconnectiontostartedprometheus' + summary: 'Thanos Sidecar cannot access Prometheus, even though Prometheus seems healthy and has reloaded WAL.' - eval_time: 11m - alertname: ThanosSidecarUnhealthy + alertname: ThanosSidecarNoConnectionToStartedPrometheus exp_alerts: - - exp_labels: - severity: critical - job: thanos-sidecar - instance: thanos-sidecar-0 - exp_annotations: - description: 'Thanos Sidecar thanos-sidecar-0 is unhealthy for more than 660 seconds.' - runbook_url: 'https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarunhealthy' - summary: 'Thanos Sidecar is unhealthy.' - exp_labels: severity: critical job: thanos-sidecar instance: thanos-sidecar-1 + namespace: production + pod: prometheus-1 exp_annotations: - description: 'Thanos Sidecar thanos-sidecar-1 is unhealthy for more than 660 seconds.' - runbook_url: 'https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarunhealthy' - summary: 'Thanos Sidecar is unhealthy.' 
+ description: 'Thanos Sidecar thanos-sidecar-1 is unhealthy.' + runbook_url: 'https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarnoconnectiontostartedprometheus' + summary: 'Thanos Sidecar cannot access Prometheus, even though Prometheus seems healthy and has reloaded WAL.' - eval_time: 12m - alertname: ThanosSidecarUnhealthy + alertname: ThanosSidecarNoConnectionToStartedPrometheus exp_alerts: - exp_labels: severity: critical job: thanos-sidecar - instance: thanos-sidecar-0 + instance: thanos-sidecar-1 + namespace: production + pod: prometheus-1 exp_annotations: - description: 'Thanos Sidecar thanos-sidecar-0 is unhealthy for more than 720 seconds.' - runbook_url: 'https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarunhealthy' - summary: 'Thanos Sidecar is unhealthy.' + description: 'Thanos Sidecar thanos-sidecar-1 is unhealthy.' + runbook_url: 'https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarnoconnectiontostartedprometheus' + summary: 'Thanos Sidecar cannot access Prometheus, even though Prometheus seems healthy and has reloaded WAL.' + - eval_time: 20m + alertname: ThanosSidecarNoConnectionToStartedPrometheus + exp_alerts: - exp_labels: severity: critical job: thanos-sidecar - instance: thanos-sidecar-1 + instance: thanos-sidecar-0 + namespace: production + pod: prometheus-0 exp_annotations: - description: 'Thanos Sidecar thanos-sidecar-1 is unhealthy for more than 720 seconds.' - runbook_url: 'https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarunhealthy' - summary: 'Thanos Sidecar is unhealthy.' + description: 'Thanos Sidecar thanos-sidecar-0 is unhealthy.' + runbook_url: 'https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarnoconnectiontostartedprometheus' + summary: 'Thanos Sidecar cannot access Prometheus, even though Prometheus seems healthy and has reloaded WAL.' + - interval: 1m input_series: - series: 'prometheus_rule_evaluations_total{namespace="production", job="thanos-ruler", instance="thanos-ruler-0"}' diff --git a/mixin/README.md b/mixin/README.md index 4bb7ce797c8..baef01946cd 100644 --- a/mixin/README.md +++ b/mixin/README.md @@ -106,6 +106,7 @@ This project is intended to be used as a library. You can extend and customize d }, sidecar+:: { selector: 'job=~".*thanos-sidecar.*"', + thanosPrometheusCommonDimensions: 'namespace, pod', title: '%(prefix)sSidecar' % $.dashboard.prefix, }, // TODO(kakkoyun): Fix naming convention: bucketReplicate diff --git a/mixin/alerts/sidecar.libsonnet b/mixin/alerts/sidecar.libsonnet index b4682106192..5bdea985ab1 100644 --- a/mixin/alerts/sidecar.libsonnet +++ b/mixin/alerts/sidecar.libsonnet @@ -2,6 +2,7 @@ local thanos = self, sidecar+:: { selector: error 'must provide selector for Thanos Sidecar alerts', + thanosPrometheusCommonDimensions: error 'must provide commonDimensions between Thanos and Prometheus metrics for Sidecar alerts', dimensions: std.join(', ', std.objectFields(thanos.targetGroups) + ['job', 'instance']), }, prometheusAlerts+:: { @@ -10,20 +11,6 @@ { name: 'thanos-sidecar', rules: [ - { - alert: 'ThanosSidecarPrometheusDown', - annotations: { - description: 'Thanos Sidecar {{$labels.instance}}%s cannot connect to Prometheus.' 
% location, - summary: 'Thanos Sidecar cannot connect to Prometheus', - }, - expr: ||| - thanos_sidecar_prometheus_up{%(selector)s} == 0 - ||| % thanos.sidecar, - 'for': '5m', - labels: { - severity: 'critical', - }, - }, { alert: 'ThanosSidecarBucketOperationsFailed', annotations: { @@ -39,13 +26,15 @@ }, }, { - alert: 'ThanosSidecarUnhealthy', + alert: 'ThanosSidecarNoConnectionToStartedPrometheus', annotations: { - description: 'Thanos Sidecar {{$labels.instance}}%s is unhealthy for more than {{$value}} seconds.' % location, - summary: 'Thanos Sidecar is unhealthy.', + description: 'Thanos Sidecar {{$labels.instance}}%s is unhealthy.' % location, + summary: 'Thanos Sidecar cannot access Prometheus, even though Prometheus seems healthy and has reloaded WAL.', }, expr: ||| - time() - max by (%(dimensions)s) (thanos_sidecar_last_heartbeat_success_time_seconds{%(selector)s}) >= 240 + thanos_sidecar_prometheus_up{%(selector)s} == 0 + AND on (%(thanosPrometheusCommonDimensions)s) + prometheus_tsdb_data_replay_duration_seconds != 0 ||| % thanos.sidecar, 'for': '5m', labels: { diff --git a/mixin/config.libsonnet b/mixin/config.libsonnet index 634f2c7d6cd..e4d415d5ef9 100644 --- a/mixin/config.libsonnet +++ b/mixin/config.libsonnet @@ -46,6 +46,7 @@ }, sidecar+:: { selector: 'job=~".*thanos-sidecar.*"', + thanosPrometheusCommonDimensions: 'namespace, pod', title: '%(prefix)sSidecar' % $.dashboard.prefix, }, // TODO(kakkoyun): Fix naming convention: bucketReplicate diff --git a/mixin/runbook.md b/mixin/runbook.md index 03f92aed716..98e76b97820 100755 --- a/mixin/runbook.md +++ b/mixin/runbook.md @@ -85,9 +85,8 @@ |Name|Summary|Description|Severity|Runbook| |---|---|---|---|---| -|ThanosSidecarPrometheusDown|Thanos Sidecar cannot connect to Prometheus|Thanos Sidecar {{$labels.instance}} cannot connect to Prometheus.|critical|[https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarprometheusdown](https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarprometheusdown)| |ThanosSidecarBucketOperationsFailed|Thanos Sidecar bucket operations are failing|Thanos Sidecar {{$labels.instance}} bucket operations are failing|critical|[https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarbucketoperationsfailed](https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarbucketoperationsfailed)| -|ThanosSidecarUnhealthy|Thanos Sidecar is unhealthy.|Thanos Sidecar {{$labels.instance}} is unhealthy for more than {{$value}} seconds.|critical|[https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarunhealthy](https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarunhealthy)| +|ThanosSidecarNoConnectionToStartedPrometheus|Thanos Sidecar cannot access Prometheus, even though Prometheus seems healthy and has reloaded WAL.|Thanos Sidecar {{$labels.instance}} is unhealthy.|critical|[https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarnoconnectiontostartedprometheus](https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarnoconnectiontostartedprometheus)| ## thanos-store diff --git a/pkg/rules/rules_test.go b/pkg/rules/rules_test.go index 01965a9b31b..8cb73ada6b3 100644 --- a/pkg/rules/rules_test.go +++ b/pkg/rules/rules_test.go @@ -82,7 +82,7 @@ func testRulesAgainstExamples(t *testing.T, dir string, server rulespb.RulesServ { Name: "thanos-sidecar", File: 
filepath.Join(dir, "alerts.yaml"), - Rules: []*rulespb.Rule{someAlert, someAlert, someAlert}, + Rules: []*rulespb.Rule{someAlert, someAlert}, Interval: 60, PartialResponseStrategy: storepb.PartialResponseStrategy_ABORT, }, From b4b8434ca5a4a873762d73bf22fa4e0e1f43f6f1 Mon Sep 17 00:00:00 2001 From: Jimmiehan Date: Mon, 27 Sep 2021 00:49:59 +0800 Subject: [PATCH 05/33] Store: Fix (*model.TimeOrDurationValue).String() when value is negative (#4702) Signed-off-by: Jimmiehan --- pkg/model/timeduration.go | 3 ++ pkg/model/timeduration_test.go | 62 ++++++++++++++++++++++------------ 2 files changed, 43 insertions(+), 22 deletions(-) diff --git a/pkg/model/timeduration.go b/pkg/model/timeduration.go index 1d525136f09..02da2aa7698 100644 --- a/pkg/model/timeduration.go +++ b/pkg/model/timeduration.go @@ -51,6 +51,9 @@ func (tdv *TimeOrDurationValue) String() string { case tdv.Time != nil: return tdv.Time.String() case tdv.Dur != nil: + if v := *tdv.Dur; v < 0 { + return "-" + (-v).String() + } return tdv.Dur.String() } diff --git a/pkg/model/timeduration_test.go b/pkg/model/timeduration_test.go index c1b0a72a737..2ea62b67ef7 100644 --- a/pkg/model/timeduration_test.go +++ b/pkg/model/timeduration_test.go @@ -14,26 +14,44 @@ import ( ) func TestTimeOrDurationValue(t *testing.T) { - cmd := kingpin.New("test", "test") - - minTime := model.TimeOrDuration(cmd.Flag("min-time", "Start of time range limit to serve")) - - maxTime := model.TimeOrDuration(cmd.Flag("max-time", "End of time range limit to serve"). - Default("9999-12-31T23:59:59Z")) - - _, err := cmd.Parse([]string{"--min-time", "10s"}) - if err != nil { - t.Fatal(err) - } - - testutil.Equals(t, "10s", minTime.String()) - testutil.Equals(t, "9999-12-31 23:59:59 +0000 UTC", maxTime.String()) - - prevTime := timestamp.FromTime(time.Now()) - afterTime := timestamp.FromTime(time.Now().Add(15 * time.Second)) - - testutil.Assert(t, minTime.PrometheusTimestamp() > prevTime, "minTime prometheus timestamp is less than time now.") - testutil.Assert(t, minTime.PrometheusTimestamp() < afterTime, "minTime prometheus timestamp is more than time now + 15s") - - testutil.Assert(t, maxTime.PrometheusTimestamp() == 253402300799000, "maxTime is not equal to 253402300799000") + t.Run("positive", func(t *testing.T) { + cmd := kingpin.New("test", "test") + + minTime := model.TimeOrDuration(cmd.Flag("min-time", "Start of time range limit to serve")) + + maxTime := model.TimeOrDuration(cmd.Flag("max-time", "End of time range limit to serve"). 
+ Default("9999-12-31T23:59:59Z")) + + _, err := cmd.Parse([]string{"--min-time", "10s"}) + if err != nil { + t.Fatal(err) + } + + testutil.Equals(t, "10s", minTime.String()) + testutil.Equals(t, "9999-12-31 23:59:59 +0000 UTC", maxTime.String()) + + prevTime := timestamp.FromTime(time.Now()) + afterTime := timestamp.FromTime(time.Now().Add(15 * time.Second)) + + testutil.Assert(t, minTime.PrometheusTimestamp() > prevTime, "minTime prometheus timestamp is less than time now.") + testutil.Assert(t, minTime.PrometheusTimestamp() < afterTime, "minTime prometheus timestamp is more than time now + 15s") + + testutil.Assert(t, maxTime.PrometheusTimestamp() == 253402300799000, "maxTime is not equal to 253402300799000") + }) + + t.Run("negative", func(t *testing.T) { + cmd := kingpin.New("test-negative", "test-negative") + var minTime model.TimeOrDurationValue + cmd.Flag("min-time", "Start of time range limit to serve").SetValue(&minTime) + _, err := cmd.Parse([]string{"--min-time=-10s"}) + if err != nil { + t.Fatal(err) + } + testutil.Equals(t, "-10s", minTime.String()) + + prevTime := timestamp.FromTime(time.Now().Add(-15 * time.Second)) + afterTime := timestamp.FromTime(time.Now()) + testutil.Assert(t, minTime.PrometheusTimestamp() > prevTime, "minTime prometheus timestamp is less than time now - 15s.") + testutil.Assert(t, minTime.PrometheusTimestamp() < afterTime, "minTime prometheus timestamp is more than time now.") + }) } From e1bfe9d1d2abbd0be5d186f83b671b267f94ba33 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Mon, 27 Sep 2021 01:54:24 -0700 Subject: [PATCH 06/33] Add operation guide of bucket rewrite tool (#4701) * add operation guide of using bucket rewrite tool to modify object storage data Signed-off-by: Ben Ye * format docs Signed-off-by: Ben Ye * format docs again Signed-off-by: Ben Ye --- docs/operating/modify-objstore-data.md | 175 +++++++++++++++++++++++++ 1 file changed, 175 insertions(+) create mode 100644 docs/operating/modify-objstore-data.md diff --git a/docs/operating/modify-objstore-data.md b/docs/operating/modify-objstore-data.md new file mode 100644 index 00000000000..014168efc51 --- /dev/null +++ b/docs/operating/modify-objstore-data.md @@ -0,0 +1,175 @@ +# Modify series in the object storage via bucket rewrite tool + +For operational purposes, there are some use cases to manipulate data in the object storage. For example, delete some high cardinality metrics or relabel metrics if needed. This is already possible via the bucket rewrite tool. + +## Delete series + +```shell +thanos tools bucket rewrite --rewrite.to-delete-config-file config.yaml --objstore.config-file objstore.yaml --id +``` + +This is the example command to delete some data in the specified TSDB block from your object store bucket. For example, if `k8s_app_metric37` is the metric you want to delete, then the config file `config.yaml` would be: + +```yaml +- matchers: '{__name__="k8s_app_metric37"}' +``` + +Example output from my mac looks like below. Dry run mode is enabled by default to prevent unexpected series from being deleted. + +A changelog file is generated so that you can check the expected modification of the provided deletion request. 
+ +```shell +thanos tools bucket rewrite --rewrite.to-delete-config-file config.yaml --objstore.config-file ~/local-bucket-config.yaml --id 01FET1EK9BC3E0QD4886RQCM8K + +level=info ts=2021-09-25T05:47:14.87316Z caller=factory.go:49 msg="loading bucket configuration" +level=info ts=2021-09-25T05:47:14.875365Z caller=tools_bucket.go:1078 msg="downloading block" source=01FET1EK9BC3E0QD4886RQCM8K +level=info ts=2021-09-25T05:47:14.887816Z caller=tools_bucket.go:1115 msg="changelog will be available" file=/var/folders/ny/yy113mqs6szcpjy2qrnhq9rh0000gq/T/thanos-rewrite/01FGDQWKJ7H29B3V4HCQ691WN9/change.log +level=info ts=2021-09-25T05:47:14.912544Z caller=tools_bucket.go:1130 msg="starting rewrite for block" source=01FET1EK9BC3E0QD4886RQCM8K new=01FGDQWKJ7H29B3V4HCQ691WN9 toDelete="- matchers: '{__name__=\"k8s_app_metric37\"}'\n" toRelabel= +level=info ts=2021-09-25T05:47:14.919438Z caller=compactor.go:41 msg="processed 10.00% of 15000 series" +level=info ts=2021-09-25T05:47:14.925442Z caller=compactor.go:41 msg="processed 20.00% of 15000 series" +level=info ts=2021-09-25T05:47:14.930263Z caller=compactor.go:41 msg="processed 30.00% of 15000 series" +level=info ts=2021-09-25T05:47:14.934325Z caller=compactor.go:41 msg="processed 40.00% of 15000 series" +level=info ts=2021-09-25T05:47:14.939466Z caller=compactor.go:41 msg="processed 50.00% of 15000 series" +level=info ts=2021-09-25T05:47:14.944513Z caller=compactor.go:41 msg="processed 60.00% of 15000 series" +level=info ts=2021-09-25T05:47:14.950254Z caller=compactor.go:41 msg="processed 70.00% of 15000 series" +level=info ts=2021-09-25T05:47:14.955336Z caller=compactor.go:41 msg="processed 80.00% of 15000 series" +level=info ts=2021-09-25T05:47:14.960193Z caller=compactor.go:41 msg="processed 90.00% of 15000 series" +level=info ts=2021-09-25T05:47:14.964705Z caller=compactor.go:41 msg="processed 100.00% of 15000 series" +level=info ts=2021-09-25T05:47:14.964768Z caller=tools_bucket.go:1136 msg="dry run finished. Changes should be printed to stderr" +level=info ts=2021-09-25T05:47:14.965101Z caller=main.go:160 msg=exiting +``` + +Below is an example output of the changelog. All the series that match the given deletion config will be deleted. The last column `[{1630713615001 1630715400001}]` represents the start and end time of the series. 
+
+```shell
+cat /var/folders/ny/yy113mqs6szcpjy2qrnhq9rh0000gq/T/thanos-rewrite/01FGDQWKJ7H29B3V4HCQ691WN9/change.log
+
+Deleted {__blockgen_target__="1", __name__="k8s_app_metric37", next_rollout_time="2021-09-03 23:30:00 +0000 UTC"} [{1630713615001 1630715400001}]
+Deleted {__blockgen_target__="1", __name__="k8s_app_metric37", next_rollout_time="2021-09-04 00:30:00 +0000 UTC"} [{1630715415000 1630719015000}]
+Deleted {__blockgen_target__="1", __name__="k8s_app_metric37", next_rollout_time="2021-09-04 01:30:00 +0000 UTC"} [{1630719015000 1630720815000}]
+Deleted {__blockgen_target__="10", __name__="k8s_app_metric37", next_rollout_time="2021-09-03 23:30:00 +0000 UTC"} [{1630713615001 1630715400001}]
+Deleted {__blockgen_target__="10", __name__="k8s_app_metric37", next_rollout_time="2021-09-04 00:30:00 +0000 UTC"} [{1630715415000 1630719015000}]
+Deleted {__blockgen_target__="10", __name__="k8s_app_metric37", next_rollout_time="2021-09-04 01:30:00 +0000 UTC"} [{1630719015000 1630720815000}]
+Deleted {__blockgen_target__="100", __name__="k8s_app_metric37", next_rollout_time="2021-09-03 23:30:00 +0000 UTC"} [{1630713615001 1630715400001}]
+Deleted {__blockgen_target__="100", __name__="k8s_app_metric37", next_rollout_time="2021-09-04 00:30:00 +0000 UTC"} [{1630715415000 1630719015000}]
+Deleted {__blockgen_target__="100", __name__="k8s_app_metric37", next_rollout_time="2021-09-04 01:30:00 +0000 UTC"} [{1630719015000 1630720815000}]
+Deleted {__blockgen_target__="11", __name__="k8s_app_metric37", next_rollout_time="2021-09-03 23:30:00 +0000 UTC"} [{1630713615001 1630715400001}]
+Deleted {__blockgen_target__="11", __name__="k8s_app_metric37", next_rollout_time="2021-09-04 00:30:00 +0000 UTC"} [{1630715415000 1630719015000}]
+Deleted {__blockgen_target__="11", __name__="k8s_app_metric37", next_rollout_time="2021-09-04 01:30:00 +0000 UTC"} [{1630719015000 1630720815000}]
+Deleted {__blockgen_target__="12", __name__="k8s_app_metric37", next_rollout_time="2021-09-03 23:30:00 +0000 UTC"} [{1630713615001 1630715400001}]
+Deleted {__blockgen_target__="12", __name__="k8s_app_metric37", next_rollout_time="2021-09-04 00:30:00 +0000 UTC"} [{1630715415000 1630719015000}]
+Deleted {__blockgen_target__="12", __name__="k8s_app_metric37", next_rollout_time="2021-09-04 01:30:00 +0000 UTC"} [{1630719015000 1630720815000}]
+Deleted {__blockgen_target__="13", __name__="k8s_app_metric37", next_rollout_time="2021-09-03 23:30:00 +0000 UTC"} [{1630713615001 1630715400001}]
+Deleted {__blockgen_target__="13", __name__="k8s_app_metric37", next_rollout_time="2021-09-04 00:30:00 +0000 UTC"} [{1630715415000 1630719015000}]
+Deleted {__blockgen_target__="13", __name__="k8s_app_metric37", next_rollout_time="2021-09-04 01:30:00 +0000 UTC"} [{1630719015000 1630720815000}]
+...
+```
+
+If the changelog output is as expected, we can then use the same command as in the first step, but with the `--no-dry-run` flag, to actually delete the data we want.
+
+```shell
+thanos tools bucket rewrite --no-dry-run --rewrite.to-delete-config-file config.yaml --objstore.config-file objstore.yaml --id <TSDB block ID>
+```
+
+The output is listed below.
+
+```shell
+thanos tools bucket rewrite --no-dry-run --rewrite.to-delete-config-file config.yaml --objstore.config-file ~/local-bucket-config.yaml --id 01FET1EK9BC3E0QD4886RQCM8K
+
+level=info ts=2021-09-25T05:59:18.05232Z caller=factory.go:49 msg="loading bucket configuration"
+level=info ts=2021-09-25T05:59:18.059056Z caller=tools_bucket.go:1078 msg="downloading block" source=01FET1EK9BC3E0QD4886RQCM8K
+level=info ts=2021-09-25T05:59:18.074761Z caller=tools_bucket.go:1115 msg="changelog will be available" file=/var/folders/ny/yy113mqs6szcpjy2qrnhq9rh0000gq/T/thanos-rewrite/01FGDRJNST2EYDY2RKWFZJPGWJ/change.log
+level=info ts=2021-09-25T05:59:18.108293Z caller=tools_bucket.go:1130 msg="starting rewrite for block" source=01FET1EK9BC3E0QD4886RQCM8K new=01FGDRJNST2EYDY2RKWFZJPGWJ toDelete="- matchers: '{__name__=\"k8s_app_metric37\"}'\n" toRelabel=
+level=info ts=2021-09-25T05:59:18.395253Z caller=compactor.go:41 msg="processed 10.00% of 15000 series"
+level=info ts=2021-09-25T05:59:18.406416Z caller=compactor.go:41 msg="processed 20.00% of 15000 series"
+level=info ts=2021-09-25T05:59:18.419826Z caller=compactor.go:41 msg="processed 30.00% of 15000 series"
+level=info ts=2021-09-25T05:59:18.428238Z caller=compactor.go:41 msg="processed 40.00% of 15000 series"
+level=info ts=2021-09-25T05:59:18.436017Z caller=compactor.go:41 msg="processed 50.00% of 15000 series"
+level=info ts=2021-09-25T05:59:18.444738Z caller=compactor.go:41 msg="processed 60.00% of 15000 series"
+level=info ts=2021-09-25T05:59:18.452328Z caller=compactor.go:41 msg="processed 70.00% of 15000 series"
+level=info ts=2021-09-25T05:59:18.465218Z caller=compactor.go:41 msg="processed 80.00% of 15000 series"
+level=info ts=2021-09-25T05:59:18.477385Z caller=compactor.go:41 msg="processed 90.00% of 15000 series"
+level=info ts=2021-09-25T05:59:18.485254Z caller=compactor.go:41 msg="processed 100.00% of 15000 series"
+level=info ts=2021-09-25T05:59:18.485296Z caller=tools_bucket.go:1140 msg="wrote new block after modifications; flushing" source=01FET1EK9BC3E0QD4886RQCM8K new=01FGDRJNST2EYDY2RKWFZJPGWJ
+level=info ts=2021-09-25T05:59:18.662059Z caller=tools_bucket.go:1149 msg="uploading new block" source=01FET1EK9BC3E0QD4886RQCM8K new=01FGDRJNST2EYDY2RKWFZJPGWJ
+level=info ts=2021-09-25T05:59:18.667883Z caller=tools_bucket.go:1159 msg=uploaded source=01FET1EK9BC3E0QD4886RQCM8K new=01FGDRJNST2EYDY2RKWFZJPGWJ
+level=info ts=2021-09-25T05:59:18.667921Z caller=tools_bucket.go:1167 msg="rewrite done" IDs=01FET1EK9BC3E0QD4886RQCM8K
+level=info ts=2021-09-25T05:59:18.668136Z caller=main.go:160 msg=exiting
+```
+
+After rewriting, a new block `01FGDRJNST2EYDY2RKWFZJPGWJ` will be uploaded to your object store bucket.
+
+However, the old block will not be deleted by default, for safety reasons. You can add the `--delete-blocks` flag so that the source block will be marked for deletion after the rewrite is done; it will then be deleted automatically if you have a compactor running against that bucket.
+
+### Advanced deletion config
+
+Multiple matchers can be added in the deletion config.
+
+For example, the config file below specifies deletion for all series that match:
+
+1. metric name `k8s_app_metric1`
## Rewrite Prometheus TSDB blocks

Thanos object storage supports a `local filesystem` provider, which uses the local filesystem as the bucket. If you want to delete from or rewrite a Prometheus TSDB, you can use the command below:

```shell
thanos tools bucket rewrite --prom-blocks --rewrite.to-relabel-config-file config.yaml --objstore.config-file local-bucket.yaml --id 
```

`--prom-blocks` disables the external labels check when adding new blocks. For the local bucket config file, please refer to [this](https://thanos.io/tip/thanos/storage.md/#filesystem).
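For reference, a minimal `local-bucket.yaml` could look like the sketch below; the `directory` value is a placeholder and should point at the directory containing your TSDB blocks:

```yaml
type: FILESYSTEM
config:
  # Placeholder path: point this at your Prometheus data directory.
  directory: "/path/to/prometheus/data"
```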
From 9a949e799e1a8337459f8984a3cfe70f892c247c Mon Sep 17 00:00:00 2001
From: Jimmiehan
Date: Mon, 27 Sep 2021 20:00:41 +0800
Subject: [PATCH 07/33] Store: fix panic on stop application (#4703)

Signed-off-by: Jimmiehan
---
 cmd/thanos/query.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go
index 50884810e4a..8b280229b2b 100644
--- a/cmd/thanos/query.go
+++ b/cmd/thanos/query.go
@@ -503,7 +503,6 @@ func runQuery(
 			}
 		}, func(error) {
 			cancelUpdate()
-			close(fileSDUpdates)
 		})
 	}
 	// Periodically update the addresses from static flags and file SD by resolving them using DNS SD if necessary.

From fc8e64cc0114ed9d019de096470649cb8151db8a Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Mon, 27 Sep 2021 16:11:36 +0200
Subject: [PATCH 08/33] .*: Removed limit for received message size through gRPC. (#4708)

Error log: observatorium-thanos-receive-default-9observatorium-thanos-receive-default-0 thanos-receive level=error ts=2021-09-24T18:13:03.444687849Z caller=handler.go:366 component=receive component=receive-handler err="10 errors: replicate write request for endpoint observatorium-thanos-receive-default-1.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: quorum not reached: 3 errors: forwarding request to endpoint observatorium-thanos-receive-default-2.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6319597 vs. 4194304); forwarding request to endpoint observatorium-thanos-receive-default-1.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6319597 vs.
4194304); forwarding request to endpoint observatorium-thanos-receive-default-3.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6319597 vs. 4194304); replicate write request for endpoint observatorium-thanos-receive-default-7.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: quorum not reached: 3 errors: forwarding request to endpoint observatorium-thanos-receive-default-8.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6197131 vs. 4194304); forwarding request to endpoint observatorium-thanos-receive-default-7.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6197131 vs. 4194304); store locally for endpoint observatorium-thanos-receive-default-9.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: conflict; replicate write request for endpoint observatorium-thanos-receive-default-0.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: quorum not reached: 3 errors: forwarding request to endpoint observatorium-thanos-receive-default-2.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6119023 vs. 4194304); forwarding request to endpoint observatorium-thanos-receive-default-1.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6119023 vs. 4194304); forwarding request to endpoint observatorium-thanos-receive-default-0.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6119023 vs. 4194304); replicate write request for endpoint observatorium-thanos-receive-default-8.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: quorum not reached: 3 errors: forwarding request to endpoint observatorium-thanos-receive-default-8.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6255490 vs. 4194304); store locally for endpoint observatorium-thanos-receive-default-9.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: conflict; forwarding request to endpoint observatorium-thanos-receive-default-0.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6255490 vs. 4194304); replicate write request for endpoint observatorium-thanos-receive-default-9.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: quorum not reached: 3 errors: forwarding request to endpoint observatorium-thanos-receive-default-1.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6118562 vs. 
4194304); store locally for endpoint observatorium-thanos-receive-default-9.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: conflict; forwarding request to endpoint observatorium-thanos-receive-default-0.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6118562 vs. 4194304); replicate write request for endpoint observatorium-thanos-receive-default-6.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: quorum not reached: 3 errors: forwarding request to endpoint observatorium-thanos-receive-default-8.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (5955344 vs. 4194304); forwarding request to endpoint observatorium-thanos-receive-default-7.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (5955344 vs. 4194304); forwarding request to endpoint observatorium-thanos-receive-default-6.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (5955344 vs. 4194304); replicate write request for endpoint observatorium-thanos-receive-default-2.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: quorum not reached: 3 errors: forwarding request to endpoint observatorium-thanos-receive-default-2.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (5867179 vs. 4194304); forwarding request to endpoint observatorium-thanos-receive-default-3.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (5867179 vs. 4194304); forwarding request to endpoint observatorium-thanos-receive-default-4.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (5867179 vs. 4194304); replicate write request for endpoint observatorium-thanos-receive-default-3.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: quorum not reached: 3 errors: forwarding request to endpoint observatorium-thanos-receive-default-5.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6014110 vs. 4194304); forwarding request to endpoint observatorium-thanos-receive-default-3.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6014110 vs. 4194304); forwarding request to endpoint observatorium-thanos-receive-default-4.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6014110 vs. 
4194304); replicate write request for endpoint observatorium-thanos-receive-default-4.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: quorum not reached: 3 errors: forwarding request to endpoint observatorium-thanos-receive-default-5.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6064467 vs. 4194304); forwarding request to endpoint observatorium-thanos-receive-default-6.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6064467 vs. 4194304); forwarding request to endpoint observatorium-thanos-receive-default-4.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (6064467 vs. 4194304); replicate write request for endpoint observatorium-thanos-receive-default-5.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: quorum not reached: 3 errors: forwarding request to endpoint observatorium-thanos-receive-default-5.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (5796760 vs. 4194304); forwarding request to endpoint observatorium-thanos-receive-default-7.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (5796760 vs. 4194304); forwarding request to endpoint observatorium-thanos-receive-default-6.observatorium-thanos-receive-default.monitoring.svc.cluster.local:10901: rpc error: code = ResourceExhausted desc = grpc: received message larger than max (5796760 vs. 4194304)" msg="internal server error" Signed-off-by: Bartlomiej Plotka --- pkg/server/grpc/grpc.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/server/grpc/grpc.go b/pkg/server/grpc/grpc.go index 10c6f08e4bd..6a39ba5afbd 100644 --- a/pkg/server/grpc/grpc.go +++ b/pkg/server/grpc/grpc.go @@ -72,7 +72,11 @@ func New(logger log.Logger, reg prometheus.Registerer, tracer opentracing.Tracer } options.grpcOpts = append(options.grpcOpts, []grpc.ServerOption{ + // NOTE: It is recommended for gRPC messages to not go over 1MB, yet it is typical for remote write requests and store API responses to go over 4MB. + // Remove limits and allow users to use histogram message sizes to detect those situations. + // TODO(bwplotka): https://github.com/grpc-ecosystem/go-grpc-middleware/issues/462 grpc.MaxSendMsgSize(math.MaxInt32), + grpc.MaxRecvMsgSize(math.MaxInt32), grpc_middleware.WithUnaryServerChain( grpc_recovery.UnaryServerInterceptor(grpc_recovery.WithRecoveryHandler(grpcPanicRecoveryHandler)), met.UnaryServerInterceptor(), From 360b39e1c6ab3ac8dcefa225a6205142f9362c68 Mon Sep 17 00:00:00 2001 From: Matej Gera <38492574+matej-g@users.noreply.github.com> Date: Tue, 28 Sep 2021 13:45:03 +0200 Subject: [PATCH 09/33] e2e tests: Move to `efficientgo/e2e` (#4610) * Adjust e2ethanos pkg - Adjusts services to use e2e - Simplifies some func signatures, remove redundant funcs etc. - Adds service for reverse proxy (instead of using one on host) - Adds service for Minio (a workaround, see issue in the func comment) Signed-off-by: Matej Gera * Adjust e2e tests - Mechanical replacement / adjustments to new services, package imports etc. 
Signed-off-by: Matej Gera * go mod update; adjust gitignore Signed-off-by: Matej Gera * Fix condition in Query FE Signed-off-by: Matej Gera * Simplify propagation of container hostnames - Remove manually building hostname with netName - Use InternalEndpoint instead - Take advantage of future runnable to get hostname addresses for configs Signed-off-by: Matej Gera --- .gitignore | 2 +- go.mod | 3 +- pkg/objstore/s3/s3_e2e_test.go | 16 +- test/e2e/compact_test.go | 73 +++-- test/e2e/e2ethanos/helpers.go | 8 +- test/e2e/e2ethanos/service.go | 73 ++++- test/e2e/e2ethanos/services.go | 507 ++++++++++++++++++------------ test/e2e/exemplars_api_test.go | 77 ++--- test/e2e/metadata_api_test.go | 41 ++- test/e2e/query_frontend_test.go | 150 ++++----- test/e2e/query_test.go | 174 +++++----- test/e2e/receive_test.go | 369 ++++++++++------------ test/e2e/rule_test.go | 154 ++++----- test/e2e/rules_api_test.go | 59 ++-- test/e2e/store_gateway_test.go | 47 +-- test/e2e/targets_api_test.go | 26 +- test/e2e/tools_bucket_web_test.go | 82 ++--- 17 files changed, 995 insertions(+), 866 deletions(-) diff --git a/.gitignore b/.gitignore index 4e1783a9b8d..e5b6dfce009 100644 --- a/.gitignore +++ b/.gitignore @@ -12,7 +12,7 @@ kube/.minikube # Ignore e2e working dirs. data/ -test/e2e/e2e_integration_test* +test/e2e/e2e_* # Ignore promu artifacts. /.build diff --git a/go.mod b/go.mod index b7881ee5992..da09b9cbf2b 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,7 @@ require ( github.com/cortexproject/cortex v1.10.1-0.20210820081236-70dddb6b70b8 github.com/davecgh/go-spew v1.1.1 github.com/efficientgo/e2e v0.11.1-0.20210829161758-f4cc6dbdc6ea + github.com/efficientgo/tools/core v0.0.0-20210129205121-421d0828c9a6 github.com/efficientgo/tools/extkingpin v0.0.0-20210609125236-d73259166f20 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/fatih/structtag v1.1.0 @@ -33,7 +34,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/golang/snappy v0.0.4 github.com/googleapis/gax-go v2.0.2+incompatible - github.com/grafana/dskit v0.0.0-20210819132858-471020752967 + github.com/grafana/dskit v0.0.0-20210819132858-471020752967 // indirect github.com/grpc-ecosystem/go-grpc-middleware/providers/kit/v2 v2.0.0-20201002093600-73cf2ae9d891 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 diff --git a/pkg/objstore/s3/s3_e2e_test.go b/pkg/objstore/s3/s3_e2e_test.go index e837b9baa58..8991acdbfe8 100644 --- a/pkg/objstore/s3/s3_e2e_test.go +++ b/pkg/objstore/s3/s3_e2e_test.go @@ -9,8 +9,8 @@ import ( "strings" "testing" - "github.com/cortexproject/cortex/integration/e2e" - e2edb "github.com/cortexproject/cortex/integration/e2e/db" + "github.com/efficientgo/e2e" + e2edb "github.com/efficientgo/e2e/db" "github.com/go-kit/kit/log" "github.com/thanos-io/thanos/pkg/objstore/s3" "github.com/thanos-io/thanos/test/e2e/e2ethanos" @@ -23,19 +23,19 @@ func BenchmarkUpload(b *testing.B) { b.ReportAllocs() ctx := context.Background() - s, err := e2e.NewScenario("e2e_bench_mino_client") + e, err := e2e.NewDockerEnvironment("e2e_bench_mino_client") testutil.Ok(b, err) - b.Cleanup(e2ethanos.CleanScenario(b, s)) + b.Cleanup(e2ethanos.CleanScenario(b, e)) - const bucket = "test" - m := e2edb.NewMinio(8080, bucket) - testutil.Ok(b, s.StartAndWaitReady(m)) + const bucket = "benchmark" + m := e2ethanos.NewMinio(e, "benchmark", bucket) + testutil.Ok(b, e2e.StartAndWaitReady(m)) bkt, err := 
s3.NewBucketWithConfig(log.NewNopLogger(), s3.Config{ Bucket: bucket, AccessKey: e2edb.MinioAccessKey, SecretKey: e2edb.MinioSecretKey, - Endpoint: m.HTTPEndpoint(), + Endpoint: m.Endpoint("http"), Insecure: true, }, "test-feed") testutil.Ok(b, err) diff --git a/test/e2e/compact_test.go b/test/e2e/compact_test.go index 2359816ed8e..cc0a61a05f7 100644 --- a/test/e2e/compact_test.go +++ b/test/e2e/compact_test.go @@ -16,8 +16,9 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/integration/e2e" - e2edb "github.com/cortexproject/cortex/integration/e2e/db" + "github.com/efficientgo/e2e" + e2edb "github.com/efficientgo/e2e/db" + "github.com/efficientgo/e2e/matchers" "github.com/go-kit/kit/log" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" @@ -33,6 +34,7 @@ import ( "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/objstore/s3" "github.com/thanos-io/thanos/pkg/promclient" + "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" "github.com/thanos-io/thanos/test/e2e/e2ethanos" @@ -336,22 +338,22 @@ func testCompactWithStoreGateway(t *testing.T, penaltyDedup bool) { if penaltyDedup { name = "e2e_test_compact_penalty_dedup" } - s, err := e2e.NewScenario(name) + e, err := e2e.NewDockerEnvironment(name) testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) - dir := filepath.Join(s.SharedDir(), "tmp") + dir := filepath.Join(e.SharedDir(), "tmp") testutil.Ok(t, os.MkdirAll(dir, os.ModePerm)) const bucket = "compact_test" - m := e2edb.NewMinio(8080, bucket) - testutil.Ok(t, s.StartAndWaitReady(m)) + m := e2ethanos.NewMinio(e, "minio", bucket) + testutil.Ok(t, e2e.StartAndWaitReady(m)) bkt, err := s3.NewBucketWithConfig(logger, s3.Config{ Bucket: bucket, AccessKey: e2edb.MinioAccessKey, SecretKey: e2edb.MinioSecretKey, - Endpoint: m.HTTPEndpoint(), // We need separate client config, when connecting to minio from outside. + Endpoint: m.Endpoint("http"), // We need separate client config, when connecting to minio from outside. 
Insecure: true, }, "test-feed") testutil.Ok(t, err) @@ -363,7 +365,10 @@ func testCompactWithStoreGateway(t *testing.T, penaltyDedup bool) { for _, b := range blocks { id, err := b.Create(ctx, dir, justAfterConsistencyDelay, b.hashFunc) testutil.Ok(t, err) - testutil.Ok(t, objstore.UploadDir(ctx, logger, bkt, path.Join(dir, id.String()), id.String())) + testutil.Ok(t, runutil.Retry(time.Second, ctx.Done(), func() error { + return objstore.UploadDir(ctx, logger, bkt, path.Join(dir, id.String()), id.String()) + })) + rawBlockIDs[id] = struct{}{} if b.markedForNoCompact { testutil.Ok(t, block.MarkForNoCompact(ctx, logger, bkt, id, metadata.ManualNoCompactReason, "why not", promauto.With(nil).NewCounter(prometheus.CounterOpts{}))) @@ -442,26 +447,26 @@ func testCompactWithStoreGateway(t *testing.T, penaltyDedup bool) { Bucket: bucket, AccessKey: e2edb.MinioAccessKey, SecretKey: e2edb.MinioSecretKey, - Endpoint: m.NetworkHTTPEndpoint(), + Endpoint: m.InternalEndpoint("http"), Insecure: true, }, } - str, err := e2ethanos.NewStoreGW(s.SharedDir(), "1", svcConfig) + str, err := e2ethanos.NewStoreGW(e, "1", svcConfig) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(str)) + testutil.Ok(t, e2e.StartAndWaitReady(str)) testutil.Ok(t, str.WaitSumMetrics(e2e.Equals(float64(len(rawBlockIDs)+7)), "thanos_blocks_meta_synced")) testutil.Ok(t, str.WaitSumMetrics(e2e.Equals(0), "thanos_blocks_meta_sync_failures_total")) testutil.Ok(t, str.WaitSumMetrics(e2e.Equals(0), "thanos_blocks_meta_modified")) - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", str.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", str.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute) t.Cleanup(cancel) // Check if query detects current series, even if overlapped. - queryAndAssert(t, ctx, q.HTTPEndpoint(), + queryAndAssert(t, ctx, q.Endpoint("http"), fmt.Sprintf(`count_over_time({a="1"}[13h] offset %ds)`, int64(time.Since(now.Add(12*time.Hour)).Seconds())), promclient.QueryOptions{ Deduplicate: false, // This should be false, so that we can be sure deduplication was offline. @@ -599,7 +604,7 @@ func testCompactWithStoreGateway(t *testing.T, penaltyDedup bool) { // Precreate a directory. It should be deleted. // In a hypothetical scenario, the directory could be a left-over from // a compaction that had crashed. - p := filepath.Join(s.SharedDir(), "data", "compact", "expect-to-halt", "compact") + p := filepath.Join(e.SharedDir(), "data", "compact", "expect-to-halt", "compact") testutil.Assert(t, len(blocksWithHashes) > 0) @@ -613,9 +618,9 @@ func testCompactWithStoreGateway(t *testing.T, penaltyDedup bool) { testutil.Ok(t, err) testutil.Ok(t, f.Close()) - c, err := e2ethanos.NewCompactor(s.SharedDir(), "expect-to-halt", svcConfig, nil) + c, err := e2ethanos.NewCompactor(e, "expect-to-halt", svcConfig, nil) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(c)) + testutil.Ok(t, e2e.StartAndWaitReady(c)) // Expect compactor halted and for one cleanup iteration to happen. testutil.Ok(t, c.WaitSumMetrics(e2e.Equals(1), "thanos_compact_halted")) @@ -626,10 +631,10 @@ func testCompactWithStoreGateway(t *testing.T, penaltyDedup bool) { testutil.Ok(t, c.WaitSumMetrics(e2e.Equals(0), "thanos_blocks_meta_modified")) // The compact directory is still there. 
-	dataDir := filepath.Join(s.SharedDir(), "data", "compact", "expect-to-halt")
+	dataDir := filepath.Join(e.SharedDir(), "data", "compact", "expect-to-halt")
 	empty, err := isEmptyDir(dataDir)
 	testutil.Ok(t, err)
-	testutil.Equals(t, false, empty, "directory %s should not be empty", dataDir)
+	testutil.Equals(t, false, empty, "directory %s should not be empty", dataDir)

 	// We expect no ops.
 	testutil.Ok(t, c.WaitSumMetrics(e2e.Equals(0), "thanos_compact_iterations_total"))
@@ -646,10 +651,10 @@
 	testutil.Ok(t, c.WaitSumMetrics(e2e.Equals(2), "thanos_compact_blocks_cleaned_total"))

 	// Ensure bucket UI.
-	ensureGETStatusCode(t, http.StatusOK, "http://"+path.Join(c.HTTPEndpoint(), "global"))
-	ensureGETStatusCode(t, http.StatusOK, "http://"+path.Join(c.HTTPEndpoint(), "loaded"))
+	ensureGETStatusCode(t, http.StatusOK, "http://"+path.Join(c.Endpoint("http"), "global"))
+	ensureGETStatusCode(t, http.StatusOK, "http://"+path.Join(c.Endpoint("http"), "loaded"))

-	testutil.Ok(t, s.Stop(c))
+	testutil.Ok(t, c.Stop())

 	_, err = os.Stat(randBlockDir)
 	testutil.NotOk(t, err)
@@ -661,7 +666,7 @@
 	// Dedup enabled; compactor should work as expected.
 	{
 		// Predownload block dirs with hashes. We should not try downloading them again.
-		p := filepath.Join(s.SharedDir(), "data", "compact", "working")
+		p := filepath.Join(e.SharedDir(), "data", "compact", "working")

 		for _, id := range blocksWithHashes {
 			m, err := block.DownloadMeta(ctx, logger, bkt, id)
@@ -677,9 +682,9 @@
 		}

 		// We expect 2x 4-block compaction, 2-block vertical compaction, 2x 3-block compaction.
-		c, err := e2ethanos.NewCompactor(s.SharedDir(), "working", svcConfig, nil, extArgs...)
+		c, err := e2ethanos.NewCompactor(e, "working", svcConfig, nil, extArgs...)
 		testutil.Ok(t, err)
-		testutil.Ok(t, s.StartAndWaitReady(c))
+		testutil.Ok(t, e2e.StartAndWaitReady(c))

 		// NOTE: We cannot assert on intermediate `thanos_blocks_meta_` metrics as those are gauge and change dynamically due to many
 		// compaction groups. Wait for at least first compaction iteration (next is in 5m).
@@ -706,9 +711,9 @@

 		testutil.Ok(t, c.WaitSumMetrics(e2e.Equals(0), "thanos_compact_halted"))

-		bucketMatcher, err := labels.NewMatcher(labels.MatchEqual, "bucket", bucket)
+		bucketMatcher, err := matchers.NewMatcher(matchers.MatchEqual, "bucket", bucket)
 		testutil.Ok(t, err)
-		operationMatcher, err := labels.NewMatcher(labels.MatchEqual, "operation", "get")
+		operationMatcher, err := matchers.NewMatcher(matchers.MatchEqual, "operation", "get")
 		testutil.Ok(t, err)
 		testutil.Ok(t, c.WaitSumMetricsWithOptions(e2e.Equals(478), []string{"thanos_objstore_bucket_operations_total"}, e2e.WithLabelMatchers(
@@ -718,13 +723,13 @@
 		)

 		// Make sure compactor does not modify anything else over time.
-		testutil.Ok(t, s.Stop(c))
+		testutil.Ok(t, c.Stop())

 		ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute)
 		t.Cleanup(cancel)

 		// Check if query detects new blocks.
- queryAndAssert(t, ctx, q.HTTPEndpoint(), + queryAndAssert(t, ctx, q.Endpoint("http"), fmt.Sprintf(`count_over_time({a="1"}[13h] offset %ds)`, int64(time.Since(now.Add(12*time.Hour)).Seconds())), promclient.QueryOptions{ Deduplicate: false, // This should be false, so that we can be sure deduplication was offline. @@ -742,9 +747,9 @@ func testCompactWithStoreGateway(t *testing.T, penaltyDedup bool) { if penaltyDedup { extArgs = append(extArgs, "--deduplication.func=penalty") } - c, err := e2ethanos.NewCompactor(s.SharedDir(), "working", svcConfig, nil, extArgs...) + c, err := e2ethanos.NewCompactor(e, "working-dedup", svcConfig, nil, extArgs...) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(c)) + testutil.Ok(t, e2e.StartAndWaitReady(c)) // NOTE: We cannot assert on intermediate `thanos_blocks_meta_` metrics as those are gauge and change dynamically due to many // compaction groups. Wait for at least first compaction iteration (next is in 5m). @@ -767,13 +772,13 @@ func testCompactWithStoreGateway(t *testing.T, penaltyDedup bool) { testutil.Ok(t, c.WaitSumMetrics(e2e.Equals(0), "thanos_compact_halted")) // Make sure compactor does not modify anything else over time. - testutil.Ok(t, s.Stop(c)) + testutil.Ok(t, c.Stop()) ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute) t.Cleanup(cancel) // Check if query detects new blocks. - queryAndAssert(t, ctx, q.HTTPEndpoint(), + queryAndAssert(t, ctx, q.Endpoint("http"), fmt.Sprintf(`count_over_time({a="1"}[13h] offset %ds)`, int64(time.Since(now.Add(12*time.Hour)).Seconds())), promclient.QueryOptions{ Deduplicate: false, // This should be false, so that we can be sure deduplication was offline. diff --git a/test/e2e/e2ethanos/helpers.go b/test/e2e/e2ethanos/helpers.go index 0bcd21c9128..22ef3d81415 100644 --- a/test/e2e/e2ethanos/helpers.go +++ b/test/e2e/e2ethanos/helpers.go @@ -11,16 +11,16 @@ import ( "strings" "testing" - "github.com/cortexproject/cortex/integration/e2e" + "github.com/efficientgo/e2e" "github.com/thanos-io/thanos/pkg/testutil" ) -func CleanScenario(t testing.TB, s *e2e.Scenario) func() { +func CleanScenario(t testing.TB, e *e2e.DockerEnvironment) func() { return func() { // Make sure Clean can properly delete everything. 
- testutil.Ok(t, exec.Command("chmod", "-R", "777", s.SharedDir()).Run()) - s.Close() + testutil.Ok(t, exec.Command("chmod", "-R", "777", e.SharedDir()).Run()) + e.Close() } } diff --git a/test/e2e/e2ethanos/service.go b/test/e2e/e2ethanos/service.go index 57f5dea453d..2a8d0bd31f8 100644 --- a/test/e2e/e2ethanos/service.go +++ b/test/e2e/e2ethanos/service.go @@ -4,35 +4,74 @@ package e2ethanos import ( - "github.com/cortexproject/cortex/integration/e2e" -) + "os" + "strconv" -type Service struct { - *e2e.HTTPService + "github.com/efficientgo/e2e" +) - grpc int +type Port struct { + Name string + PortNum int + IsMetrics bool } func NewService( + e e2e.Environment, name string, image string, - command *e2e.Command, + command e2e.Command, readiness *e2e.HTTPReadinessProbe, http, grpc int, - otherPorts ...int, -) *Service { - return &Service{ - HTTPService: e2e.NewHTTPService(name, image, command, readiness, http, append(otherPorts, grpc)...), - grpc: grpc, - } + otherPorts ...Port, +) *e2e.InstrumentedRunnable { + return newUninitiatedService(e, name, http, grpc, otherPorts...).Init( + e2e.StartOptions{ + Image: image, + Command: command, + Readiness: readiness, + User: strconv.Itoa(os.Getuid()), + WaitReadyBackoff: &defaultBackoffConfig, + }, + ) } -func (s *Service) GRPCEndpoint() string { return s.Endpoint(s.grpc) } +func newUninitiatedService( + e e2e.Environment, + name string, + http, grpc int, + otherPorts ...Port, +) *e2e.FutureInstrumentedRunnable { + metricsPorts := "http" + ports := map[string]int{ + "http": http, + "grpc": grpc, + } -func (s *Service) GRPCNetworkEndpoint() string { - return s.NetworkEndpoint(s.grpc) + for _, op := range otherPorts { + ports[op.Name] = op.PortNum + + if op.IsMetrics { + metricsPorts = op.Name + } + } + + return e2e.NewInstrumentedRunnable(e, name, ports, metricsPorts) } -func (s *Service) GRPCNetworkEndpointFor(networkName string) string { - return s.NetworkEndpointFor(networkName, s.grpc) +func initiateService( + service *e2e.FutureInstrumentedRunnable, + image string, + command e2e.Command, + readiness *e2e.HTTPReadinessProbe, +) *e2e.InstrumentedRunnable { + return service.Init( + e2e.StartOptions{ + Image: image, + Command: command, + Readiness: readiness, + User: strconv.Itoa(os.Getuid()), + WaitReadyBackoff: &defaultBackoffConfig, + }, + ) } diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index c15c150cbb6..e129d1dd6c0 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -13,8 +13,9 @@ import ( "strings" "time" - "github.com/cortexproject/cortex/integration/e2e" - "github.com/grafana/dskit/backoff" + "github.com/efficientgo/e2e" + e2edb "github.com/efficientgo/e2e/db" + "github.com/efficientgo/tools/core/pkg/backoff" "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -28,12 +29,15 @@ import ( "github.com/thanos-io/thanos/pkg/receive" ) -const infoLogLevel = "info" +const ( + infoLogLevel = "info" + ContainerSharedDir = "/shared" +) // Same as default for now. 
var defaultBackoffConfig = backoff.Config{ - MinBackoff: 300 * time.Millisecond, - MaxBackoff: 600 * time.Millisecond, + Min: 300 * time.Millisecond, + Max: 600 * time.Millisecond, MaxRetries: 50, } @@ -60,9 +64,9 @@ func DefaultImage() string { return "thanos" } -func NewPrometheus(sharedDir, name, config, promImage string, enableFeatures ...string) (*e2e.HTTPService, string, error) { - dir := filepath.Join(sharedDir, "data", "prometheus", name) - container := filepath.Join(e2e.ContainerSharedDir, "data", "prometheus", name) +func NewPrometheus(e e2e.Environment, name, config, promImage string, enableFeatures ...string) (*e2e.InstrumentedRunnable, string, error) { + dir := filepath.Join(e.SharedDir(), "data", "prometheus", name) + container := filepath.Join(ContainerSharedDir, "data", "prometheus", name) if err := os.MkdirAll(dir, 0750); err != nil { return nil, "", errors.Wrap(err, "create prometheus dir") } @@ -82,31 +86,35 @@ func NewPrometheus(sharedDir, name, config, promImage string, enableFeatures ... if len(enableFeatures) > 0 { args = append(args, fmt.Sprintf("--enable-feature=%s", strings.Join(enableFeatures, ","))) } - prom := e2e.NewHTTPService( + prom := e2e.NewInstrumentedRunnable( + e, fmt.Sprintf("prometheus-%s", name), - promImage, - e2e.NewCommandWithoutEntrypoint("prometheus", args...), - e2e.NewHTTPReadinessProbe(9090, "/-/ready", 200, 200), - 9090, + map[string]int{"http": 9090}, + "http").Init( + e2e.StartOptions{ + Image: promImage, + Command: e2e.NewCommandWithoutEntrypoint("prometheus", args...), + Readiness: e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), + User: strconv.Itoa(os.Getuid()), + WaitReadyBackoff: &defaultBackoffConfig, + }, ) - prom.SetUser(strconv.Itoa(os.Getuid())) - prom.SetBackoff(defaultBackoffConfig) return prom, container, nil } -func NewPrometheusWithSidecar(sharedDir, netName, name, config, promImage string, enableFeatures ...string) (*e2e.HTTPService, *Service, error) { - return NewPrometheusWithSidecarCustomImage(sharedDir, netName, name, config, promImage, DefaultImage(), enableFeatures...) +func NewPrometheusWithSidecar(e e2e.Environment, name, config, promImage string, enableFeatures ...string) (*e2e.InstrumentedRunnable, *e2e.InstrumentedRunnable, error) { + return NewPrometheusWithSidecarCustomImage(e, name, config, promImage, DefaultImage(), enableFeatures...) } -func NewPrometheusWithSidecarCustomImage(sharedDir, netName, name, config, promImage string, sidecarImage string, enableFeatures ...string) (*e2e.HTTPService, *Service, error) { - prom, dataDir, err := NewPrometheus(sharedDir, name, config, promImage, enableFeatures...) +func NewPrometheusWithSidecarCustomImage(e e2e.Environment, name, config, promImage string, sidecarImage string, enableFeatures ...string) (*e2e.InstrumentedRunnable, *e2e.InstrumentedRunnable, error) { + prom, dataDir, err := NewPrometheus(e, name, config, promImage, enableFeatures...) 
if err != nil { return nil, nil, err } - prom.SetBackoff(defaultBackoffConfig) sidecar := NewService( + e, fmt.Sprintf("sidecar-%s", name), sidecarImage, e2e.NewCommand("sidecar", e2e.BuildArgs(map[string]string{ @@ -114,21 +122,20 @@ func NewPrometheusWithSidecarCustomImage(sharedDir, netName, name, config, promI "--grpc-address": ":9091", "--grpc-grace-period": "0s", "--http-address": ":8080", - "--prometheus.url": "http://" + prom.NetworkEndpointFor(netName, 9090), + "--prometheus.url": "http://" + prom.InternalEndpoint("http"), "--tsdb.path": dataDir, "--log.level": infoLogLevel, })...), - e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), + e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), 8080, 9091, ) - sidecar.SetUser(strconv.Itoa(os.Getuid())) - sidecar.SetBackoff(defaultBackoffConfig) return prom, sidecar, nil } type QuerierBuilder struct { + environment e2e.Environment sharedDir string name string routePrefix string @@ -145,9 +152,10 @@ type QuerierBuilder struct { tracingConfig string } -func NewQuerierBuilder(sharedDir, name string, storeAddresses ...string) *QuerierBuilder { +func NewQuerierBuilder(e e2e.Environment, name string, storeAddresses ...string) *QuerierBuilder { return &QuerierBuilder{ - sharedDir: sharedDir, + environment: e, + sharedDir: e.SharedDir(), name: name, storeAddresses: storeAddresses, image: DefaultImage(), @@ -199,7 +207,52 @@ func (q *QuerierBuilder) WithTracingConfig(tracingConfig string) *QuerierBuilder return q } -func (q *QuerierBuilder) Build() (*Service, error) { +func (q *QuerierBuilder) BuildUninitiated() *e2e.FutureInstrumentedRunnable { + return newUninitiatedService( + q.environment, + fmt.Sprintf("querier-%v", q.name), + 8080, + 9091, + ) +} + +func (q *QuerierBuilder) Initiate(service *e2e.FutureInstrumentedRunnable, storeAddresses ...string) (*e2e.InstrumentedRunnable, error) { + q.storeAddresses = storeAddresses + args, err := q.collectArgs() + if err != nil { + return nil, err + } + + querier := initiateService( + service, + q.image, + e2e.NewCommand("query", args...), + e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), + ) + + return querier, nil +} + +func (q *QuerierBuilder) Build() (*e2e.InstrumentedRunnable, error) { + args, err := q.collectArgs() + if err != nil { + return nil, err + } + + querier := NewService( + q.environment, + fmt.Sprintf("querier-%v", q.name), + q.image, + e2e.NewCommand("query", args...), + e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), + 8080, + 9091, + ) + + return querier, nil +} + +func (q *QuerierBuilder) collectArgs() ([]string, error) { const replicaLabel = "replica" args := e2e.BuildArgs(map[string]string{ @@ -235,7 +288,7 @@ func (q *QuerierBuilder) Build() (*Service, error) { if len(q.fileSDStoreAddresses) > 0 { queryFileSDDir := filepath.Join(q.sharedDir, "data", "querier", q.name) - container := filepath.Join(e2e.ContainerSharedDir, "data", "querier", q.name) + container := filepath.Join(ContainerSharedDir, "data", "querier", q.name) if err := os.MkdirAll(queryFileSDDir, 0750); err != nil { return nil, errors.Wrap(err, "create query dir failed") } @@ -269,33 +322,31 @@ func (q *QuerierBuilder) Build() (*Service, error) { args = append(args, "--tracing.config="+q.tracingConfig) } - querier := NewService( - fmt.Sprintf("querier-%v", q.name), - q.image, - e2e.NewCommand("query", args...), - e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), - 8080, - 9091, - ) - querier.SetUser(strconv.Itoa(os.Getuid())) - querier.SetBackoff(defaultBackoffConfig) - - return querier, 
nil
+	return args, nil
 }

 func RemoteWriteEndpoint(addr string) string { return fmt.Sprintf("http://%s/api/v1/receive", addr) }

-// NewRoutingAndIngestingReceiver creates a Thanos Receive instances that is configured both for ingesting samples and routing samples to other receivers.
-func NewRoutingAndIngestingReceiver(sharedDir, networkName, name string, replicationFactor int, hashring ...receive.HashringConfig) (*Service, error) {
+// NewUninitiatedReceiver returns a future receiver that can be initiated. It is useful
+// for obtaining a receiver address for hashring before the receiver is started.
+func NewUninitiatedReceiver(e e2e.Environment, name string) *e2e.FutureInstrumentedRunnable {
+	return newUninitiatedService(e, fmt.Sprintf("receive-%v", name), 8080, 9091, Port{Name: "remote-write", PortNum: 8081})
+}

-	localEndpoint := NewService(fmt.Sprintf("receive-%v", name), "", e2e.NewCommand("", ""), nil, 8080, 9091, 8081).GRPCNetworkEndpointFor(networkName)
+// NewRoutingAndIngestingReceiverFromService creates a Thanos Receive instance from an uninitiated service.
+// It is configured both for ingesting samples and routing samples to other receivers.
+func NewRoutingAndIngestingReceiverFromService(service *e2e.FutureInstrumentedRunnable, sharedDir string, replicationFactor int, hashring ...receive.HashringConfig) (*e2e.InstrumentedRunnable, error) {
+	var localEndpoint string
 	if len(hashring) == 0 {
+		localEndpoint = "0.0.0.0:9091"
 		hashring = []receive.HashringConfig{{Endpoints: []string{localEndpoint}}}
+	} else {
+		localEndpoint = service.InternalEndpoint("grpc")
 	}

-	dir := filepath.Join(sharedDir, "data", "receive", name)
+	dir := filepath.Join(sharedDir, "data", "receive", service.Name())
 	dataDir := filepath.Join(dir, "data")
-	container := filepath.Join(e2e.ContainerSharedDir, "data", "receive", name)
+	container := filepath.Join(ContainerSharedDir, "data", "receive", service.Name())
 	if err := os.MkdirAll(dataDir, 0750); err != nil {
 		return nil, errors.Wrap(err, "create receive dir")
 	}
@@ -304,44 +355,87 @@
 		return nil, errors.Wrapf(err, "generate hashring file: %v", hashring)
 	}

-	receiver := NewService(
-		fmt.Sprintf("receive-%v", name),
+	receiver := initiateService(
+		service,
 		DefaultImage(),
 		// TODO(bwplotka): BuildArgs should be interface.
e2e.NewCommand("receive", e2e.BuildArgs(map[string]string{ - "--debug.name": fmt.Sprintf("receive-%v", name), + "--debug.name": service.Name(), "--grpc-address": ":9091", "--grpc-grace-period": "0s", "--http-address": ":8080", "--remote-write.address": ":8081", - "--label": fmt.Sprintf(`receive="%s"`, name), + "--label": fmt.Sprintf(`receive="%s"`, service.Name()), "--tsdb.path": filepath.Join(container, "data"), "--log.level": infoLogLevel, "--receive.replication-factor": strconv.Itoa(replicationFactor), "--receive.local-endpoint": localEndpoint, "--receive.hashrings": string(b), })...), - e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), - 8080, - 9091, - 8081, + e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), + ) + + return receiver, nil +} + +func NewRoutingAndIngestingReceiverWithConfigWatcher(service *e2e.FutureInstrumentedRunnable, sharedDir string, replicationFactor int, hashring ...receive.HashringConfig) (*e2e.InstrumentedRunnable, error) { + var localEndpoint string + if len(hashring) == 0 { + localEndpoint = "0.0.0.0:9091" + hashring = []receive.HashringConfig{{Endpoints: []string{localEndpoint}}} + } else { + localEndpoint = service.InternalEndpoint("grpc") + } + + dir := filepath.Join(sharedDir, "data", "receive", service.Name()) + dataDir := filepath.Join(dir, "data") + container := filepath.Join(ContainerSharedDir, "data", "receive", service.Name()) + if err := os.MkdirAll(dataDir, 0750); err != nil { + return nil, errors.Wrap(err, "create receive dir") + } + b, err := json.Marshal(hashring) + if err != nil { + return nil, errors.Wrapf(err, "generate hashring file: %v", hashring) + } + + if err := ioutil.WriteFile(filepath.Join(dir, "hashrings.json"), b, 0600); err != nil { + return nil, errors.Wrap(err, "creating receive config") + } + + receiver := initiateService( + service, + DefaultImage(), + // TODO(bwplotka): BuildArgs should be interface. + e2e.NewCommand("receive", e2e.BuildArgs(map[string]string{ + "--debug.name": service.Name(), + "--grpc-address": ":9091", + "--grpc-grace-period": "0s", + "--http-address": ":8080", + "--remote-write.address": ":8081", + "--label": fmt.Sprintf(`receive="%s"`, service.Name()), + "--tsdb.path": filepath.Join(container, "data"), + "--log.level": infoLogLevel, + "--receive.replication-factor": strconv.Itoa(replicationFactor), + "--receive.local-endpoint": localEndpoint, + "--receive.hashrings-file": filepath.Join(container, "hashrings.json"), + "--receive.hashrings-file-refresh-interval": "5s", + })...), + e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), ) - receiver.SetUser(strconv.Itoa(os.Getuid())) - receiver.SetBackoff(defaultBackoffConfig) return receiver, nil } // NewRoutingReceiver creates a Thanos Receive instance that is only configured to route to other receive instances. It has no local storage. 
-func NewRoutingReceiver(sharedDir, name string, replicationFactor int, hashring ...receive.HashringConfig) (*Service, error) { +func NewRoutingReceiver(e e2e.Environment, name string, replicationFactor int, hashring ...receive.HashringConfig) (*e2e.InstrumentedRunnable, error) { if len(hashring) == 0 { return nil, errors.New("hashring should not be empty for receive-distributor mode") } - dir := filepath.Join(sharedDir, "data", "receive", name) + dir := filepath.Join(e.SharedDir(), "data", "receive", name) dataDir := filepath.Join(dir, "data") - container := filepath.Join(e2e.ContainerSharedDir, "data", "receive", name) + container := filepath.Join(ContainerSharedDir, "data", "receive", name) if err := os.MkdirAll(dataDir, 0750); err != nil { return nil, errors.Wrap(err, "create receive dir") } @@ -351,6 +445,7 @@ func NewRoutingReceiver(sharedDir, name string, replicationFactor int, hashring } receiver := NewService( + e, fmt.Sprintf("receive-%v", name), DefaultImage(), // TODO(bwplotka): BuildArgs should be interface. @@ -366,26 +461,24 @@ func NewRoutingReceiver(sharedDir, name string, replicationFactor int, hashring "--receive.replication-factor": strconv.Itoa(replicationFactor), "--receive.hashrings": string(b), })...), - e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), + e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), 8080, 9091, - 8081, + Port{Name: "remote-write", PortNum: 8081}, ) - receiver.SetUser(strconv.Itoa(os.Getuid())) - receiver.SetBackoff(defaultBackoffConfig) return receiver, nil } // NewIngestingReceiver creates a Thanos Receive instance that is only configured to ingest, not route to other receivers. -func NewIngestingReceiver(sharedDir, name string) (*Service, error) { - dir := filepath.Join(sharedDir, "data", "receive", name) +func NewIngestingReceiver(e e2e.Environment, name string) (*e2e.InstrumentedRunnable, error) { + dir := filepath.Join(e.SharedDir(), "data", "receive", name) dataDir := filepath.Join(dir, "data") - container := filepath.Join(e2e.ContainerSharedDir, "data", "receive", name) + container := filepath.Join(ContainerSharedDir, "data", "receive", name) if err := os.MkdirAll(dataDir, 0750); err != nil { return nil, errors.Wrap(err, "create receive dir") } - receiver := NewService( + receiver := NewService(e, fmt.Sprintf("receive-%v", name), DefaultImage(), // TODO(bwplotka): BuildArgs should be interface. 
@@ -399,70 +492,18 @@ func NewIngestingReceiver(sharedDir, name string) (*Service, error) { "--tsdb.path": filepath.Join(container, "data"), "--log.level": infoLogLevel, })...), - e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), + e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), 8080, 9091, - 8081, + Port{Name: "remote-write", PortNum: 8081}, ) - receiver.SetUser(strconv.Itoa(os.Getuid())) - receiver.SetBackoff(defaultBackoffConfig) return receiver, nil } -func NewRoutingAndIngestingReceiverWithConfigWatcher(sharedDir, networkName, name string, replicationFactor int, hashring ...receive.HashringConfig) (*Service, error) { - localEndpoint := NewService(fmt.Sprintf("receive-%v", name), "", e2e.NewCommand("", ""), nil, 8080, 9091, 8081).GRPCNetworkEndpointFor(networkName) - if len(hashring) == 0 { - hashring = []receive.HashringConfig{{Endpoints: []string{localEndpoint}}} - } - - dir := filepath.Join(sharedDir, "data", "receive", name) - dataDir := filepath.Join(dir, "data") - container := filepath.Join(e2e.ContainerSharedDir, "data", "receive", name) - if err := os.MkdirAll(dataDir, 0750); err != nil { - return nil, errors.Wrap(err, "create receive dir") - } - b, err := json.Marshal(hashring) - if err != nil { - return nil, errors.Wrapf(err, "generate hashring file: %v", hashring) - } - - if err := ioutil.WriteFile(filepath.Join(dir, "hashrings.json"), b, 0600); err != nil { - return nil, errors.Wrap(err, "creating receive config") - } - - receiver := NewService( - fmt.Sprintf("receive-%v", name), - DefaultImage(), - // TODO(bwplotka): BuildArgs should be interface. - e2e.NewCommand("receive", e2e.BuildArgs(map[string]string{ - "--debug.name": fmt.Sprintf("receive-%v", name), - "--grpc-address": ":9091", - "--grpc-grace-period": "0s", - "--http-address": ":8080", - "--remote-write.address": ":8081", - "--label": fmt.Sprintf(`receive="%s"`, name), - "--tsdb.path": filepath.Join(container, "data"), - "--log.level": infoLogLevel, - "--receive.replication-factor": strconv.Itoa(replicationFactor), - "--receive.local-endpoint": localEndpoint, - "--receive.hashrings-file": filepath.Join(container, "hashrings.json"), - "--receive.hashrings-file-refresh-interval": "5s", - })...), - e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), - 8080, - 9091, - 8081, - ) - receiver.SetUser(strconv.Itoa(os.Getuid())) - receiver.SetBackoff(defaultBackoffConfig) - - return receiver, nil -} - -func NewRuler(sharedDir, name, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []query.Config) (*Service, error) { - dir := filepath.Join(sharedDir, "data", "rule", name) - container := filepath.Join(e2e.ContainerSharedDir, "data", "rule", name) +func NewRuler(e e2e.Environment, name, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []query.Config) (*e2e.InstrumentedRunnable, error) { + dir := filepath.Join(e.SharedDir(), "data", "rule", name) + container := filepath.Join(ContainerSharedDir, "data", "rule", name) if err := os.MkdirAll(dir, 0750); err != nil { return nil, errors.Wrap(err, "create rule dir") } @@ -479,7 +520,7 @@ func NewRuler(sharedDir, name, ruleSubDir string, amCfg []alert.AlertmanagerConf return nil, errors.Wrapf(err, "generate query file: %v", queryCfg) } - ruler := NewService( + ruler := NewService(e, fmt.Sprintf("rule-%v", name), DefaultImage(), e2e.NewCommand("rule", e2e.BuildArgs(map[string]string{ @@ -489,8 +530,8 @@ func NewRuler(sharedDir, name, ruleSubDir string, amCfg []alert.AlertmanagerConf "--http-address": ":8080", "--label": 
fmt.Sprintf(`replica="%s"`, name), "--data-dir": container, - "--rule-file": filepath.Join(e2e.ContainerSharedDir, ruleSubDir, "*.yaml"), - "--eval-interval": "3s", + "--rule-file": filepath.Join(ContainerSharedDir, ruleSubDir, "*.yaml"), + "--eval-interval": "1s", "--alertmanagers.config": string(amCfgBytes), "--alertmanagers.sd-dns-interval": "1s", "--log.level": infoLogLevel, @@ -498,19 +539,17 @@ func NewRuler(sharedDir, name, ruleSubDir string, amCfg []alert.AlertmanagerConf "--query.sd-dns-interval": "1s", "--resend-delay": "5s", })...), - e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), + e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), 8080, 9091, ) - ruler.SetUser(strconv.Itoa(os.Getuid())) - ruler.SetBackoff(defaultBackoffConfig) return ruler, nil } -func NewAlertmanager(sharedDir, name string) (*e2e.HTTPService, error) { - dir := filepath.Join(sharedDir, "data", "am", name) - container := filepath.Join(e2e.ContainerSharedDir, "data", "am", name) +func NewAlertmanager(e e2e.Environment, name string) (*e2e.InstrumentedRunnable, error) { + dir := filepath.Join(e.SharedDir(), "data", "am", name) + container := filepath.Join(ContainerSharedDir, "data", "am", name) if err := os.MkdirAll(dir, 0750); err != nil { return nil, errors.Wrap(err, "create am dir") } @@ -527,29 +566,30 @@ receivers: return nil, errors.Wrap(err, "creating alertmanager config file failed") } - s := e2e.NewHTTPService( - fmt.Sprintf("alertmanager-%v", name), - DefaultAlertmanagerImage(), - e2e.NewCommandWithoutEntrypoint("/bin/alertmanager", e2e.BuildArgs(map[string]string{ - "--config.file": filepath.Join(container, "config.yaml"), - "--web.listen-address": "0.0.0.0:8080", - "--log.level": infoLogLevel, - "--storage.path": container, - "--web.get-concurrency": "1", - "--web.timeout": "2m", - })...), - e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), - 8080, + s := e2e.NewInstrumentedRunnable( + e, fmt.Sprintf("alertmanager-%v", name), map[string]int{"http": 8080}, "http").Init( + e2e.StartOptions{ + Image: DefaultAlertmanagerImage(), + Command: e2e.NewCommandWithoutEntrypoint("/bin/alertmanager", e2e.BuildArgs(map[string]string{ + "--config.file": filepath.Join(container, "config.yaml"), + "--web.listen-address": "0.0.0.0:8080", + "--log.level": infoLogLevel, + "--storage.path": container, + "--web.get-concurrency": "1", + "--web.timeout": "2m", + })...), + Readiness: e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), + User: strconv.Itoa(os.Geteuid()), + WaitReadyBackoff: &defaultBackoffConfig, + }, ) - s.SetUser(strconv.Itoa(os.Getuid())) - s.SetBackoff(defaultBackoffConfig) return s, nil } -func NewStoreGW(sharedDir, name string, bucketConfig client.BucketConfig, relabelConfig ...relabel.Config) (*Service, error) { - dir := filepath.Join(sharedDir, "data", "store", name) - container := filepath.Join(e2e.ContainerSharedDir, "data", "store", name) +func NewStoreGW(e e2e.Environment, name string, bucketConfig client.BucketConfig, relabelConfig ...relabel.Config) (*e2e.InstrumentedRunnable, error) { + dir := filepath.Join(e.SharedDir(), "data", "store", name) + container := filepath.Join(ContainerSharedDir, "data", "store", name) if err := os.MkdirAll(dir, 0750); err != nil { return nil, errors.Wrap(err, "create store dir") } @@ -565,6 +605,7 @@ func NewStoreGW(sharedDir, name string, bucketConfig client.BucketConfig, relabe } store := NewService( + e, fmt.Sprintf("store-gw-%v", name), DefaultImage(), e2e.NewCommand("store", e2e.BuildArgs(map[string]string{ @@ -582,19 +623,17 @@ func 
NewStoreGW(sharedDir, name string, bucketConfig client.BucketConfig, relabe "--selector.relabel-config": string(relabelConfigBytes), "--consistency-delay": "30m", })...), - e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), + e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), 8080, 9091, ) - store.SetUser(strconv.Itoa(os.Getuid())) - store.SetBackoff(defaultBackoffConfig) return store, nil } -func NewCompactor(sharedDir, name string, bucketConfig client.BucketConfig, relabelConfig []relabel.Config, extArgs ...string) (*e2e.HTTPService, error) { - dir := filepath.Join(sharedDir, "data", "compact", name) - container := filepath.Join(e2e.ContainerSharedDir, "data", "compact", name) +func NewCompactor(e e2e.Environment, name string, bucketConfig client.BucketConfig, relabelConfig []relabel.Config, extArgs ...string) (*e2e.InstrumentedRunnable, error) { + dir := filepath.Join(e.SharedDir(), "data", "compact", name) + container := filepath.Join(ContainerSharedDir, "data", "compact", name) if err := os.MkdirAll(dir, 0750); err != nil { return nil, errors.Wrap(err, "create compact dir") @@ -610,29 +649,30 @@ func NewCompactor(sharedDir, name string, bucketConfig client.BucketConfig, rela return nil, errors.Wrapf(err, "generate compact relabel file: %v", relabelConfig) } - compactor := e2e.NewHTTPService( - fmt.Sprintf("compact-%s", name), - DefaultImage(), - e2e.NewCommand("compact", append(e2e.BuildArgs(map[string]string{ - "--debug.name": fmt.Sprintf("compact-%s", name), - "--log.level": infoLogLevel, - "--data-dir": container, - "--objstore.config": string(bktConfigBytes), - "--http-address": ":8080", - "--block-sync-concurrency": "20", - "--selector.relabel-config": string(relabelConfigBytes), - "--wait": "", - }), extArgs...)...), - e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), - 8080, + compactor := e2e.NewInstrumentedRunnable( + e, fmt.Sprintf("compact-%s", name), map[string]int{"http": 8080}, "http").Init( + e2e.StartOptions{ + Image: DefaultImage(), + Command: e2e.NewCommand("compact", append(e2e.BuildArgs(map[string]string{ + "--debug.name": fmt.Sprintf("compact-%s", name), + "--log.level": infoLogLevel, + "--data-dir": container, + "--objstore.config": string(bktConfigBytes), + "--http-address": ":8080", + "--block-sync-concurrency": "20", + "--selector.relabel-config": string(relabelConfigBytes), + "--wait": "", + }), extArgs...)...), + Readiness: e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), + User: strconv.Itoa(os.Getuid()), + WaitReadyBackoff: &defaultBackoffConfig, + }, ) - compactor.SetUser(strconv.Itoa(os.Getuid())) - compactor.SetBackoff(defaultBackoffConfig) return compactor, nil } -func NewQueryFrontend(name, downstreamURL string, cacheConfig queryfrontend.CacheProviderConfig) (*e2e.HTTPService, error) { +func NewQueryFrontend(e e2e.Environment, name, downstreamURL string, cacheConfig queryfrontend.CacheProviderConfig) (*e2e.InstrumentedRunnable, error) { cacheConfigBytes, err := yaml.Marshal(cacheConfig) if err != nil { return nil, errors.Wrapf(err, "marshal response cache config file: %v", cacheConfig) @@ -646,41 +686,118 @@ func NewQueryFrontend(name, downstreamURL string, cacheConfig queryfrontend.Cach "--query-range.response-cache-config": string(cacheConfigBytes), }) - queryFrontend := e2e.NewHTTPService( - fmt.Sprintf("query-frontend-%s", name), - DefaultImage(), - e2e.NewCommand("query-frontend", args...), - e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), - 8080, + queryFrontend := e2e.NewInstrumentedRunnable( + e, 
fmt.Sprintf("query-frontend-%s", name), map[string]int{"http": 8080}, "http").Init(
+		e2e.StartOptions{
+			Image:            DefaultImage(),
+			Command:          e2e.NewCommand("query-frontend", args...),
+			Readiness:        e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200),
+			User:             strconv.Itoa(os.Getuid()),
+			WaitReadyBackoff: &defaultBackoffConfig,
+		},
 	)
-	queryFrontend.SetUser(strconv.Itoa(os.Getuid()))
-	queryFrontend.SetBackoff(defaultBackoffConfig)
 
 	return queryFrontend, nil
 }
 
-func NewMemcached(name string) *e2e.ConcreteService {
-	memcached := e2e.NewConcreteService(
-		fmt.Sprintf("memcached-%s", name),
-		"docker.io/memcached:1.6.3-alpine",
-		e2e.NewCommand("memcached", []string{"-m 1024", "-I 1m", "-c 1024", "-v"}...),
-		nil,
-		11211,
+func NewReverseProxy(e e2e.Environment, name, tenantID, target string) (*e2e.InstrumentedRunnable, error) {
+	conf := fmt.Sprintf(`
+events {
+	worker_connections 1024;
+}
+
+http {
+	server {
+		listen 80;
+		server_name _;
+
+		location / {
+			proxy_set_header THANOS-TENANT %s;
+			proxy_pass %s;
+		}
+	}
+}
+`, tenantID, target)
+
+	dir := filepath.Join(e.SharedDir(), "data", "nginx", name)
+	if err := os.MkdirAll(dir, 0750); err != nil {
+		return nil, errors.Wrap(err, "create nginx dir")
+	}
+
+	if err := ioutil.WriteFile(filepath.Join(dir, "nginx.conf"), []byte(conf), 0600); err != nil {
+		return nil, errors.Wrap(err, "creating nginx config file failed")
+	}
+
+	nginx := e2e.NewInstrumentedRunnable(e, fmt.Sprintf("nginx-%s", name), map[string]int{"http": 80}, "http").Init(
+		e2e.StartOptions{
+			Image:            "docker.io/nginx:1.21.1-alpine",
+			Volumes:          []string{filepath.Join(dir, "/nginx.conf") + ":/etc/nginx/nginx.conf:ro"},
+			WaitReadyBackoff: &defaultBackoffConfig,
+		},
+	)
+
+	return nginx, nil
+}
+
+// NewMinio returns minio server, used as a local replacement for S3.
+// TODO(@matej-g): This is a temporary workaround for https://github.com/efficientgo/e2e/issues/11;
+// after this is addressed, all calls should be replaced with e2edb.NewMinio.
+func NewMinio(env e2e.Environment, name, bktName string) *e2e.InstrumentedRunnable {
+	image := "minio/minio:RELEASE.2019-12-30T05-45-39Z"
+	minioKESGithubContent := "https://raw.githubusercontent.com/minio/kes/master"
+	commands := []string{
+		"curl -sSL --tlsv1.2 -O '%s/root.key' -O '%s/root.cert'",
+		"mkdir -p /data/%s && minio server --address :%v --quiet /data",
+	}
+
+	return e2e.NewInstrumentedRunnable(
+		env,
+		name,
+		map[string]int{"http": 8090},
+		"http").Init(
+		e2e.StartOptions{
+			Image: image,
+			// Create the required bucket before starting minio.
+ Command: e2e.NewCommandWithoutEntrypoint("sh", "-c", fmt.Sprintf(strings.Join(commands, " && "), minioKESGithubContent, minioKESGithubContent, bktName, 8090)), + Readiness: e2e.NewHTTPReadinessProbe("http", "/minio/health/ready", 200, 200), + EnvVars: map[string]string{ + "MINIO_ACCESS_KEY": e2edb.MinioAccessKey, + "MINIO_SECRET_KEY": e2edb.MinioSecretKey, + "MINIO_BROWSER": "off", + "ENABLE_HTTPS": "0", + // https://docs.min.io/docs/minio-kms-quickstart-guide.html + "MINIO_KMS_KES_ENDPOINT": "https://play.min.io:7373", + "MINIO_KMS_KES_KEY_FILE": "root.key", + "MINIO_KMS_KES_CERT_FILE": "root.cert", + "MINIO_KMS_KES_KEY_NAME": "my-minio-key", + }, + }, + ) +} + +func NewMemcached(e e2e.Environment, name string) *e2e.InstrumentedRunnable { + memcached := e2e.NewInstrumentedRunnable(e, fmt.Sprintf("memcached-%s", name), map[string]int{"memcached": 11211}, "memcached").Init( + e2e.StartOptions{ + Image: "docker.io/memcached:1.6.3-alpine", + Command: e2e.NewCommand("memcached", []string{"-m 1024", "-I 1m", "-c 1024", "-v"}...), + User: strconv.Itoa(os.Getuid()), + WaitReadyBackoff: &defaultBackoffConfig, + }, ) - memcached.SetUser(strconv.Itoa(os.Getuid())) - memcached.SetBackoff(defaultBackoffConfig) return memcached } func NewToolsBucketWeb( + e e2e.Environment, name string, bucketConfig client.BucketConfig, routePrefix, externalPrefix string, minTime string, maxTime string, - relabelConfig string) (*Service, error) { + relabelConfig string, +) (*e2e.InstrumentedRunnable, error) { bktConfigBytes, err := yaml.Marshal(bucketConfig) if err != nil { return nil, errors.Wrapf(err, "generate tools bucket web config file: %v", bucketConfig) @@ -714,16 +831,14 @@ func NewToolsBucketWeb( args = append([]string{"bucket", "web"}, args...) - toolsBucketWeb := NewService( + toolsBucketWeb := NewService(e, fmt.Sprintf("toolsBucketWeb-%s", name), DefaultImage(), e2e.NewCommand("tools", args...), - e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), + e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), 8080, 9091, ) - toolsBucketWeb.SetUser(strconv.Itoa(os.Getuid())) - toolsBucketWeb.SetBackoff(defaultBackoffConfig) return toolsBucketWeb, nil } diff --git a/test/e2e/exemplars_api_test.go b/test/e2e/exemplars_api_test.go index 15aa749ecd1..13aaffca851 100644 --- a/test/e2e/exemplars_api_test.go +++ b/test/e2e/exemplars_api_test.go @@ -9,9 +9,10 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/integration/e2e" + "github.com/efficientgo/e2e" "github.com/pkg/errors" "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/thanos-io/thanos/pkg/exemplars/exemplarspb" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/testutil" @@ -24,64 +25,70 @@ const ( func TestExemplarsAPI_Fanout(t *testing.T) { t.Parallel() + var ( + prom1, prom2 *e2e.InstrumentedRunnable + sidecar1, sidecar2 *e2e.InstrumentedRunnable + err error + e *e2e.DockerEnvironment + ) - netName := "e2e_test_exemplars_fanout" - - s, err := e2e.NewScenario(netName) + e, err = e2e.NewDockerEnvironment("e2e_test_exemplars_fanout") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) - stores := []string{ - e2e.NetworkContainerHostPort(netName, "sidecar-prom1", 9091), // TODO(bwplotka): Use newer e2e lib to handle this in type safe manner. - e2e.NetworkContainerHostPort(netName, "sidecar-prom2", 9091), // TODO(bwplotka): Use newer e2e lib to handle this in type safe manner. 
-	}
-	q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "query", stores...).
-		WithExemplarAddresses(stores...).
-		WithTracingConfig(fmt.Sprintf(`type: JAEGER
-config:
-  sampler_type: const
-  sampler_param: 1
-  service_name: %s`, s.NetworkName()+"-query")).
-		Build()
-	testutil.Ok(t, err)
-	testutil.Ok(t, s.StartAndWaitReady(q))
+	qBuilder := e2ethanos.NewQuerierBuilder(e, "query")
+	qUninitiated := qBuilder.BuildUninitiated()
 
-	// Recreate Prometheus and sidecar with Thanos query scrape target.
-	prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(
-		s.SharedDir(),
-		netName,
+	prom1, sidecar1, err = e2ethanos.NewPrometheusWithSidecar(
+		e,
 		"prom1",
-		defaultPromConfig("ha", 0, "", "", "localhost:9090", q.NetworkHTTPEndpoint()),
+		defaultPromConfig("ha", 0, "", "", "localhost:9090", qUninitiated.InternalEndpoint("http")),
 		e2ethanos.DefaultPrometheusImage(),
 		e2ethanos.FeatureExemplarStorage,
 	)
 	testutil.Ok(t, err)
-	prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(
-		s.SharedDir(),
-		netName,
+	prom2, sidecar2, err = e2ethanos.NewPrometheusWithSidecar(
+		e,
 		"prom2",
-		defaultPromConfig("ha", 1, "", "", "localhost:9090", q.NetworkHTTPEndpoint()),
+		defaultPromConfig("ha", 1, "", "", "localhost:9090", qUninitiated.InternalEndpoint("http")),
 		e2ethanos.DefaultPrometheusImage(),
 		e2ethanos.FeatureExemplarStorage,
 	)
 	testutil.Ok(t, err)
-	testutil.Ok(t, s.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2))
+
+	tracingCfg := fmt.Sprintf(`type: JAEGER
+config:
+  sampler_type: const
+  sampler_param: 1
+  service_name: %s`, qUninitiated.Name())
+
+	stores := []string{sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc")}
+
+	qBuilder = qBuilder.WithExemplarAddresses(stores...).
+		WithTracingConfig(tracingCfg)
+
+	q, err := qBuilder.Initiate(qUninitiated, stores...)
+	testutil.Ok(t, err)
+	testutil.Ok(t, e2e.StartAndWaitReady(q))
+
+	// The Prometheus instances scrape the querier, so start them once it is up.
+	testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2))
 
 	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
 	t.Cleanup(cancel)
 
-	testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(2), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics))
-	testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(2), []string{"thanos_query_exemplar_apis_dns_provider_results"}, e2e.WaitMissingMetrics))
+	testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(2), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics()))
+	testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(2), []string{"thanos_query_exemplar_apis_dns_provider_results"}, e2e.WaitMissingMetrics()))
 
 	now := time.Now()
 	start := timestamp.FromTime(now.Add(-time.Hour))
 	end := timestamp.FromTime(now.Add(time.Hour))
 
 	// Send HTTP requests to thanos query to trigger exemplars.
- labelNames(t, ctx, q.HTTPEndpoint(), nil, start, end, func(res []string) bool { return true }) + labelNames(t, ctx, q.Endpoint("http"), nil, start, end, func(res []string) bool { return true }) t.Run("Basic exemplars query", func(t *testing.T) { - queryExemplars(t, ctx, q.HTTPEndpoint(), `http_request_duration_seconds_bucket{handler="label_names"}`, start, end, exemplarsOnExpectedSeries(map[string]string{ + queryExemplars(t, ctx, q.Endpoint("http"), `http_request_duration_seconds_bucket{handler="label_names"}`, start, end, exemplarsOnExpectedSeries(map[string]string{ "__name__": "http_request_duration_seconds_bucket", "handler": "label_names", "job": "myself", @@ -92,7 +99,7 @@ config: t.Run("Exemplars query with matched external label", func(t *testing.T) { // Here replica is an external label. - queryExemplars(t, ctx, q.HTTPEndpoint(), `http_request_duration_seconds_bucket{handler="label_names", replica="0"}`, start, end, exemplarsOnExpectedSeries(map[string]string{ + queryExemplars(t, ctx, q.Endpoint("http"), `http_request_duration_seconds_bucket{handler="label_names", replica="0"}`, start, end, exemplarsOnExpectedSeries(map[string]string{ "__name__": "http_request_duration_seconds_bucket", "handler": "label_names", "job": "myself", @@ -103,7 +110,7 @@ config: t.Run("Exemplars query doesn't match external label", func(t *testing.T) { // Here replica is an external label, but it doesn't match. - queryExemplars(t, ctx, q.HTTPEndpoint(), `http_request_duration_seconds_bucket{handler="label_names", replica="foo"}`, + queryExemplars(t, ctx, q.Endpoint("http"), `http_request_duration_seconds_bucket{handler="label_names", replica="foo"}`, start, end, func(data []*exemplarspb.ExemplarData) error { if len(data) > 0 { return errors.Errorf("expected no examplers, got %v", data) diff --git a/test/e2e/metadata_api_test.go b/test/e2e/metadata_api_test.go index eda8b325946..096560e64e2 100644 --- a/test/e2e/metadata_api_test.go +++ b/test/e2e/metadata_api_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/integration/e2e" + "github.com/efficientgo/e2e" "github.com/thanos-io/thanos/pkg/metadata/metadatapb" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/runutil" @@ -21,17 +21,14 @@ import ( func TestMetadataAPI_Fanout(t *testing.T) { t.Parallel() - netName := "e2e_test_metadata_fanout" - - s, err := e2e.NewScenario(netName) + e, err := e2e.NewDockerEnvironment("e2e_test_metadata_fanout") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) // 2x Prometheus. // Each Prometheus scrapes its own metrics and Sidecar's metrics. prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar( - s.SharedDir(), - netName, + e, "prom1", defaultPromConfig("ha", 0, "", "", "localhost:9090", "sidecar-prom1:8080"), e2ethanos.DefaultPrometheusImage(), @@ -39,32 +36,32 @@ func TestMetadataAPI_Fanout(t *testing.T) { testutil.Ok(t, err) prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar( - s.SharedDir(), - netName, + e, "prom2", defaultPromConfig("ha", 1, "", "", "localhost:9090", "sidecar-prom2:8080"), e2ethanos.DefaultPrometheusImage(), ) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) + testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) - stores := []string{sidecar1.GRPCNetworkEndpoint(), sidecar2.GRPCNetworkEndpoint()} - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "query", stores...). 
+ stores := []string{sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc")} + q, err := e2ethanos.NewQuerierBuilder( + e, "query", stores...). WithMetadataAddresses(stores...). Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) t.Cleanup(cancel) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(2), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(2), []string{"thanos_query_metadata_apis_dns_provider_results"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(2), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(2), []string{"thanos_query_metadata_apis_dns_provider_results"}, e2e.WaitMissingMetrics())) var promMeta map[string][]metadatapb.Meta // Wait metadata response to be ready as Prometheus gets metadata after scrape. - testutil.Ok(t, runutil.Retry(3*time.Second, ctx.Done(), func() error { - promMeta, err = promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+prom1.HTTPEndpoint()), "", -1) + testutil.Ok(t, runutil.Retry(5*time.Second, ctx.Done(), func() error { + promMeta, err = promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+prom1.Endpoint("http")), "", -1) testutil.Ok(t, err) if len(promMeta) > 0 { return nil @@ -72,7 +69,7 @@ func TestMetadataAPI_Fanout(t *testing.T) { return fmt.Errorf("empty metadata response from Prometheus") })) - thanosMeta, err := promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+q.HTTPEndpoint()), "", -1) + thanosMeta, err := promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+q.Endpoint("http")), "", -1) testutil.Ok(t, err) testutil.Assert(t, len(thanosMeta) > 0, "got empty metadata response from Thanos") @@ -80,22 +77,22 @@ func TestMetadataAPI_Fanout(t *testing.T) { metadataEqual(t, thanosMeta, promMeta) // We only expect to see one metadata returned. - thanosMeta, err = promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+q.HTTPEndpoint()), "", 1) + thanosMeta, err = promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+q.Endpoint("http")), "", 1) testutil.Ok(t, err) testutil.Equals(t, len(thanosMeta), 1) // We only expect to see ten metadata returned. - thanosMeta, err = promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+q.HTTPEndpoint()), "", 10) + thanosMeta, err = promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+q.Endpoint("http")), "", 10) testutil.Ok(t, err) testutil.Equals(t, len(thanosMeta), 10) // No metadata returned. - thanosMeta, err = promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+q.HTTPEndpoint()), "", 0) + thanosMeta, err = promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+q.Endpoint("http")), "", 0) testutil.Ok(t, err) testutil.Equals(t, len(thanosMeta), 0) // Only prometheus_build_info metric will be returned. 
-	thanosMeta, err = promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+q.HTTPEndpoint()), "prometheus_build_info", -1)
+	thanosMeta, err = promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+q.Endpoint("http")), "prometheus_build_info", -1)
 	testutil.Ok(t, err)
 	testutil.Assert(t, len(thanosMeta) == 1 && len(thanosMeta["prometheus_build_info"]) > 0, "expected one prometheus_build_info metadata from Thanos, got %v", thanosMeta)
 }
diff --git a/test/e2e/query_frontend_test.go b/test/e2e/query_frontend_test.go
index 37f4ea7d6b3..6635555ed0c 100644
--- a/test/e2e/query_frontend_test.go
+++ b/test/e2e/query_frontend_test.go
@@ -9,12 +9,13 @@ import (
 	"testing"
 	"time"
 
-	"github.com/cortexproject/cortex/integration/e2e"
+	"github.com/efficientgo/e2e"
+	"github.com/efficientgo/e2e/matchers"
 	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/timestamp"
 	"github.com/thanos-io/thanos/pkg/cacheutil"
 	"github.com/thanos-io/thanos/pkg/promclient"
 	"github.com/thanos-io/thanos/pkg/queryfrontend"
@@ -25,19 +26,19 @@ import (
 
 func TestQueryFrontend(t *testing.T) {
 	t.Parallel()
 
-	s, err := e2e.NewScenario("e2e_test_query_frontend")
+	e, err := e2e.NewDockerEnvironment("e2e_test_query_frontend")
 	testutil.Ok(t, err)
-	t.Cleanup(e2ethanos.CleanScenario(t, s))
+	t.Cleanup(e2ethanos.CleanScenario(t, e))
 
 	now := time.Now()
 
-	prom, sidecar, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), s.NetworkName(), "1", defaultPromConfig("test", 0, "", ""), e2ethanos.DefaultPrometheusImage())
+	prom, sidecar, err := e2ethanos.NewPrometheusWithSidecar(e, "1", defaultPromConfig("test", 0, "", ""), e2ethanos.DefaultPrometheusImage())
 	testutil.Ok(t, err)
-	testutil.Ok(t, s.StartAndWaitReady(prom, sidecar))
+	testutil.Ok(t, e2e.StartAndWaitReady(prom, sidecar))
 
-	q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", sidecar.GRPCNetworkEndpoint()).Build()
+	q, err := e2ethanos.NewQuerierBuilder(e, "1", sidecar.InternalEndpoint("grpc")).Build()
 	testutil.Ok(t, err)
-	testutil.Ok(t, s.StartAndWaitReady(q))
+	testutil.Ok(t, e2e.StartAndWaitReady(q))
 
 	inMemoryCacheConfig := queryfrontend.CacheProviderConfig{
 		Type: queryfrontend.INMEMORY,
@@ -47,18 +48,18 @@ func TestQueryFrontend(t *testing.T) {
 		},
 	}
 
-	queryFrontend, err := e2ethanos.NewQueryFrontend("1", "http://"+q.NetworkHTTPEndpoint(), inMemoryCacheConfig)
+	queryFrontend, err := e2ethanos.NewQueryFrontend(e, "1", "http://"+q.InternalEndpoint("http"), inMemoryCacheConfig)
 	testutil.Ok(t, err)
-	testutil.Ok(t, s.StartAndWaitReady(queryFrontend))
+	testutil.Ok(t, e2e.StartAndWaitReady(queryFrontend))
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
 	t.Cleanup(cancel)
 
-	testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics))
+	testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics()))
 
 	// Ensure we can get the result from Querier first so that it
 	// doesn't need to retry when we send queries to the frontend later.
-	queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{
+	queryAndAssertSeries(t, ctx, q.Endpoint("http"), queryUpWithoutInstance, promclient.QueryOptions{
 		Deduplicate: false,
 	}, []model.Metric{
 		{
@@ -68,14 +69,15 @@ func TestQueryFrontend(t *testing.T) {
 		},
 	})
 
-	vals, err := q.SumMetrics([]string{"http_requests_total"}, e2e.WithLabelMatchers(
-		labels.MustNewMatcher(labels.MatchEqual, "handler", "query")))
+	vals, err := q.SumMetrics([]string{"http_requests_total"}, e2e.WithLabelMatchers(
+		matchers.MustNewMatcher(matchers.MatchEqual, "handler", "query")))
+
 	testutil.Ok(t, err)
 	testutil.Equals(t, 1, len(vals))
 	queryTimes := vals[0]
 
 	t.Run("query frontend works for instant query", func(t *testing.T) {
-		queryAndAssertSeries(t, ctx, queryFrontend.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{
+		queryAndAssertSeries(t, ctx, queryFrontend.Endpoint("http"), queryUpWithoutInstance, promclient.QueryOptions{
 			Deduplicate: false,
 		}, []model.Metric{
 			{
@@ -88,21 +90,21 @@ func TestQueryFrontend(t *testing.T) {
 		testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions(
 			e2e.Equals(1),
 			[]string{"thanos_query_frontend_queries_total"},
-			e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "op", "query"))),
-		)
+			e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "op", "query")),
+		))
 
 		testutil.Ok(t, q.WaitSumMetricsWithOptions(
 			e2e.Equals(queryTimes+1),
 			[]string{"http_requests_total"},
-			e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "handler", "query"))),
-		)
+			e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "handler", "query")),
+		))
 	})
 
 	t.Run("query frontend works for range query and it can cache results", func(t *testing.T) {
 		rangeQuery(
 			t,
 			ctx,
-			queryFrontend.HTTPEndpoint(),
+			queryFrontend.Endpoint("http"),
 			queryUpWithoutInstance,
 			timestamp.FromTime(now.Add(-time.Hour)),
 			timestamp.FromTime(now.Add(time.Hour)),
@@ -119,8 +121,8 @@ func TestQueryFrontend(t *testing.T) {
 		testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions(
 			e2e.Equals(1),
 			[]string{"thanos_query_frontend_queries_total"},
-			e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "op", "query_range"))),
-		)
+			e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "op", "query_range")),
+		))
 		testutil.Ok(t, queryFrontend.WaitSumMetrics(e2e.Equals(1), "cortex_cache_fetched_keys"))
 		testutil.Ok(t, queryFrontend.WaitSumMetrics(e2e.Equals(0), "cortex_cache_hits"))
 		testutil.Ok(t, queryFrontend.WaitSumMetrics(e2e.Equals(1), "querier_cache_added_new_total"))
@@ -135,8 +137,8 @@ func TestQueryFrontend(t *testing.T) {
 		testutil.Ok(t, q.WaitSumMetricsWithOptions(
 			e2e.Equals(1),
 			[]string{"http_requests_total"},
-			e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "handler", "query_range"))),
-		)
+			e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "handler", "query_range")),
+		))
 	})
 
 	t.Run("same range query, cache hit.", func(t *testing.T) {
@@ -144,7 +146,7 @@ func TestQueryFrontend(t *testing.T) {
 		rangeQuery(
 			t,
 			ctx,
-			queryFrontend.HTTPEndpoint(),
+			queryFrontend.Endpoint("http"),
 			queryUpWithoutInstance,
 			timestamp.FromTime(now.Add(-time.Hour)),
 			timestamp.FromTime(now.Add(time.Hour)),
@@ -161,7 +163,7 @@ func TestQueryFrontend(t *testing.T) {
 		testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions(
 			e2e.Equals(2),
 			[]string{"thanos_query_frontend_queries_total"},
-			e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "op", "query_range"))),
+			
e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "op", "query_range"))), ) testutil.Ok(t, queryFrontend.WaitSumMetrics(e2e.Equals(2), "cortex_cache_fetched_keys")) testutil.Ok(t, queryFrontend.WaitSumMetrics(e2e.Equals(1), "cortex_cache_hits")) @@ -174,14 +176,14 @@ func TestQueryFrontend(t *testing.T) { // Query is only 2h so it won't be split. testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(2), []string{"thanos_frontend_split_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "tripperware", "query_range"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "tripperware", "query_range"))), ) // One more request is needed in order to satisfy the req range. testutil.Ok(t, q.WaitSumMetricsWithOptions( e2e.Equals(2), []string{"http_requests_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "handler", "query_range"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "handler", "query_range"))), ) }) @@ -189,7 +191,7 @@ func TestQueryFrontend(t *testing.T) { rangeQuery( t, ctx, - queryFrontend.HTTPEndpoint(), + queryFrontend.Endpoint("http"), queryUpWithoutInstance, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(24*time.Hour)), @@ -206,7 +208,7 @@ func TestQueryFrontend(t *testing.T) { testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(3), []string{"thanos_query_frontend_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "op", "query_range"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "op", "query_range"))), ) testutil.Ok(t, queryFrontend.WaitSumMetrics(e2e.Equals(3), "cortex_cache_fetched_keys")) testutil.Ok(t, queryFrontend.WaitSumMetrics(e2e.Equals(2), "cortex_cache_hits")) @@ -219,94 +221,94 @@ func TestQueryFrontend(t *testing.T) { // Query is 25h so it will be split to 2 requests. testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(4), []string{"thanos_frontend_split_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "tripperware", "query_range"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "tripperware", "query_range"))), ) testutil.Ok(t, q.WaitSumMetricsWithOptions( e2e.Equals(4), []string{"http_requests_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "handler", "query_range"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "handler", "query_range"))), ) }) t.Run("query frontend splitting works for labels names API", func(t *testing.T) { // LabelNames and LabelValues API should still work via query frontend. 
- labelNames(t, ctx, queryFrontend.HTTPEndpoint(), nil, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { + labelNames(t, ctx, queryFrontend.Endpoint("http"), nil, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { return len(res) > 0 }) testutil.Ok(t, q.WaitSumMetricsWithOptions( e2e.Equals(1), []string{"http_requests_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "handler", "label_names"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "handler", "label_names"))), ) testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(1), []string{"thanos_query_frontend_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "op", "label_names"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "op", "label_names"))), ) // Query is only 2h so it won't be split. testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(1), []string{"thanos_frontend_split_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "tripperware", "labels"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "tripperware", "labels"))), ) - labelNames(t, ctx, queryFrontend.HTTPEndpoint(), nil, timestamp.FromTime(now.Add(-24*time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { + labelNames(t, ctx, queryFrontend.Endpoint("http"), nil, timestamp.FromTime(now.Add(-24*time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { return len(res) > 0 }) testutil.Ok(t, q.WaitSumMetricsWithOptions( e2e.Equals(3), []string{"http_requests_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "handler", "label_names"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "handler", "label_names"))), ) testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(2), []string{"thanos_query_frontend_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "op", "label_names"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "op", "label_names"))), ) // Query is 25h so split to 2 requests. 
testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(3), []string{"thanos_frontend_split_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "tripperware", "labels"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "tripperware", "labels"))), ) }) t.Run("query frontend splitting works for labels values API", func(t *testing.T) { - labelValues(t, ctx, queryFrontend.HTTPEndpoint(), "instance", nil, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { + labelValues(t, ctx, queryFrontend.Endpoint("http"), "instance", nil, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { return len(res) == 1 && res[0] == "localhost:9090" }) testutil.Ok(t, q.WaitSumMetricsWithOptions( e2e.Equals(1), []string{"http_requests_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "handler", "label_values"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "handler", "label_values"))), ) testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(1), []string{"thanos_query_frontend_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "op", "label_values"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "op", "label_values"))), ) // Query is only 2h so it won't be split. testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(4), []string{"thanos_frontend_split_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "tripperware", "labels"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "tripperware", "labels"))), ) - labelValues(t, ctx, queryFrontend.HTTPEndpoint(), "instance", nil, timestamp.FromTime(now.Add(-24*time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { + labelValues(t, ctx, queryFrontend.Endpoint("http"), "instance", nil, timestamp.FromTime(now.Add(-24*time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { return len(res) == 1 && res[0] == "localhost:9090" }) testutil.Ok(t, q.WaitSumMetricsWithOptions( e2e.Equals(3), []string{"http_requests_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "handler", "label_values"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "handler", "label_values"))), ) testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(2), []string{"thanos_query_frontend_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "op", "label_values"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "op", "label_values"))), ) // Query is 25h so split to 2 requests. 
testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(6), []string{"thanos_frontend_split_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "tripperware", "labels"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "tripperware", "labels"))), ) }) @@ -314,7 +316,7 @@ func TestQueryFrontend(t *testing.T) { series( t, ctx, - queryFrontend.HTTPEndpoint(), + queryFrontend.Endpoint("http"), []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "__name__", "up")}, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), @@ -334,23 +336,23 @@ func TestQueryFrontend(t *testing.T) { testutil.Ok(t, q.WaitSumMetricsWithOptions( e2e.Equals(1), []string{"http_requests_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "handler", "series"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "handler", "series"))), ) testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(1), []string{"thanos_query_frontend_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "op", "series"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "op", "series"))), ) // Query is only 2h so it won't be split. testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(7), []string{"thanos_frontend_split_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "tripperware", "labels"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "tripperware", "labels"))), ) series( t, ctx, - queryFrontend.HTTPEndpoint(), + queryFrontend.Endpoint("http"), []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "__name__", "up")}, timestamp.FromTime(now.Add(-24*time.Hour)), timestamp.FromTime(now.Add(time.Hour)), @@ -370,17 +372,17 @@ func TestQueryFrontend(t *testing.T) { testutil.Ok(t, q.WaitSumMetricsWithOptions( e2e.Equals(3), []string{"http_requests_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "handler", "series"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "handler", "series"))), ) testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(2), []string{"thanos_query_frontend_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "op", "series"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "op", "series"))), ) // Query is only 2h so it won't be split. 
testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(9), []string{"thanos_frontend_split_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "tripperware", "labels"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "tripperware", "labels"))), ) }) } @@ -388,28 +390,28 @@ func TestQueryFrontend(t *testing.T) { func TestQueryFrontendMemcachedCache(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_query_frontend_memcached") + e, err := e2e.NewDockerEnvironment("e2e_test_query_frontend_memcached") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) now := time.Now() - prom, sidecar, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), s.NetworkName(), "1", defaultPromConfig("test", 0, "", ""), e2ethanos.DefaultPrometheusImage()) + prom, sidecar, err := e2ethanos.NewPrometheusWithSidecar(e, "1", defaultPromConfig("test", 0, "", ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom, sidecar)) + testutil.Ok(t, e2e.StartAndWaitReady(prom, sidecar)) - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", sidecar.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", sidecar.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) - memcached := e2ethanos.NewMemcached("1") - testutil.Ok(t, s.StartAndWaitReady(memcached)) + memcached := e2ethanos.NewMemcached(e, "1") + testutil.Ok(t, e2e.StartAndWaitReady(memcached)) memCachedConfig := queryfrontend.CacheProviderConfig{ Type: queryfrontend.MEMCACHED, Config: queryfrontend.MemcachedResponseCacheConfig{ Memcached: cacheutil.MemcachedClientConfig{ - Addresses: []string{memcached.NetworkEndpoint(11211)}, + Addresses: []string{memcached.InternalEndpoint("memcached")}, MaxIdleConnections: 100, MaxAsyncConcurrency: 20, MaxGetMultiConcurrency: 100, @@ -421,20 +423,20 @@ func TestQueryFrontendMemcachedCache(t *testing.T) { }, } - queryFrontend, err := e2ethanos.NewQueryFrontend("1", "http://"+q.NetworkHTTPEndpoint(), memCachedConfig) + queryFrontend, err := e2ethanos.NewQueryFrontend(e, "1", "http://"+q.InternalEndpoint("http"), memCachedConfig) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(queryFrontend)) + testutil.Ok(t, e2e.StartAndWaitReady(queryFrontend)) ctx, cancel := context.WithTimeout(context.Background(), time.Minute) t.Cleanup(cancel) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) testutil.Ok(t, queryFrontend.WaitSumMetrics(e2e.Equals(1), "cortex_memcache_client_servers")) // Ensure we can get the result from Querier first so that it // doesn't need to retry when we send queries to the frontend later. 
- queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + queryAndAssertSeries(t, ctx, q.Endpoint("http"), queryUpWithoutInstance, promclient.QueryOptions{ Deduplicate: false, }, []model.Metric{ { @@ -445,14 +447,14 @@ func TestQueryFrontendMemcachedCache(t *testing.T) { }) vals, err := q.SumMetrics([]string{"http_requests_total"}, e2e.WithLabelMatchers( - labels.MustNewMatcher(labels.MatchEqual, "handler", "query"))) + matchers.MustNewMatcher(matchers.MatchEqual, "handler", "query"))) testutil.Ok(t, err) testutil.Equals(t, 1, len(vals)) rangeQuery( t, ctx, - queryFrontend.HTTPEndpoint(), + queryFrontend.Endpoint("http"), queryUpWithoutInstance, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), @@ -469,7 +471,7 @@ func TestQueryFrontendMemcachedCache(t *testing.T) { testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(1), []string{"thanos_query_frontend_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "op", "query_range"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "op", "query_range"))), ) testutil.Ok(t, queryFrontend.WaitSumMetrics(e2e.Equals(1), "cortex_cache_fetched_keys")) @@ -482,7 +484,7 @@ func TestQueryFrontendMemcachedCache(t *testing.T) { rangeQuery( t, ctx, - queryFrontend.HTTPEndpoint(), + queryFrontend.Endpoint("http"), queryUpWithoutInstance, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), @@ -499,7 +501,7 @@ func TestQueryFrontendMemcachedCache(t *testing.T) { testutil.Ok(t, queryFrontend.WaitSumMetricsWithOptions( e2e.Equals(2), []string{"thanos_query_frontend_queries_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "op", "query_range"))), + e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "op", "query_range"))), ) // Query is only 2h so it won't be split. 
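The query-frontend hunks above apply the same mechanical translation used across this series: e2e.NewScenario becomes e2e.NewDockerEnvironment, host and in-network addresses are looked up by port name via Endpoint("http") / InternalEndpoint("grpc") instead of HTTPEndpoint() / GRPCNetworkEndpoint(), e2e.WaitMissingMetrics is now a function call returning an option, and metric label matchers come from github.com/efficientgo/e2e/matchers rather than prometheus/pkg/labels. Below is a minimal sketch (not part of the patch) of the shape a migrated test takes, assuming the existing helpers (defaultPromConfig, e2ethanos.CleanScenario) keep the signatures used above; the environment name "e2e_demo" and config name "demo" are illustrative:

	e, err := e2e.NewDockerEnvironment("e2e_demo") // was: e2e.NewScenario("e2e_demo")
	testutil.Ok(t, err)
	t.Cleanup(e2ethanos.CleanScenario(t, e))

	prom, sidecar, err := e2ethanos.NewPrometheusWithSidecar(e, "1", defaultPromConfig("demo", 0, "", ""), e2ethanos.DefaultPrometheusImage())
	testutil.Ok(t, err)
	testutil.Ok(t, e2e.StartAndWaitReady(prom, sidecar)) // was: s.StartAndWaitReady(prom, sidecar)

	// Ports are addressed by name now: InternalEndpoint("grpc") resolves inside the
	// Docker network, Endpoint("http") resolves from the host running the test.
	q, err := e2ethanos.NewQuerierBuilder(e, "1", sidecar.InternalEndpoint("grpc")).Build()
	testutil.Ok(t, err)
	testutil.Ok(t, e2e.StartAndWaitReady(q))

	// WaitMissingMetrics is now a constructor for the option, and metric-sum label
	// matchers come from the efficientgo/e2e/matchers package.
	testutil.Ok(t, q.WaitSumMetricsWithOptions(
		e2e.Equals(1), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics()))

Note that series matchers sent to the query API (the []*labels.Matcher arguments to series()) still use prometheus/pkg/labels; only the assertions against each runnable's own metrics switch to the matchers package, which is why both imports remain in this file.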
diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index 8c34a282836..06c62087635 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -17,7 +17,7 @@ import ( "github.com/chromedp/cdproto/network" "github.com/chromedp/chromedp" - "github.com/cortexproject/cortex/integration/e2e" + "github.com/efficientgo/e2e" "github.com/go-kit/kit/log" "github.com/pkg/errors" "github.com/prometheus/common/model" @@ -97,36 +97,37 @@ func sortResults(res model.Vector) { func TestQuery(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_query") + e, err := e2e.NewDockerEnvironment("e2e_test_query") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) - receiver, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "1", 1) + receiver := e2ethanos.NewUninitiatedReceiver(e, "1") + receiverRunnable, err := e2ethanos.NewRoutingAndIngestingReceiverFromService(receiver, e.SharedDir(), 1) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(receiver)) + testutil.Ok(t, e2e.StartAndWaitReady(receiverRunnable)) - prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query", "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage()) + prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(e, "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query", "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(e, "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - prom3, sidecar3, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query", "ha1", defaultPromConfig("prom-ha", 0, "", filepath.Join(e2e.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage()) + prom3, sidecar3, err := e2ethanos.NewPrometheusWithSidecar(e, "ha1", defaultPromConfig("prom-ha", 0, "", filepath.Join(e2ethanos.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - prom4, sidecar4, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query", "ha2", defaultPromConfig("prom-ha", 1, "", filepath.Join(e2e.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage()) + prom4, sidecar4, err := e2ethanos.NewPrometheusWithSidecar(e, "ha2", defaultPromConfig("prom-ha", 1, "", filepath.Join(e2ethanos.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2, prom3, sidecar3, prom4, sidecar4)) + testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2, prom3, sidecar3, prom4, sidecar4)) // Querier. Both fileSD and directly by flags. - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", sidecar1.GRPCNetworkEndpoint(), sidecar2.GRPCNetworkEndpoint(), receiver.GRPCNetworkEndpoint()). 
- WithFileSDStoreAddresses(sidecar3.GRPCNetworkEndpoint(), sidecar4.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")). + WithFileSDStoreAddresses(sidecar3.InternalEndpoint("grpc"), sidecar4.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) t.Cleanup(cancel) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(5), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(5), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + queryAndAssertSeries(t, ctx, q.Endpoint("http"), queryUpWithoutInstance, promclient.QueryOptions{ Deduplicate: false, }, []model.Metric{ { @@ -137,7 +138,7 @@ func TestQuery(t *testing.T) { { "job": "myself", "prometheus": "prom-both-remote-write-and-sidecar", - "receive": "1", + "receive": "receive-1", "replica": "1234", "tenant_id": "default-tenant", }, @@ -159,7 +160,7 @@ func TestQuery(t *testing.T) { }) // With deduplication. - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + queryAndAssertSeries(t, ctx, q.Endpoint("http"), queryUpWithoutInstance, promclient.QueryOptions{ Deduplicate: true, }, []model.Metric{ { @@ -169,7 +170,7 @@ func TestQuery(t *testing.T) { { "job": "myself", "prometheus": "prom-both-remote-write-and-sidecar", - "receive": "1", + "receive": "receive-1", "tenant_id": "default-tenant", }, { @@ -186,35 +187,35 @@ func TestQuery(t *testing.T) { func TestQueryExternalPrefixWithoutReverseProxy(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_query_route_prefix") + e, err := e2e.NewDockerEnvironment("e2e_test_query_route_prefix") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) externalPrefix := "test" - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1"). + q, err := e2ethanos.NewQuerierBuilder(e, "1"). WithExternalPrefix(externalPrefix).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) - checkNetworkRequests(t, "http://"+q.HTTPEndpoint()+"/"+externalPrefix+"/graph") + checkNetworkRequests(t, "http://"+q.Endpoint("http")+"/"+externalPrefix+"/graph") } func TestQueryExternalPrefix(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_query_external_prefix") + e, err := e2e.NewDockerEnvironment("e2e_test_query_external_prefix") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) externalPrefix := "thanos" - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1"). + q, err := e2ethanos.NewQuerierBuilder(e, "1"). 
WithExternalPrefix(externalPrefix).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) - querierURL := mustURLParse(t, "http://"+q.HTTPEndpoint()+"/"+externalPrefix) + querierURL := mustURLParse(t, "http://"+q.Endpoint("http")+"/"+externalPrefix) querierProxy := httptest.NewServer(e2ethanos.NewSingleHostReverseProxy(querierURL, externalPrefix)) t.Cleanup(querierProxy.Close) @@ -225,21 +226,21 @@ func TestQueryExternalPrefix(t *testing.T) { func TestQueryExternalPrefixAndRoutePrefix(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_query_external_prefix_and_route_prefix") + e, err := e2e.NewDockerEnvironment("e2e_test_query_external_prefix_and_route_prefix") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) externalPrefix := "thanos" routePrefix := "test" - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1"). + q, err := e2ethanos.NewQuerierBuilder(e, "1"). WithRoutePrefix(routePrefix). WithExternalPrefix(externalPrefix). Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) - querierURL := mustURLParse(t, "http://"+q.HTTPEndpoint()+"/"+routePrefix) + querierURL := mustURLParse(t, "http://"+q.Endpoint("http")+"/"+routePrefix) querierProxy := httptest.NewServer(e2ethanos.NewSingleHostReverseProxy(querierURL, externalPrefix)) t.Cleanup(querierProxy.Close) @@ -250,38 +251,39 @@ func TestQueryExternalPrefixAndRoutePrefix(t *testing.T) { func TestQueryLabelNames(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_query_label_names") + e, err := e2e.NewDockerEnvironment("e2e_test_query_label_names") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) - receiver, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "1", 1) + receiver := e2ethanos.NewUninitiatedReceiver(e, "1") + receiverRunnable, err := e2ethanos.NewRoutingAndIngestingReceiverFromService(receiver, e.SharedDir(), 1) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(receiver)) + testutil.Ok(t, e2e.StartAndWaitReady(receiverRunnable)) - prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), s.NetworkName(), "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage()) + prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(e, "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), s.NetworkName(), "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(e, "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) + testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", sidecar1.GRPCNetworkEndpoint(), sidecar2.GRPCNetworkEndpoint(), receiver.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", sidecar1.InternalEndpoint("grpc"), 
sidecar2.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) t.Cleanup(cancel) now := time.Now() - labelNames(t, ctx, q.HTTPEndpoint(), nil, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { + labelNames(t, ctx, q.Endpoint("http"), nil, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { return len(res) > 0 }) // Outside time range. - labelNames(t, ctx, q.HTTPEndpoint(), nil, timestamp.FromTime(now.Add(-24*time.Hour)), timestamp.FromTime(now.Add(-23*time.Hour)), func(res []string) bool { + labelNames(t, ctx, q.Endpoint("http"), nil, timestamp.FromTime(now.Add(-24*time.Hour)), timestamp.FromTime(now.Add(-23*time.Hour)), func(res []string) bool { return len(res) == 0 }) - labelNames(t, ctx, q.HTTPEndpoint(), []*labels.Matcher{{Type: labels.MatchEqual, Name: "__name__", Value: "up"}}, + labelNames(t, ctx, q.Endpoint("http"), []*labels.Matcher{{Type: labels.MatchEqual, Name: "__name__", Value: "up"}}, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { // Expected result: [__name__, instance, job, prometheus, replica, receive, tenant_id] // Pre-labelnames pushdown we've done Select() over all series and picked out the label names hence they all had external labels. @@ -291,7 +293,7 @@ func TestQueryLabelNames(t *testing.T) { ) // There is no matched series. - labelNames(t, ctx, q.HTTPEndpoint(), []*labels.Matcher{{Type: labels.MatchEqual, Name: "__name__", Value: "foobar"}}, + labelNames(t, ctx, q.Endpoint("http"), []*labels.Matcher{{Type: labels.MatchEqual, Name: "__name__", Value: "foobar"}}, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { return len(res) == 0 }, @@ -301,44 +303,45 @@ func TestQueryLabelNames(t *testing.T) { func TestQueryLabelValues(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_query_label_values") + e, err := e2e.NewDockerEnvironment("e2e_test_query_label_values") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) - receiver, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "1", 1) + receiver := e2ethanos.NewUninitiatedReceiver(e, "1") + receiverRunnable, err := e2ethanos.NewRoutingAndIngestingReceiverFromService(receiver, e.SharedDir(), 1) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(receiver)) + testutil.Ok(t, e2e.StartAndWaitReady(receiverRunnable)) - prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), s.NetworkName(), "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage()) + prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(e, "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), s.NetworkName(), "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(e, "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, 
e2ethanos.RemoteWriteEndpoint(receiver.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) + testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", sidecar1.GRPCNetworkEndpoint(), sidecar2.GRPCNetworkEndpoint(), receiver.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) t.Cleanup(cancel) now := time.Now() - labelValues(t, ctx, q.HTTPEndpoint(), "instance", nil, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { + labelValues(t, ctx, q.Endpoint("http"), "instance", nil, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { return len(res) == 1 && res[0] == "localhost:9090" }) // Outside time range. - labelValues(t, ctx, q.HTTPEndpoint(), "instance", nil, timestamp.FromTime(now.Add(-24*time.Hour)), timestamp.FromTime(now.Add(-23*time.Hour)), func(res []string) bool { + labelValues(t, ctx, q.Endpoint("http"), "instance", nil, timestamp.FromTime(now.Add(-24*time.Hour)), timestamp.FromTime(now.Add(-23*time.Hour)), func(res []string) bool { return len(res) == 0 }) - labelValues(t, ctx, q.HTTPEndpoint(), "__name__", []*labels.Matcher{{Type: labels.MatchEqual, Name: "__name__", Value: "up"}}, + labelValues(t, ctx, q.Endpoint("http"), "__name__", []*labels.Matcher{{Type: labels.MatchEqual, Name: "__name__", Value: "up"}}, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { return len(res) == 1 && res[0] == "up" }, ) - labelValues(t, ctx, q.HTTPEndpoint(), "__name__", []*labels.Matcher{{Type: labels.MatchEqual, Name: "__name__", Value: "foobar"}}, + labelValues(t, ctx, q.Endpoint("http"), "__name__", []*labels.Matcher{{Type: labels.MatchEqual, Name: "__name__", Value: "foobar"}}, timestamp.FromTime(now.Add(-time.Hour)), timestamp.FromTime(now.Add(time.Hour)), func(res []string) bool { return len(res) == 0 }, @@ -363,52 +366,53 @@ func TestQueryCompatibilityWithPreInfoAPI(t *testing.T) { } { i := i t.Run(fmt.Sprintf("%+v", tcase), func(t *testing.T) { - net := fmt.Sprintf("e2e_test_query_comp_query_%d", i) - s, err := e2e.NewScenario(net) + e, err := e2e.NewDockerEnvironment(fmt.Sprintf("e2e_test_query_comp_query_%d", i)) testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) promRulesSubDir := filepath.Join("rules") - testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), promRulesSubDir), os.ModePerm)) + testutil.Ok(t, os.MkdirAll(filepath.Join(e.SharedDir(), promRulesSubDir), os.ModePerm)) // Create the abort_on_partial_response alert for Prometheus. // We don't create the warn_on_partial_response alert as Prometheus has strict yaml unmarshalling. 
- createRuleFile(t, filepath.Join(s.SharedDir(), promRulesSubDir, "rules.yaml"), testAlertRuleAbortOnPartialResponse) + createRuleFile(t, filepath.Join(e.SharedDir(), promRulesSubDir, "rules.yaml"), testAlertRuleAbortOnPartialResponse) + + qBuilder := e2ethanos.NewQuerierBuilder(e, "1") + qUninit := qBuilder.BuildUninitiated() p1, s1, err := e2ethanos.NewPrometheusWithSidecarCustomImage( - s.SharedDir(), - net, + e, "p1", - defaultPromConfig("p1", 0, "", filepath.Join(e2e.ContainerSharedDir, promRulesSubDir, "*.yaml"), "localhost:9090", e2e.NetworkContainerHostPort(net, "querier-1", 8080)), // TODO(bwplotka): Use newer e2e lib to handle this in type safe manner. + defaultPromConfig("p1", 0, "", filepath.Join(e2ethanos.ContainerSharedDir, promRulesSubDir, "*.yaml"), "localhost:9090", qUninit.InternalEndpoint("http")), e2ethanos.DefaultPrometheusImage(), tcase.sidecarImage, e2ethanos.FeatureExemplarStorage, ) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(p1, s1)) + testutil.Ok(t, e2e.StartAndWaitReady(p1, s1)) // Newest querier with old --rules --meta etc flags. - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", s1.GRPCNetworkEndpoint()). - WithMetadataAddresses(s1.GRPCNetworkEndpoint()). - WithExemplarAddresses(s1.GRPCNetworkEndpoint()). - WithTargetAddresses(s1.GRPCNetworkEndpoint()). - WithRuleAddresses(s1.GRPCNetworkEndpoint()). + q, err := qBuilder. + WithMetadataAddresses(s1.InternalEndpoint("grpc")). + WithExemplarAddresses(s1.InternalEndpoint("grpc")). + WithTargetAddresses(s1.InternalEndpoint("grpc")). + WithRuleAddresses(s1.InternalEndpoint("grpc")). WithTracingConfig(fmt.Sprintf(`type: JAEGER config: sampler_type: const sampler_param: 1 - service_name: %s`, s.NetworkName()+"-query")). // Use fake tracing config to trigger exemplar. + service_name: %s`, qUninit.Name())). // Use fake tracing config to trigger exemplar. WithImage(tcase.queryImage). - Build() + Initiate(qUninit, s1.InternalEndpoint("grpc")) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) t.Cleanup(cancel) // We should have single TCP connection, since all APIs are against the same server. - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + queryAndAssertSeries(t, ctx, q.Endpoint("http"), queryUpWithoutInstance, promclient.QueryOptions{ Deduplicate: false, }, []model.Metric{ { @@ -425,7 +429,7 @@ config: var promMeta map[string][]metadatapb.Meta // Wait metadata response to be ready as Prometheus gets metadata after scrape. 
testutil.Ok(t, runutil.Retry(3*time.Second, ctx.Done(), func() error { - promMeta, err = promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+p1.HTTPEndpoint()), "", -1) + promMeta, err = promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+p1.Endpoint("http")), "", -1) testutil.Ok(t, err) if len(promMeta) > 0 { return nil @@ -433,7 +437,7 @@ config: return fmt.Errorf("empty metadata response from Prometheus") })) - thanosMeta, err := promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+q.HTTPEndpoint()), "", -1) + thanosMeta, err := promclient.NewDefaultClient().MetricMetadataInGRPC(ctx, mustURLParse(t, "http://"+q.Endpoint("http")), "", -1) testutil.Ok(t, err) testutil.Assert(t, len(thanosMeta) > 0, "got empty metadata response from Thanos") @@ -448,11 +452,11 @@ config: end := timestamp.FromTime(now.Add(time.Hour)) // Send HTTP requests to thanos query to trigger exemplars. - labelNames(t, ctx, q.HTTPEndpoint(), nil, start, end, func(res []string) bool { + labelNames(t, ctx, q.Endpoint("http"), nil, start, end, func(res []string) bool { return true }) - queryExemplars(t, ctx, q.HTTPEndpoint(), `http_request_duration_seconds_bucket{handler="label_names"}`, start, end, exemplarsOnExpectedSeries(map[string]string{ + queryExemplars(t, ctx, q.Endpoint("http"), `http_request_duration_seconds_bucket{handler="label_names"}`, start, end, exemplarsOnExpectedSeries(map[string]string{ "__name__": "http_request_duration_seconds_bucket", "handler": "label_names", "job": "myself", @@ -463,7 +467,7 @@ config: // Targets. { - targetAndAssert(t, ctx, q.HTTPEndpoint(), "", &targetspb.TargetDiscovery{ + targetAndAssert(t, ctx, q.Endpoint("http"), "", &targetspb.TargetDiscovery{ ActiveTargets: []*targetspb.ActiveTarget{ { DiscoveredLabels: labelpb.ZLabelSet{Labels: []labelpb.ZLabel{ @@ -506,7 +510,7 @@ config: // Rules. 
{ - ruleAndAssert(t, ctx, q.HTTPEndpoint(), "", []*rulespb.RuleGroup{ + ruleAndAssert(t, ctx, q.Endpoint("http"), "", []*rulespb.RuleGroup{ { Name: "example_abort", File: "/shared/rules/rules.yaml", @@ -575,7 +579,7 @@ func instantQuery(t *testing.T, ctx context.Context, addr, q string, opts promcl logger := log.NewLogfmtLogger(os.Stdout) logger = log.With(logger, "ts", log.DefaultTimestampUTC) - testutil.Ok(t, runutil.RetryWithLog(logger, time.Second, ctx.Done(), func() error { + testutil.Ok(t, runutil.RetryWithLog(logger, 5*time.Second, ctx.Done(), func() error { res, warnings, err := promclient.NewDefaultClient().QueryInstant(ctx, mustURLParse(t, "http://"+addr), q, time.Now(), opts) if err != nil { return err diff --git a/test/e2e/receive_test.go b/test/e2e/receive_test.go index bb335d5c66f..c41c9b7ddb1 100644 --- a/test/e2e/receive_test.go +++ b/test/e2e/receive_test.go @@ -8,11 +8,10 @@ import ( "log" "net/http" "net/http/httputil" - "net/url" "testing" "time" - "github.com/cortexproject/cortex/integration/e2e" + "github.com/efficientgo/e2e" "github.com/prometheus/common/model" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/receive" @@ -20,12 +19,6 @@ import ( "github.com/thanos-io/thanos/test/e2e/e2ethanos" ) -type ReverseProxyConfig struct { - tenantId string - port string - target string -} - type DebugTransport struct{} func (DebugTransport) RoundTrip(r *http.Request) (*http.Response, error) { @@ -36,19 +29,6 @@ func (DebugTransport) RoundTrip(r *http.Request) (*http.Response, error) { return http.DefaultTransport.RoundTrip(r) } -func generateProxy(conf ReverseProxyConfig) { - targetURL, _ := url.Parse(conf.target) - proxy := httputil.NewSingleHostReverseProxy(targetURL) - d := proxy.Director - proxy.Director = func(r *http.Request) { - d(r) // call default director - r.Header.Add("THANOS-TENANT", conf.tenantId) - } - proxy.ErrorHandler = ErrorHandler - proxy.Transport = DebugTransport{} - log.Fatal(http.ListenAndServe(conf.port, proxy)) -} - func ErrorHandler(_ http.ResponseWriter, _ *http.Request, err error) { log.Print("Response from receiver") log.Print(err) @@ -75,31 +55,31 @@ func TestReceive(t *testing.T) { */ t.Parallel() - s, err := e2e.NewScenario("e2e_receive_single_ingestor") + e, err := e2e.NewDockerEnvironment("e2e_receive_single_ingestor") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) // Setup Router Ingestor. 
- i, err := e2ethanos.NewIngestingReceiver(s.SharedDir(), "ingestor") + i, err := e2ethanos.NewIngestingReceiver(e, "ingestor") testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(i)) + testutil.Ok(t, e2e.StartAndWaitReady(i)) // Setup Prometheus - prom, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "1", defaultPromConfig("prom1", 0, e2ethanos.RemoteWriteEndpoint(i.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom, _, err := e2ethanos.NewPrometheus(e, "1", defaultPromConfig("prom1", 0, e2ethanos.RemoteWriteEndpoint(i.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom)) + testutil.Ok(t, e2e.StartAndWaitReady(prom)) - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", i.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", i.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) t.Cleanup(cancel) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) // We expect the data from each Prometheus instance to be replicated twice across our ingesting instances - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + queryAndAssertSeries(t, ctx, q.Endpoint("http"), queryUpWithoutInstance, promclient.QueryOptions{ Deduplicate: false, }, []model.Metric{ { @@ -143,51 +123,51 @@ func TestReceive(t *testing.T) { */ t.Parallel() - s, err := e2e.NewScenario("e2e_receive_router_replication") + e, err := e2e.NewDockerEnvironment("e2e_receive_router_replication") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) // Setup 3 ingestors. 
- i1, err := e2ethanos.NewIngestingReceiver(s.SharedDir(), "i1") + i1, err := e2ethanos.NewIngestingReceiver(e, "i1") testutil.Ok(t, err) - i2, err := e2ethanos.NewIngestingReceiver(s.SharedDir(), "i2") + i2, err := e2ethanos.NewIngestingReceiver(e, "i2") testutil.Ok(t, err) - i3, err := e2ethanos.NewIngestingReceiver(s.SharedDir(), "i3") + i3, err := e2ethanos.NewIngestingReceiver(e, "i3") testutil.Ok(t, err) h := receive.HashringConfig{ Endpoints: []string{ - i1.GRPCNetworkEndpointFor(s.NetworkName()), - i2.GRPCNetworkEndpointFor(s.NetworkName()), - i3.GRPCNetworkEndpointFor(s.NetworkName()), + i1.InternalEndpoint("grpc"), + i2.InternalEndpoint("grpc"), + i3.InternalEndpoint("grpc"), }, } // Setup 1 distributor - r1, err := e2ethanos.NewRoutingReceiver(s.SharedDir(), "r1", 2, h) + r1, err := e2ethanos.NewRoutingReceiver(e, "r1", 2, h) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(i1, i2, i3, r1)) + testutil.Ok(t, e2e.StartAndWaitReady(i1, i2, i3, r1)) - prom1, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "1", defaultPromConfig("prom1", 0, e2ethanos.RemoteWriteEndpoint(r1.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom1, _, err := e2ethanos.NewPrometheus(e, "1", defaultPromConfig("prom1", 0, e2ethanos.RemoteWriteEndpoint(r1.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - prom2, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "2", defaultPromConfig("prom2", 0, e2ethanos.RemoteWriteEndpoint(r1.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom2, _, err := e2ethanos.NewPrometheus(e, "2", defaultPromConfig("prom2", 0, e2ethanos.RemoteWriteEndpoint(r1.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - prom3, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "3", defaultPromConfig("prom3", 0, e2ethanos.RemoteWriteEndpoint(r1.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom3, _, err := e2ethanos.NewPrometheus(e, "3", defaultPromConfig("prom3", 0, e2ethanos.RemoteWriteEndpoint(r1.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1, prom2, prom3)) + testutil.Ok(t, e2e.StartAndWaitReady(prom1, prom2, prom3)) - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", i1.GRPCNetworkEndpoint(), i2.GRPCNetworkEndpoint(), i3.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", i1.InternalEndpoint("grpc"), i2.InternalEndpoint("grpc"), i3.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) t.Cleanup(cancel) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(3), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(3), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) expectedReplicationFactor := 2.0 - queryAndAssert(t, ctx, q.HTTPEndpoint(), "count(up) by (prometheus)", promclient.QueryOptions{ + queryAndAssert(t, ctx, q.Endpoint("http"), "count(up) by (prometheus)", promclient.QueryOptions{ Deduplicate: false, }, model.Vector{ &model.Sample{ @@ -250,57 +230,57 @@ func TestReceive(t *testing.T) { */ t.Parallel() - s, err := e2e.NewScenario("e2e_receive_routing_tree") + e, err := e2e.NewDockerEnvironment("e2e_receive_routing_tree") testutil.Ok(t, err) - 
t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) // Setup ingestors. - i1, err := e2ethanos.NewIngestingReceiver(s.SharedDir(), "i1") + i1, err := e2ethanos.NewIngestingReceiver(e, "i1") testutil.Ok(t, err) - i2, err := e2ethanos.NewIngestingReceiver(s.SharedDir(), "i2") + i2, err := e2ethanos.NewIngestingReceiver(e, "i2") testutil.Ok(t, err) - i3, err := e2ethanos.NewIngestingReceiver(s.SharedDir(), "i3") + i3, err := e2ethanos.NewIngestingReceiver(e, "i3") testutil.Ok(t, err) // Setup distributors - r2, err := e2ethanos.NewRoutingReceiver(s.SharedDir(), "r2", 2, receive.HashringConfig{ + r2, err := e2ethanos.NewRoutingReceiver(e, "r2", 2, receive.HashringConfig{ Endpoints: []string{ - i2.GRPCNetworkEndpointFor(s.NetworkName()), - i3.GRPCNetworkEndpointFor(s.NetworkName()), + i2.InternalEndpoint("grpc"), + i3.InternalEndpoint("grpc"), }, }) testutil.Ok(t, err) - r1, err := e2ethanos.NewRoutingReceiver(s.SharedDir(), "r1", 2, receive.HashringConfig{ + r1, err := e2ethanos.NewRoutingReceiver(e, "r1", 2, receive.HashringConfig{ Endpoints: []string{ - r2.GRPCNetworkEndpointFor(s.NetworkName()), - i1.GRPCNetworkEndpointFor(s.NetworkName()), + i1.InternalEndpoint("grpc"), + r2.InternalEndpoint("grpc"), }, }) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(i1, i2, i3, r1, r2)) + testutil.Ok(t, e2e.StartAndWaitReady(i1, i2, i3, r1, r2)) //Setup Prometheuses - prom1, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "1", defaultPromConfig("prom1", 0, e2ethanos.RemoteWriteEndpoint(r1.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom1, _, err := e2ethanos.NewPrometheus(e, "1", defaultPromConfig("prom1", 0, e2ethanos.RemoteWriteEndpoint(r1.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - prom2, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "2", defaultPromConfig("prom2", 0, e2ethanos.RemoteWriteEndpoint(r1.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom2, _, err := e2ethanos.NewPrometheus(e, "2", defaultPromConfig("prom2", 0, e2ethanos.RemoteWriteEndpoint(r1.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1, prom2)) + testutil.Ok(t, e2e.StartAndWaitReady(prom1, prom2)) //Setup Querier - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", i1.GRPCNetworkEndpoint(), i2.GRPCNetworkEndpoint(), i3.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", i1.InternalEndpoint("grpc"), i2.InternalEndpoint("grpc"), i3.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) t.Cleanup(cancel) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(3), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(3), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) expectedReplicationFactor := 3.0 - queryAndAssert(t, ctx, q.HTTPEndpoint(), "count(up) by (prometheus)", promclient.QueryOptions{ + queryAndAssert(t, ctx, q.Endpoint("http"), "count(up) by (prometheus)", promclient.QueryOptions{ Deduplicate: false, }, model.Vector{ &model.Sample{ @@ -355,72 +335,70 @@ func TestReceive(t *testing.T) { └───────┘ */ t.Parallel() - s, err := e2e.NewScenario("e2e_test_receive_hashring") - testutil.Ok(t, err) - 
t.Cleanup(e2ethanos.CleanScenario(t, s)) - r1, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "1", 1) - testutil.Ok(t, err) - r2, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "2", 1) - testutil.Ok(t, err) - r3, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "3", 1) + e, err := e2e.NewDockerEnvironment("e2e_test_receive_hashring") testutil.Ok(t, err) + t.Cleanup(e2ethanos.CleanScenario(t, e)) + + r1 := e2ethanos.NewUninitiatedReceiver(e, "1") + r2 := e2ethanos.NewUninitiatedReceiver(e, "2") + r3 := e2ethanos.NewUninitiatedReceiver(e, "3") h := receive.HashringConfig{ Endpoints: []string{ - r1.GRPCNetworkEndpointFor(s.NetworkName()), - r2.GRPCNetworkEndpointFor(s.NetworkName()), - r3.GRPCNetworkEndpointFor(s.NetworkName()), + r1.InternalEndpoint("grpc"), + r2.InternalEndpoint("grpc"), + r3.InternalEndpoint("grpc"), }, } - // Recreate again, but with hashring config. - r1, err = e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "1", 1, h) + // Create with hashring config. + r1Runnable, err := e2ethanos.NewRoutingAndIngestingReceiverFromService(r1, e.SharedDir(), 1, h) testutil.Ok(t, err) - r2, err = e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "2", 1, h) + r2Runnable, err := e2ethanos.NewRoutingAndIngestingReceiverFromService(r2, e.SharedDir(), 1, h) testutil.Ok(t, err) - r3, err = e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "3", 1, h) + r3Runnable, err := e2ethanos.NewRoutingAndIngestingReceiverFromService(r3, e.SharedDir(), 1, h) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(r1, r2, r3)) + testutil.Ok(t, e2e.StartAndWaitReady(r1Runnable, r2Runnable, r3Runnable)) - prom1, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "1", defaultPromConfig("prom1", 0, e2ethanos.RemoteWriteEndpoint(r1.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom1, _, err := e2ethanos.NewPrometheus(e, "1", defaultPromConfig("prom1", 0, e2ethanos.RemoteWriteEndpoint(r1.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - prom2, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "2", defaultPromConfig("prom2", 0, e2ethanos.RemoteWriteEndpoint(r2.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom2, _, err := e2ethanos.NewPrometheus(e, "2", defaultPromConfig("prom2", 0, e2ethanos.RemoteWriteEndpoint(r2.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - prom3, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "3", defaultPromConfig("prom3", 0, e2ethanos.RemoteWriteEndpoint(r3.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom3, _, err := e2ethanos.NewPrometheus(e, "3", defaultPromConfig("prom3", 0, e2ethanos.RemoteWriteEndpoint(r3.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1, prom2, prom3)) + testutil.Ok(t, e2e.StartAndWaitReady(prom1, prom2, prom3)) - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", r1.GRPCNetworkEndpoint(), r2.GRPCNetworkEndpoint(), r3.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", r1.InternalEndpoint("grpc"), r2.InternalEndpoint("grpc"), r3.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 
3*time.Minute) t.Cleanup(cancel) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(3), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(3), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + queryAndAssertSeries(t, ctx, q.Endpoint("http"), queryUpWithoutInstance, promclient.QueryOptions{ Deduplicate: false, }, []model.Metric{ { "job": "myself", "prometheus": "prom1", - "receive": "2", + "receive": "receive-2", "replica": "0", "tenant_id": "default-tenant", }, { "job": "myself", "prometheus": "prom2", - "receive": "1", + "receive": "receive-1", "replica": "0", "tenant_id": "default-tenant", }, { "job": "myself", "prometheus": "prom3", - "receive": "2", + "receive": "receive-2", "replica": "0", "tenant_id": "default-tenant", }, @@ -430,73 +408,70 @@ func TestReceive(t *testing.T) { t.Run("hashring with config watcher", func(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_receive_hashring_config_watcher") + e, err := e2e.NewDockerEnvironment("e2e_test_receive_hashring_config_watcher") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) - r1, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "1", 1) - testutil.Ok(t, err) - r2, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "2", 1) - testutil.Ok(t, err) - r3, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "3", 1) - testutil.Ok(t, err) + r1 := e2ethanos.NewUninitiatedReceiver(e, "1") + r2 := e2ethanos.NewUninitiatedReceiver(e, "2") + r3 := e2ethanos.NewUninitiatedReceiver(e, "3") h := receive.HashringConfig{ Endpoints: []string{ - r1.GRPCNetworkEndpointFor(s.NetworkName()), - r2.GRPCNetworkEndpointFor(s.NetworkName()), - r3.GRPCNetworkEndpointFor(s.NetworkName()), + r1.InternalEndpoint("grpc"), + r2.InternalEndpoint("grpc"), + r3.InternalEndpoint("grpc"), }, } - // Recreate again, but with hashring config. + // Create with hashring config. // TODO(kakkoyun): Update config file and wait config watcher to reconcile hashring. 
- r1, err = e2ethanos.NewRoutingAndIngestingReceiverWithConfigWatcher(s.SharedDir(), s.NetworkName(), "1", 1, h) + r1Runnable, err := e2ethanos.NewRoutingAndIngestingReceiverWithConfigWatcher(r1, e.SharedDir(), 1, h) testutil.Ok(t, err) - r2, err = e2ethanos.NewRoutingAndIngestingReceiverWithConfigWatcher(s.SharedDir(), s.NetworkName(), "2", 1, h) + r2Runnable, err := e2ethanos.NewRoutingAndIngestingReceiverWithConfigWatcher(r2, e.SharedDir(), 1, h) testutil.Ok(t, err) - r3, err = e2ethanos.NewRoutingAndIngestingReceiverWithConfigWatcher(s.SharedDir(), s.NetworkName(), "3", 1, h) + r3Runnable, err := e2ethanos.NewRoutingAndIngestingReceiverWithConfigWatcher(r3, e.SharedDir(), 1, h) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(r1, r2, r3)) + testutil.Ok(t, e2e.StartAndWaitReady(r1Runnable, r2Runnable, r3Runnable)) - prom1, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "1", defaultPromConfig("prom1", 0, e2ethanos.RemoteWriteEndpoint(r1.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom1, _, err := e2ethanos.NewPrometheus(e, "1", defaultPromConfig("prom1", 0, e2ethanos.RemoteWriteEndpoint(r1.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - prom2, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "2", defaultPromConfig("prom2", 0, e2ethanos.RemoteWriteEndpoint(r2.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom2, _, err := e2ethanos.NewPrometheus(e, "2", defaultPromConfig("prom2", 0, e2ethanos.RemoteWriteEndpoint(r2.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - prom3, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "3", defaultPromConfig("prom3", 0, e2ethanos.RemoteWriteEndpoint(r3.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom3, _, err := e2ethanos.NewPrometheus(e, "3", defaultPromConfig("prom3", 0, e2ethanos.RemoteWriteEndpoint(r3.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1, prom2, prom3)) + testutil.Ok(t, e2e.StartAndWaitReady(prom1, prom2, prom3)) - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", r1.GRPCNetworkEndpoint(), r2.GRPCNetworkEndpoint(), r3.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", r1.InternalEndpoint("grpc"), r2.InternalEndpoint("grpc"), r3.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) t.Cleanup(cancel) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(3), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(3), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + queryAndAssertSeries(t, ctx, q.Endpoint("http"), queryUpWithoutInstance, promclient.QueryOptions{ Deduplicate: false, }, []model.Metric{ { "job": "myself", "prometheus": "prom1", - "receive": "2", + "receive": "receive-2", "replica": "0", "tenant_id": "default-tenant", }, { "job": "myself", "prometheus": "prom2", - "receive": "1", + "receive": "receive-1", "replica": "0", "tenant_id": "default-tenant", }, { "job": "myself", "prometheus": "prom3", - "receive": "2", + "receive": "receive-2", "replica": "0", "tenant_id": "default-tenant", 
}, @@ -506,72 +481,70 @@ func TestReceive(t *testing.T) { t.Run("replication", func(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_receive_replication") + e, err := e2e.NewDockerEnvironment("e2e_test_receive_replication") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) // The replication suite creates three receivers but only one // receives Prometheus remote-written data. The querier queries all // receivers and the test verifies that the time series are // replicated to all of the nodes. - r1, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "1", 3) - testutil.Ok(t, err) - r2, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "2", 3) - testutil.Ok(t, err) - r3, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "3", 3) - testutil.Ok(t, err) + + r1 := e2ethanos.NewUninitiatedReceiver(e, "1") + r2 := e2ethanos.NewUninitiatedReceiver(e, "2") + r3 := e2ethanos.NewUninitiatedReceiver(e, "3") h := receive.HashringConfig{ Endpoints: []string{ - r1.GRPCNetworkEndpointFor(s.NetworkName()), - r2.GRPCNetworkEndpointFor(s.NetworkName()), - r3.GRPCNetworkEndpointFor(s.NetworkName()), + r1.InternalEndpoint("grpc"), + r2.InternalEndpoint("grpc"), + r3.InternalEndpoint("grpc"), }, } - // Recreate again, but with hashring config. - r1, err = e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "1", 3, h) + // Create with hashring config. + r1Runnable, err := e2ethanos.NewRoutingAndIngestingReceiverFromService(r1, e.SharedDir(), 3, h) testutil.Ok(t, err) - r2, err = e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "2", 3, h) + r2Runnable, err := e2ethanos.NewRoutingAndIngestingReceiverFromService(r2, e.SharedDir(), 3, h) testutil.Ok(t, err) - r3, err = e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "3", 3, h) + r3Runnable, err := e2ethanos.NewRoutingAndIngestingReceiverFromService(r3, e.SharedDir(), 3, h) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(r1, r2, r3)) + testutil.Ok(t, e2e.StartAndWaitReady(r1Runnable, r2Runnable, r3Runnable)) - prom1, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "1", defaultPromConfig("prom1", 0, e2ethanos.RemoteWriteEndpoint(r1.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom1, _, err := e2ethanos.NewPrometheus(e, "1", defaultPromConfig("prom1", 0, e2ethanos.RemoteWriteEndpoint(r1.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1)) + testutil.Ok(t, e2e.StartAndWaitReady(prom1)) - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", r1.GRPCNetworkEndpoint(), r2.GRPCNetworkEndpoint(), r3.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", r1.InternalEndpoint("grpc"), r2.InternalEndpoint("grpc"), r3.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) t.Cleanup(cancel) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(3), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(3), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + 
queryAndAssertSeries(t, ctx, q.Endpoint("http"), queryUpWithoutInstance, promclient.QueryOptions{ Deduplicate: false, }, []model.Metric{ { "job": "myself", "prometheus": "prom1", - "receive": "1", + "receive": "receive-1", "replica": "0", "tenant_id": "default-tenant", }, { "job": "myself", "prometheus": "prom1", - "receive": "2", + "receive": "receive-2", "replica": "0", "tenant_id": "default-tenant", }, { "job": "myself", "prometheus": "prom1", - "receive": "3", + "receive": "receive-3", "replica": "0", "tenant_id": "default-tenant", }, @@ -581,62 +554,60 @@ func TestReceive(t *testing.T) { t.Run("replication_with_outage", func(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_receive_replication_with_outage") + e, err := e2e.NewDockerEnvironment("e2e_test_receive_replication_with_outage") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) // The replication suite creates a three-node hashring but one of the // receivers is dead. In this case, replication should still // succeed and the time series should be replicated to the other nodes. - r1, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "1", 3) - testutil.Ok(t, err) - r2, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "2", 3) - testutil.Ok(t, err) - notRunningR3, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "3", 3) - testutil.Ok(t, err) + + r1 := e2ethanos.NewUninitiatedReceiver(e, "1") + r2 := e2ethanos.NewUninitiatedReceiver(e, "2") + r3 := e2ethanos.NewUninitiatedReceiver(e, "3") h := receive.HashringConfig{ Endpoints: []string{ - r1.GRPCNetworkEndpointFor(s.NetworkName()), - r2.GRPCNetworkEndpointFor(s.NetworkName()), - notRunningR3.GRPCNetworkEndpointFor(s.NetworkName()), + r1.InternalEndpoint("grpc"), + r2.InternalEndpoint("grpc"), + r3.InternalEndpoint("grpc"), }, } - // Recreate again, but with hashring config. - r1, err = e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "1", 3, h) + // Create with hashring config. 
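Why the outage variant can still pass: at replication factor 3 a remote write is acknowledged once it succeeds on a write quorum of replicas, which is 2 going by the standard floor(3/2)+1 quorum rule (the receiver's exact arithmetic is not shown in this diff), so r1 and r2 alone can accept every sample while r3 is never started. The querier is accordingly wired to two receivers only, which the test pins down before querying:

	// Only two receivers are running, so exactly two store connections appear.
	testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(2),
		[]string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics()))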
+ r1Runnable, err := e2ethanos.NewRoutingAndIngestingReceiverFromService(r1, e.SharedDir(), 3, h) testutil.Ok(t, err) - r2, err = e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "2", 3, h) + r2Runnable, err := e2ethanos.NewRoutingAndIngestingReceiverFromService(r2, e.SharedDir(), 3, h) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(r1, r2)) + testutil.Ok(t, e2e.StartAndWaitReady(r1Runnable, r2Runnable)) - prom1, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "1", defaultPromConfig("prom1", 0, e2ethanos.RemoteWriteEndpoint(r1.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + prom1, _, err := e2ethanos.NewPrometheus(e, "1", defaultPromConfig("prom1", 0, e2ethanos.RemoteWriteEndpoint(r1.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1)) + testutil.Ok(t, e2e.StartAndWaitReady(prom1)) - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", r1.GRPCNetworkEndpoint(), r2.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", r1.InternalEndpoint("grpc"), r2.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) t.Cleanup(cancel) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(2), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(2), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + queryAndAssertSeries(t, ctx, q.Endpoint("http"), queryUpWithoutInstance, promclient.QueryOptions{ Deduplicate: false, }, []model.Metric{ { "job": "myself", "prometheus": "prom1", - "receive": "1", + "receive": "receive-1", "replica": "0", "tenant_id": "default-tenant", }, { "job": "myself", "prometheus": "prom1", - "receive": "2", + "receive": "receive-2", "replica": "0", "tenant_id": "default-tenant", }, @@ -646,70 +617,56 @@ func TestReceive(t *testing.T) { t.Run("multitenancy", func(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_for_multitenancy") + e, err := e2e.NewDockerEnvironment("e2e_test_for_multitenancy") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) - // The replication suite creates a three-node hashring but one of the - // receivers is dead. In this case, replication should still - // succeed and the time series should be replicated to the other nodes. - r1, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "1", 1) - testutil.Ok(t, err) + r1 := e2ethanos.NewUninitiatedReceiver(e, "1") h := receive.HashringConfig{ Endpoints: []string{ - r1.GRPCNetworkEndpointFor(s.NetworkName()), + r1.InternalEndpoint("grpc"), }, } - // Recreate again, but with hashring config. - r1, err = e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "1", 1, h) + // Create with hashring config. 
+ r1Runnable, err := e2ethanos.NewRoutingAndIngestingReceiverFromService(r1, e.SharedDir(), 1, h) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(r1)) - testutil.Ok(t, err) - - conf1 := ReverseProxyConfig{ - tenantId: "tenant-1", - port: ":9097", - target: "http://" + r1.Endpoint(8081), - } - conf2 := ReverseProxyConfig{ - tenantId: "tenant-2", - port: ":9098", - target: "http://" + r1.Endpoint(8081), - } + testutil.Ok(t, e2e.StartAndWaitReady(r1Runnable)) - go generateProxy(conf1) - go generateProxy(conf2) + rp1, err := e2ethanos.NewReverseProxy(e, "1", "tenant-1", "http://"+r1.InternalEndpoint("remote-write")) + testutil.Ok(t, err) + rp2, err := e2ethanos.NewReverseProxy(e, "2", "tenant-2", "http://"+r1.InternalEndpoint("remote-write")) + testutil.Ok(t, err) + testutil.Ok(t, e2e.StartAndWaitReady(rp1, rp2)) - prom1, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "1", defaultPromConfig("prom1", 0, "http://172.17.0.1:9097/api/v1/receive", ""), e2ethanos.DefaultPrometheusImage()) + prom1, _, err := e2ethanos.NewPrometheus(e, "1", defaultPromConfig("prom1", 0, "http://"+rp1.InternalEndpoint("http")+"/api/v1/receive", ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - prom2, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "2", defaultPromConfig("prom1", 0, "http://172.17.0.1:9098/api/v1/receive", ""), e2ethanos.DefaultPrometheusImage()) + prom2, _, err := e2ethanos.NewPrometheus(e, "2", defaultPromConfig("prom2", 0, "http://"+rp2.InternalEndpoint("http")+"/api/v1/receive", ""), e2ethanos.DefaultPrometheusImage()) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1)) - testutil.Ok(t, s.StartAndWaitReady(prom2)) + testutil.Ok(t, e2e.StartAndWaitReady(prom1, prom2)) - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", r1.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", r1.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) t.Cleanup(cancel) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) + queryAndAssertSeries(t, ctx, q.Endpoint("http"), queryUpWithoutInstance, promclient.QueryOptions{ Deduplicate: false, }, []model.Metric{ { "job": "myself", "prometheus": "prom1", - "receive": "1", + "receive": "receive-1", "replica": "0", "tenant_id": "tenant-1", }, { "job": "myself", - "prometheus": "prom1", - "receive": "1", + "prometheus": "prom2", + "receive": "receive-1", "replica": "0", "tenant_id": "tenant-2", }, diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index fb7289ca906..633d70b1ed8 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/integration/e2e" + "github.com/efficientgo/e2e" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" "gopkg.in/yaml.v2" @@ -127,7 +127,7 @@ func reloadRulesHTTP(t *testing.T, ctx context.Context, endpoint string) { testutil.Equals(t, 200, resp.StatusCode) } -func reloadRulesSignal(t *testing.T, r *e2ethanos.Service) { +func reloadRulesSignal(t *testing.T, r *e2e.InstrumentedRunnable) { c 
:= e2e.NewCommand("kill", "-1", "1") _, _, err := r.Exec(c) testutil.Ok(t, err) @@ -197,41 +197,41 @@ func writeTargets(t *testing.T, path string, addrs ...string) { func TestRule(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_rule") + e, err := e2e.NewDockerEnvironment("e2e_test_rule") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) t.Cleanup(cancel) // Prepare work dirs. rulesSubDir := filepath.Join("rules") - rulesPath := filepath.Join(s.SharedDir(), rulesSubDir) + rulesPath := filepath.Join(e.SharedDir(), rulesSubDir) testutil.Ok(t, os.MkdirAll(rulesPath, os.ModePerm)) createRuleFiles(t, rulesPath) amTargetsSubDir := filepath.Join("rules_am_targets") - testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), amTargetsSubDir), os.ModePerm)) + testutil.Ok(t, os.MkdirAll(filepath.Join(e.SharedDir(), amTargetsSubDir), os.ModePerm)) queryTargetsSubDir := filepath.Join("rules_query_targets") - testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), queryTargetsSubDir), os.ModePerm)) + testutil.Ok(t, os.MkdirAll(filepath.Join(e.SharedDir(), queryTargetsSubDir), os.ModePerm)) - am1, err := e2ethanos.NewAlertmanager(s.SharedDir(), "1") + am1, err := e2ethanos.NewAlertmanager(e, "1") testutil.Ok(t, err) - am2, err := e2ethanos.NewAlertmanager(s.SharedDir(), "2") + am2, err := e2ethanos.NewAlertmanager(e, "2") testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(am1, am2)) + testutil.Ok(t, e2e.StartAndWaitReady(am1, am2)) - r, err := e2ethanos.NewRuler(s.SharedDir(), "1", rulesSubDir, []alert.AlertmanagerConfig{ + r, err := e2ethanos.NewRuler(e, "1", rulesSubDir, []alert.AlertmanagerConfig{ { EndpointsConfig: http_util.EndpointsConfig{ FileSDConfigs: []http_util.FileSDConfig{ { // FileSD which will be used to register discover dynamically am1. - Files: []string{filepath.Join(e2e.ContainerSharedDir, amTargetsSubDir, "*.yaml")}, + Files: []string{filepath.Join(e2ethanos.ContainerSharedDir, amTargetsSubDir, "*.yaml")}, RefreshInterval: model.Duration(time.Second), }, }, StaticAddresses: []string{ - am2.NetworkHTTPEndpoint(), + am2.InternalEndpoint("http"), }, Scheme: "http", }, @@ -245,7 +245,7 @@ func TestRule(t *testing.T) { FileSDConfigs: []http_util.FileSDConfig{ { // FileSD which will be used to register discover dynamically q. - Files: []string{filepath.Join(e2e.ContainerSharedDir, queryTargetsSubDir, "*.yaml")}, + Files: []string{filepath.Join(e2ethanos.ContainerSharedDir, queryTargetsSubDir, "*.yaml")}, RefreshInterval: model.Duration(time.Second), }, }, @@ -254,11 +254,11 @@ func TestRule(t *testing.T) { }, }) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(r)) + testutil.Ok(t, e2e.StartAndWaitReady(r)) - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", r.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", r.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) t.Run("no query configured", func(t *testing.T) { // Check for a few evaluations, check all of them failed. @@ -272,9 +272,9 @@ func TestRule(t *testing.T) { var currentFailures float64 t.Run("attach query", func(t *testing.T) { // Attach querier to target files. 
- writeTargets(t, filepath.Join(s.SharedDir(), queryTargetsSubDir, "targets.yaml"), q.NetworkHTTPEndpoint()) + writeTargets(t, filepath.Join(e.SharedDir(), queryTargetsSubDir, "targets.yaml"), q.InternalEndpoint("http")) - testutil.Ok(t, r.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"thanos_rule_query_apis_dns_provider_results"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, r.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"thanos_rule_query_apis_dns_provider_results"}, e2e.WaitMissingMetrics())) testutil.Ok(t, r.WaitSumMetrics(e2e.Equals(1), "thanos_rule_alertmanagers_dns_provider_results")) var currentVal float64 @@ -305,7 +305,7 @@ func TestRule(t *testing.T) { }) t.Run("attach am1", func(t *testing.T) { // Attach am1 to target files. - writeTargets(t, filepath.Join(s.SharedDir(), amTargetsSubDir, "targets.yaml"), am1.NetworkHTTPEndpoint()) + writeTargets(t, filepath.Join(e.SharedDir(), amTargetsSubDir, "targets.yaml"), am1.InternalEndpoint("http")) testutil.Ok(t, r.WaitSumMetrics(e2e.Equals(1), "thanos_rule_query_apis_dns_provider_results")) testutil.Ok(t, r.WaitSumMetrics(e2e.Equals(2), "thanos_rule_alertmanagers_dns_provider_results")) @@ -329,7 +329,7 @@ func TestRule(t *testing.T) { }) t.Run("am1 drops again", func(t *testing.T) { - testutil.Ok(t, os.RemoveAll(filepath.Join(s.SharedDir(), amTargetsSubDir, "targets.yaml"))) + testutil.Ok(t, os.RemoveAll(filepath.Join(e.SharedDir(), amTargetsSubDir, "targets.yaml"))) testutil.Ok(t, r.WaitSumMetrics(e2e.Equals(1), "thanos_rule_query_apis_dns_provider_results")) testutil.Ok(t, r.WaitSumMetrics(e2e.Equals(1), "thanos_rule_alertmanagers_dns_provider_results")) @@ -356,83 +356,85 @@ func TestRule(t *testing.T) { testutil.Ok(t, am1.WaitSumMetrics(e2e.Equals(currentValAm1), "alertmanager_alerts_received_total")) }) - t.Run("duplicate am ", func(t *testing.T) { + t.Run("duplicate am", func(t *testing.T) { // am2 is already registered in static addresses. - writeTargets(t, filepath.Join(s.SharedDir(), amTargetsSubDir, "targets.yaml"), am2.NetworkHTTPEndpoint()) + writeTargets(t, filepath.Join(e.SharedDir(), amTargetsSubDir, "targets.yaml"), am2.InternalEndpoint("http")) testutil.Ok(t, r.WaitSumMetrics(e2e.Equals(1), "thanos_rule_query_apis_dns_provider_results")) testutil.Ok(t, r.WaitSumMetrics(e2e.Equals(1), "thanos_rule_alertmanagers_dns_provider_results")) }) t.Run("rule groups have last evaluation and evaluation duration set", func(t *testing.T) { - rulegroupCorrectData(t, ctx, r.HTTPEndpoint()) + rulegroupCorrectData(t, ctx, r.Endpoint("http")) }) t.Run("signal reload works", func(t *testing.T) { // Add a new rule via sending sighup createRuleFile(t, fmt.Sprintf("%s/newrule.yaml", rulesPath), testAlertRuleAddedLaterSignal) reloadRulesSignal(t, r) - checkReloadSuccessful(t, ctx, r.HTTPEndpoint(), 4) + checkReloadSuccessful(t, ctx, r.Endpoint("http"), 4) }) t.Run("http reload works", func(t *testing.T) { // Add a new rule via /-/reload. 
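For symmetry with the signal-based reload above, the two reload paths side by side as a sketch (the exec path is verbatim from this file; the HTTP path assumes reloadRulesHTTP issues a plain POST, as its exact request shape is not shown in this hunk):

	// Reload via SIGHUP: send signal 1 to PID 1 inside the ruler container.
	c := e2e.NewCommand("kill", "-1", "1")
	_, _, err := r.Exec(c)
	testutil.Ok(t, err)

	// Reload via the web handler: POST to the ruler's /-/reload endpoint.
	resp, err := http.Post("http://"+r.Endpoint("http")+"/-/reload", "", nil)
	testutil.Ok(t, err)
	defer resp.Body.Close()
	testutil.Equals(t, 200, resp.StatusCode)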
createRuleFile(t, fmt.Sprintf("%s/newrule.yaml", rulesPath), testAlertRuleAddedLaterWebHandler) - reloadRulesHTTP(t, ctx, r.HTTPEndpoint()) - checkReloadSuccessful(t, ctx, r.HTTPEndpoint(), 3) + reloadRulesHTTP(t, ctx, r.Endpoint("http")) + checkReloadSuccessful(t, ctx, r.Endpoint("http"), 3) }) - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), "ALERTS", promclient.QueryOptions{ - Deduplicate: false, - }, []model.Metric{ - { - "__name__": "ALERTS", - "severity": "page", - "alertname": "TestAlert_AbortOnPartialResponse", - "alertstate": "firing", - "replica": "1", - }, - { - "__name__": "ALERTS", - "severity": "page", - "alertname": "TestAlert_HasBeenLoadedViaWebHandler", - "alertstate": "firing", - "replica": "1", - }, - { - "__name__": "ALERTS", - "severity": "page", - "alertname": "TestAlert_WarnOnPartialResponse", - "alertstate": "firing", - "replica": "1", - }, - }) + t.Run("query alerts", func(t *testing.T) { + queryAndAssertSeries(t, ctx, q.Endpoint("http"), "ALERTS", promclient.QueryOptions{ + Deduplicate: false, + }, []model.Metric{ + { + "__name__": "ALERTS", + "severity": "page", + "alertname": "TestAlert_AbortOnPartialResponse", + "alertstate": "firing", + "replica": "1", + }, + { + "__name__": "ALERTS", + "severity": "page", + "alertname": "TestAlert_HasBeenLoadedViaWebHandler", + "alertstate": "firing", + "replica": "1", + }, + { + "__name__": "ALERTS", + "severity": "page", + "alertname": "TestAlert_WarnOnPartialResponse", + "alertstate": "firing", + "replica": "1", + }, + }) - expAlertLabels := []model.LabelSet{ - { - "severity": "page", - "alertname": "TestAlert_AbortOnPartialResponse", - "replica": "1", - }, - { - "severity": "page", - "alertname": "TestAlert_HasBeenLoadedViaWebHandler", - "replica": "1", - }, - { - "severity": "page", - "alertname": "TestAlert_WarnOnPartialResponse", - "replica": "1", - }, - } + expAlertLabels := []model.LabelSet{ + { + "severity": "page", + "alertname": "TestAlert_AbortOnPartialResponse", + "replica": "1", + }, + { + "severity": "page", + "alertname": "TestAlert_HasBeenLoadedViaWebHandler", + "replica": "1", + }, + { + "severity": "page", + "alertname": "TestAlert_WarnOnPartialResponse", + "replica": "1", + }, + } - alrts, err := promclient.NewDefaultClient().AlertmanagerAlerts(ctx, mustURLParse(t, "http://"+am2.HTTPEndpoint())) - testutil.Ok(t, err) + alrts, err := promclient.NewDefaultClient().AlertmanagerAlerts(ctx, mustURLParse(t, "http://"+am2.Endpoint("http"))) + testutil.Ok(t, err) - testutil.Equals(t, len(expAlertLabels), len(alrts)) - for i, a := range alrts { - testutil.Assert(t, a.Labels.Equal(expAlertLabels[i]), "unexpected labels %s", a.Labels) - } + testutil.Equals(t, len(expAlertLabels), len(alrts)) + for i, a := range alrts { + testutil.Assert(t, a.Labels.Equal(expAlertLabels[i]), "unexpected labels %s", a.Labels) + } + }) } // Test Ruler behavior on different storepb.PartialResponseStrategy when having partial response from single `failingStoreAPI`. 
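Before the next file, the mechanical shape of this whole migration in one place. A condensed, self-contained sketch (the test name and the storeless querier are illustrative; every call below is one that appears in these hunks):

	package e2e_test

	import (
		"testing"

		"github.com/efficientgo/e2e"

		"github.com/thanos-io/thanos/pkg/testutil"
		"github.com/thanos-io/thanos/test/e2e/e2ethanos"
	)

	func TestMigrationPatternSketch(t *testing.T) {
		t.Parallel()

		// e2e.NewDockerEnvironment replaces e2e.NewScenario(netName); the
		// cleanup helper keeps its old name.
		e, err := e2e.NewDockerEnvironment("e2e_sketch")
		testutil.Ok(t, err)
		t.Cleanup(e2ethanos.CleanScenario(t, e))

		// Builders take the environment instead of s.SharedDir() plus a
		// network name (store addresses omitted here for brevity).
		q, err := e2ethanos.NewQuerierBuilder(e, "1").Build()
		testutil.Ok(t, err)

		// StartAndWaitReady is now a package-level function, not a Scenario method.
		testutil.Ok(t, e2e.StartAndWaitReady(q))

		// Endpoints are looked up by port name rather than dedicated getters:
		// HTTPEndpoint() becomes Endpoint("http") (reachable from the host) and
		// GRPCNetworkEndpoint() becomes InternalEndpoint("grpc") (in-network).
		_ = q.Endpoint("http")
		_ = q.InternalEndpoint("grpc")

		// e2e.WaitMissingMetrics is likewise now called as a function:
		// q.WaitSumMetricsWithOptions(..., e2e.WaitMissingMetrics()).
	}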
diff --git a/test/e2e/rules_api_test.go b/test/e2e/rules_api_test.go index 86f97cdf181..f55a1175ae9 100644 --- a/test/e2e/rules_api_test.go +++ b/test/e2e/rules_api_test.go @@ -13,7 +13,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/integration/e2e" + "github.com/efficientgo/e2e" "github.com/go-kit/kit/log" "github.com/pkg/errors" @@ -30,76 +30,67 @@ import ( func TestRulesAPI_Fanout(t *testing.T) { t.Parallel() - netName := "e2e_test_rules_fanout" - - s, err := e2e.NewScenario(netName) + e, err := e2e.NewDockerEnvironment("e2e_test_rules_fanout") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) promRulesSubDir := filepath.Join("rules") - testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), promRulesSubDir), os.ModePerm)) + testutil.Ok(t, os.MkdirAll(filepath.Join(e.SharedDir(), promRulesSubDir), os.ModePerm)) // Create the abort_on_partial_response alert for Prometheus. // We don't create the warn_on_partial_response alert as Prometheus has strict yaml unmarshalling. - createRuleFile(t, filepath.Join(s.SharedDir(), promRulesSubDir, "rules.yaml"), testAlertRuleAbortOnPartialResponse) + createRuleFile(t, filepath.Join(e.SharedDir(), promRulesSubDir, "rules.yaml"), testAlertRuleAbortOnPartialResponse) thanosRulesSubDir := filepath.Join("thanos-rules") - testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), thanosRulesSubDir), os.ModePerm)) - createRuleFiles(t, filepath.Join(s.SharedDir(), thanosRulesSubDir)) + testutil.Ok(t, os.MkdirAll(filepath.Join(e.SharedDir(), thanosRulesSubDir), os.ModePerm)) + createRuleFiles(t, filepath.Join(e.SharedDir(), thanosRulesSubDir)) // 2x Prometheus. prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar( - s.SharedDir(), - netName, + e, "prom1", - defaultPromConfig("ha", 0, "", filepath.Join(e2e.ContainerSharedDir, promRulesSubDir, "*.yaml")), + defaultPromConfig("ha", 0, "", filepath.Join(e2ethanos.ContainerSharedDir, promRulesSubDir, "*.yaml")), e2ethanos.DefaultPrometheusImage(), ) testutil.Ok(t, err) prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar( - s.SharedDir(), - netName, + e, "prom2", - defaultPromConfig("ha", 1, "", filepath.Join(e2e.ContainerSharedDir, promRulesSubDir, "*.yaml")), + defaultPromConfig("ha", 1, "", filepath.Join(e2ethanos.ContainerSharedDir, promRulesSubDir, "*.yaml")), e2ethanos.DefaultPrometheusImage(), ) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) + testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) - // 2x Rulers. - r1, err := e2ethanos.NewRuler(s.SharedDir(), "rule1", thanosRulesSubDir, nil, nil) - testutil.Ok(t, err) - r2, err := e2ethanos.NewRuler(s.SharedDir(), "rule2", thanosRulesSubDir, nil, nil) - testutil.Ok(t, err) - - stores := []string{sidecar1.GRPCNetworkEndpoint(), sidecar2.GRPCNetworkEndpoint(), r1.NetworkEndpointFor(s.NetworkName(), 9091), r2.NetworkEndpointFor(s.NetworkName(), 9091)} - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "query", stores...). - WithRuleAddresses(stores...). - Build() - testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + qBuilder := e2ethanos.NewQuerierBuilder(e, "query") + qUninit := qBuilder.BuildUninitiated() queryCfg := []query.Config{ { EndpointsConfig: http_util.EndpointsConfig{ - StaticAddresses: []string{q.NetworkHTTPEndpoint()}, + StaticAddresses: []string{qUninit.InternalEndpoint("http")}, Scheme: "http", }, }, } // Recreate rulers with the corresponding query config. 
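The hunk above and the one below untangle a circular dependency: the rulers need the querier's HTTP address in their query config, while the querier needs the rulers' gRPC addresses as stores. The builder's two-phase construction resolves it, sketched here with the calls from this file:

	// Phase 1: reserve the querier; its in-network endpoint is fixed even
	// though nothing has started yet.
	qBuilder := e2ethanos.NewQuerierBuilder(e, "query")
	qUninit := qBuilder.BuildUninitiated()

	queryCfg := []query.Config{{
		EndpointsConfig: http_util.EndpointsConfig{
			// Safe to reference before the querier runs.
			StaticAddresses: []string{qUninit.InternalEndpoint("http")},
			Scheme:          "http",
		},
	}}

	// Rulers are created once, already pointing at the querier.
	r1, err := e2ethanos.NewRuler(e, "rule1", thanosRulesSubDir, nil, queryCfg)
	testutil.Ok(t, err)
	testutil.Ok(t, e2e.StartAndWaitReady(r1))

	// Phase 2: initiate the querier with the now-known store addresses.
	q, err := qBuilder.WithRuleAddresses(r1.InternalEndpoint("grpc")).
		Initiate(qUninit, r1.InternalEndpoint("grpc"))
	testutil.Ok(t, err)
	testutil.Ok(t, e2e.StartAndWaitReady(q))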
- r1, err = e2ethanos.NewRuler(s.SharedDir(), "rule1", thanosRulesSubDir, nil, queryCfg) + r1, err := e2ethanos.NewRuler(e, "rule1", thanosRulesSubDir, nil, queryCfg) + testutil.Ok(t, err) + r2, err := e2ethanos.NewRuler(e, "rule2", thanosRulesSubDir, nil, queryCfg) testutil.Ok(t, err) - r2, err = e2ethanos.NewRuler(s.SharedDir(), "rule2", thanosRulesSubDir, nil, queryCfg) + testutil.Ok(t, e2e.StartAndWaitReady(r1, r2)) + + stores := []string{sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc"), r1.InternalEndpoint("grpc"), r2.InternalEndpoint("grpc")} + q, err := qBuilder.WithRuleAddresses(stores...).Initiate(qUninit, stores...) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(r1, r2)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) t.Cleanup(cancel) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(4), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(4), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) - ruleAndAssert(t, ctx, q.HTTPEndpoint(), "", []*rulespb.RuleGroup{ + ruleAndAssert(t, ctx, q.Endpoint("http"), "", []*rulespb.RuleGroup{ { Name: "example_abort", File: "/shared/rules/rules.yaml", diff --git a/test/e2e/store_gateway_test.go b/test/e2e/store_gateway_test.go index 92f51ac93cc..06b3b75f8a7 100644 --- a/test/e2e/store_gateway_test.go +++ b/test/e2e/store_gateway_test.go @@ -12,8 +12,8 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/integration/e2e" - e2edb "github.com/cortexproject/cortex/integration/e2e/db" + "github.com/efficientgo/e2e" + e2edb "github.com/efficientgo/e2e/db" "github.com/go-kit/kit/log" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" @@ -36,20 +36,21 @@ import ( func TestStoreGateway(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_store_gateway") + e, err := e2e.NewDockerEnvironment("e2e_test_store_gateway") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) - m := e2edb.NewMinio(8080, "thanos") - testutil.Ok(t, s.StartAndWaitReady(m)) + const bucket = "store_gateway_test" + m := e2ethanos.NewMinio(e, "thanos-minio", bucket) + testutil.Ok(t, e2e.StartAndWaitReady(m)) - s1, err := e2ethanos.NewStoreGW(s.SharedDir(), "1", client.BucketConfig{ + s1, err := e2ethanos.NewStoreGW(e, "1", client.BucketConfig{ Type: client.S3, Config: s3.Config{ - Bucket: "thanos", + Bucket: bucket, AccessKey: e2edb.MinioAccessKey, SecretKey: e2edb.MinioSecretKey, - Endpoint: m.NetworkHTTPEndpoint(), + Endpoint: m.InternalEndpoint("http"), Insecure: true, }, }, relabel.Config{ @@ -58,16 +59,16 @@ func TestStoreGateway(t *testing.T) { SourceLabels: model.LabelNames{"ext1"}, }) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(s1)) + testutil.Ok(t, e2e.StartAndWaitReady(s1)) // Ensure bucket UI. 
- ensureGETStatusCode(t, http.StatusOK, "http://"+path.Join(s1.HTTPEndpoint(), "loaded")) + ensureGETStatusCode(t, http.StatusOK, "http://"+path.Join(s1.Endpoint("http"), "loaded")) - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", s1.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", s1.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) - dir := filepath.Join(s.SharedDir(), "tmp") - testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), dir), os.ModePerm)) + dir := filepath.Join(e.SharedDir(), "tmp") + testutil.Ok(t, os.MkdirAll(filepath.Join(e.SharedDir(), dir), os.ModePerm)) series := []labels.Labels{labels.FromStrings("a", "1", "b", "2")} extLset := labels.FromStrings("ext1", "value1", "replica", "1") @@ -75,7 +76,7 @@ func TestStoreGateway(t *testing.T) { extLset3 := labels.FromStrings("ext1", "value2", "replica", "3") extLset4 := labels.FromStrings("ext1", "value1", "replica", "3") - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) t.Cleanup(cancel) now := time.Now() @@ -89,10 +90,10 @@ func TestStoreGateway(t *testing.T) { testutil.Ok(t, err) l := log.NewLogfmtLogger(os.Stdout) bkt, err := s3.NewBucketWithConfig(l, s3.Config{ - Bucket: "thanos", + Bucket: bucket, AccessKey: e2edb.MinioAccessKey, SecretKey: e2edb.MinioSecretKey, - Endpoint: m.HTTPEndpoint(), // We need separate client config, when connecting to minio from outside. + Endpoint: m.Endpoint("http"), // We need separate client config, when connecting to minio from outside. Insecure: true, }, "test-feed") testutil.Ok(t, err) @@ -112,7 +113,7 @@ func TestStoreGateway(t *testing.T) { testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(0), "thanos_bucket_store_block_load_failures_total")) t.Run("query works", func(t *testing.T) { - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), "{a=\"1\"}", + queryAndAssertSeries(t, ctx, q.Endpoint("http"), "{a=\"1\"}", promclient.QueryOptions{ Deduplicate: false, }, @@ -137,7 +138,7 @@ func TestStoreGateway(t *testing.T) { testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(6), "thanos_bucket_store_series_data_fetched")) testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(2), "thanos_bucket_store_series_blocks_queried")) - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), "{a=\"1\"}", + queryAndAssertSeries(t, ctx, q.Endpoint("http"), "{a=\"1\"}", promclient.QueryOptions{ Deduplicate: true, }, @@ -167,7 +168,7 @@ func TestStoreGateway(t *testing.T) { testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(0), "thanos_bucket_store_block_load_failures_total")) // TODO(bwplotka): Entries are still in LRU cache. 
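One subtlety this file encodes twice: the same Minio bucket needs two different endpoints depending on who is connecting. A sketch of the pair of S3 client configs, mirroring the code above:

	// Handed to the store gateway, which runs inside the Docker network:
	inNetwork := s3.Config{
		Bucket:    bucket,
		AccessKey: e2edb.MinioAccessKey,
		SecretKey: e2edb.MinioSecretKey,
		Endpoint:  m.InternalEndpoint("http"), // in-network address
		Insecure:  true,
	}

	// Used by the test process itself, which runs on the host, hence the
	// "separate client config, when connecting to minio from outside":
	fromHost := s3.Config{
		Bucket:    bucket,
		AccessKey: e2edb.MinioAccessKey,
		SecretKey: e2edb.MinioSecretKey,
		Endpoint:  m.Endpoint("http"), // host-mapped address
		Insecure:  true,
	}
	_, _ = inNetwork, fromHost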
- queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), "{a=\"1\"}", + queryAndAssertSeries(t, ctx, q.Endpoint("http"), "{a=\"1\"}", promclient.QueryOptions{ Deduplicate: false, }, @@ -196,7 +197,7 @@ func TestStoreGateway(t *testing.T) { testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(1), "thanos_bucket_store_block_drops_total")) testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(0), "thanos_bucket_store_block_load_failures_total")) - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), "{a=\"1\"}", + queryAndAssertSeries(t, ctx, q.Endpoint("http"), "{a=\"1\"}", promclient.QueryOptions{ Deduplicate: false, }, @@ -229,7 +230,7 @@ func TestStoreGateway(t *testing.T) { testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(1+1), "thanos_bucket_store_block_drops_total")) testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(0), "thanos_bucket_store_block_load_failures_total")) - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), "{a=\"1\"}", + queryAndAssertSeries(t, ctx, q.Endpoint("http"), "{a=\"1\"}", promclient.QueryOptions{ Deduplicate: false, }, diff --git a/test/e2e/targets_api_test.go b/test/e2e/targets_api_test.go index 7b8bb33fb1f..a3b2d4a6156 100644 --- a/test/e2e/targets_api_test.go +++ b/test/e2e/targets_api_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/integration/e2e" + "github.com/efficientgo/e2e" "github.com/go-kit/kit/log" "github.com/pkg/errors" @@ -29,44 +29,40 @@ func TestTargetsAPI_Fanout(t *testing.T) { t.Parallel() - netName := "e2e_test_targets_fanout" - - s, err := e2e.NewScenario(netName) + e, err := e2e.NewDockerEnvironment("e2e_test_targets_fanout") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) // 2x Prometheus. prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar( - s.SharedDir(), - netName, + e, "prom1", defaultPromConfig("ha", 0, "", "", "localhost:9090", "localhost:80"), e2ethanos.DefaultPrometheusImage(), ) testutil.Ok(t, err) prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar( - s.SharedDir(), - netName, + e, "prom2", defaultPromConfig("ha", 1, "", "", "localhost:9090", "localhost:80"), e2ethanos.DefaultPrometheusImage(), ) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) + testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) - stores := []string{sidecar1.GRPCNetworkEndpoint(), sidecar2.GRPCNetworkEndpoint()} - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "query", stores...). + stores := []string{sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc")} + q, err := e2ethanos.NewQuerierBuilder(e, "query", stores...). WithTargetAddresses(stores...). 
Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) t.Cleanup(cancel) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(2), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(2), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) - targetAndAssert(t, ctx, q.HTTPEndpoint(), "", &targetspb.TargetDiscovery{ + targetAndAssert(t, ctx, q.Endpoint("http"), "", &targetspb.TargetDiscovery{ ActiveTargets: []*targetspb.ActiveTarget{ { DiscoveredLabels: labelpb.ZLabelSet{Labels: []labelpb.ZLabel{ diff --git a/test/e2e/tools_bucket_web_test.go b/test/e2e/tools_bucket_web_test.go index 97d6c0db009..0c9f03bac60 100644 --- a/test/e2e/tools_bucket_web_test.go +++ b/test/e2e/tools_bucket_web_test.go @@ -15,8 +15,8 @@ import ( "testing" "time" - "github.com/cortexproject/cortex/integration/e2e" - e2edb "github.com/cortexproject/cortex/integration/e2e/db" + "github.com/efficientgo/e2e" + e2edb "github.com/efficientgo/e2e/db" "github.com/go-kit/kit/log" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/timestamp" @@ -25,6 +25,7 @@ import ( "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/objstore/s3" + "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/test/e2e/e2ethanos" ) @@ -32,26 +33,29 @@ import ( func TestToolsBucketWebExternalPrefixWithoutReverseProxy(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_tools_bucket_web_route_prefix") + e, err := e2e.NewDockerEnvironment("e2e_test_tools_bucket_web_route_prefix") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) externalPrefix := "testThanos" - m := e2edb.NewMinio(8080, "thanos") - testutil.Ok(t, s.StartAndWaitReady(m)) + + const bucket = "compact_test" + m := e2ethanos.NewMinio(e, "thanos", bucket) + testutil.Ok(t, e2e.StartAndWaitReady(m)) svcConfig := client.BucketConfig{ Type: client.S3, Config: s3.Config{ - Bucket: "thanos", + Bucket: bucket, AccessKey: e2edb.MinioAccessKey, SecretKey: e2edb.MinioSecretKey, - Endpoint: m.NetworkHTTPEndpoint(), + Endpoint: m.Endpoint("http"), Insecure: true, }, } b, err := e2ethanos.NewToolsBucketWeb( + e, "1", svcConfig, "", @@ -61,22 +65,22 @@ func TestToolsBucketWebExternalPrefixWithoutReverseProxy(t *testing.T) { "", ) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(b)) + testutil.Ok(t, e2e.StartAndWaitReady(b)) - checkNetworkRequests(t, "http://"+b.HTTPEndpoint()+"/"+externalPrefix+"/blocks") + checkNetworkRequests(t, "http://"+b.Endpoint("http")+"/"+externalPrefix+"/blocks") } func TestToolsBucketWebExternalPrefix(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_tools_bucket_web_external_prefix") + e, err := e2e.NewDockerEnvironment("e2e_test_tools_bucket_web_external_prefix") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) externalPrefix := "testThanos" const bucket = "toolsBucketWeb_test" - m := e2edb.NewMinio(8080, bucket) - testutil.Ok(t, s.StartAndWaitReady(m)) + m := e2ethanos.NewMinio(e, "thanos", bucket) + testutil.Ok(t, e2e.StartAndWaitReady(m)) svcConfig := client.BucketConfig{ Type: client.S3, @@ -84,12 +88,13 @@ func 
TestToolsBucketWebExternalPrefix(t *testing.T) { Bucket: bucket, AccessKey: e2edb.MinioAccessKey, SecretKey: e2edb.MinioSecretKey, - Endpoint: m.NetworkHTTPEndpoint(), + Endpoint: m.Endpoint("http"), Insecure: true, }, } b, err := e2ethanos.NewToolsBucketWeb( + e, "1", svcConfig, "", @@ -99,9 +104,9 @@ func TestToolsBucketWebExternalPrefix(t *testing.T) { "", ) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(b)) + testutil.Ok(t, e2e.StartAndWaitReady(b)) - toolsBucketWebURL := mustURLParse(t, "http://"+b.HTTPEndpoint()+"/"+externalPrefix) + toolsBucketWebURL := mustURLParse(t, "http://"+b.Endpoint("http")+"/"+externalPrefix) toolsBucketWebProxy := httptest.NewServer(e2ethanos.NewSingleHostReverseProxy(toolsBucketWebURL, externalPrefix)) t.Cleanup(toolsBucketWebProxy.Close) @@ -112,15 +117,15 @@ func TestToolsBucketWebExternalPrefix(t *testing.T) { func TestToolsBucketWebExternalPrefixAndRoutePrefix(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_tools_bucket_web_and_route_prefix") + e, err := e2e.NewDockerEnvironment("e2e_test_tools_bucket_web_and_route_prefix") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) externalPrefix := "testThanos" routePrefix := "test" const bucket = "toolsBucketWeb_test" - m := e2edb.NewMinio(8080, bucket) - testutil.Ok(t, s.StartAndWaitReady(m)) + m := e2ethanos.NewMinio(e, "thanos", bucket) + testutil.Ok(t, e2e.StartAndWaitReady(m)) svcConfig := client.BucketConfig{ Type: client.S3, @@ -128,12 +133,13 @@ func TestToolsBucketWebExternalPrefixAndRoutePrefix(t *testing.T) { Bucket: bucket, AccessKey: e2edb.MinioAccessKey, SecretKey: e2edb.MinioSecretKey, - Endpoint: m.NetworkHTTPEndpoint(), + Endpoint: m.Endpoint("http"), Insecure: true, }, } b, err := e2ethanos.NewToolsBucketWeb( + e, "1", svcConfig, routePrefix, @@ -143,9 +149,9 @@ func TestToolsBucketWebExternalPrefixAndRoutePrefix(t *testing.T) { "", ) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(b)) + testutil.Ok(t, e2e.StartAndWaitReady(b)) - toolsBucketWebURL := mustURLParse(t, "http://"+b.HTTPEndpoint()+"/"+routePrefix) + toolsBucketWebURL := mustURLParse(t, "http://"+b.Endpoint("http")+"/"+routePrefix) toolsBucketWebProxy := httptest.NewServer(e2ethanos.NewSingleHostReverseProxy(toolsBucketWebURL, externalPrefix)) t.Cleanup(toolsBucketWebProxy.Close) @@ -156,25 +162,25 @@ func TestToolsBucketWebExternalPrefixAndRoutePrefix(t *testing.T) { func TestToolsBucketWebWithTimeAndRelabelFilter(t *testing.T) { t.Parallel() // Create network. - s, err := e2e.NewScenario("e2e_test_tools_bucket_web_time_and_relabel_filter") + e, err := e2e.NewDockerEnvironment("e2e_test_tools_bucket_web_time_and_relabel_filter") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) // Create Minio. const bucket = "toolsBucketWeb_test" - m := e2edb.NewMinio(8080, bucket) - testutil.Ok(t, s.StartAndWaitReady(m)) + m := e2ethanos.NewMinio(e, "thanos", bucket) + testutil.Ok(t, e2e.StartAndWaitReady(m)) // Create bucket. logger := log.NewLogfmtLogger(os.Stdout) bkt, err := s3.NewBucketWithConfig(logger, s3.Config{ Bucket: bucket, AccessKey: e2edb.MinioAccessKey, SecretKey: e2edb.MinioSecretKey, - Endpoint: m.HTTPEndpoint(), + Endpoint: m.Endpoint("http"), Insecure: true, }, "tools") testutil.Ok(t, err) // Create share dir for upload. - dir := filepath.Join(s.SharedDir(), "tmp") + dir := filepath.Join(e.SharedDir(), "tmp") testutil.Ok(t, os.MkdirAll(dir, os.ModePerm)) // Upload blocks. 
now, err := time.Parse(time.RFC3339, "2021-07-24T08:00:00Z") @@ -200,9 +206,14 @@ func TestToolsBucketWebWithTimeAndRelabelFilter(t *testing.T) { }, } for _, b := range blocks { - id, err := b.Create(context.Background(), dir, 0, b.hashFunc) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + id, err := b.Create(ctx, dir, 0, b.hashFunc) testutil.Ok(t, err) - testutil.Ok(t, objstore.UploadDir(context.Background(), logger, bkt, path.Join(dir, id.String()), id.String())) + testutil.Ok(t, runutil.Retry(time.Second, ctx.Done(), func() error { + return objstore.UploadDir(ctx, logger, bkt, path.Join(dir, id.String()), id.String()) + })) } // Start thanos tool bucket web. svcConfig := client.BucketConfig{ @@ -211,11 +222,12 @@ func TestToolsBucketWebWithTimeAndRelabelFilter(t *testing.T) { Bucket: bucket, AccessKey: e2edb.MinioAccessKey, SecretKey: e2edb.MinioSecretKey, - Endpoint: m.NetworkHTTPEndpoint(), + Endpoint: m.InternalEndpoint("http"), Insecure: true, }, } b, err := e2ethanos.NewToolsBucketWeb( + e, "1", svcConfig, "", @@ -228,9 +240,9 @@ func TestToolsBucketWebWithTimeAndRelabelFilter(t *testing.T) { source_labels: ["tenant_id"]`, ) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(b)) + testutil.Ok(t, e2e.StartAndWaitReady(b)) // Request blocks api. - resp, err := http.DefaultClient.Get("http://" + b.HTTPEndpoint() + "/api/v1/blocks") + resp, err := http.DefaultClient.Get("http://" + b.Endpoint("http") + "/api/v1/blocks") testutil.Ok(t, err) testutil.Equals(t, http.StatusOK, resp.StatusCode) defer resp.Body.Close() From 35b88ab3c84c5c82fb4ecd2968ea744c5ff2428b Mon Sep 17 00:00:00 2001 From: Tathagata Paul Date: Fri, 1 Oct 2021 12:18:32 +0530 Subject: [PATCH 10/33] Fix bucket tools to dry run with multiple block ids (#4716) Signed-off-by: 4molybdenum2 --- cmd/thanos/tools_bucket.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/thanos/tools_bucket.go b/cmd/thanos/tools_bucket.go index cacd26eb88d..9c0c837a69f 100644 --- a/cmd/thanos/tools_bucket.go +++ b/cmd/thanos/tools_bucket.go @@ -1133,8 +1133,8 @@ func registerBucketRewrite(app extkingpin.AppClause, objStoreConfig *extflag.Pat } if tbc.dryRun { - level.Info(logger).Log("msg", "dry run finished. Changes should be printed to stderr") - return nil + level.Info(logger).Log("msg", "dry run finished. Changes should be printed to stderr", "Block ID", id) + continue } level.Info(logger).Log("msg", "wrote new block after modifications; flushing", "source", id, "new", newID) From 6b296fdd81b0cbd7013436265b6c625ade2f61cd Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Fri, 1 Oct 2021 09:03:22 +0200 Subject: [PATCH 11/33] Moved http configurations packages around [Refactor] (#4715) * Moved http configurations around. Signed-off-by: Bartlomiej Plotka * Moved HTTP configurations around. 
Signed-off-by: Bartlomiej Plotka --- cmd/thanos/rule.go | 33 ++++++++++++------------ cmd/thanos/sidecar.go | 4 +-- pkg/alert/config.go | 20 +++++++------- pkg/alert/config_test.go | 20 +++++++------- pkg/{query => httpconfig}/config.go | 26 +++++++++---------- pkg/{query => httpconfig}/config_test.go | 17 ++++++------ pkg/{http => httpconfig}/http.go | 6 ++--- pkg/store/prometheus.go | 4 +-- scripts/cfggen/main.go | 14 +++++----- test/e2e/e2ethanos/services.go | 4 +-- test/e2e/rule_test.go | 13 +++++----- test/e2e/rules_api_test.go | 7 +++-- 12 files changed, 81 insertions(+), 87 deletions(-) rename pkg/{query => httpconfig}/config.go (63%) rename pkg/{query => httpconfig}/config_test.go (83%) rename pkg/{http => httpconfig}/http.go (97%) diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go index 6a6cb9cd672..d5893edd2a0 100644 --- a/cmd/thanos/rule.go +++ b/cmd/thanos/rule.go @@ -34,6 +34,7 @@ import ( "github.com/prometheus/prometheus/util/strutil" "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/extkingpin" + "github.com/thanos-io/thanos/pkg/httpconfig" extflag "github.com/efficientgo/tools/extkingpin" "github.com/thanos-io/thanos/pkg/alert" @@ -43,12 +44,10 @@ import ( "github.com/thanos-io/thanos/pkg/discovery/dns" "github.com/thanos-io/thanos/pkg/extprom" extpromhttp "github.com/thanos-io/thanos/pkg/extprom/http" - http_util "github.com/thanos-io/thanos/pkg/http" "github.com/thanos-io/thanos/pkg/logging" "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/promclient" - "github.com/thanos-io/thanos/pkg/query" thanosrules "github.com/thanos-io/thanos/pkg/rules" "github.com/thanos-io/thanos/pkg/runutil" grpcserver "github.com/thanos-io/thanos/pkg/server/grpc" @@ -266,29 +265,29 @@ func runRule( ) error { metrics := newRuleMetrics(reg) - var queryCfg []query.Config + var queryCfg []httpconfig.Config var err error if len(conf.queryConfigYAML) > 0 { - queryCfg, err = query.LoadConfigs(conf.queryConfigYAML) + queryCfg, err = httpconfig.LoadConfigs(conf.queryConfigYAML) if err != nil { return err } } else { - queryCfg, err = query.BuildQueryConfig(conf.query.addrs) + queryCfg, err = httpconfig.BuildConfig(conf.query.addrs) if err != nil { - return err + return errors.Wrap(err, "query configuration") } // Build the query configuration from the legacy query flags. 
- var fileSDConfigs []http_util.FileSDConfig + var fileSDConfigs []httpconfig.FileSDConfig if len(conf.query.sdFiles) > 0 { - fileSDConfigs = append(fileSDConfigs, http_util.FileSDConfig{ + fileSDConfigs = append(fileSDConfigs, httpconfig.FileSDConfig{ Files: conf.query.sdFiles, RefreshInterval: model.Duration(conf.query.sdInterval), }) queryCfg = append(queryCfg, - query.Config{ - EndpointsConfig: http_util.EndpointsConfig{ + httpconfig.Config{ + EndpointsConfig: httpconfig.EndpointsConfig{ Scheme: "http", FileSDConfigs: fileSDConfigs, }, @@ -302,16 +301,16 @@ func runRule( extprom.WrapRegistererWithPrefix("thanos_rule_query_apis_", reg), dns.ResolverType(conf.query.dnsSDResolver), ) - var queryClients []*http_util.Client + var queryClients []*httpconfig.Client queryClientMetrics := extpromhttp.NewClientMetrics(extprom.WrapRegistererWith(prometheus.Labels{"client": "query"}, reg)) for _, cfg := range queryCfg { cfg.HTTPClientConfig.ClientMetrics = queryClientMetrics - c, err := http_util.NewHTTPClient(cfg.HTTPClientConfig, "query") + c, err := httpconfig.NewHTTPClient(cfg.HTTPClientConfig, "query") if err != nil { return err } c.Transport = tracing.HTTPTripperware(logger, c.Transport) - queryClient, err := http_util.NewClient(logger, cfg.EndpointsConfig, c, queryProvider.Clone()) + queryClient, err := httpconfig.NewClient(logger, cfg.EndpointsConfig, c, queryProvider.Clone()) if err != nil { return err } @@ -381,13 +380,13 @@ func runRule( ) for _, cfg := range alertingCfg.Alertmanagers { cfg.HTTPClientConfig.ClientMetrics = amClientMetrics - c, err := http_util.NewHTTPClient(cfg.HTTPClientConfig, "alertmanager") + c, err := httpconfig.NewHTTPClient(cfg.HTTPClientConfig, "alertmanager") if err != nil { return err } c.Transport = tracing.HTTPTripperware(logger, c.Transport) // Each Alertmanager client has a different list of targets thus each needs its own DNS provider. 
- amClient, err := http_util.NewClient(logger, cfg.EndpointsConfig, c, amProvider.Clone()) + amClient, err := httpconfig.NewClient(logger, cfg.EndpointsConfig, c, amProvider.Clone()) if err != nil { return err } @@ -706,7 +705,7 @@ func removeDuplicateQueryEndpoints(logger log.Logger, duplicatedQueriers prometh func queryFuncCreator( logger log.Logger, - queriers []*http_util.Client, + queriers []*httpconfig.Client, duplicatedQuery prometheus.Counter, ruleEvalWarnings *prometheus.CounterVec, httpMethod string, @@ -762,7 +761,7 @@ func queryFuncCreator( } } -func addDiscoveryGroups(g *run.Group, c *http_util.Client, interval time.Duration) { +func addDiscoveryGroups(g *run.Group, c *httpconfig.Client, interval time.Duration) { ctx, cancel := context.WithCancel(context.Background()) g.Add(func() error { c.Discover(ctx) diff --git a/cmd/thanos/sidecar.go b/cmd/thanos/sidecar.go index fbed45f4fa9..8584492b4fe 100644 --- a/cmd/thanos/sidecar.go +++ b/cmd/thanos/sidecar.go @@ -29,7 +29,7 @@ import ( "github.com/thanos-io/thanos/pkg/exthttp" "github.com/thanos-io/thanos/pkg/extkingpin" "github.com/thanos-io/thanos/pkg/extprom" - thanoshttp "github.com/thanos-io/thanos/pkg/http" + "github.com/thanos-io/thanos/pkg/httpconfig" "github.com/thanos-io/thanos/pkg/logging" meta "github.com/thanos-io/thanos/pkg/metadata" thanosmodel "github.com/thanos-io/thanos/pkg/model" @@ -228,7 +228,7 @@ func runSidecar( t := exthttp.NewTransport() t.MaxIdleConnsPerHost = conf.connection.maxIdleConnsPerHost t.MaxIdleConns = conf.connection.maxIdleConns - c := promclient.NewClient(&http.Client{Transport: tracing.HTTPTripperware(logger, t)}, logger, thanoshttp.ThanosUserAgent) + c := promclient.NewClient(&http.Client{Transport: tracing.HTTPTripperware(logger, t)}, logger, httpconfig.ThanosUserAgent) promStore, err := store.NewPrometheusStore(logger, reg, c, conf.prometheus.url, component.Sidecar, m.Labels, m.Timestamps, m.Version) if err != nil { diff --git a/pkg/alert/config.go b/pkg/alert/config.go index b6d5c0b33ee..1572821cf28 100644 --- a/pkg/alert/config.go +++ b/pkg/alert/config.go @@ -13,10 +13,10 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/relabel" + "github.com/thanos-io/thanos/pkg/httpconfig" "gopkg.in/yaml.v2" "github.com/thanos-io/thanos/pkg/discovery/dns" - http_util "github.com/thanos-io/thanos/pkg/http" ) type AlertingConfig struct { @@ -25,10 +25,10 @@ type AlertingConfig struct { // AlertmanagerConfig represents a client to a cluster of Alertmanager endpoints. type AlertmanagerConfig struct { - HTTPClientConfig http_util.ClientConfig `yaml:"http_config"` - EndpointsConfig http_util.EndpointsConfig `yaml:",inline"` - Timeout model.Duration `yaml:"timeout"` - APIVersion APIVersion `yaml:"api_version"` + HTTPClientConfig httpconfig.ClientConfig `yaml:"http_config"` + EndpointsConfig httpconfig.EndpointsConfig `yaml:",inline"` + Timeout model.Duration `yaml:"timeout"` + APIVersion APIVersion `yaml:"api_version"` } // APIVersion represents the API version of the Alertmanager endpoint. 
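(Aside on this refactor: to see how the relocated package is meant to be consumed, below is a minimal sketch assembled only from the `httpconfig` signatures visible in this patch — `BuildConfig`, `NewHTTPClient`, `NewClient` — mirroring the `cmd/thanos/rule.go` wiring above. The function name and the `provider`/`addrs` parameters are illustrative, not part of the change.)

```go
package example

import (
	"github.com/go-kit/kit/log"
	"github.com/pkg/errors"

	"github.com/thanos-io/thanos/pkg/discovery/dns"
	"github.com/thanos-io/thanos/pkg/httpconfig"
)

// buildQueryClients mirrors the cmd/thanos/rule.go wiring from this patch:
// parse static addresses into httpconfig.Config entries, then build one raw
// HTTP client plus one endpoint-discovery client per entry.
func buildQueryClients(logger log.Logger, provider *dns.Provider, addrs []string) ([]*httpconfig.Client, error) {
	cfgs, err := httpconfig.BuildConfig(addrs)
	if err != nil {
		return nil, errors.Wrap(err, "query configuration")
	}
	clients := make([]*httpconfig.Client, 0, len(cfgs))
	for _, cfg := range cfgs {
		// NewHTTPClient turns the YAML-driven client options into an *http.Client.
		c, err := httpconfig.NewHTTPClient(cfg.HTTPClientConfig, "query")
		if err != nil {
			return nil, err
		}
		// NewClient wraps it with static-address and file-SD endpoint discovery.
		qc, err := httpconfig.NewClient(logger, cfg.EndpointsConfig, c, provider.Clone())
		if err != nil {
			return nil, err
		}
		clients = append(clients, qc)
	}
	return clients, nil
}
```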
@@ -61,10 +61,10 @@ func (v *APIVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { func DefaultAlertmanagerConfig() AlertmanagerConfig { return AlertmanagerConfig{ - EndpointsConfig: http_util.EndpointsConfig{ + EndpointsConfig: httpconfig.EndpointsConfig{ Scheme: "http", StaticAddresses: []string{}, - FileSDConfigs: []http_util.FileSDConfig{}, + FileSDConfigs: []httpconfig.FileSDConfig{}, }, Timeout: model.Duration(time.Second * 10), APIVersion: APIv1, @@ -111,7 +111,7 @@ func BuildAlertmanagerConfig(address string, timeout time.Duration) (Alertmanage break } } - var basicAuth http_util.BasicAuth + var basicAuth httpconfig.BasicAuth if parsed.User != nil && parsed.User.String() != "" { basicAuth.Username = parsed.User.Username() pw, _ := parsed.User.Password() @@ -119,10 +119,10 @@ func BuildAlertmanagerConfig(address string, timeout time.Duration) (Alertmanage } return AlertmanagerConfig{ - HTTPClientConfig: http_util.ClientConfig{ + HTTPClientConfig: httpconfig.ClientConfig{ BasicAuth: basicAuth, }, - EndpointsConfig: http_util.EndpointsConfig{ + EndpointsConfig: httpconfig.EndpointsConfig{ PathPrefix: parsed.Path, Scheme: scheme, StaticAddresses: []string{host}, diff --git a/pkg/alert/config_test.go b/pkg/alert/config_test.go index 71aaee399cf..11920a342de 100644 --- a/pkg/alert/config_test.go +++ b/pkg/alert/config_test.go @@ -9,7 +9,7 @@ import ( "gopkg.in/yaml.v2" - "github.com/thanos-io/thanos/pkg/http" + "github.com/thanos-io/thanos/pkg/httpconfig" "github.com/thanos-io/thanos/pkg/testutil" ) @@ -54,7 +54,7 @@ func TestBuildAlertmanagerConfiguration(t *testing.T) { { address: "http://localhost:9093", expected: AlertmanagerConfig{ - EndpointsConfig: http.EndpointsConfig{ + EndpointsConfig: httpconfig.EndpointsConfig{ StaticAddresses: []string{"localhost:9093"}, Scheme: "http", }, @@ -64,7 +64,7 @@ func TestBuildAlertmanagerConfiguration(t *testing.T) { { address: "https://am.example.com", expected: AlertmanagerConfig{ - EndpointsConfig: http.EndpointsConfig{ + EndpointsConfig: httpconfig.EndpointsConfig{ StaticAddresses: []string{"am.example.com"}, Scheme: "https", }, @@ -74,7 +74,7 @@ func TestBuildAlertmanagerConfiguration(t *testing.T) { { address: "dns+http://localhost:9093", expected: AlertmanagerConfig{ - EndpointsConfig: http.EndpointsConfig{ + EndpointsConfig: httpconfig.EndpointsConfig{ StaticAddresses: []string{"dns+localhost:9093"}, Scheme: "http", }, @@ -84,7 +84,7 @@ func TestBuildAlertmanagerConfiguration(t *testing.T) { { address: "dnssrv+http://localhost", expected: AlertmanagerConfig{ - EndpointsConfig: http.EndpointsConfig{ + EndpointsConfig: httpconfig.EndpointsConfig{ StaticAddresses: []string{"dnssrv+localhost"}, Scheme: "http", }, @@ -94,7 +94,7 @@ func TestBuildAlertmanagerConfiguration(t *testing.T) { { address: "ssh+http://localhost", expected: AlertmanagerConfig{ - EndpointsConfig: http.EndpointsConfig{ + EndpointsConfig: httpconfig.EndpointsConfig{ StaticAddresses: []string{"localhost"}, Scheme: "ssh+http", }, @@ -104,7 +104,7 @@ func TestBuildAlertmanagerConfiguration(t *testing.T) { { address: "dns+https://localhost/path/prefix/", expected: AlertmanagerConfig{ - EndpointsConfig: http.EndpointsConfig{ + EndpointsConfig: httpconfig.EndpointsConfig{ StaticAddresses: []string{"dns+localhost:9093"}, Scheme: "https", PathPrefix: "/path/prefix/", @@ -115,13 +115,13 @@ func TestBuildAlertmanagerConfiguration(t *testing.T) { { address: "http://user:pass@localhost:9093", expected: AlertmanagerConfig{ - HTTPClientConfig: http.ClientConfig{ - 
BasicAuth: http.BasicAuth{ + HTTPClientConfig: httpconfig.ClientConfig{ + BasicAuth: httpconfig.BasicAuth{ Username: "user", Password: "pass", }, }, - EndpointsConfig: http.EndpointsConfig{ + EndpointsConfig: httpconfig.EndpointsConfig{ StaticAddresses: []string{"localhost:9093"}, Scheme: "http", }, diff --git a/pkg/query/config.go b/pkg/httpconfig/config.go similarity index 63% rename from pkg/query/config.go rename to pkg/httpconfig/config.go index 12918e614f1..3280e333782 100644 --- a/pkg/query/config.go +++ b/pkg/httpconfig/config.go @@ -1,7 +1,7 @@ // Copyright (c) The Thanos Authors. // Licensed under the Apache License 2.0. -package query +package httpconfig import ( "fmt" @@ -11,20 +11,20 @@ import ( "gopkg.in/yaml.v2" "github.com/pkg/errors" - http_util "github.com/thanos-io/thanos/pkg/http" ) +// Config is a structure that allows pointing to various HTTP endpoints, e.g. a ruler connecting to queriers. type Config struct { - HTTPClientConfig http_util.ClientConfig `yaml:"http_config"` - EndpointsConfig http_util.EndpointsConfig `yaml:",inline"` + HTTPClientConfig ClientConfig `yaml:"http_config"` + EndpointsConfig EndpointsConfig `yaml:",inline"` } func DefaultConfig() Config { return Config{ - EndpointsConfig: http_util.EndpointsConfig{ + EndpointsConfig: EndpointsConfig{ Scheme: "http", StaticAddresses: []string{}, - FileSDConfigs: []http_util.FileSDConfig{}, + FileSDConfigs: []FileSDConfig{}, }, } } @@ -45,12 +45,12 @@ func LoadConfigs(confYAML []byte) ([]Config, error) { return queryCfg, nil } -// BuildQueryConfig returns a query client configuration from a static address. -func BuildQueryConfig(queryAddrs []string) ([]Config, error) { - configs := make([]Config, 0, len(queryAddrs)) - for i, addr := range queryAddrs { +// BuildConfig returns a configuration from static addresses. +func BuildConfig(addrs []string) ([]Config, error) { + configs := make([]Config, 0, len(addrs)) + for i, addr := range addrs { if addr == "" { - return nil, errors.Errorf("static querier address cannot be empty at index %d", i) + return nil, errors.Errorf("static address cannot be empty at index %d", i) } // If addr is missing scheme, add http. if !strings.Contains(addr, "://") { @@ -61,10 +61,10 @@ func BuildQueryConfig(queryAddrs []string) ([]Config, error) { return nil, errors.Wrapf(err, "failed to parse addr %q", addr) } if u.Scheme != "http" && u.Scheme != "https" { - return nil, errors.Errorf("%q is not supported scheme for querier address", u.Scheme) + return nil, errors.Errorf("%q is not supported scheme for address", u.Scheme) } configs = append(configs, Config{ - EndpointsConfig: http_util.EndpointsConfig{ + EndpointsConfig: EndpointsConfig{ Scheme: u.Scheme, StaticAddresses: []string{u.Host}, PathPrefix: u.Path, diff --git a/pkg/query/config_test.go b/pkg/httpconfig/config_test.go similarity index 83% rename from pkg/query/config_test.go rename to pkg/httpconfig/config_test.go index 1169df04989..fe876e859bf 100644 --- a/pkg/query/config_test.go +++ b/pkg/httpconfig/config_test.go @@ -1,16 +1,15 @@ // Copyright (c) The Thanos Authors. // Licensed under the Apache License 2.0.
-package query +package httpconfig import ( "testing" - "github.com/thanos-io/thanos/pkg/http" "github.com/thanos-io/thanos/pkg/testutil" ) -func TestBuildQueryConfig(t *testing.T) { +func TestBuildConfig(t *testing.T) { for _, tc := range []struct { desc string addresses []string @@ -21,7 +20,7 @@ func TestBuildQueryConfig(t *testing.T) { desc: "single addr without path", addresses: []string{"localhost:9093"}, expected: []Config{{ - EndpointsConfig: http.EndpointsConfig{ + EndpointsConfig: EndpointsConfig{ StaticAddresses: []string{"localhost:9093"}, Scheme: "http", }, @@ -32,13 +31,13 @@ func TestBuildQueryConfig(t *testing.T) { addresses: []string{"localhost:9093", "localhost:9094/prefix"}, expected: []Config{ { - EndpointsConfig: http.EndpointsConfig{ + EndpointsConfig: EndpointsConfig{ StaticAddresses: []string{"localhost:9093"}, Scheme: "http", }, }, { - EndpointsConfig: http.EndpointsConfig{ + EndpointsConfig: EndpointsConfig{ StaticAddresses: []string{"localhost:9094"}, Scheme: "http", PathPrefix: "/prefix", @@ -50,7 +49,7 @@ func TestBuildQueryConfig(t *testing.T) { desc: "single addr with path and http scheme", addresses: []string{"http://localhost:9093"}, expected: []Config{{ - EndpointsConfig: http.EndpointsConfig{ + EndpointsConfig: EndpointsConfig{ StaticAddresses: []string{"localhost:9093"}, Scheme: "http", }, @@ -60,7 +59,7 @@ func TestBuildQueryConfig(t *testing.T) { desc: "single addr with path and https scheme", addresses: []string{"https://localhost:9093"}, expected: []Config{{ - EndpointsConfig: http.EndpointsConfig{ + EndpointsConfig: EndpointsConfig{ StaticAddresses: []string{"localhost:9093"}, Scheme: "https", }, @@ -83,7 +82,7 @@ func TestBuildQueryConfig(t *testing.T) { }, } { t.Run(tc.desc, func(t *testing.T) { - cfg, err := BuildQueryConfig(tc.addresses) + cfg, err := BuildConfig(tc.addresses) if tc.err { testutil.NotOk(t, err) return diff --git a/pkg/http/http.go b/pkg/httpconfig/http.go similarity index 97% rename from pkg/http/http.go rename to pkg/httpconfig/http.go index 5b82a1dc4e3..b00204e4259 100644 --- a/pkg/http/http.go +++ b/pkg/httpconfig/http.go @@ -1,8 +1,8 @@ // Copyright (c) The Thanos Authors. // Licensed under the Apache License 2.0. -// Package http is a wrapper around github.com/prometheus/common/config. -package http +// Package httpconfig is a wrapper around github.com/prometheus/common/config. +package httpconfig import ( "context" @@ -50,7 +50,7 @@ type TLSConfig struct { CertFile string `yaml:"cert_file"` // The client key file for the targets. KeyFile string `yaml:"key_file"` - // Used to verify the hostname for the targets. + // Used to verify the hostname for the targets. See https://tools.ietf.org/html/rfc4366#section-3.1 ServerName string `yaml:"server_name"` // Disable target certificate validation. 
InsecureSkipVerify bool `yaml:"insecure_skip_verify"` diff --git a/pkg/store/prometheus.go b/pkg/store/prometheus.go index 2952e4467b7..ec3525ad654 100644 --- a/pkg/store/prometheus.go +++ b/pkg/store/prometheus.go @@ -27,12 +27,12 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage/remote" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/thanos-io/thanos/pkg/httpconfig" "github.com/thanos-io/thanos/pkg/store/labelpb" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/thanos-io/thanos/pkg/component" - thanoshttp "github.com/thanos-io/thanos/pkg/http" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/store/storepb" @@ -441,7 +441,7 @@ func (p *PrometheusStore) startPromRemoteRead(ctx context.Context, q *prompb.Que preq.Header.Set("Content-Type", "application/x-stream-protobuf") preq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0") - preq.Header.Set("User-Agent", thanoshttp.ThanosUserAgent) + preq.Header.Set("User-Agent", httpconfig.ThanosUserAgent) presp, err = p.client.Do(preq.WithContext(ctx)) if err != nil { return nil, errors.Wrap(err, "send request") diff --git a/scripts/cfggen/main.go b/scripts/cfggen/main.go index e58b6296707..56478558dbf 100644 --- a/scripts/cfggen/main.go +++ b/scripts/cfggen/main.go @@ -15,17 +15,15 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/httpconfig" "gopkg.in/alecthomas/kingpin.v2" "gopkg.in/yaml.v2" - "github.com/thanos-io/thanos/pkg/objstore/bos" - "github.com/thanos-io/thanos/pkg/query" - "github.com/thanos-io/thanos/pkg/alert" "github.com/thanos-io/thanos/pkg/cacheutil" - http_util "github.com/thanos-io/thanos/pkg/http" "github.com/thanos-io/thanos/pkg/logging" "github.com/thanos-io/thanos/pkg/objstore/azure" + "github.com/thanos-io/thanos/pkg/objstore/bos" "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/objstore/cos" "github.com/thanos-io/thanos/pkg/objstore/filesystem" @@ -79,12 +77,12 @@ func init() { configs[name(logging.RequestConfig{})] = logging.RequestConfig{} alertmgrCfg := alert.DefaultAlertmanagerConfig() - alertmgrCfg.EndpointsConfig.FileSDConfigs = []http_util.FileSDConfig{{}} + alertmgrCfg.EndpointsConfig.FileSDConfigs = []httpconfig.FileSDConfig{{}} configs[name(alert.AlertingConfig{})] = alert.AlertingConfig{Alertmanagers: []alert.AlertmanagerConfig{alertmgrCfg}} - queryCfg := query.DefaultConfig() - queryCfg.EndpointsConfig.FileSDConfigs = []http_util.FileSDConfig{{}} - configs[name(query.Config{})] = []query.Config{queryCfg} + queryCfg := httpconfig.DefaultConfig() + queryCfg.EndpointsConfig.FileSDConfigs = []httpconfig.FileSDConfig{{}} + configs[name(httpconfig.Config{})] = []httpconfig.Config{queryCfg} for typ, config := range bucketConfigs { configs[name(config)] = client.BucketConfig{Type: typ, Config: config} diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index e129d1dd6c0..c118254339b 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -20,11 +20,11 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/pkg/relabel" + "github.com/thanos-io/thanos/pkg/httpconfig" "gopkg.in/yaml.v2" "github.com/thanos-io/thanos/pkg/alert" "github.com/thanos-io/thanos/pkg/objstore/client" - 
"github.com/thanos-io/thanos/pkg/query" "github.com/thanos-io/thanos/pkg/queryfrontend" "github.com/thanos-io/thanos/pkg/receive" ) @@ -501,7 +501,7 @@ func NewIngestingReceiver(e e2e.Environment, name string) (*e2e.InstrumentedRunn return receiver, nil } -func NewRuler(e e2e.Environment, name, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []query.Config) (*e2e.InstrumentedRunnable, error) { +func NewRuler(e e2e.Environment, name, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []httpconfig.Config) (*e2e.InstrumentedRunnable, error) { dir := filepath.Join(e.SharedDir(), "data", "rule", name) container := filepath.Join(ContainerSharedDir, "data", "rule", name) if err := os.MkdirAll(dir, 0750); err != nil { diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index 633d70b1ed8..eeb2c20b9c1 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -18,12 +18,11 @@ import ( "github.com/efficientgo/e2e" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/thanos-io/thanos/pkg/httpconfig" "gopkg.in/yaml.v2" "github.com/thanos-io/thanos/pkg/alert" - http_util "github.com/thanos-io/thanos/pkg/http" "github.com/thanos-io/thanos/pkg/promclient" - "github.com/thanos-io/thanos/pkg/query" "github.com/thanos-io/thanos/pkg/rules/rulespb" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/test/e2e/e2ethanos" @@ -222,8 +221,8 @@ func TestRule(t *testing.T) { r, err := e2ethanos.NewRuler(e, "1", rulesSubDir, []alert.AlertmanagerConfig{ { - EndpointsConfig: http_util.EndpointsConfig{ - FileSDConfigs: []http_util.FileSDConfig{ + EndpointsConfig: httpconfig.EndpointsConfig{ + FileSDConfigs: []httpconfig.FileSDConfig{ { // FileSD which will be used to register discover dynamically am1. Files: []string{filepath.Join(e2ethanos.ContainerSharedDir, amTargetsSubDir, "*.yaml")}, @@ -238,11 +237,11 @@ func TestRule(t *testing.T) { Timeout: model.Duration(10 * time.Second), APIVersion: alert.APIv1, }, - }, []query.Config{ + }, []httpconfig.Config{ { - EndpointsConfig: http_util.EndpointsConfig{ + EndpointsConfig: httpconfig.EndpointsConfig{ // We test Statically Addressed queries in other tests. Focus on FileSD here. - FileSDConfigs: []http_util.FileSDConfig{ + FileSDConfigs: []httpconfig.FileSDConfig{ { // FileSD which will be used to register discover dynamically q. 
Files: []string{filepath.Join(e2ethanos.ContainerSharedDir, queryTargetsSubDir, "*.yaml")}, diff --git a/test/e2e/rules_api_test.go b/test/e2e/rules_api_test.go index f55a1175ae9..0d94317c8be 100644 --- a/test/e2e/rules_api_test.go +++ b/test/e2e/rules_api_test.go @@ -16,10 +16,9 @@ import ( "github.com/efficientgo/e2e" "github.com/go-kit/kit/log" "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/httpconfig" - http_util "github.com/thanos-io/thanos/pkg/http" "github.com/thanos-io/thanos/pkg/promclient" - "github.com/thanos-io/thanos/pkg/query" "github.com/thanos-io/thanos/pkg/rules/rulespb" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/store/labelpb" @@ -64,9 +63,9 @@ func TestRulesAPI_Fanout(t *testing.T) { qBuilder := e2ethanos.NewQuerierBuilder(e, "query") qUninit := qBuilder.BuildUninitiated() - queryCfg := []query.Config{ + queryCfg := []httpconfig.Config{ { - EndpointsConfig: http_util.EndpointsConfig{ + EndpointsConfig: httpconfig.EndpointsConfig{ StaticAddresses: []string{qUninit.InternalEndpoint("http")}, Scheme: "http", }, From 48a068c4e8e5d0d3ef1b31207a4126d45b99087c Mon Sep 17 00:00:00 2001 From: Aditi Ahuja <48997495+metonymic-smokey@users.noreply.github.com> Date: Sun, 3 Oct 2021 13:34:24 +0530 Subject: [PATCH 12/33] Add series label for unhealthy index debug log (#4727) * add series label to debug log Signed-off-by: metonymic-smokey * update stats outside error block Signed-off-by: metonymic-smokey * corrected OOO series log position Signed-off-by: metonymic-smokey * save series label after OOO increment Signed-off-by: metonymic-smokey * changed series label to slice Signed-off-by: metonymic-smokey * print series label on separate lines Signed-off-by: metonymic-smokey * print series label as debug log instead of saving Signed-off-by: metonymic-smokey * removed log formatting Signed-off-by: metonymic-smokey * changed log message Signed-off-by: metonymic-smokey --- pkg/block/index.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/block/index.go b/pkg/block/index.go index 851dfa9d98a..4e62cdcfca6 100644 --- a/pkg/block/index.go +++ b/pkg/block/index.go @@ -328,6 +328,7 @@ func GatherIndexHealthStats(logger log.Logger, fn string, minTime, maxTime int64 if ooo > 0 { stats.OutOfOrderSeries++ stats.OutOfOrderChunks += ooo + level.Debug(logger).Log("msg", "found out of order series", "labels", lset) } seriesChunks.Add(int64(len(chks))) From 38ab46f056a7b93718f485b0071eb6f57db8884a Mon Sep 17 00:00:00 2001 From: Akansha Tiwari <36307100+akanshat@users.noreply.github.com> Date: Tue, 5 Oct 2021 12:41:18 +0530 Subject: [PATCH 13/33] e2e: Add memcached as cache in store-gateway test (#4730) * add memcached as cache in store-gateway e2e test Signed-off-by: akanshat * make caching config optional in newStoreGW service Signed-off-by: akanshat * add a separate test for store gateway with memcached Signed-off-by: akanshat * modify comment Signed-off-by: akanshat --- test/e2e/compact_test.go | 2 +- test/e2e/e2ethanos/services.go | 46 ++++++--- test/e2e/store_gateway_test.go | 182 ++++++++++++++++++++++++++++++--- 3 files changed, 199 insertions(+), 31 deletions(-) diff --git a/test/e2e/compact_test.go b/test/e2e/compact_test.go index cc0a61a05f7..e1e0f19364d 100644 --- a/test/e2e/compact_test.go +++ b/test/e2e/compact_test.go @@ -451,7 +451,7 @@ func testCompactWithStoreGateway(t *testing.T, penaltyDedup bool) { Insecure: true, }, } - str, err := e2ethanos.NewStoreGW(e, "1", svcConfig) + str, err := e2ethanos.NewStoreGW(e, "1", svcConfig, nil) 
testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(str)) testutil.Ok(t, str.WaitSumMetrics(e2e.Equals(float64(len(rawBlockIDs)+7)), "thanos_blocks_meta_synced")) diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index c118254339b..a768b032c5a 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -27,6 +27,7 @@ import ( "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/queryfrontend" "github.com/thanos-io/thanos/pkg/receive" + storecache "github.com/thanos-io/thanos/pkg/store/cache" ) const ( @@ -587,7 +588,7 @@ receivers: return s, nil } -func NewStoreGW(e e2e.Environment, name string, bucketConfig client.BucketConfig, relabelConfig ...relabel.Config) (*e2e.InstrumentedRunnable, error) { +func NewStoreGW(e e2e.Environment, name string, bucketConfig client.BucketConfig, cacheConfig *storecache.CachingWithBackendConfig, relabelConfig ...relabel.Config) (*e2e.InstrumentedRunnable, error) { dir := filepath.Join(e.SharedDir(), "data", "store", name) container := filepath.Join(ContainerSharedDir, "data", "store", name) if err := os.MkdirAll(dir, 0750); err != nil { @@ -603,26 +604,39 @@ func NewStoreGW(e e2e.Environment, name string, bucketConfig client.BucketConfig if err != nil { return nil, errors.Wrapf(err, "generate store relabel file: %v", relabelConfig) } + cacheConfigBytes := []byte{} + if cacheConfig != nil { + cacheConfigBytes, err = yaml.Marshal(*cacheConfig) + if err != nil { + return nil, errors.Wrapf(err, "generate cache config file: %v", *cacheConfig) + } + } + + args := e2e.BuildArgs(map[string]string{ + "--debug.name": fmt.Sprintf("store-gw-%v", name), + "--grpc-address": ":9091", + "--grpc-grace-period": "0s", + "--http-address": ":8080", + "--log.level": infoLogLevel, + "--data-dir": container, + "--objstore.config": string(bktConfigBytes), + // Accelerated sync time for quicker test (3m by default). + "--sync-block-duration": "3s", + "--block-sync-concurrency": "1", + "--store.grpc.series-max-concurrency": "1", + "--selector.relabel-config": string(relabelConfigBytes), + "--consistency-delay": "30m", + }) + + if len(cacheConfigBytes) != 0 { + args = append(args, "--store.caching-bucket.config", string(cacheConfigBytes)) + } store := NewService( e, fmt.Sprintf("store-gw-%v", name), DefaultImage(), - e2e.NewCommand("store", e2e.BuildArgs(map[string]string{ - "--debug.name": fmt.Sprintf("store-gw-%v", name), - "--grpc-address": ":9091", - "--grpc-grace-period": "0s", - "--http-address": ":8080", - "--log.level": infoLogLevel, - "--data-dir": container, - "--objstore.config": string(bktConfigBytes), - // Accelerated sync time for quicker test (3m by default). 
- "--sync-block-duration": "3s", - "--block-sync-concurrency": "1", - "--store.grpc.series-max-concurrency": "1", - "--selector.relabel-config": string(relabelConfigBytes), - "--consistency-delay": "30m", - })...), + e2e.NewCommand("store", args...), e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), 8080, 9091, diff --git a/test/e2e/store_gateway_test.go b/test/e2e/store_gateway_test.go index 06b3b75f8a7..5ed67076a44 100644 --- a/test/e2e/store_gateway_test.go +++ b/test/e2e/store_gateway_test.go @@ -22,16 +22,18 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/cacheutil" "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/objstore/s3" "github.com/thanos-io/thanos/pkg/promclient" + storecache "github.com/thanos-io/thanos/pkg/store/cache" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" "github.com/thanos-io/thanos/test/e2e/e2ethanos" ) -// TODO(bwplotka): Extend this test to have multiple stores and memcached. +// TODO(bwplotka): Extend this test to have multiple stores. // TODO(bwplotka): Extend this test for downsampling. func TestStoreGateway(t *testing.T) { t.Parallel() @@ -44,20 +46,44 @@ func TestStoreGateway(t *testing.T) { m := e2ethanos.NewMinio(e, "thanos-minio", bucket) testutil.Ok(t, e2e.StartAndWaitReady(m)) - s1, err := e2ethanos.NewStoreGW(e, "1", client.BucketConfig{ - Type: client.S3, - Config: s3.Config{ - Bucket: bucket, - AccessKey: e2edb.MinioAccessKey, - SecretKey: e2edb.MinioSecretKey, - Endpoint: m.InternalEndpoint("http"), - Insecure: true, + memcached := e2ethanos.NewMemcached(e, "1") + testutil.Ok(t, e2e.StartAndWaitReady(memcached)) + + memcachedConfig := storecache.CachingWithBackendConfig{ + Type: storecache.MemcachedBucketCacheProvider, + BackendConfig: cacheutil.MemcachedClientConfig{ + Addresses: []string{memcached.InternalEndpoint("memcached")}, + MaxIdleConnections: 100, + MaxAsyncConcurrency: 20, + MaxGetMultiConcurrency: 100, + MaxGetMultiBatchSize: 0, + Timeout: time.Minute, + MaxAsyncBufferSize: 10000, + DNSProviderUpdateInterval: 10 * time.Second, }, - }, relabel.Config{ - Action: relabel.Drop, - Regex: relabel.MustNewRegexp("value2"), - SourceLabels: model.LabelNames{"ext1"}, - }) + ChunkSubrangeSize: 16000, + } + + s1, err := e2ethanos.NewStoreGW( + e, + "1", + client.BucketConfig{ + Type: client.S3, + Config: s3.Config{ + Bucket: bucket, + AccessKey: e2edb.MinioAccessKey, + SecretKey: e2edb.MinioSecretKey, + Endpoint: m.InternalEndpoint("http"), + Insecure: true, + }, + }, + &memcachedConfig, + relabel.Config{ + Action: relabel.Drop, + Regex: relabel.MustNewRegexp("value2"), + SourceLabels: model.LabelNames{"ext1"}, + }, + ) testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(s1)) // Ensure bucket UI. @@ -248,3 +274,131 @@ func TestStoreGateway(t *testing.T) { // TODO(khyati) Let's add some case for compaction-meta.json once the PR will be merged: https://github.com/thanos-io/thanos/pull/2136. 
} + +func TestStoreGatewayMemcachedCache(t *testing.T) { + t.Parallel() + + e, err := e2e.NewDockerEnvironment("e2e_test_store_gateway_memcached_cache") + testutil.Ok(t, err) + t.Cleanup(e2ethanos.CleanScenario(t, e)) + + const bucket = "store_gateway_memcached_cache_test" + m := e2ethanos.NewMinio(e, "thanos-minio", bucket) + testutil.Ok(t, e2e.StartAndWaitReady(m)) + + memcached := e2ethanos.NewMemcached(e, "1") + testutil.Ok(t, e2e.StartAndWaitReady(memcached)) + + memcachedConfig := storecache.CachingWithBackendConfig{ + Type: storecache.MemcachedBucketCacheProvider, + BackendConfig: cacheutil.MemcachedClientConfig{ + Addresses: []string{memcached.InternalEndpoint("memcached")}, + MaxIdleConnections: 100, + MaxAsyncConcurrency: 20, + MaxGetMultiConcurrency: 100, + MaxGetMultiBatchSize: 0, + Timeout: time.Minute, + MaxAsyncBufferSize: 10000, + DNSProviderUpdateInterval: 10 * time.Second, + }, + ChunkSubrangeSize: 16000, + } + + s1, err := e2ethanos.NewStoreGW( + e, + "1", + client.BucketConfig{ + Type: client.S3, + Config: s3.Config{ + Bucket: bucket, + AccessKey: e2edb.MinioAccessKey, + SecretKey: e2edb.MinioSecretKey, + Endpoint: m.InternalEndpoint("http"), + Insecure: true, + }, + }, + &memcachedConfig, + ) + testutil.Ok(t, err) + testutil.Ok(t, e2e.StartAndWaitReady(s1)) + + // We need Prometheus to monitor the metrics exposed by Thanos Store. + prom, sidecar, err := e2ethanos.NewPrometheusWithSidecar(e, "1", defaultPromConfig("test", 0, "", "", s1.InternalEndpoint("http")), e2ethanos.DefaultPrometheusImage()) + testutil.Ok(t, err) + testutil.Ok(t, e2e.StartAndWaitReady(prom, sidecar)) + + q, err := e2ethanos.NewQuerierBuilder(e, "1", s1.InternalEndpoint("grpc"), sidecar.InternalEndpoint("grpc")).Build() + testutil.Ok(t, err) + testutil.Ok(t, e2e.StartAndWaitReady(q)) + + dir := filepath.Join(e.SharedDir(), "tmp") + testutil.Ok(t, os.MkdirAll(dir, os.ModePerm)) + + series := []labels.Labels{labels.FromStrings("a", "1", "b", "2")} + extLset := labels.FromStrings("ext1", "value1", "replica", "1") + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + t.Cleanup(cancel) + + now := time.Now() + id, err := e2eutil.CreateBlockWithBlockDelay(ctx, dir, series, 10, timestamp.FromTime(now), timestamp.FromTime(now.Add(2*time.Hour)), 30*time.Minute, extLset, 0, metadata.NoneFunc) + testutil.Ok(t, err) + + l := log.NewLogfmtLogger(os.Stdout) + bkt, err := s3.NewBucketWithConfig(l, s3.Config{ + Bucket: bucket, + AccessKey: e2edb.MinioAccessKey, + SecretKey: e2edb.MinioSecretKey, + Endpoint: m.Endpoint("http"), // We need separate client config, when connecting to minio from outside. + Insecure: true, + }, "test-feed") + testutil.Ok(t, err) + + testutil.Ok(t, objstore.UploadDir(ctx, l, bkt, path.Join(dir, id.String()), id.String())) + + // Wait for store to sync blocks. + // thanos_blocks_meta_synced: 1x loadedMeta 0x labelExcludedMeta 0x TooFreshMeta. 
+ testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(1), "thanos_blocks_meta_synced")) + testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(0), "thanos_blocks_meta_sync_failures_total")) + + testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(1), "thanos_bucket_store_blocks_loaded")) + testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(0), "thanos_bucket_store_block_drops_total")) + testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(0), "thanos_bucket_store_block_load_failures_total")) + + t.Run("query with cache miss", func(t *testing.T) { + queryAndAssertSeries(t, ctx, q.Endpoint("http"), "{a=\"1\"}", + promclient.QueryOptions{ + Deduplicate: false, + }, + []model.Metric{ + { + "a": "1", + "b": "2", + "ext1": "value1", + "replica": "1", + }, + }, + ) + + testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(0), "thanos_store_bucket_cache_operation_hits_total")) + }) + + t.Run("query with cache hit", func(t *testing.T) { + queryAndAssertSeries(t, ctx, q.Endpoint("http"), "{a=\"1\"}", + promclient.QueryOptions{ + Deduplicate: false, + }, + []model.Metric{ + { + "a": "1", + "b": "2", + "ext1": "value1", + "replica": "1", + }, + }, + ) + + testutil.Ok(t, s1.WaitSumMetrics(e2e.Greater(0), "thanos_store_bucket_cache_operation_hits_total")) + }) + +} From 039818ef3471a79255834a47730dade54ccccc29 Mon Sep 17 00:00:00 2001 From: Matej Gera <38492574+matej-g@users.noreply.github.com> Date: Wed, 6 Oct 2021 10:38:29 +0200 Subject: [PATCH 14/33] Query frontend: Add benchmarks for labels codec and query range codec (#4723) * Add benchmarks for labels codec Signed-off-by: Matej Gera * Add benchmarks for queryrange codec Signed-off-by: Matej Gera * Address comment - seriesResp capacity Signed-off-by: Matej Gera --- pkg/queryfrontend/labels_codec_test.go | 210 +++++++++++++++++++++ pkg/queryfrontend/queryrange_codec_test.go | 23 +++ 2 files changed, 233 insertions(+) diff --git a/pkg/queryfrontend/labels_codec_test.go b/pkg/queryfrontend/labels_codec_test.go index 441853d5990..061852ea307 100644 --- a/pkg/queryfrontend/labels_codec_test.go +++ b/pkg/queryfrontend/labels_codec_test.go @@ -7,6 +7,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io/ioutil" "net/http" "testing" @@ -521,3 +522,212 @@ func TestLabelsCodec_MergeResponse(t *testing.T) { }) } } + +func BenchmarkLabelsCodecEncodeAndDecodeRequest(b *testing.B) { + codec := NewThanosLabelsCodec(false, time.Hour*2) + ctx := context.TODO() + + b.Run("SeriesRequest", func(b *testing.B) { + req := &ThanosSeriesRequest{ + Start: 123000, + End: 456000, + Path: "/api/v1/series", + Dedup: true, + } + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + reqEnc, err := codec.EncodeRequest(ctx, req) + testutil.Ok(b, err) + _, err = codec.DecodeRequest(ctx, reqEnc) + testutil.Ok(b, err) + } + }) + + b.Run("LabelsRequest", func(b *testing.B) { + req := &ThanosLabelsRequest{ + Path: "/api/v1/labels", + Start: 123000, + End: 456000, + PartialResponse: true, + Matchers: [][]*labels.Matcher{{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")}}, + StoreMatchers: [][]*labels.Matcher{}, + } + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + reqEnc, err := codec.EncodeRequest(ctx, req) + testutil.Ok(b, err) + _, err = codec.DecodeRequest(ctx, reqEnc) + testutil.Ok(b, err) + } + }) +} + +func BenchmarkLabelsCodecDecodeResponse(b *testing.B) { + codec := NewThanosLabelsCodec(false, time.Hour*2) + ctx := context.TODO() + + b.Run("SeriesResponse", func(b *testing.B) { + seriesData, err := json.Marshal(&ThanosSeriesResponse{ + Status: "success", + Data: 
[]labelpb.ZLabelSet{{Labels: []labelpb.ZLabel{{Name: "foo", Value: "bar"}}}}, + }) + testutil.Ok(b, err) + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + _, err := codec.DecodeResponse( + ctx, + makeResponse(seriesData, false), + &ThanosSeriesRequest{}) + testutil.Ok(b, err) + } + }) + + b.Run("SeriesResponseWithHeaders", func(b *testing.B) { + seriesDataWithHeaders, err := json.Marshal(&ThanosSeriesResponse{ + Status: "success", + Data: []labelpb.ZLabelSet{{Labels: []labelpb.ZLabel{{Name: "foo", Value: "bar"}}}}, + Headers: []*ResponseHeader{{Name: cacheControlHeader, Values: []string{noStoreValue}}}, + }) + testutil.Ok(b, err) + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + _, err := codec.DecodeResponse( + ctx, + makeResponse(seriesDataWithHeaders, true), + &ThanosSeriesRequest{}) + testutil.Ok(b, err) + } + }) + + b.Run("LabelsResponse", func(b *testing.B) { + labelsData, err := json.Marshal(&ThanosLabelsResponse{ + Status: "success", + Data: []string{"__name__"}, + }) + testutil.Ok(b, err) + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + _, err := codec.DecodeResponse( + ctx, + makeResponse(labelsData, false), + &ThanosLabelsRequest{}) + testutil.Ok(b, err) + } + }) + + b.Run("LabelsResponseWithHeaders", func(b *testing.B) { + labelsDataWithHeaders, err := json.Marshal(&ThanosLabelsResponse{ + Status: "success", + Data: []string{"__name__"}, + Headers: []*ResponseHeader{{Name: cacheControlHeader, Values: []string{noStoreValue}}}, + }) + testutil.Ok(b, err) + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + _, err := codec.DecodeResponse( + ctx, + makeResponse(labelsDataWithHeaders, true), + &ThanosLabelsRequest{}) + testutil.Ok(b, err) + } + }) +} + +func BenchmarkLabelsCodecMergeResponses_1(b *testing.B) { + benchmarkMergeResponses(b, 1) +} + +func BenchmarkLabelsCodecMergeResponses_10(b *testing.B) { + benchmarkMergeResponses(b, 10) +} + +func BenchmarkLabelsCodecMergeResponses_100(b *testing.B) { + benchmarkMergeResponses(b, 100) +} + +func BenchmarkLabelsCodecMergeResponses_1000(b *testing.B) { + benchmarkMergeResponses(b, 1000) +} + +func benchmarkMergeResponses(b *testing.B, size int) { + codec := NewThanosLabelsCodec(false, time.Hour*2) + queryResLabel, queryResSeries := makeQueryRangeResponses(size) + + b.Run("SeriesResponses", func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, _ = codec.MergeResponse(queryResSeries...) + } + }) + + b.Run("LabelsResponses", func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, _ = codec.MergeResponse(queryResLabel...) + } + }) + +} + +func makeQueryRangeResponses(size int) ([]queryrange.Response, []queryrange.Response) { + labelResp := make([]queryrange.Response, 0, size) + seriesResp := make([]queryrange.Response, 0, size*2) + + // Generate with some duplicated values. 
+ for i := 0; i < size; i++ { + labelResp = append(labelResp, &ThanosLabelsResponse{ + Status: "success", + Data: []string{fmt.Sprintf("data-%d", i), fmt.Sprintf("data-%d", i+1)}, + }) + + seriesResp = append( + seriesResp, + &ThanosSeriesResponse{ + Status: "success", + Data: []labelpb.ZLabelSet{{Labels: []labelpb.ZLabel{{Name: fmt.Sprintf("foo-%d", i), Value: fmt.Sprintf("bar-%d", i)}}}}, + }, + &ThanosSeriesResponse{ + Status: "success", + Data: []labelpb.ZLabelSet{{Labels: []labelpb.ZLabel{{Name: fmt.Sprintf("foo-%d", i+1), Value: fmt.Sprintf("bar-%d", i+1)}}}}, + }, + ) + } + + return labelResp, seriesResp +} + +func makeResponse(data []byte, withHeader bool) *http.Response { + r := &http.Response{ + StatusCode: 200, Body: ioutil.NopCloser(bytes.NewBuffer(data)), + } + + if withHeader { + r.Header = map[string][]string{ + cacheControlHeader: {noStoreValue}, + } + } + + return r +} diff --git a/pkg/queryfrontend/queryrange_codec_test.go b/pkg/queryfrontend/queryrange_codec_test.go index 0f033d402f3..7dc2f46b97b 100644 --- a/pkg/queryfrontend/queryrange_codec_test.go +++ b/pkg/queryfrontend/queryrange_codec_test.go @@ -282,3 +282,26 @@ func TestQueryRangeCodec_EncodeRequest(t *testing.T) { }) } } + +func BenchmarkQueryRangeCodecEncodeAndDecodeRequest(b *testing.B) { + codec := NewThanosQueryRangeCodec(true) + ctx := context.TODO() + + req := &ThanosQueryRangeRequest{ + Start: 123000, + End: 456000, + Step: 1000, + MaxSourceResolution: int64(compact.ResolutionLevel1h), + Dedup: true, + } + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + reqEnc, err := codec.EncodeRequest(ctx, req) + testutil.Ok(b, err) + _, err = codec.DecodeRequest(ctx, reqEnc) + testutil.Ok(b, err) + } +} From c4da708e6efd29258d1530e102fd2c2aec81d76d Mon Sep 17 00:00:00 2001 From: Akansha Tiwari <36307100+akanshat@users.noreply.github.com> Date: Wed, 6 Oct 2021 14:20:16 +0530 Subject: [PATCH 15/33] e2e: Fix flaky memcached test (#4740) Signed-off-by: akanshat --- test/e2e/compact_test.go | 2 +- test/e2e/e2ethanos/services.go | 14 ++------ test/e2e/store_gateway_test.go | 59 +++++++++++----------------------- 3 files changed, 23 insertions(+), 52 deletions(-) diff --git a/test/e2e/compact_test.go b/test/e2e/compact_test.go index e1e0f19364d..46400797dd1 100644 --- a/test/e2e/compact_test.go +++ b/test/e2e/compact_test.go @@ -451,7 +451,7 @@ func testCompactWithStoreGateway(t *testing.T, penaltyDedup bool) { Insecure: true, }, } - str, err := e2ethanos.NewStoreGW(e, "1", svcConfig, nil) + str, err := e2ethanos.NewStoreGW(e, "1", svcConfig, "") testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(str)) testutil.Ok(t, str.WaitSumMetrics(e2e.Equals(float64(len(rawBlockIDs)+7)), "thanos_blocks_meta_synced")) diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index a768b032c5a..b0d9b1b3ad0 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -27,7 +27,6 @@ import ( "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/queryfrontend" "github.com/thanos-io/thanos/pkg/receive" - storecache "github.com/thanos-io/thanos/pkg/store/cache" ) const ( @@ -588,7 +587,7 @@ receivers: return s, nil } -func NewStoreGW(e e2e.Environment, name string, bucketConfig client.BucketConfig, cacheConfig *storecache.CachingWithBackendConfig, relabelConfig ...relabel.Config) (*e2e.InstrumentedRunnable, error) { +func NewStoreGW(e e2e.Environment, name string, bucketConfig client.BucketConfig, cacheConfig string, relabelConfig 
...relabel.Config) (*e2e.InstrumentedRunnable, error) { dir := filepath.Join(e.SharedDir(), "data", "store", name) container := filepath.Join(ContainerSharedDir, "data", "store", name) if err := os.MkdirAll(dir, 0750); err != nil { @@ -604,13 +603,6 @@ func NewStoreGW(e e2e.Environment, name string, bucketConfig client.BucketConfig if err != nil { return nil, errors.Wrapf(err, "generate store relabel file: %v", relabelConfig) } - cacheConfigBytes := []byte{} - if cacheConfig != nil { - cacheConfigBytes, err = yaml.Marshal(*cacheConfig) - if err != nil { - return nil, errors.Wrapf(err, "generate cache config file: %v", *cacheConfig) - } - } args := e2e.BuildArgs(map[string]string{ "--debug.name": fmt.Sprintf("store-gw-%v", name), @@ -628,8 +620,8 @@ func NewStoreGW(e e2e.Environment, name string, bucketConfig client.BucketConfig "--consistency-delay": "30m", }) - if len(cacheConfigBytes) != 0 { - args = append(args, "--store.caching-bucket.config", string(cacheConfigBytes)) + if cacheConfig != "" { + args = append(args, "--store.caching-bucket.config", cacheConfig) } store := NewService( diff --git a/test/e2e/store_gateway_test.go b/test/e2e/store_gateway_test.go index 5ed67076a44..e7a2079058f 100644 --- a/test/e2e/store_gateway_test.go +++ b/test/e2e/store_gateway_test.go @@ -5,6 +5,7 @@ package e2e_test import ( "context" + "fmt" "net/http" "os" "path" @@ -14,6 +15,7 @@ import ( "github.com/efficientgo/e2e" e2edb "github.com/efficientgo/e2e/db" + "github.com/efficientgo/e2e/matchers" "github.com/go-kit/kit/log" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" @@ -22,12 +24,10 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/cacheutil" "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/objstore/s3" "github.com/thanos-io/thanos/pkg/promclient" - storecache "github.com/thanos-io/thanos/pkg/store/cache" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" "github.com/thanos-io/thanos/test/e2e/e2ethanos" @@ -49,20 +49,13 @@ func TestStoreGateway(t *testing.T) { memcached := e2ethanos.NewMemcached(e, "1") testutil.Ok(t, e2e.StartAndWaitReady(memcached)) - memcachedConfig := storecache.CachingWithBackendConfig{ - Type: storecache.MemcachedBucketCacheProvider, - BackendConfig: cacheutil.MemcachedClientConfig{ - Addresses: []string{memcached.InternalEndpoint("memcached")}, - MaxIdleConnections: 100, - MaxAsyncConcurrency: 20, - MaxGetMultiConcurrency: 100, - MaxGetMultiBatchSize: 0, - Timeout: time.Minute, - MaxAsyncBufferSize: 10000, - DNSProviderUpdateInterval: 10 * time.Second, - }, - ChunkSubrangeSize: 16000, - } + memcachedConfig := fmt.Sprintf(`type: MEMCACHED +config: + addresses: [%s] +blocks_iter_ttl: 0s +metafile_exists_ttl: 0s +metafile_doesnt_exist_ttl: 0s +metafile_content_ttl: 0s`, memcached.InternalEndpoint("memcached")) s1, err := e2ethanos.NewStoreGW( e, @@ -77,7 +70,7 @@ func TestStoreGateway(t *testing.T) { Insecure: true, }, }, - &memcachedConfig, + memcachedConfig, relabel.Config{ Action: relabel.Drop, Regex: relabel.MustNewRegexp("value2"), @@ -289,20 +282,10 @@ func TestStoreGatewayMemcachedCache(t *testing.T) { memcached := e2ethanos.NewMemcached(e, "1") testutil.Ok(t, e2e.StartAndWaitReady(memcached)) - memcachedConfig := storecache.CachingWithBackendConfig{ - Type: storecache.MemcachedBucketCacheProvider, - BackendConfig: 
cacheutil.MemcachedClientConfig{ - Addresses: []string{memcached.InternalEndpoint("memcached")}, - MaxIdleConnections: 100, - MaxAsyncConcurrency: 20, - MaxGetMultiConcurrency: 100, - MaxGetMultiBatchSize: 0, - Timeout: time.Minute, - MaxAsyncBufferSize: 10000, - DNSProviderUpdateInterval: 10 * time.Second, - }, - ChunkSubrangeSize: 16000, - } + memcachedConfig := fmt.Sprintf(`type: MEMCACHED config: addresses: [%s] blocks_iter_ttl: 0s`, memcached.InternalEndpoint("memcached")) s1, err := e2ethanos.NewStoreGW( e, "1", @@ -317,17 +300,12 @@ func TestStoreGatewayMemcachedCache(t *testing.T) { Insecure: true, }, }, - &memcachedConfig, + memcachedConfig, ) testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(s1)) - // We need Prometheus to monitor the metrics exposed by Thanos Store. - prom, sidecar, err := e2ethanos.NewPrometheusWithSidecar(e, "1", defaultPromConfig("test", 0, "", "", s1.InternalEndpoint("http")), e2ethanos.DefaultPrometheusImage()) - testutil.Ok(t, err) - testutil.Ok(t, e2e.StartAndWaitReady(prom, sidecar)) - - q, err := e2ethanos.NewQuerierBuilder(e, "1", s1.InternalEndpoint("grpc"), sidecar.InternalEndpoint("grpc")).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", s1.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(q)) @@ -380,7 +358,7 @@ func TestStoreGatewayMemcachedCache(t *testing.T) { }, ) - testutil.Ok(t, s1.WaitSumMetrics(e2e.Equals(0), "thanos_store_bucket_cache_operation_hits_total")) + testutil.Ok(t, s1.WaitSumMetricsWithOptions(e2e.Equals(0), []string{`thanos_store_bucket_cache_operation_hits_total`}, e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "config", "chunks")))) }) t.Run("query with cache hit", func(t *testing.T) { @@ -398,7 +376,8 @@ func TestStoreGatewayMemcachedCache(t *testing.T) { }, ) - testutil.Ok(t, s1.WaitSumMetrics(e2e.Greater(0), "thanos_store_bucket_cache_operation_hits_total")) + testutil.Ok(t, s1.WaitSumMetricsWithOptions(e2e.Greater(0), []string{`thanos_store_bucket_cache_operation_hits_total`}, e2e.WithLabelMatchers(matchers.MustNewMatcher(matchers.MatchEqual, "config", "chunks")))) + testutil.Ok(t, s1.WaitSumMetrics(e2e.Greater(0), "thanos_cache_memcached_hits_total")) }) } From ff801b18ba146510b8d521b829b5196e84c0c85c Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Wed, 6 Oct 2021 11:38:34 +0200 Subject: [PATCH 16/33] Pull release-0.23 into main (#4738) * Cut release 0.23.0-rc.0 (#4625) Signed-off-by: Bartlomiej Plotka * Updated version. Signed-off-by: Bartlomiej Plotka * Cut 0.23.0-rc.1 and cherry picked 3 critical commits from main. (#4684) * Sidecar: Fix process external label when Prometheus v2.28+ uses units.Bytes config type (#4657) * Sidecar: Fix process external label when Prometheus v2.28+ uses units.Bytes config type (#4656) Signed-off-by: hanjm * E2E: Upgrade Prometheus image version Signed-off-by: hanjm * upgrade Prometheus dependency version to v2.30.0 (#4669) * upgrade Prometheus dependency version to v2.30.0 Signed-off-by: Ben Ye * fix unit test Signed-off-by: Ben Ye # Conflicts: # go.mod # go.sum * Query: Fix (*exemplarsStream).receive/(*metricMetadataStream).receive/(*targetsStreamStream).receive infinite loop when target responds with Unimplemented error (#4676) (#4681) Signed-off-by: hanjm * Cut 0.23.0-rc.1 Signed-off-by: Bartlomiej Plotka Co-authored-by: Jimmiehan Co-authored-by: Ben Ye * Cut 0.23.0 release. (#4697)
* Endpointset: Do not use info client to obtain metadata (for now) (#4714)
* Do not use info client to obtain metadata
Signed-off-by: Matej Gera
* Update CHANGELOG.
Signed-off-by: Matej Gera
* Comment out client.info usage
Signed-off-by: Matej Gera
* Fix lint error
Signed-off-by: Matej Gera
* Cutting 0.23.1 (#4718)
Signed-off-by: Bartlomiej Plotka
* Moved tutorials Thanos versions to 0.23.1
Signed-off-by: Bartlomiej Plotka
* Added volunteer for shepherding, fixed VERSION.
Signed-off-by: Bartlomiej Plotka
Co-authored-by: Jimmiehan
Co-authored-by: Ben Ye
Co-authored-by: Matej Gera <38492574+matej-g@users.noreply.github.com>
---
 CHANGELOG.md | 40 ++++++-----
 docs/release-process.md | 7 +-
 pkg/query/endpointset.go | 39 ++++++++---
 pkg/query/endpointset_test.go | 70 +++++++++++++++++--
 .../thanos/1-globalview/courseBase.sh | 2 +-
 .../katacoda/thanos/1-globalview/step2.md | 8 +--
 .../katacoda/thanos/1-globalview/step3.md | 2 +-
 tutorials/katacoda/thanos/2-lts/courseBase.sh | 2 +-
 tutorials/katacoda/thanos/2-lts/step1.md | 4 +-
 tutorials/katacoda/thanos/2-lts/step2.md | 2 +-
 tutorials/katacoda/thanos/2-lts/step3.md | 6 +-
 tutorials/katacoda/thanos/2-lts/step4.md | 2 +-
 .../thanos/6-query-caching/courseBase.sh | 2 +-
 .../katacoda/thanos/6-query-caching/step1.md | 4 +-
 .../katacoda/thanos/6-query-caching/step2.md | 2 +-
 .../thanos/7-multi-tenancy/courseBase.sh | 2 +-
 .../katacoda/thanos/7-multi-tenancy/step1.md | 10 +--
 .../katacoda/thanos/7-multi-tenancy/step2.md | 2 +-
 .../thanos/x-playground/courseBase.sh | 2 +-
 .../katacoda/thanos/x-playground/step1.md | 10 +--
 .../katacoda/thanos/x-playground/step2.md | 12 ++--
 21 files changed, 159 insertions(+), 71 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 29fbfe848f0..d8d67abc400 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,34 +24,40 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
 - [#4508](https://github.com/thanos-io/thanos/pull/4508) Adjust and rename `ThanosSidecarUnhealthy` to `ThanosSidecarNoConnectionToStartedPrometheus`; Remove `ThanosSidecarPrometheusDown` alert; Remove unused `thanos_sidecar_last_heartbeat_success_time_seconds` metrics.

-## v0.23.0 - In Progress
+## [v0.23.1](https://github.com/thanos-io/thanos/tree/release-0.23) - 2021.10.1
+
+- [#4714](https://github.com/thanos-io/thanos/pull/4714) EndpointSet: Do not use the not-yet-implemented new InfoAPI to obtain metadata (avoids unnecessary HTTP roundtrips, instrumentation/alert spam and logs).
+
+## [v0.23.0](https://github.com/thanos-io/thanos/tree/release-0.23) - 2021.09.23

 ### Added

-- [#4453](https://github.com/thanos-io/thanos/pull/4453) Tools: Add flag `--selector.relabel-config-file` / `--selector.relabel-config` / `--max-time` / `--min-time` to filter served blocks.
-- [#4482](https://github.com/thanos-io/thanos/pull/4482) COS: Add http_config for cos object store client.
-- [#4487](https://github.com/thanos-io/thanos/pull/4487) Query: Add memcached auto discovery support.
-- [#4444](https://github.com/thanos-io/thanos/pull/4444) UI: Add search block UI.
-- [#4509](https://github.com/thanos-io/thanos/pull/4509) Logging: Adds duration_ms in int64 to the logs.
-- [#4462](https://github.com/thanos-io/thanos/pull/4462) UI: Add find overlap block UI.
-- [#4469](https://github.com/thanos-io/thanos/pull/4469) Compact: Add flag `compact.skip-block-with-out-of-order-chunks` to skip blocks with out-of-order chunks during compaction instead of halting
-- [#4506](https://github.com/thanos-io/thanos/pull/4506) `Baidu BOS` object storage, see [documents](docs/storage.md#baidu-bos) for further information.
-- [#4552](https://github.com/thanos-io/thanos/pull/4552) Compact: Adds `thanos_compact_downsample_duration_seconds` histogram.
-- [#4594](https://github.com/thanos-io/thanos/pull/4594) reloader: Expose metrics in config reloader to give info on the last operation.
-- [#4623](https://github.com/thanos-io/thanos/pull/4623) query-frontend: made HTTP downstream tripper (client) configurable via parameters `--query-range.downstream-tripper-config` and `--query-range.downstream-tripper-config-file`. If your downstream URL is localhost or 127.0.0.1 then it is strongly recommended to bump `max_idle_conns_per_host` to at least 100 so that `query-frontend` could properly use HTTP keep-alive connections and thus reduce the latency of `query-frontend` by about 20%.
-- [#4636](https://github.com/thanos-io/thanos/pull/4636) Azure: Support authentication using user-assigned managed identity
+- [#4453](https://github.com/thanos-io/thanos/pull/4453) Tools `thanos bucket web`: Add flag `--selector.relabel-config-file` / `--selector.relabel-config` / `--max-time` / `--min-time` to filter served blocks.
+- [#4482](https://github.com/thanos-io/thanos/pull/4482) Store: Add `http_config` option for COS object store client.
+- [#4487](https://github.com/thanos-io/thanos/pull/4487) Query/Store: Add memcached auto discovery support for all caching clients.
+- [#4444](https://github.com/thanos-io/thanos/pull/4444) UI: Add search to the Block UI.
+- [#4509](https://github.com/thanos-io/thanos/pull/4509) Logging: Add `duration_ms` in int64 to the logs for easier log filtering.
+- [#4462](https://github.com/thanos-io/thanos/pull/4462) UI: Highlighting blocks overlap in the Block UI.
+- [#4469](https://github.com/thanos-io/thanos/pull/4469) Compact: Add flag `compact.skip-block-with-out-of-order-chunks` to skip blocks with out-of-order chunks during compaction instead of halting.
+- [#4506](https://github.com/thanos-io/thanos/pull/4506) Store: Add `Baidu BOS` object storage, see [documents](docs/storage.md#baidu-bos) for further information.
+- [#4552](https://github.com/thanos-io/thanos/pull/4552) Compact: Add `thanos_compact_downsample_duration_seconds` histogram metric.
+- [#4594](https://github.com/thanos-io/thanos/pull/4594) Reloader: Expose metrics in config reloader to give info on the last operation.
+- [#4619](https://github.com/thanos-io/thanos/pull/4619) Tracing: Added consistent tags to the Series call from Querier with important series statistics: `processed.series`, `processed.samples` and `processed.bytes`. This gives admins an idea of how much data each component processes per query.
+- [#4623](https://github.com/thanos-io/thanos/pull/4623) Query-frontend: Make HTTP downstream tripper (client) configurable via parameters `--query-range.downstream-tripper-config` and `--query-range.downstream-tripper-config-file`. If your downstream URL is localhost or 127.0.0.1 then it is strongly recommended to bump `max_idle_conns_per_host` to at least 100 so that `query-frontend` could properly use HTTP keep-alive connections and thus reduce the latency of `query-frontend` by about 20%.
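The `max_idle_conns_per_host` recommendation in the #4623 entry above comes down to Go's standard HTTP transport defaults. As a rough illustration (plain `net/http`, not the actual Thanos tripper code; the numbers are just the values the entry suggests):

```go
package main

import (
	"net/http"
	"time"
)

func main() {
	// Go's default is http.DefaultMaxIdleConnsPerHost == 2, so a process
	// fanning many concurrent requests out to a single downstream host
	// (e.g. localhost) keeps dialing fresh TCP connections instead of
	// reusing keep-alive ones. Raising the per-host idle pool is what the
	// max_idle_conns_per_host knob maps onto.
	transport := &http.Transport{
		MaxIdleConns:        200,
		MaxIdleConnsPerHost: 100,
		IdleConnTimeout:     90 * time.Second,
	}
	client := &http.Client{Transport: transport, Timeout: 30 * time.Second}
	_ = client // Issue downstream requests with this client.
}
```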
 ### Fixed

 - [#4468](https://github.com/thanos-io/thanos/pull/4468) Rule: Fix temporary rule filename composition issue.
-- [#4476](https://github.com/thanos-io/thanos/pull/4476) UI: fix incorrect html escape sequence used for '>' symbol.
-- [#4532](https://github.com/thanos-io/thanos/pull/4532) Mixin: Fixed "all jobs" selector in thanos mixin dashboards.
-- [#4607](https://github.com/thanos-io/thanos/pull/4607) Azure: Fix Azure MSI Rate Limit
+- [#4476](https://github.com/thanos-io/thanos/pull/4476) UI: Fix incorrect html escape sequence used for '>' symbol.
+- [#4532](https://github.com/thanos-io/thanos/pull/4532) Mixin: Fix "all jobs" selector in thanos mixin dashboards.
+- [#4607](https://github.com/thanos-io/thanos/pull/4607) Azure: Fix Azure MSI Rate Limit.

 ### Changed

-- [#4519](https://github.com/thanos-io/thanos/pull/4519) Query: switch to miekgdns DNS resolver as the default one.
 - [#4586](https://github.com/thanos-io/thanos/pull/4586) Update Prometheus/Cortex dependencies and implement LabelNames() pushdown as a result; provides massive speed-up for the labels API in Thanos Query.
+- [#4519](https://github.com/thanos-io/thanos/pull/4519) Query: Switch to miekgdns DNS resolver as the default one.
+- [#4421](https://github.com/thanos-io/thanos/pull/4421) *breaking :warning:*: `--store` (in the future, to be renamed to `--endpoints`) now supports passing any APIs from Thanos gRPC APIs: StoreAPI, MetadataAPI, RulesAPI, TargetsAPI and ExemplarsAPI (as opposed to the past, when you had to put them in the hidden `--targets`, `--rules`, etc. flags). `--store` will now automatically detect what APIs the server exposes.
+- [#4669](https://github.com/thanos-io/thanos/pull/4669) Moved Prometheus dependency to v2.30.

 ## [v0.22.0](https://github.com/thanos-io/thanos/tree/release-0.22) - 2021.07.22

diff --git a/docs/release-process.md b/docs/release-process.md
index e85556ac319..97d82f125d1 100644
--- a/docs/release-process.md
+++ b/docs/release-process.md
@@ -23,8 +23,9 @@ Release shepherd responsibilities:
 | Release | Time of first RC | Shepherd (GitHub handle) |
 |---------|----------------------|-----------------------------|
-| v0.24.0 | (planned) 2021.09.28 | No one ATM |
-| v0.23.0 | 2021.09.01 | `@bwplotka` |
+| v0.25.0 | (planned) 2021.12.09 | No one ATM |
+| v0.24.0 | (planned) 2021.10.28 | `@squat` |
+| v0.23.0 | 2021.09.02 | `@bwplotka` |
 | v0.22.0 | 2021.07.06 | `@GiedriusS` |
 | v0.21.0 | 2021.05.28 | `@metalmatze` and `@onprem` |
 | v0.20.0 | 2021.04.23 | `@kakkoyun` |
@@ -120,7 +121,7 @@ The whole release from release candidate `rc.0` to actual release should have ex
 10. Announce `#thanos` slack channel.
-11. Pull commits from release branch to main branch for non `rc` releases.
+11. Pull commits from release branch to main branch for non `rc` releases. Make sure not to modify `VERSION`; it should still point to `version+1-dev` ([TODO to automate this](https://github.com/thanos-io/thanos/issues/4741))
 12. After releasing a major version, please cut a release for `kube-thanos` as well. https://github.com/thanos-io/kube-thanos/releases Make sure all the flag changes are reflected in the manifests. Otherwise, the process is the same, except we don't have `rc` for the `kube-thanos`. We do this to make sure we have compatible manifests for each major versions.
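The endpointset.go diff below comments out the InfoAPI client and falls back to the StoreAPI `Info` call. A minimal sketch of that fallback shape, using hypothetical stand-in interfaces rather than the real `infopb`/`storepb` clients:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the real clients in pkg/info/infopb and
// pkg/store/storepb; only the shape of the calls matters here.
type infoClient interface {
	Info(ctx context.Context) (string, error)
}

type storeClient interface {
	Info(ctx context.Context) (string, error)
}

// metadata mirrors the control flow of the patched Metadata method: prefer the
// new InfoAPI when its client is wired up, otherwise fall back to the legacy
// StoreAPI Info call so older components are still discovered, and fail with a
// clear message when neither client exists.
func metadata(ctx context.Context, info infoClient, store storeClient) (string, error) {
	if info != nil {
		return info.Info(ctx)
	}
	if store != nil {
		md, err := store.Info(ctx)
		if err != nil {
			return "", fmt.Errorf("fallback fetching info: %w", err)
		}
		return md, nil
	}
	return "", errors.New("cannot obtain metadata: neither info nor store client found")
}

func main() {
	// With no clients wired up we hit the final error branch, just as the
	// patched code returns noMetadataEndpointMessage.
	_, err := metadata(context.Background(), nil, nil)
	fmt.Println(err)
}
```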
diff --git a/pkg/query/endpointset.go b/pkg/query/endpointset.go
index 13625aa8283..727299db1f1 100644
--- a/pkg/query/endpointset.go
+++ b/pkg/query/endpointset.go
@@ -31,7 +31,8 @@ import ( )
 const (
- unhealthyEndpointMessage = "removing endpoint because it's unhealthy or does not exist"
+ unhealthyEndpointMessage = "removing endpoint because it's unhealthy or does not exist"
+ noMetadataEndpointMessage = "cannot obtain metadata: neither info nor store client found"
 // Default minimum and maximum time values used by Prometheus when they are not passed as query parameter.
 MinTime = -9223309901257974
@@ -76,17 +77,27 @@ func (es *grpcEndpointSpec) Addr() string {
 // Metadata method for gRPC endpoint tries to call InfoAPI exposed by Thanos components until context timeout. If we are unable to get metadata after
 // that time, we assume that the host is unhealthy and return error.
 func (es *grpcEndpointSpec) Metadata(ctx context.Context, client *endpointClients) (*endpointMetadata, error) {
- resp, err := client.info.Info(ctx, &infopb.InfoRequest{}, grpc.WaitForReady(true))
- if err != nil {
- // Call Info method of StoreAPI, this way querier will be able to discovery old components not exposing InfoAPI.
- metadata, merr := es.getMetadataUsingStoreAPI(ctx, client.store)
- if merr != nil {
- return nil, errors.Wrapf(merr, "fallback fetching info from %s after err: %v", es.addr, err)
+ // TODO(@matej-g): Info client should not be used due to https://github.com/thanos-io/thanos/issues/4699
+ // Uncomment this after it is implemented in https://github.com/thanos-io/thanos/pull/4282.
+ // if client.info != nil {
+ // resp, err := client.info.Info(ctx, &infopb.InfoRequest{}, grpc.WaitForReady(true))
+ // if err != nil {
+ // return nil, errors.Wrapf(err, "fetching info from %s", es.addr)
+ // }
+
+ // return &endpointMetadata{resp}, nil
+ // }
+
+ // Call Info method of StoreAPI, this way querier will be able to discover old components not exposing InfoAPI.
+ if client.store != nil {
+ metadata, err := es.getMetadataUsingStoreAPI(ctx, client.store)
+ if err != nil {
+ return nil, errors.Wrapf(err, "fallback fetching info from %s", es.addr)
 }
 return metadata, nil
 }
- return &endpointMetadata{resp}, nil
+ return nil, errors.New(noMetadataEndpointMessage)
 }
 func (es *grpcEndpointSpec) getMetadataUsingStoreAPI(ctx context.Context, client storepb.StoreClient) (*endpointMetadata, error) {
@@ -493,7 +504,9 @@ func (e *EndpointSet) getActiveEndpoints(ctx context.Context, endpoints map[stri
 logger: e.logger,
 StoreClient: storepb.NewStoreClient(conn),
 clients: &endpointClients{
- info: infopb.NewInfoClient(conn),
+ // TODO(@matej-g): Info client should not be used due to https://github.com/thanos-io/thanos/issues/4699
+ // Uncomment this after it is implemented in https://github.com/thanos-io/thanos/pull/4282.
+ // info: infopb.NewInfoClient(conn),
 store: storepb.NewStoreClient(conn),
 },
 }
@@ -667,6 +680,10 @@ func (er *endpointRef) ComponentType() component.Component {
 er.mtx.RLock()
 defer er.mtx.RUnlock()
+ if er.metadata == nil {
+ return component.UnknownStoreAPI
+ }
+
 return component.FromString(er.metadata.ComponentType)
 }
@@ -785,13 +802,15 @@ func (er *endpointRef) apisPresent() []string {
 return apisPresent
 }
+// TODO(@matej-g): Info client should not be used due to https://github.com/thanos-io/thanos/issues/4699
+// Uncomment the nolint directive after https://github.com/thanos-io/thanos/pull/4282.
type endpointClients struct { store storepb.StoreClient rule rulespb.RulesClient metricMetadata metadatapb.MetadataClient exemplar exemplarspb.ExemplarsClient target targetspb.TargetsClient - info infopb.InfoClient + info infopb.InfoClient //nolint:structcheck,unused } type endpointMetadata struct { diff --git a/pkg/query/endpointset_test.go b/pkg/query/endpointset_test.go index 3e6b89f38a9..5dc7eefa450 100644 --- a/pkg/query/endpointset_test.go +++ b/pkg/query/endpointset_test.go @@ -19,6 +19,7 @@ import ( "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/store" "github.com/thanos-io/thanos/pkg/store/labelpb" + "github.com/thanos-io/thanos/pkg/store/storepb" "github.com/thanos-io/thanos/pkg/testutil" ) @@ -58,7 +59,11 @@ var ( } ruleInfo = &infopb.InfoResponse{ ComponentType: component.Rule.String(), - Rules: &infopb.RulesInfo{}, + Store: &infopb.StoreInfo{ + MinTime: math.MinInt64, + MaxTime: math.MaxInt64, + }, + Rules: &infopb.RulesInfo{}, } storeGWInfo = &infopb.InfoResponse{ ComponentType: component.Store.String(), @@ -93,6 +98,28 @@ func (c *mockedEndpoint) Info(ctx context.Context, r *infopb.InfoRequest) (*info return &c.info, nil } +type mockedStoreSrv struct { + infoDelay time.Duration + info storepb.InfoResponse +} + +func (s *mockedStoreSrv) Info(context.Context, *storepb.InfoRequest) (*storepb.InfoResponse, error) { + if s.infoDelay > 0 { + time.Sleep(s.infoDelay) + } + + return &s.info, nil +} +func (s *mockedStoreSrv) Series(*storepb.SeriesRequest, storepb.Store_SeriesServer) error { + return nil +} +func (s *mockedStoreSrv) LabelNames(context.Context, *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { + return nil, nil +} +func (s *mockedStoreSrv) LabelValues(context.Context, *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { + return nil, nil +} + type APIs struct { store bool metricMetadata bool @@ -113,6 +140,25 @@ type testEndpoints struct { exposedAPIs map[string]*APIs } +func componentTypeToStoreType(componentType string) storepb.StoreType { + switch componentType { + case component.Query.String(): + return storepb.StoreType_QUERY + case component.Rule.String(): + return storepb.StoreType_RULE + case component.Sidecar.String(): + return storepb.StoreType_SIDECAR + case component.Store.String(): + return storepb.StoreType_STORE + case component.Receive.String(): + return storepb.StoreType_RECEIVE + case component.Debug.String(): + return storepb.StoreType_DEBUG + default: + return storepb.StoreType_STORE + } +} + func startTestEndpoints(testEndpointMeta []testEndpointMeta) (*testEndpoints, error) { e := &testEndpoints{ srvs: map[string]*grpc.Server{}, @@ -130,6 +176,19 @@ func startTestEndpoints(testEndpointMeta []testEndpointMeta) (*testEndpoints, er srv := grpc.NewServer() addr := listener.Addr().String() + storeSrv := &mockedStoreSrv{ + info: storepb.InfoResponse{ + LabelSets: meta.extlsetFn(listener.Addr().String()), + StoreType: componentTypeToStoreType(meta.ComponentType), + }, + infoDelay: meta.infoDelay, + } + + if meta.Store != nil { + storeSrv.info.MinTime = meta.Store.MinTime + storeSrv.info.MaxTime = meta.Store.MaxTime + } + endpointSrv := &mockedEndpoint{ info: infopb.InfoResponse{ LabelSets: meta.extlsetFn(listener.Addr().String()), @@ -143,6 +202,7 @@ func startTestEndpoints(testEndpointMeta []testEndpointMeta) (*testEndpoints, er infoDelay: meta.infoDelay, } infopb.RegisterInfoServer(srv, endpointSrv) + storepb.RegisterStoreServer(srv, storeSrv) go func() { _ = srv.Serve(listener) }() @@ 
-859,7 +919,7 @@ func TestEndpointSet_APIs_Discovery(t *testing.T) { } return endpointSpec }, - expectedStores: 4, // sidecar + querier + receiver + storeGW + expectedStores: 5, // sidecar + querier + receiver + storeGW + ruler expectedRules: 3, // sidecar + querier + ruler expectedTarget: 2, // sidecar + querier expectedMetricMetadata: 2, // sidecar + querier @@ -895,7 +955,7 @@ func TestEndpointSet_APIs_Discovery(t *testing.T) { NewGRPCEndpointSpec(endpoints.orderAddrs[1], false), } }, - expectedStores: 1, // sidecar + expectedStores: 2, // sidecar + ruler expectedRules: 2, // sidecar + ruler expectedTarget: 1, // sidecar expectedMetricMetadata: 1, // sidecar @@ -908,7 +968,8 @@ func TestEndpointSet_APIs_Discovery(t *testing.T) { NewGRPCEndpointSpec(endpoints.orderAddrs[1], false), } }, - expectedRules: 1, // ruler + expectedStores: 1, // ruler + expectedRules: 1, // ruler }, }, }, @@ -1106,6 +1167,7 @@ func exposedAPIs(c string) *APIs { } case component.Rule.String(): return &APIs{ + store: true, rules: true, } case component.Store.String(): diff --git a/tutorials/katacoda/thanos/1-globalview/courseBase.sh b/tutorials/katacoda/thanos/1-globalview/courseBase.sh index 7e631bbd93c..3432b3c8ec5 100644 --- a/tutorials/katacoda/thanos/1-globalview/courseBase.sh +++ b/tutorials/katacoda/thanos/1-globalview/courseBase.sh @@ -1,4 +1,4 @@ #!/usr/bin/env bash docker pull quay.io/prometheus/prometheus:v2.16.0 -docker pull quay.io/thanos/thanos:v0.22.0 +docker pull quay.io/thanos/thanos:v0.23.1 diff --git a/tutorials/katacoda/thanos/1-globalview/step2.md b/tutorials/katacoda/thanos/1-globalview/step2.md index d1e97ab2c50..ae346081711 100644 --- a/tutorials/katacoda/thanos/1-globalview/step2.md +++ b/tutorials/katacoda/thanos/1-globalview/step2.md @@ -10,7 +10,7 @@ component and can be invoked in a single command. Let's take a look at all the Thanos commands: ``` -docker run --rm quay.io/thanos/thanos:v0.22.0 --help +docker run --rm quay.io/thanos/thanos:v0.23.1 --help ```{{execute}} You should see multiple commands that solves different purposes. @@ -53,7 +53,7 @@ docker run -d --net=host --rm \ -v $(pwd)/prometheus0_eu1.yml:/etc/prometheus/prometheus.yml \ --name prometheus-0-sidecar-eu1 \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --http-address 0.0.0.0:19090 \ --grpc-address 0.0.0.0:19190 \ @@ -68,7 +68,7 @@ docker run -d --net=host --rm \ -v $(pwd)/prometheus0_us1.yml:/etc/prometheus/prometheus.yml \ --name prometheus-0-sidecar-us1 \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --http-address 0.0.0.0:19091 \ --grpc-address 0.0.0.0:19191 \ @@ -81,7 +81,7 @@ docker run -d --net=host --rm \ -v $(pwd)/prometheus1_us1.yml:/etc/prometheus/prometheus.yml \ --name prometheus-1-sidecar-us1 \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --http-address 0.0.0.0:19092 \ --grpc-address 0.0.0.0:19192 \ diff --git a/tutorials/katacoda/thanos/1-globalview/step3.md b/tutorials/katacoda/thanos/1-globalview/step3.md index 905a87b8bfc..76f404e90a7 100644 --- a/tutorials/katacoda/thanos/1-globalview/step3.md +++ b/tutorials/katacoda/thanos/1-globalview/step3.md @@ -28,7 +28,7 @@ Click below snippet to start the Querier. 
``` docker run -d --net=host --rm \ --name querier \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ query \ --http-address 0.0.0.0:29090 \ --query.replica-label replica \ diff --git a/tutorials/katacoda/thanos/2-lts/courseBase.sh b/tutorials/katacoda/thanos/2-lts/courseBase.sh index 286de850263..af98ac3d530 100644 --- a/tutorials/katacoda/thanos/2-lts/courseBase.sh +++ b/tutorials/katacoda/thanos/2-lts/courseBase.sh @@ -2,7 +2,7 @@ docker pull minio/minio:RELEASE.2019-01-31T00-31-19Z docker pull quay.io/prometheus/prometheus:v2.20.0 -docker pull quay.io/thanos/thanos:v0.22.0 +docker pull quay.io/thanos/thanos:v0.23.1 docker pull quay.io/thanos/thanosbench:v0.2.0-rc.1 mkdir /root/editor diff --git a/tutorials/katacoda/thanos/2-lts/step1.md b/tutorials/katacoda/thanos/2-lts/step1.md index cdb069b6327..5cca5de9c05 100644 --- a/tutorials/katacoda/thanos/2-lts/step1.md +++ b/tutorials/katacoda/thanos/2-lts/step1.md @@ -117,7 +117,7 @@ Similar to previous course, let's setup global view querying with sidecar: docker run -d --net=host --rm \ --name prometheus-0-eu1-sidecar \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --http-address 0.0.0.0:19090 \ --grpc-address 0.0.0.0:19190 \ @@ -130,7 +130,7 @@ so we will make sure we point the Querier to the gRPC endpoints of the sidecar: ``` docker run -d --net=host --rm \ --name querier \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ query \ --http-address 0.0.0.0:9091 \ --query.replica-label replica \ diff --git a/tutorials/katacoda/thanos/2-lts/step2.md b/tutorials/katacoda/thanos/2-lts/step2.md index 8a7436dc8de..717a3ea9d80 100644 --- a/tutorials/katacoda/thanos/2-lts/step2.md +++ b/tutorials/katacoda/thanos/2-lts/step2.md @@ -79,7 +79,7 @@ docker run -d --net=host --rm \ -v /root/prom-eu1:/prometheus \ --name prometheus-0-eu1-sidecar \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --tsdb.path /prometheus \ --objstore.config-file /etc/thanos/minio-bucket.yaml \ diff --git a/tutorials/katacoda/thanos/2-lts/step3.md b/tutorials/katacoda/thanos/2-lts/step3.md index b922333b5db..43e7a145afa 100644 --- a/tutorials/katacoda/thanos/2-lts/step3.md +++ b/tutorials/katacoda/thanos/2-lts/step3.md @@ -6,7 +6,7 @@ In this step, we will learn about Thanos Store Gateway and how to deploy it. Let's take a look at all the Thanos commands: -```docker run --rm quay.io/thanos/thanos:v0.22.0 --help```{{execute}} +```docker run --rm quay.io/thanos/thanos:v0.23.1 --help```{{execute}} You should see multiple commands that solve different purposes, block storage based long-term storage for Prometheus. @@ -32,7 +32,7 @@ You can read more about [Store](https://thanos.io/tip/components/store.md/) here docker run -d --net=host --rm \ -v /root/editor/bucket_storage.yaml:/etc/thanos/minio-bucket.yaml \ --name store-gateway \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ store \ --objstore.config-file /etc/thanos/minio-bucket.yaml \ --http-address 0.0.0.0:19091 \ @@ -49,7 +49,7 @@ Currently querier does not know about store yet. 
Let's change it by adding Store docker stop querier && \ docker run -d --net=host --rm \ --name querier \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ query \ --http-address 0.0.0.0:9091 \ --query.replica-label replica \ diff --git a/tutorials/katacoda/thanos/2-lts/step4.md b/tutorials/katacoda/thanos/2-lts/step4.md index 1a16c561cf3..c703f87e920 100644 --- a/tutorials/katacoda/thanos/2-lts/step4.md +++ b/tutorials/katacoda/thanos/2-lts/step4.md @@ -25,7 +25,7 @@ Click below snippet to start the Compactor. docker run -d --net=host --rm \ -v /root/editor/bucket_storage.yaml:/etc/thanos/minio-bucket.yaml \ --name thanos-compact \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ compact \ --wait --wait-interval 30s \ --consistency-delay 0s \ diff --git a/tutorials/katacoda/thanos/6-query-caching/courseBase.sh b/tutorials/katacoda/thanos/6-query-caching/courseBase.sh index ceb21419e0f..b4c2e508371 100644 --- a/tutorials/katacoda/thanos/6-query-caching/courseBase.sh +++ b/tutorials/katacoda/thanos/6-query-caching/courseBase.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash docker pull quay.io/prometheus/prometheus:v2.22.2 -docker pull quay.io/thanos/thanos:v0.22.0 +docker pull quay.io/thanos/thanos:v0.23.1 docker pull yannrobert/docker-nginx diff --git a/tutorials/katacoda/thanos/6-query-caching/step1.md b/tutorials/katacoda/thanos/6-query-caching/step1.md index b2188e969bf..610ced6b79e 100644 --- a/tutorials/katacoda/thanos/6-query-caching/step1.md +++ b/tutorials/katacoda/thanos/6-query-caching/step1.md @@ -103,7 +103,7 @@ docker run -d --net=host --rm \ -v $(pwd)/prometheus"${i}".yml:/etc/prometheus/prometheus.yml \ --name prometheus-sidecar"${i}" \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --http-address=0.0.0.0:1909"${i}" \ --grpc-address=0.0.0.0:1919"${i}" \ @@ -129,7 +129,7 @@ And now, let's deploy Thanos Querier to have a global overview on our services. 
``` docker run -d --net=host --rm \ --name querier \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ query \ --http-address 0.0.0.0:10912 \ --grpc-address 0.0.0.0:10901 \ diff --git a/tutorials/katacoda/thanos/6-query-caching/step2.md b/tutorials/katacoda/thanos/6-query-caching/step2.md index 8fce765ccb9..5832b08abdd 100644 --- a/tutorials/katacoda/thanos/6-query-caching/step2.md +++ b/tutorials/katacoda/thanos/6-query-caching/step2.md @@ -62,7 +62,7 @@ And deploy Query Frontend: docker run -d --net=host --rm \ -v $(pwd)/frontend.yml:/etc/thanos/frontend.yml \ --name query-frontend \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ query-frontend \ --http-address 0.0.0.0:20902 \ --query-frontend.compress-responses \ diff --git a/tutorials/katacoda/thanos/7-multi-tenancy/courseBase.sh b/tutorials/katacoda/thanos/7-multi-tenancy/courseBase.sh index 951657ec0d0..91ecf9559c3 100644 --- a/tutorials/katacoda/thanos/7-multi-tenancy/courseBase.sh +++ b/tutorials/katacoda/thanos/7-multi-tenancy/courseBase.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash docker pull quay.io/prometheus/prometheus:v2.20.0 -docker pull quay.io/thanos/thanos:v0.22.0 +docker pull quay.io/thanos/thanos:v0.23.1 docker pull quay.io/thanos/prom-label-proxy:v0.3.0-rc.0-ext1 docker pull caddy:2.2.1 diff --git a/tutorials/katacoda/thanos/7-multi-tenancy/step1.md b/tutorials/katacoda/thanos/7-multi-tenancy/step1.md index 51b3b7b39a0..1574c99af79 100644 --- a/tutorials/katacoda/thanos/7-multi-tenancy/step1.md +++ b/tutorials/katacoda/thanos/7-multi-tenancy/step1.md @@ -88,7 +88,7 @@ docker run -d --net=host --rm \ -v $(pwd)/editor/prometheus0_fruit.yml:/etc/prometheus/prometheus.yml \ --name prometheus-0-sidecar-fruit \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --http-address 0.0.0.0:19090 \ --grpc-address 0.0.0.0:19190 \ @@ -120,7 +120,7 @@ docker run -d --net=host --rm \ -v $(pwd)/editor/prometheus0_veggie.yml:/etc/prometheus/prometheus.yml \ --name prometheus-0-sidecar-veggie \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --http-address 0.0.0.0:19091 \ --grpc-address 0.0.0.0:19191 \ @@ -152,7 +152,7 @@ docker run -d --net=host --rm \ -v $(pwd)/editor/prometheus1_veggie.yml:/etc/prometheus/prometheus.yml \ --name prometheus-01-sidecar-veggie \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --http-address 0.0.0.0:19092 \ --grpc-address 0.0.0.0:19192 \ @@ -170,7 +170,7 @@ Fruit: ``` docker run -d --net=host --rm \ --name querier-fruit \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ query \ --http-address 0.0.0.0:29091 \ --grpc-address 0.0.0.0:29191 \ @@ -183,7 +183,7 @@ Veggie: ``` docker run -d --net=host --rm \ --name querier-veggie \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ query \ --http-address 0.0.0.0:29092 \ --grpc-address 0.0.0.0:29192 \ diff --git a/tutorials/katacoda/thanos/7-multi-tenancy/step2.md b/tutorials/katacoda/thanos/7-multi-tenancy/step2.md index 88182026b45..6380a0df706 100644 --- a/tutorials/katacoda/thanos/7-multi-tenancy/step2.md +++ b/tutorials/katacoda/thanos/7-multi-tenancy/step2.md @@ -11,7 +11,7 @@ docker stop querier-fruit && docker stop querier-veggie ``` docker run -d --net=host --rm \ --name querier-multi \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ query \ --http-address 0.0.0.0:29090 \ --grpc-address 0.0.0.0:29190 \ diff --git 
a/tutorials/katacoda/thanos/x-playground/courseBase.sh b/tutorials/katacoda/thanos/x-playground/courseBase.sh index a6b9cad2a9e..7c15d1711ae 100644 --- a/tutorials/katacoda/thanos/x-playground/courseBase.sh +++ b/tutorials/katacoda/thanos/x-playground/courseBase.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash docker pull quay.io/prometheus/prometheus:v2.20.0 -docker pull quay.io/thanos/thanos:v0.22.0 +docker pull quay.io/thanos/thanos:v0.23.1 docker pull quay.io/thanos/thanosbench:v0.2.0-rc.1 docker pull minio/minio:RELEASE.2019-01-31T00-31-19Z diff --git a/tutorials/katacoda/thanos/x-playground/step1.md b/tutorials/katacoda/thanos/x-playground/step1.md index 6b0a54e2e35..8336605df80 100644 --- a/tutorials/katacoda/thanos/x-playground/step1.md +++ b/tutorials/katacoda/thanos/x-playground/step1.md @@ -169,7 +169,7 @@ docker run -d --net=host --rm \ ### Step1: Sidecar ``` -docker run -it --rm quay.io/thanos/thanos:v0.22.0 --help +docker run -it --rm quay.io/thanos/thanos:v0.23.1 --help ```{{execute}} @@ -180,7 +180,7 @@ docker run -d --net=host --rm \ -v ${CURR_DIR}/prom-eu1-replica0-config.yaml:/etc/prometheus/prometheus.yml \ --name prom-eu1-0-sidecar \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --http-address 0.0.0.0:19091 \ --grpc-address 0.0.0.0:19191 \ @@ -195,7 +195,7 @@ docker run -d --net=host --rm \ -v ${CURR_DIR}/prom-eu1-replica1-config.yaml:/etc/prometheus/prometheus.yml \ --name prom-eu1-1-sidecar \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --http-address 0.0.0.0:19092 \ --grpc-address 0.0.0.0:19192 \ @@ -210,7 +210,7 @@ docker run -d --net=host --rm \ -v ${CURR_DIR}/prom-us1-replica0-config.yaml:/etc/prometheus/prometheus.yml \ --name prom-us1-0-sidecar \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --http-address 0.0.0.0:19093 \ --grpc-address 0.0.0.0:19193 \ @@ -223,7 +223,7 @@ docker run -d --net=host --rm \ ``` docker run -d --net=host --rm \ --name querier \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ query \ --http-address 0.0.0.0:9090 \ --grpc-address 0.0.0.0:19190 \ diff --git a/tutorials/katacoda/thanos/x-playground/step2.md b/tutorials/katacoda/thanos/x-playground/step2.md index bfbbeb91f64..7bf658c9709 100644 --- a/tutorials/katacoda/thanos/x-playground/step2.md +++ b/tutorials/katacoda/thanos/x-playground/step2.md @@ -65,7 +65,7 @@ docker run -d --net=host --rm \ -v ${CURR_DIR}/prom-eu1-replica0:/prometheus \ --name prom-eu1-0-sidecar \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --tsdb.path /prometheus \ --objstore.config-file /etc/thanos/minio-bucket.yaml \ @@ -85,7 +85,7 @@ docker run -d --net=host --rm \ -v ${CURR_DIR}/prom-eu1-replica1:/prometheus \ --name prom-eu1-1-sidecar \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --tsdb.path /prometheus \ --objstore.config-file /etc/thanos/minio-bucket.yaml \ @@ -105,7 +105,7 @@ docker run -d --net=host --rm \ -v ${CURR_DIR}/prom-us1-replica0:/prometheus \ --name prom-us1-0-sidecar \ -u root \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ sidecar \ --tsdb.path /prometheus \ --objstore.config-file /etc/thanos/minio-bucket.yaml \ @@ -130,7 +130,7 @@ Let's run Store Gateway server: docker run -d --net=host --rm \ -v ${CURR_DIR}/minio-bucket.yaml:/etc/thanos/minio-bucket.yaml \ --name store-gateway \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ 
store \ --objstore.config-file /etc/thanos/minio-bucket.yaml \ --http-address 0.0.0.0:19094 \ @@ -143,7 +143,7 @@ docker run -d --net=host --rm \ docker stop querier && \ docker run -d --net=host --rm \ --name querier \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ query \ --http-address 0.0.0.0:9090 \ --grpc-address 0.0.0.0:19190 \ @@ -162,7 +162,7 @@ Visit https://[[HOST_SUBDOMAIN]]-9090-[[KATACODA_HOST]].environments.katacoda.co docker run -d --net=host --rm \ -v ${CURR_DIR}/minio-bucket.yaml:/etc/thanos/minio-bucket.yaml \ --name compactor \ - quay.io/thanos/thanos:v0.22.0 \ + quay.io/thanos/thanos:v0.23.1 \ compact \ --wait --wait-interval 30s \ --consistency-delay 0s \ From 0618ac3974fccdcbd4ac92625a4a0785bf746376 Mon Sep 17 00:00:00 2001 From: Frederic Branczyk Date: Wed, 6 Oct 2021 17:58:35 +0200 Subject: [PATCH 17/33] go.mod: Upgrade cortex (#4744) Signed-off-by: Frederic Branczyk --- go.mod | 7 +++---- go.sum | 37 ++++++++++++++++++------------------- 2 files changed, 21 insertions(+), 23 deletions(-) diff --git a/go.mod b/go.mod index da09b9cbf2b..30292e545fe 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/cespare/xxhash/v2 v2.1.2 github.com/chromedp/cdproto v0.0.0-20200424080200-0de008e41fa0 github.com/chromedp/chromedp v0.5.3 - github.com/cortexproject/cortex v1.10.1-0.20210820081236-70dddb6b70b8 + github.com/cortexproject/cortex v1.10.1-0.20211006150606-fb15b432e267 github.com/davecgh/go-spew v1.1.1 github.com/efficientgo/e2e v0.11.1-0.20210829161758-f4cc6dbdc6ea github.com/efficientgo/tools/core v0.0.0-20210129205121-421d0828c9a6 @@ -34,7 +34,6 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/golang/snappy v0.0.4 github.com/googleapis/gax-go v2.0.2+incompatible - github.com/grafana/dskit v0.0.0-20210819132858-471020752967 // indirect github.com/grpc-ecosystem/go-grpc-middleware/providers/kit/v2 v2.0.0-20201002093600-73cf2ae9d891 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 @@ -55,7 +54,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 - github.com/prometheus/alertmanager v0.23.0 + github.com/prometheus/alertmanager v0.23.1-0.20210914172521-e35efbddb66a github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.30.0 @@ -64,7 +63,7 @@ require ( github.com/tencentyun/cos-go-sdk-v5 v0.7.31 github.com/uber/jaeger-client-go v2.29.1+incompatible github.com/uber/jaeger-lib v2.4.1+incompatible - github.com/weaveworks/common v0.0.0-20210722103813-e649eff5ab4a + github.com/weaveworks/common v0.0.0-20210901124008-1fa3f9fa874c go.elastic.co/apm v1.11.0 go.elastic.co/apm/module/apmot v1.11.0 go.uber.org/atomic v1.9.0 diff --git a/go.sum b/go.sum index c1058352617..fad7bcd931c 100644 --- a/go.sum +++ b/go.sum @@ -445,8 +445,8 @@ github.com/cortexproject/cortex v1.6.1-0.20210215155036-dfededd9f331/go.mod h1:8 github.com/cortexproject/cortex v1.7.1-0.20210224085859-66d6fb5b0d42/go.mod h1:u2dxcHInYbe45wxhLoWVdlFJyDhXewsMcxtnbq/QbH4= github.com/cortexproject/cortex v1.7.1-0.20210316085356-3fedc1108a49/go.mod h1:/DBOW8TzYBTE/U+O7Whs7i7E2eeeZl1iRVDtIqxn5kg= github.com/cortexproject/cortex v1.8.1-0.20210422151339-cf1c444e0905/go.mod h1:xxm4/CLvTmDxwE7yXwtClR4dIvkG4S09o5DygPOgc1U= -github.com/cortexproject/cortex v1.10.1-0.20210820081236-70dddb6b70b8 
h1:3wtJ9PaFNIpBeSTjjhF7l4qTbvZf0BEX47TEAqqn6G0= -github.com/cortexproject/cortex v1.10.1-0.20210820081236-70dddb6b70b8/go.mod h1:F8PX2IHaeFvqCci46Y+fhskJkCtLvh0OqCKFtWyjP7w= +github.com/cortexproject/cortex v1.10.1-0.20211006150606-fb15b432e267 h1:IwLIfwD1AxH1hlO09m3vdj4cSnlqhgGQV5yVgxnBPjU= +github.com/cortexproject/cortex v1.10.1-0.20211006150606-fb15b432e267/go.mod h1:viwUqGbsFAHfsAGye0tUuyhKrbrlJc6LkvOXQ3j8xM4= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= @@ -666,7 +666,6 @@ github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2g github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= github.com/go-openapi/runtime v0.19.26/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= -github.com/go-openapi/runtime v0.19.28/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= github.com/go-openapi/runtime v0.19.29 h1:5IIvCaIDbxetN674vX9eOxvoZ9mYGQ16fV1Q0VSG+NA= github.com/go-openapi/runtime v0.19.29/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= @@ -720,8 +719,8 @@ github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3yg github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-redis/redis/v8 v8.0.0-beta.10.0.20200905143926-df7fe4e2ce72/go.mod h1:CJP1ZIHwhosNYwIdaHPZK9vHsM3+roNBaZ7U9Of1DXc= github.com/go-redis/redis/v8 v8.2.3/go.mod h1:ysgGY09J/QeDYbu3HikWEIPCwaeOkuNoTgKayTEaEOw= -github.com/go-redis/redis/v8 v8.9.0 h1:FTTbB7WqlXfVNdVv0SsxA+oVi0bAwit6bMe3IUucq2o= -github.com/go-redis/redis/v8 v8.9.0/go.mod h1:ik7vb7+gm8Izylxu6kf6wG26/t2VljgCfSQ1DM4O1uU= +github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg= +github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= @@ -729,6 +728,7 @@ github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= @@ -935,9 +935,8 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY github.com/gorilla/websocket v1.4.0/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grafana/dskit v0.0.0-20210818123532-6645f87e9e12/go.mod h1:QaNAQaCSFOtG/NHf6Jd/zh67H25kkrVCq36U61Y2Mhw= -github.com/grafana/dskit v0.0.0-20210819132858-471020752967 h1:1Z8LpFZzzpqEK1pq1PU8UGbeUQubO1Idh+jt1XXwB8M= -github.com/grafana/dskit v0.0.0-20210819132858-471020752967/go.mod h1:uF46UNN1/feB1egpq8UGbBBKvJjGgZauW7pcVbeFLLM= +github.com/grafana/dskit v0.0.0-20210908150159-fcf48cb19aa4 h1:OwWd9nQZYfb01HTJjleuO8eOP5t6Hl2EqVjng6W1juc= +github.com/grafana/dskit v0.0.0-20210908150159-fcf48cb19aa4/go.mod h1:m3eHzwe5IT5eE2MI3Ena2ooU8+Hek8IiVXb9yJ1+0rs= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -1229,7 +1228,6 @@ github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7 github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.38/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.42/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= @@ -1309,8 +1307,9 @@ github.com/ncw/swift v1.0.52 h1:ACF3JufDGgeKp/9mrDgQlEgS8kRYC4XKcuzj/8EJjQU= github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= @@ -1331,8 +1330,8 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= -github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega 
v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1342,8 +1341,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= -github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= +github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= +github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1422,9 +1421,9 @@ github.com/prometheus/alertmanager v0.21.1-0.20200911160112-1fdff6b3f939/go.mod github.com/prometheus/alertmanager v0.21.1-0.20201106142418-c39b78780054/go.mod h1:imXRHOP6QTsE0fFsIsAV/cXimS32m7gVZOiUj11m6Ig= github.com/prometheus/alertmanager v0.21.1-0.20210310093010-0f9cab6991e6/go.mod h1:MTqVn+vIupE0dzdgo+sMcNCp37SCAi8vPrvKTTnTz9g= github.com/prometheus/alertmanager v0.21.1-0.20210422101724-8176f78a70e1/go.mod h1:gsEqwD5BHHW9RNKvCuPOrrTMiP5I+faJUyLXvnivHik= -github.com/prometheus/alertmanager v0.22.3-0.20210726110322-3d86bd709df8/go.mod h1:BBhEP06PwDGsIKsQzOeTNe2jU6tU19SzhJ41C2ib4XE= -github.com/prometheus/alertmanager v0.23.0 h1:KIb9IChC3kg+1CC388qfr7bsT+tARpQqdsCMoatdObA= github.com/prometheus/alertmanager v0.23.0/go.mod h1:0MLTrjQI8EuVmvykEhcfr/7X0xmaDAZrqMgxIq3OXHk= +github.com/prometheus/alertmanager v0.23.1-0.20210914172521-e35efbddb66a h1:qroc/F4ygaQ0uc2S+Pyk/exMwnSpokGyN1QjfZ1DiWU= +github.com/prometheus/alertmanager v0.23.1-0.20210914172521-e35efbddb66a/go.mod h1:U7pGu+z7A9ZKhK8lq1MvIOp5GdVlZjwOYk+S0h3LSbA= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -1639,7 +1638,6 @@ github.com/thanos-io/thanos v0.13.1-0.20210204123931-82545cdd16fe/go.mod h1:ZLDG github.com/thanos-io/thanos v0.13.1-0.20210224074000-659446cab117/go.mod h1:kdqFpzdkveIKpNNECVJd75RPvgsAifQgJymwCdfev1w= github.com/thanos-io/thanos v0.13.1-0.20210226164558-03dace0a1aa1/go.mod h1:gMCy4oCteKTT7VuXVvXLTPGzzjovX1VPE5p+HgL1hyU= github.com/thanos-io/thanos v0.13.1-0.20210401085038-d7dff0c84d17/go.mod h1:zU8KqE+6A+HksK4wiep8e/3UvCZLm+Wrw9AqZGaAm9k= -github.com/thanos-io/thanos v0.19.1-0.20210729154440-aa148f8fdb28/go.mod h1:Xskx78e0CYL6w0yDNOZHGdvwQMlsuzPsePmPtbp9Xuk= github.com/thanos-io/thanos v0.22.0/go.mod h1:SZDWz3phcUcBr4MYFoPFRvl+Z9Nbi45HlwQlwSZSt+Q= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache 
v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= @@ -1691,8 +1689,9 @@ github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099/go.mod h1:hz10LO github.com/weaveworks/common v0.0.0-20201119133501-0619918236ec/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs= github.com/weaveworks/common v0.0.0-20210112142934-23c8d7fa6120/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs= github.com/weaveworks/common v0.0.0-20210419092856-009d1eebd624/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs= -github.com/weaveworks/common v0.0.0-20210722103813-e649eff5ab4a h1:ALomSnvy/NPeVoc4a1o7keaHHgLS76r9ZYIlwWWF+KA= github.com/weaveworks/common v0.0.0-20210722103813-e649eff5ab4a/go.mod h1:YU9FvnS7kUnRt6HY10G+2qHkwzP3n3Vb1XsXDsJTSp8= +github.com/weaveworks/common v0.0.0-20210901124008-1fa3f9fa874c h1:+yzwVr4/12cUgsdjbEHq6MsKB7jWBZpZccAP6xvqTzQ= +github.com/weaveworks/common v0.0.0-20210901124008-1fa3f9fa874c/go.mod h1:YU9FvnS7kUnRt6HY10G+2qHkwzP3n3Vb1XsXDsJTSp8= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -1991,10 +1990,10 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f h1:w6wWR0H+nyVpbSAQbzVEIACVyr/h8l/BEkY6Sokc7Eg= From 3040829c4c11ad276db6c8d681b581f583a877e2 Mon Sep 17 00:00:00 2001 From: Martin Hauskrecht Date: Thu, 7 Oct 2021 13:18:53 +0200 Subject: [PATCH 18/33] Add lablabs to adopters (#4733) Signed-off-by: Martin Hauskrecht --- website/data/adopters.yml | 5 ++++- website/static/logos/lablabs.png | Bin 0 -> 72950 bytes 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 website/static/logos/lablabs.png diff --git a/website/data/adopters.yml b/website/data/adopters.yml index b5e41f6d09b..4274e443dfb 100644 --- a/website/data/adopters.yml +++ b/website/data/adopters.yml @@ -155,4 +155,7 @@ adopters: logo: pagbank.png - name: Itaú Unibanco url: https://www.itau.com.br/ - logo: itau-unibanco.png \ No newline at end of file + logo: itau-unibanco.png +- name: LabyrinthLabs + url: https://lablabs.io + logo: lablabs.png \ No newline at end of file diff --git a/website/static/logos/lablabs.png b/website/static/logos/lablabs.png new file mode 100644 index 
0000000000000000000000000000000000000000..946c0ef790bc0d2a0e66c6cb0b19cd666c8e1d73
GIT binary patch
literal 72950
[72950 bytes of base85-encoded binary data for website/static/logos/lablabs.png omitted]
zkB{d@qlShM!7Gg(WIbKX05tt{dtO}H8$koxizrMV1wB?STCCP>3`07ZA_x|F&Zglc zkMWeHUFfSf;v$R_awEEZ{R50hC@A7quNpA_)pUVkVEx99hgXh1(y0*}BfpE$%C)^b zWUiU3(-yEo11uYo$J?|J-2rMO5dw7RE!XN?m-r?LxXWA;@(YTV6XQ*qBHQ9EfqIiJw02tOt%m)(nIO5?ep08Ca^vx9iW2saR}G0@BGk%T6Tor zUhJpv<`n>-Ng5CHeUDr2$%=&6+3(Gt$e);_gRt8oQ@)Sbt6kl5!Gse!J%1 zySlGZWwNv)!dM^gMKycaguxKZa z)4e|>wmp>Yy5H-!lX1Lmv2LfCMjh?JBT;B2ZW^TIjGB*dFCY)r4KzwfvDy1TSNfXo?o71X=G4q;@#jeL(C5kuaVi~BvaTQ! z7p+Tw6uM2qR3znKe0ydse`2@Wax5rd_9kY@IotSDi!}!x^9`#;cS%QH-fx|@9FrQ3 z!gKsyQD`??>m+CK{yQ?V*;Y(|dVhm2uh;5mU{--4bYorF?}4FXJ5>O*rlXIJVl_8& zddixvuzGyj;u8lya?&9GN#&Nz-eG&<5<%B3R*kzp(Y#6Mbl4eA#w1wPK8D0n+uhe> z2^}R(wW=^L*HOlZ|F-1HDf>+dfg{{$=P`Pyx4GP#l3_YwuouB3nvR^W*#1#FKo){{ zwRS`nnL1?q5qFZ)7v(EHAj>-Ckm)Vnv2l2sh>gNe=}LCQVkpjbk_(x8$A?SPI_S<@ zaO7Kti==#|tLLcK(o3GrPB@3BAr{Q8!WjG1EAK!(KQ4D!F+@4Y%3B=r4ui+<4WjQ`i)ruH9&HAVEEkPNtO+i~?XcQc&w^Q7c8+0Cqc1}!0TE|QkxqwC1&wQ5vF&Ksex%xuJoeo?a zloMBlH}DVgIV~}d`oco9Zzp4)uMVaX&(0;QakYI<&2BpaZAQEnznt!Br>#=7%N>}u zJ#PnEHJ6`Z26!~qMXy(lj#8-CxP2Y(Y6DV_;Zp%u=X3$O`UI_fXgTmF3$4c|9mARk^Uz5rb1txU)>K|2P&nf`vZK zV7DH59}9ihSievf%kanwSwzMuV=ef*Atij0s(%Y+{c|2!+mwm=-?G67tI6=sp=>(Bkd7J^cK z_4jFbtJ@R2?l+dTw$6L9)pZvY;aoI1it)Il!;`S;{LK0(b2n!ptxH~6^?Kw9(aA1{ z2{>fO_mgv*&Q>W|*DyN&TD7NzR(+cMJM1Y4iGkdO-_7r}O$t0|o4-hiYUK~TQ)$EV zERMxt*pH9(&@Wg-iR!9!f<%<)a`qaDPhWZ0l#{6~a5K*t>{PpQGhKIOR|SF}l~h2h ze^1`TSwG)nUKkKuu5Y7_RLNyP8JIY1FHA=wk}J9lR7Uv|4bugoarB`VrV5`&H0<|v zXcwY4apz@ahu+WireU5B#=uTn@qhj5V*h=p>~P7)`!ZO*>}mEYgPfk7Z+2LOW4Aj9 zzD^SVJ-JK>b;y+k0(S}ibhbn%7sh0Alfm6!#5F+~ZMMo`=v)fa# z8ZP0V4bm~nW-BV2qy1(PEUXamA{IDv6&lg8*s3ZLxF724t7mIJ$lcUY74T*BxuST% zE>>fCwas#EdUNijS^{Zb|a_T3U$)JwXL-d}s8n!xPOt|yEvi?L|o5pJT z{`X3<%RZ{nA7l(lHvBo4J)M~7kl54YwV-_$;7+eOJTaxcNDr2V{fSHW|B{Qb=&j~d zdG-8LFnFoy3H72xW5gQG-#-fLAxOxS9_f_(@|fu2xJ3zH;r;De^9+9L%_Y@F6M%@o z7#VfQrwk95BcJAPTH@jrJ!QB11BM5aMnX0UIKe}KRyLX0e?&d;hP}EB6E0Zy@!O$= zDkf0xLwiv?bMlV36`2QaaY=jyZC@6jdGAkr3B>Vn?< zBLBa*GSQ_!i`o|nDjp6^XOv}0v5XhROOh=Zb`onYn z?o7_{EURuKvTUUF5E=0b-jILRDdt3fo=W5V?DUt>a=q(-pI4Ki%qt{0WhoOp4)>qx z>SMH5dxx0q*AR|{9N+P@1{}!&RpWFq)o9oWIZduZb0N)11fRp@QQSa&B+>Np%nW^~ zYnXNjKWn97PGBY3QGws-v_(#00qC`e?d*{mNbyaTD7V*rct%Df9m{;3oPQtotWB-` zBl2`6Y{TWin-^$~Y<=x|VZc`3JrVZjqVymq`8frw=DgUp;4_o-t-yNGp;vX_MmfKb z5sUrd=%>CdS7p~v{`(B~JTR&C4B0_4&SRjv=Ia zF>S#B7{dxH7L>#JA&vb|Mf$Bv{}c0j*5=I|<#Bt3ztut#7GuqkU)s;wX{6UrzejlA zeLH?YDS&%@M`@>*RH}dFD(oB5mzR;sHhOIIxR9~lL#FC0mapKxe|sSm3?XBCDnhrk zSt-P}1QO&1^q1Opl~OzU*!}@MGg(QfPFikq%*H70IXY?Oq^H_OU!Jvi`R-y$dha^? zvCU&$sLgfZu#5UeD0%VYbjN{3^8ubm{#+>UD^R^BEbrE#4squzj&GRb?I*B{f9aSw zb}NyN#^m`)uQ4+cOi(xKT-{r(@2mZ15upeTH9P_7{OgUK1{efg9B{~EY$(yYwMjFX z+(JW@qqQu_s&)=9OtA(SephJcJ8qInS7m+OvMcDD*(Nm#&oG*N_34-r={u0dDV zP~JasNCW!kU_;_@&I9z1jQrgbCJu+ZiCryjEo=@IV}(*u&+L-^96r;dXcqe6Wn`gd zAWEaHXel&z(#}_?UWKLojznJOT7tJsf$fqL#UcEXWr3 zg^laLw%*2bc!-;LJ35&%gH|k1(zWJ!w^^$c#Ip5~VJ6u!3^Lr7$Zvkq$S09_^lU?Q z0KQpsY9FF<%VXfWfdz5THXe|47IPPokGHrn4~rRn*v;_B7fKH8nf!@#xRmpH=SM8{ zS>L-SLT}ccE$tF6^1#W*^>UV1*Wo&n?fq> z&Zm`L)qf+7{R9@)>U+NBz;n*Ue^rDcq#+Ygn~keP))_W{o}f3GAHm%N$RL*QFSM2G zp9OX0JEla@-WWLpB@u=^A=6FtMQk7Hg2M5im+VKwK|W2F*L%cxnj$e20dPc@ESy_4 zE}J}C>_Fv61+s0dHZ5msxvy41aA*TPsf)qaMuc{;IbtN8Eq)Q0=nlRqQm^0U%z34u z+d9d_cUU%nibn}yPqHW89&z8_yJ=liS9T2jm;aJ&7hx;LRZIH4hQk- zVq&mwkZ*^uV|XihVV<~YW4?YF(JDk4C6UO(h&=9UAvjv#WUe#t7yG}>H0YWXaZZ@; zR9lw9dM&7<6m8FO>7aG+RQOL@oUCKk-DG+9)`5X>-(k4BJSa0f8DevZ2HuD zeekW20%Xe^q%u5Y>Wt_vUZ}7mAHng4-d}{L7S0< z(cIl6-9qSMuBdY^@X}sX?W@waMyTh`@oo4a+h15NC0D!qFOHkgZ@a#<0}1>V=d2Vs zryhoZ^*dPvr5omkoh+lNAfF80+F5c5d_1DjcV)Z~*pB}=cI%sqC4RwrkP2ebe2MZ% zsoFb%j5s0Jlg5GW+1AIEP$T2<%3^Y0WKf0Sq3bKqlsRikeZJ;XF^{R_CIU4$f1zN! 
z`!Trv=wz-xVm3Oc^gB{uzH>82EXA(=D@N_qD9rdo1|d`Bg816%8r+Qcm!a9<1`{R) zcl-YR>on7To?i7mb!~L^75o;x(x}lW3Sb-8o4NAjE$o`nr)qf5{(3CnQlh_N(pt6V zn^Bd)cpTe5Oq(3eb&i%Bxn6ehr382=;5bjYii0&~r0NRs%m!DY7(Es zXm^%pd-Mz1vt_@t?BI!5sM#mzUzTBlP=}GVb^)$7U?XRW(Fj^obpwT%qd+x5I)7G` zq_X7B`VpD*|1RS=PEE&T$DXoWw72(uEUr$kYOpTLW8=mIH#6kN?V3s4s2QUaTI*HB8Qqe{pjBmctVH8c;z<;4 zGgFiseSm11HR_T)w&uw6Ax@JYS4$!%n+uuBK4J?ywEd|2kQ7ZV!13Xy)hB)LC)s-p zL-Wd6=a(KEYs>vOCR&=vDk0dZyynurNqys|eShia=P_-5IjAW3ie0 zfiwHhAbLv?=oJ2iU-Bj90pIT(S)!xRs+?V`UaeU&Pj_-2s z$ZXp=vv30VzQ6B4*2d@yg+97z=->u=I!i4d+4N`1>QU_Vg9gK#30VO6L0#{=WyB7~Lj zBhzd*TXQHHE~o<+2VAQ$mujN5&MA=Qp|V{Ue6mRb!mfrhWr|F;%kis4_Ww7J)^{voDB~O^i%vMXUGWH z4eo8GvM^&R+ZD#DCjm z#Mp_h)1suPwHFxf_u=NdI|&}s$h)2$oE+m`g~KHhQjo#EF9lRA}Bzy3Pll;;;SAAARUaCKDsTwV7+ZYKzIaFxhjiC z*wHX6!tl;VqZ;XisCQn6vr`AJY2uFlTBx7h;5HKTh6a#)9u}>J^9@zOsm%=^QX*Z; z5_E5+b;!%6zrWudb6w0rGEMol+#Yz9gP61QwM?5cD?_gk&4u?vNU1p;|18eo3{MAeK)zCNA zeCKI0D#aOE#9w=AanHX}lqu2_^?9!=M?KPLQw`G0wU>+J1hLns2$L|e+yruXLdpvF z_<+D4<{V~z^ziT${>OMjL^Gr2eJOt{_GKom@Q+ux%OpAD7dn@l(lVef6Ws6eGRmO8 z?i_ofC=3OR!hE2qIK9y~bd>0)E>8)!cS)Flg5}#3$qFBk+y8@r<`U_3yRW;aybsZ_az~beu+C*Xm5mpMyOhhLctj(fyyf3viJ25ll@+$c$>bZEb@tmgX(Xw5 z6YM#-DF3@tnK*F_PQrupdM;7w&7e(7j!&RM3W##`kD(Acrvudqvr+Ny1gKa=en2sRZ5e&T-eZV4RQD$uCjrY(i&*T6&wZL0GoU1-6_46n} z7xtv_zMk@J4a>Nlczqkql=`H;I*h*b9}E8#pnRm@IaUGgeno;3xs z!26de+-RYqD4P5#r+ePRatIPz_WJP4Emji40vppetq{sg-Ah2Mjce!I4sN%m_5&$e zqwRymFag1gkn0=ZB^TQuw7x~FHF+dA`h4ZX!(8S2zeU9L{Uv5WWUGpM4V?*t=#9~f zHbY;8V11I(E$|S!m4KoNv?@O$PdZ!&Dj&MvAA#rBx8~8(=yH`WphlWV-w$&>_h6AH zFP!=MmGw~|25Djp)Q@q24$~obY73b145M9d0?N>$*ACL3j;3WHmN=~Qy{T9#9tq&1 zDu=4+g7zVuhA4}_70M6ryO)%Hc2`9!_6_I0Jky3zOi27@CVb0tadL1xbrH(is}8oF{5k$MV)1OvmFe(7#XM`f}*I$kQbdRMgr;}{inE^8F*Lw>CsAR zUWloK+6{{Ai@nEV5mPmaC~z1!C-`9Gr^OR5A~g3Qx&E0Ms|rIZ3s$)MgJ)F{M6F#= zHBO63o`WpGp`QT^?)uK1P0!EPM%THP4jGq_b$*UKgZ2?~TeGJNG9qra*7xN6den>9 z|DF}M0mlj~9&|zirF!9*>=BN)FVv`iM7Pxzdw6Z0;3VmXDu|*Ott;y7B@gt)t;zd) zdU@V0P%~#$pguGJ&(|Sl@16{pJoT3@BoUzzjv#pE|XpZHR4!KLZx0vmO{))+VK0+`-RJ2U0Icjt(0;#yg0d|Fv2ND1tnwA zjG%wSFom<6wT2>=f>yA}~QGcgxA4J$+QVsf_UtI^7&H<*t8w%ar#wF+d-Bvs3 z=^gdkSI!X%)c4mIT*4%PLkN-3w6)Q8!GIRovc(Vq{cx>4)3qpq3yWWnxcgm_Cqo`f z>ZgjLUWzJe%=7c46uco?(c_hdrYX0D-la0g&PC`LB24Dn5!+v548w;d9xumJGsu*4 z*3cOjz!4G8zorFW4BPnnH}*+Gx|e%Y2g$3ib(*^MEi=KLPGxi~TS5b|Vj|Ew|~W#yS`44mEeSoycFu8K5MSJK#xptqIo&nX`95@p)QtZg$vd6W@*#G!akn1?dCpIv6WO!rOY8 zM>iw-M_L#N0mK~oLp-U1SQNbXxVNmJR;lz@Arpc1aw_y=4uF}$?4(S_p35t<`ggn z8j^U%lYkP={&Z2`$Cu@pc_;<}QWsy8Eltw871Dto*z%P>UIP)~q6L0+J-9M^#dPKG z^wNgVPFV@y+PLhA7o0x)PYb{j9cLLvI)nl0FV(t&l)U#^J;fdRA*(dm)#b=R%nj&m zt+gB9qORO$fkITw=O&M z$Ybmh7I`_5QQm*P73V$Jy941zrm>q6Yvr7$V?RH3ck)v^P?f=dwqFXfsj$ZN2`Tql zl+K>YTBmme(U)(_8EWYAGWGx^3&llK7#}H75iS3qq$$%{ffJ@RXD}V3d~6V`4StRW@F!+g`fzIdzb4@n_nLK z9iSmFb4uk?HWKvaUeP{z-%)sjeMj`!WY)Nb7evo39sOC*^8719-hleam}1GHb5C9X zNe$r*{9Kmj)04DSZRmx1Vri)LTuXjXx!H%^2gh=7Qp#@On$_b|rTQIo6e7U{fG`x! 
z1bFP`6!Bjknj~86U;nx&&ClyKOT_hdm*Z;-tJ)9a_qXSyHoCZ_*a6<0e;OE~b;0Gh z@!4xI!grsdaj=ZOI9ExM~xK`@Ioozy`zV!x?CqXwr4KNJD54#YF`^qMFlRBDrn#B6Y z@&%7m@f&6z$fDv5ThkNELb4e`n_Qr7YU(q4UGudB;nGB$C2wY!TI{)o8}O#YAaI#6 z&)iCw)Y5>=_2pK)b^iHr@nNHO2iVO>W}(}4hp|+lJTI&D+YjMh{QBw^r%qqY7$rJH z9f=3$_rN78dNXTreQ~{-bKr(R;M-WddD0S{>rqR)*`KfW;WgE6S$5_e>ak*yyk0-t zf8uRjzL>$4`1&|Y7H3P?L=jbQ8Yy=3Sgn>7Lk$F?;}zY(R|ue0hyPngYCq!|gkQv6 z*Z+&pNtlC4k+HTU*?dYM!1ymWp$d$?*aj~H<a8Q(ruMD2TNbnr76~=sHa(=?hvxuvxf(awuADY+j53l z$(N~RD~l2ZxUThi@|Y})k=QJ(hCP=P7av?XU|wX zw>2SX&q(z=ving4r&^7{fl?RhxM2VgSv`9Ki2^3`{%b8?gS!p2$~-T_3a z;t%0ZoH_4h%8N;{6!~V`ouh2hg!MC4((qkN3po}n6&Tp)cJNJSQwagBi?YZ@eGBu} zawl50#P0>YLh!}So7aCv{G`7}P4GzQLOj0SAb>DN5Ayy-WUH{*R4{<@jJAu3Z%$H; z*$CZGenKDX@+E;tOSgCl$B?E0W>SzX>Kk&n(VXhxgfsxR^+%Jv-?5V{~ z)iGW0hq_wsDS{(wr)L zQ;(uJll|oDc+m@Xu0qf1*r}dS-C^HYE?SaMVA=2KQNH+&HC2$yqSq;W=PRQv4U>_T z$7R2eSEey#(_Y@E3AfE=l%GEuU#&^upDaJq+Ndxdn{1UYI7V9$zWwvLt&Iu0$&Z5n zs%upjAptS&%UY!fi+uVlw(dDg*I&uEh#;+8!}^q9J-W^9O)5a()KVCsnrcuzjHv&5Ch%wbR)e~a1TM~a|}wd!h@Xw*Rs1h-GoT8 z_I|KN+0v&6+c`lFjKJ~gL14(F_|#E3WNO>>v1p6!;B5rK^sl@8xd@`^U<*7w!uhWY z)11YsNWwmFpc8Cj(%#d3a_<&=Fkt+|{~7;JpveGEr`1akNdWUcjFVc$X=3)me$NMH zMQsi*IttyD+>5epxM`Hk2+3-4r3SoUwJ=K5x(jQm;78)Mst37F4xX1FY5=URx4yK5@*^U%bfCH6x*`o1MZl0dyzUat+2ZhT1>BHYE z?#H4Wk&uQKDJu6IT(`@xypQ3$(kd(Kt7tOZh!fD7M!R-7T3dNV`5pc#)j8t&(q@UK zOJ7XWF(hgTsc88IZdacwy!bqK&RFKxHjMB2^C!G+N(?6>J=7FkLIH1J%}5u~IC$~s zBxhtlg?73UR#N94hwQfX9Uze5Fb>2C<^~_7JO_|gex0T@YG0iioPQ!Gzws{ z#XzSrm19g9HtuN^Rz1>_uRtJu76eS-U*X4Uv2AnKikQ)9uviOI??8E;MP8w#QsNlJ zepnku*AIlS<3($Mj!gZ@+PnDUA;WO@QaDo^`Ct1^eY*bgbZ3`v^s%`7@j$I0iDpqS zQwgn1)i)ykaHv}+eF=#1f%(Cqq55pFt)DWR$J%vy8_(YlA!U&L6bCNv>L)e~n7x^T zxL=WWneudKhYO3%DJ$CL5tDn=|CT&e%)0Ei+nR^K#&)3yyf*bH{4%4e)3F`fsD(iC zZ4`mnaT#Y#wD}@Y()rSScc;2^1a^bCM}~|CqC=7ETmg;u#z2igo)Y&&PVHA9T)r-z zoynZQ{+SLVFEh~}bqJg@dr~;sfqi9?oL-hyM}>=~)lPb{bNNok>rWXMfy!*AYd?WF zV`5L+G*NUKBZv>T)>!0qSCkB(Xn*G>F*nlbgurY*3@-)9wYZ}LROhfnqm;;Ygbj8& zN#Kk54Y0YA|Hc0=wS0~JncU~>?wSD;$g+1Vxx&4Pg;68!^~l_W{GQ)h>D!*a_-|Pe z?=}h%>QZlhv5!xvRUnt%G#?KA)En2L{0gLa+!IXylYAy7<_{qBfJcX}T0Y7tv`vA* zP6yZe_vmizlZWCStC)G4VK&3zNvC>Bm2)vO%#79Y-sbP<_8de%)!JrsfPB37C4jMj zeXRj)?WN6-rAN<*T`$mT=_^?OMDpn9*wl8TI}FHvynzN&9Y%t+_RXC4Bqyljsf|z= zs82H2hqo`{O3aB?H;mutB_;lLL!uFJtyH=U!{E9c9KV3lVvg6j@2W2)BYbMXu=$Cv zjWe~tWq*RH`6J^GVn8ll!df3H2$!|OCbgZMMJizSu>ba5MhhoYHf3n-`v5QyobGCJ zS6tm5hZ`E-Q5}ZV*a|<~?npDoZu1BXDqoU(fEw{j`H%NSUQHkH%&Y2P1{9D(?jzvJ{D6=8;mogQo699 z#eYw5bi6KecfK3YS3wFvJip05#hWMHL1~aj2z`~bls%mo$U2DcJA-=uknd8w?|HIj z16Dm*=xwzUr_gWx8!iw8lPUU{*6@L=k_630wdt95%@l6P-!#kTIg`Xv*1E^ zTIWCPZ#O3L-Fp%3Lj3EW1~NS^7zvyJH=Cc5`mmrAlk>5eh;3ttqP@A&h{y>)w}X|Q z-kFeAD=brWv!5L{$EqAVed)@s9X$`1hOC3Xu zCP`6$mup20I+?vAhw=mdZTASG6UQ)H2+fuXQwn;og%dZ@kj7N0C`A*+F5#PbA10?)cRv~^*8_iL!+O#9TSUY=n_ew9DK1V^t#s+;sE%ENhJUya zh%L+78@DB4B2U^rHHSUTH-mZ;P1(f{KYqNj#! 
z{I&oOzDUntwJD(@fXXW^1&6RkvuGSDgCz1y0%8!2?!wQjvA8{ z6$|%ruS;YrlF~4XDlb24gX+}IA9C9)>t5|G-(GynGogbl@Wm#lrL>1Zofqzs$Gox0 z_m2i|5pUrn|%d zgQ7L%cJye4K0u3Q&%`{F!nWH6h>$T6q%1~p0X+Uny1*G8ajr1Lv{CIE8`bjaL4741 z7&m7B{(Nu#CSR?T=<;y7#`r=bODwdPP z^`OyIicyv|ZGC!5b*cLSPq;r_I=}t>o^)ZVT$@fAQ(OrP&_#=?g7adPA%M%dttt1P zWne2$oS%o0RF&9!`}+yw`J6V$uixnW0b0+}CSK#?JB6fdWhS6LLY_EqVrl(w z*_hC_+FnRwJRHqtYH&WstUZDy>Im@b!I2{OS1I%6>WDPUzefBY3g|fsI?^3zuQrn= z^NP_CUyVs;1B9>EkIo_n!@Zl*RI}chXH0bG*CKjKo0^i1 z@|h0R7Plj!WN|VC{)8dtb={sJuJv!|&BDE<2JA4P!V0~mlpe`D3maEjpjoPA?);pr z>B0-n9r>=j9t&M!uymFPEA@lE-VMMM!Aws+$DDfpG?Izm^Yb{o4Hgs^(AP7{TBc>( zFaaj^&ceY(2c2@i(B!_M40iU8&iOK*g4_En3=R~hP;|;fPKfblX?Q!ow?!=lN11X&`-XWGc*PWlO+Q%0_w1tS=x{Yhg7S#81;nu*>Y)!kSrklrqrvscNCCNOZy*%2fWgkgw+& ztPdPJhB}G(3-oce0H5{^wLh%^SAG6ef`7E`&08N(|5iHtLtfr>9cLKwrN!#V(l#74 zvbFhVOw}?J>N7Rze1awGg4p{C9t)nHiz~E`wUUqZ=Z_yf*YvdTSPV4dd7pa5WmyKC zdj@sRo}Yg2?5GRC9}XYtus<|F?W}u`K3uddms^y%>z&+qSVXWy$CIh3b_>Rk3`B2o zqW&BKy`rHK*voeC7{?Gs>=x%q_i%S4;MACyeAC%^+i*7Sp#Bb-Q3GUVU)E44r7}C% ziXel6Jn)Vi^oWM#2oe^vDz6$Nxa26nllE$+Zy&UlYpT&SsG(xwyZiB6r;YZMxf1o3 zUeu)!VqWmWPZpBcO)5+wa6P*9t&i*;1`$a!Ue9&As$up+ip739-+R=FJw_~pG}49o zFFmlMaA;()Q#iEtEt)K^FMRSep-JyE#>P4!3< z!=ED~LTl$BNw`g#c?WN*oBN}UioeRdjfTI4t$>N_xthPVAovCJ+nxY2BSIHK?HHmaG4=wffmS~a=M5m~@H+AB7amBxf zF*8!KJ{m&e4%&ENtYTW>*lwxtC?45Ue9!L!;v@5=JqVS(=lp?RRWLMMR(zpi4Z3Bd z-UayvR!!xU#2@!2b+=B-`#;b~V?}f18p~+7>=iMq7^gfrJ4-0~mA$)muafgbr#S7g z()7MAvS3T;9>o==dOptO)^Fvd6x8u?S!y!5-up$QXGLZ&QxE`$?g&aQ`-P9L2IgGn zo;Zra*uqu%-#6il#?iXv37cr8h8H2;OBG-bRB7H;Id=`0Z!)n1YS@hf(W)ARJQhwC zRhGr3sDiRl*|3No>lg02>0Ee77iC~0HW$~H;L#4>v>mDA-1RdC299Y+O8m<3yu@OzGrik~h`N-ko}eqkK}0(SO=K%FSj5 z)sEg>?ESig4R@Y3!wN>8pcX~IL>*#_CToT2BwdYuUp*M7JZm&9L%J#b=px>JiKEa^ zQzw?v6}t&{EdY{R$wZPD_o|0klbt(Zl zE=;StT7D~Sb~#7Mr+(KIiRO-gU{!+FfXTv{9wW=Oqe5s|mmu;;LTKKB!$Flk!Zg!L`K({I{meY$R;s8;b zd#BC$W0H{>3+wV@fmrQ?pt7~buh`4FpOs^%d3jvg2r$9{c4qqzHY;OoPW-wsaH-k7 zCr>-`(@A@8swK|sluS|0t)R}!9zAo%h+4QPAR<>b_x-U8ImSOLL*6dySio}#KHhIL zfe2Vpa1>X|Wgfv)8y5D{S8ocZ(YPi;SQX$Jtrb|=JN?V@&Hq>vyf=4@#1+c>7*DbH+V>O}dn4)bP5{a7ebXyMM9p~X zChO53J8771DbuYM#ez#JM~i=|hj$6dM;Jvwr+g!VPEL-h)C$9Eb_|V;uW^V#&-h5Z zpiGgLrmq=arm&PDX@U6Mo(_jpQ$>=WduVw%A2{;=)_e>H32O{|Qb^^w@)mW>T>IIz zbF=Iw0#`b438fG9JnQ{lm*)LRX%pduCo&Ba^i09?&iPN6y5wm@Gm~t!&*G;nm{oE_mEl=`X%1Pj=J45cXPS1EvGIoHYutLbEZYe>0ok z-fGfuALJ*P8rMpAAq!B4esii?077*!XRl2*ob3wY{KZ~E=_dVyG!#SLD_uiR# z=INN3@t=AnRQ%UnbfWV<eZW!DhA>N~&4BT$Daf%J|X8(WGs z4gbr7r4fg_Hd5(tCJ6DYzqXb4UXPAxu*yLv`GX}TY#g`xBDEetu7a34Sp@}Y@}2hs zr+A?;+~CBhm|Jr=AAmmq+IWItz5SD>2+OU|NhNs;?%`zh1|3$CqufVKqb&=?MDMRZO zvz)Rkr@h9nUyG%4N`SCFDBAy6t{&BzmBO?2$rBWNaKySq5T#p>r|8J3%6z>w=UiFu za03VA7Ki@Hu06sl*Cci+aH=Mbt~F<8_EW3nD#OEYe4kuL`U- zJ-gsUhyuzRA}K4G^k)6Hh1lU}Szyi1_9VOI_=PR)!H(2d;?I*v_iH5IPR+7X?2GiH z^c1bg6;I&bOQLf|9AaMy$u9+qVV8*?q%-U#SBB12-h;H-EGfkt&(t7$67RhqQ`_1v z^2)9Pcjjz50nv;a&)fcWSu0tM`_a&Ojk8RUceicr5c)QWzT%C_X+(yY-7r9)J00lEvNm z&#*1qqZ0G*Z<&RY*m7!+lj;5xmYY#2K1`H%$L%dQ;>>s$p9mPlP@4&*9^5S@tKapX zPsmmnY#HTcN7h=CHC$#>%9Am>AE*DOHU1HIy)>36_L@Z6gLQ!8FeEJ*DHr{KRjYLe z1F~U%KW2H*|49ASaeLHRIp*erX+J8*&A7kKO^G>PmoJUL5~G9+XcU1Y31>x{{>(am z58CW&s+06!q!k~Z>|YU&dK~p%uA5;hVXbQ~^=;Q7>tcda$OOvj(|yOwil_Ogd*bYR z(RXGD$jPPYf!Y?vCy4Q@dzRO}-32p?z604{<{H`G_1bYoo_0p!Y0f*pI^*O^y%_T@ zkbz4mp+OKC{WpiiL(nv$TRE?)*%rwP-7fNaS6HT&_Y=9SZGuDA*hW*F#+&aX%aDS{FPf$A}}i;AV zD#yWJkF1N?^RnAorI24vRbli5OAf!SP5{5w?S$FI|S;f6G~s39&D?Cgwhg z7EsaQXh^j?gyt$hML#J02OHyh(am$3d*YzEB=PE7dx~%ghrZ;Y!CE5#V~-1`1mOy!@B zfDljTTEMgOOj2Dg#!bkq6JTZuHF_-SB}R=c{OC9K49?H|kPl`D*(ii>?)$slx)mtCx>%c^2UN&E6C)q9~RksJnD}Cu~bJ+styt 
zPWLcj8Xbq=$=bV9fS83BvL9T>|D9_KTk4ChQWaB3=i`4-uh3J(g`ENWQTE4+GqY|YB1b|*kPVc?rr`! z4OMkbCC!b(JWhAd&Qr5lb?lt{^&;m_zR0eR@Ulad4W*$j z?2jS0T?}OY5v{H)9G(kbaNzCX$=~5IIgFudFY(3wQ^G`PX^0{R@HkwC;?WTvqWfbyxkZ zTQtyr0~QTjE9eHijGHEe>v1sd)4||nI{7_s4_O)V{Rw#fOYlD(3OZ;mE65vE>97k- z>@RhDZ>K=AjQa5E5wNpw8ZJ{X%y4H$=h*~TTgkx!yNUwvCm?gCDA)U@z134&h>~6P zcZax^4&`=rIdq`REfQ1^;-*p1fm7u?)!kk%bw52`YsQw&0tGLSU;PyS)HnokTlW6C zptTg%;fXyxmpWgX7vaAisQ=f)-wSX!xlhtJ?!}T|nZPh7MU>$~P$fRS z=Q%hu@7)Z&>{?_lSJw0PuHE&{U`A?j1w(pu`2}c}QJI@iY3A?lNQHDw?AwkI$e+IK z?sINSW^5OqtCL1TivoPG!3Z)0{8evU+{h*FF7@e)dENejsf9b`Gp`y4?tbNge5Wt* zRWeeCX2)z6X{#w;o{iig6BQqc#@_93RL@CCO3BO%D(zQs08?#s&u46MBk#KJ;!Z}m z_*zfsACn9bgZ^6+YJ|QQss?ab&!AyI^|8&sy!JibM4y&pY1)w;qgYRo}g#Sn(6p(_An(&FVg@=4|ET6rz@<!2}Ou`Uvn@c4F6#_mGfr|BKKJ!~dczPLXaHg%-d79mEe$?J&PyxnG?Iycs;FYJ%WKk| z4tpmJl(HD*0m{>kmbp8FB>}Ghynq8x+j?SMxNthlh{i`XYL`d z(IMZ9m9DSlT?^m`5`hi1&gaH#RsrdK?0{p4I!m&puj$KntEt49U!^N3;L)LIr%wpb z4p9?+dhW>aN=v;Ufr?W66VlrdpA0m6{adO+g2ojvJL_yBV4zCRY6qFdG0^7>v4U z-h5}@!nv?0ceiu^U~69J!+;)Q_s@!lwKf0CI+uYlZDcyh6p%u=ABqrc(t)_MFZf71 znhBFW+%UBF-0m}PFXM;BrDYp_jy#j_da6|j#5Y4?@N7kQH$;T`PShgV<}A-ddnlOv zt+``j_7y7(7j|Gdi3kg0-Voz$A9NGQRTzC!MPx^g%Ffj0@H!UYwdB#q`@syb9&LU) zKN0{4OFjgytijc?+66D2~|z4Dn%!a0#eIG;cF?#hfBc zOLrD=g~YCtrD(Jp!a{q9(+RnVz*VqvL>e$wvYYThAdj|2NiHv7?Ul#oJGQw84*(w; z?v^m~oHGcgd`BE?#lX|$P-2`y4rq!C89!OCrBzFdP7A5GuGkQvrG1Ru?%xY)0TN=B z)?76f*^-cd2CEHpR5j4|>PrqPu zPzpyLMXSs;7l8yyIvA!1&QO1tcD8SASyP z1q=LISvc}Zj8f`#V*-}w@&QV|gZBLC1KvKWXxB@X%c|}Aq{eLBd&)qw%rZWlu5)P~ z+0A#}fQefFdza!lSfWQV9|krWsR_HZ5*0Ty0E&d}#0#I%RGyrH_k{aV=ExT-WrGxLv^er?APgUVQ;55Q?fn z4hBo%o2pMh4Hl|B@F!gYxD-fWRoCeW^T> zH8i&l`R(nT1O80bNc$M6`RndwEeCvh`%%BIqL6m^9ij1Bg~EMHClAB?s8Gwg8gkI25MuM|^U4{bA6Pyb29 zf$^Zdgvyp6WbW_jp&W8deD_T*%1-viHITcH+oHTo5Uy1ob@RQPOtSS*oueaV;w%V^ zpjs}@V+RV&?ulgQbGTL}0Qy&jUHw_E%jSlI@PG8`3+!zQrtXe*H@iWB;0x3|G=8X% z^Eg}DCWrs{*Nu=Wz}TPOf}}cH?wt&9d#;cw-AE`S;YVGn|BU@Vw*l^rjgd40fI9TD zmAJ*aO{D*Cgj-;Qq=F7c&mhz^ebY3HJeS}6-}qys;7cy$<(LovMpiGlu_%srZMct~5MUkjhA?kze8<0#5HBdoPphlSY5Leu+wd{Z2xRCH4H*ulf`TPZ1uEN@RGmA&M zBGBnY#uDitAOH2DD@mjU8PohaD(E^-lk)1C9tuk?S&CjZ740`{f6>VjkU+Eu`%_pc z)qz3ryzFZ*FEf8yyH&%tOc4Uk$ZzJpFMV^%eO2yHFW_I%t`MqF7mhpXAY@*7ps)d2 zB4E3Z-PUTOOk;8e#hSTPl96Ka3ralQ%WL1Bx@k&i&PG>(bPdc%Gy)3TIPn-4Y#d0M z!`sOaj0xjDoNQbeE{s|VTw0AidNzU&{lUs0s4A~$tR9oSY=CVqo{*$5#nj>f9aVNf zxfq~*^g|>4$x11H%rvPm$DQyI=QRP38g!yn497g>O_fVzYnoZmf?QzxEl5RnM+rkG zf+conX5BOu*Iuo4ML5!-^Obb|&}ORr_fJqyGpoX)p|XT6DR%3vw(gK%qN(S89{;WE z(OVzvvC&%M;|m4#;iWK%k|a2Ny~2W6MekZ9?xGwtw2cIU`^Zq-jv*+Mj+ta4Zo9C> z!FrjS8DaLLjWw~lQrE=DGQPhIcKFlxU@SpvFiKzX4U5x%96SchGF?TC<7KZZTEzOh zRx44fOvT0Xnjl_%r_-88RWPFsAn zAhb$)Rac~;I1VTi)pXIR*1SoPOogsCmnqrbC~NVn3tL@0Mn`B3scJY!*w7&yS~5^u zcn_ajIkB7Oq&i|3IpeclyN|v8zNsi)dyacstzBdF4Xy0LpIXo{BdETTY$o{Uhj9CXL6q$cCQN8YpXDcsDG z<;PP5mkB~Ae8|Gr+AjEL~XOkZ1XcQ#9iIHf2@lax04YP0G0wx=sr*~TqV`t zw+gjfB4Sx+EjFaJ*u~1aZSo!hr{=pSzM4#!T`2ij{X`C;+GRFLr1=q-nRGIB3rqLa zL5KjhAI5(qgbWIKIvtrQrh+^mqWO21D^Ml4R=3hB5hX|KTbGZ1 zQkGC1h6N{h7|%|I@5~J-F(pAE(Bfr(U7i&`*9M|#ni|d;)1jb;+9l$3gR9Ww95j>0* zQUNRGuMuO)2I@G>(_Kp2|GqXgq*@cA<1aHeIk~!TB+ko8ZFLmrIUTk5^bbIgDoaiBs$3>=UYzQ z_{#j~kY6)z63!OHr&HIaWiWb6o${<2s?5>ttN(Gt_kLj4r0uq}@aqu?;7Erh^43`! 
z7X&=N4Z(e4sK%RP*+MAdMq{qWW8YQaQ&P9VyK%EK65`zg1fB1%k@P+Cz0StnPiz$i z`>V3++e$n@k>MIow6sb+^K=iOGZ`zNdOYw#(311+9dO4xs`Wh${O#S6t+5cMuhS`r(fVVc11m6Q-TCepnZS}10Sn{uNGc)`*S=|?J(TSkdvR*ceGk?wP z;e%|42Lhyz_33~Rd?W$-Qqr9FCpD$<-U4ssH=xJMa*QdVj+wuD9v$XWVoCx+_IK z!#%S8-3bDDmx7l-i1Z1fPiOV_?$vy(5Y5 z;nx&CARyp$8Fo|xJ|L>S6&Ak7;h$8p(JV@!*FR*K!*v=Wz=jt7XQl9tShgPJkAVndiC1ioJj*$^qnVjw^{8z7zlq@j$6Lr| z8S0j^1RFynJC{ic)=Tpq-iCbtI*5OooL2vsb9e>G;rq$p(rV+M_2BId=*MTXcG?Ad z5=Tf7aQ1;Mk{-l(zfsYT{3y9M#Sjv`xhDc2E8)FB;;A8BGfM|Fj2DCh8g`fxxH>5)|$wYYWC)Nq;& z9ryuZFB}}L_v|Z|^-|XtQGaT>|N9_t4Dv2x%ss-XcF7?)`>m|+7`qo~`(Tw>~OGJ76Ah-WL3Nne__*~04y z-(8z_dP@Eem$yWGu+civ`RtGvb>kTfvPpe@!@&6bHTxlacWNrnFIl51t3_pg5Y-E+ z-4@Nj@YbcvJ!EEp+e|oi!RW<%56s9H=TPi;^?U6{d?^n8cH#bHsf%#rj3(lI7q3=< z{@u&LGN#VQ!XKwo5}m8+zW;RJnQ4Um48p)@wqDVd!{?-ChQgLAEEAiXMawOsMhYb{ z)a3Eil<`I7Ke<(y?SSxSJPbFzrX{hoR=2fBggOgKokbuVD7f_@C3kJCN;Q0PxWvdj z2fN$Vq1S=)YE!fKh;*m<~n)*)>Zjw^4 z<>)dFHQ3^!zJel1K*>r;1g`h5LVDZ9b6RTDRcBLqurKgr4Z0y~prw%hycRRxGN=A52j%Wp7tE>_X zDJgHlsIS&(7Jn6a@$N;1hS^b3S0!Y*s+aey#SS|*{hh%M$To*R$MbuCrfsO6ZDcu_ zQu}EgMmlN#FbIxAI0REU^$<{u%*tRwe4`To015)wW`>!%2WuSP&o*fF@RWuRE)OY* z8nf~`0TUW5xkXQ1C^0uu;HTiRjiEUUr@>!#p#gH`tk|KYc-u z_$aq;$lzy_v$?X(AAm?H`8c@3&*R3JZ*eGy7Tp@HLUt-RIF`O8b-7|L4 zSJiITo^oWlP+C$GTC)}17JvVst$-wXu=lj}(ohYF9`e z&D|$*xyxz6O(>fff7zy}sGbR8LU@9oY-T0JQr7Hvo*iF=JF;QgpZAE&_NZ1{ZqD$Z zGX{6oc^xM5Dos7)1(Bmf+X3yL-!=D)Z;LBHRT{#RKknVg4 z&B;UijG*-WyV(NVVyV;y!$sIa2N8m#s~(8OI-dmaXM|YdKOg8~K`sVhZaXewHKxB| zNk5pK_RtU>4Q*FcVN$K)b#pUw$tvn^IQvkbt+XyeWCLtri1@p=O6n>~>Xqgq^Q}4; zkBJx8hPWEMuC2JWz1}NDC#skLSiKQqLHE#R3Ow#sB-0dHyAO?%s>{7VrEN6fdn>=J zRG4Ny_Qp?*s<=50+C@%L6sW@J=nLI1yLXo;>_)?{)`*ba8oN<)wM0Edy22vIH*z>sjEy>z?@>IK#|=^2lfmB-Wgg#)`M zCAkDS4`Uv%2Vid;jETfHH+@Q&5MGBFfr~)uPXag+&3dk;!P$PC3@orG5HSAMYj zl4j)lonx?Wjw!4_-;O1ksqYh7Ui50PJ8iL4n(V`hIy4$KkOKS)ID^W##K9Sd8-`Tzblb3QokM_MuCM}mA$uvjdvY@rw`Si0*F2V#+;U`ZH6=`kDNzvAlZY_(UeGUt3--VC!4`wU#E zdTKXK!g7gASFbdzNto8=ATy0!fJZ!x(<{xRbYD<4Rm-8-apzz?;tW)~l9Yi;9`cGj*yT4OWb5Uvs&MiN{$9 zyJT7K9bgR_e^S@Q|EfbxOGu$Z`ALw+BsWld6BoC&CM&ayFriW;?n!XsthNm#f z#?&M(d&#OCf6nr={i2@gvlp=r()mznc#S*ka&3K;CZ1-WA5BHvY`LvdC+^jY+sEk}M3=By0gt(e@H%b& zI(2PjZIAJoMZv_n9`jd{{;IH(rA{3d;zM^eH39@GGRi@Z8vi3BTn-tlNL=C|UOA!! 
zC>l+!V34gqtptm>BCEwjwZJdz+aqE3!|oD$7EhO*-yI*O-%AF{BGldX$GA5(E|lKT zxkXSR$6VIB_JJ~2B>J7mrPP)QBCZj7^% zT_0BpcrAtZYM1F>XQ|Y0?Xy+d&n<5iHT(@4?O=FuEmvXSd^*RTI{wo({4%iyP|-e% zTX{kHdWw#@`Jp^_9pB+(Dw=g382Y;z@E2}pjBU=U7}RoNdk7~@S5pszA8y6YIXMN{ zPX5#@JtmMN^_}}OnKSFOs*x(I&6pt7P_`p$3`%s<^MX?JCbCjn%1@6h_eTb=DG4X%j6JL8)vq*$n%k!L zLC=D!`uVv0Cdc!WJC|dtt1$(FyR)*&Azm@-Q=nhmcEEuUx}&pT_nK*SN3Bl7_z3`q zE^n!0O`|c!cw1!IoGx3|+ZVZo)B@&nSWKwxxVm3d&rl!0)NPx-AoX+?C?F!J-fW3 zu0>n=ak_OUhXl|AL~la|`?)5J@@XE3pYb3<1}Hu>B$Qm_n-Q;}Q+#r0toge)%D_l` z{$At$ZrdXH@X$u|+*-Froqc=B#~b87@&_5m+zX;2k&;o4wB+<@QoM9};M$v##5$W= zdZ`4Z1m-~7mC7#QmX=)|!B(PbZI<7F@ zBWm<#Ze9eTDN`L2wUOW=skF(>wCs_m*Bk#)sV{1xmPW%BuDS&n{rRE{j$pAvX*$xV zbB}6D>H4ID?~~r5_O04UyC!2PNMrPVzuz88Z zctciCK^B9fZ{g2FuuCcp?(xSG^DL+?a@mD?OHvR1r>OJbw%pE_3a0uhCqlvJ7qD-c zjJ1ZwEua3ZXA4IJRD8HH1{O$yQf49y7v9v1ach6N3o6KB>}v8|UJX`t8iqsI;}d0jB$O3aDQxs54Sb&;Dd>!^G{m`t_=22fsKM;HZ41~q^(ds5 z8`FNiq_3!-qEo@O7Yy5`V=a>T5U7^}cu}Yd74FNd8OfP2>v0HxAAHb9o#STuF^K87_d`;= zMFu#kMrHWnSHHe~IZwoH8(tJs@ag1dxemtMLMayD~XPDywR-iuOF5;*kpr>DD!RW76E1`9FSWlpBvq7J1aVdHIoID|${R z)ao`G5n>sJ-%QEiiSzfjPSBAtIKPK}1|8s_`(Gc$=T(@?rdg>MsE6JImUd7*I^#Mz z#gev{o0XA~Mo*RQ)tyh~N_gJc>U=kpEq z3FCU4F%PV}xVAVbQDHk&$(7KTk9oPJP&XOruUWwOrl8`bewXj0{;+MwTB{|}1{cB(GJ2030Ob)`Nr%cW5p}X4+58-GTPA73jxpqA z9ks7>beJ#_-Q1+VhW9V%lN73RZ;o&(LJ0!0jVq)kN{iY`nktah=trwDDrb!OSkpi0 ztnes16{0nQLq`S%7Bq~q%gv&)+w5X{x+|>&sl*3){ed-Sa#f|Pn9F4{)wpV6NOkv!-M}D&ronl&%F9c@<*Y@{a#Sppr3RG5 z6Ns*G1B==FFUY_S>4fSL!EDA?D*9M6M?z4Qnv~Ba6>5FgeDsM6pL$e{i1~!MmR>^~ z)L>C~NwmphE<@C`&Rb(s%MMl3UfiEcM>mg854CFdXr#4w_d7_bmpzqv13$3jOfU_5 ztW$q^pEBBuw5?9U4p?-?@$v^rflP-JS=( zH+)axggoYI4*7&%c}aXMb2^Gw`+Z^C@hc@3y^T29u*(IJyc zU1uF91LNXMD<^HEuRD-VHPbx>;2Om7&E*HP*KSj} z-tGyw3w>!m-#)fIxa4~$R{P~(KIpmNRT}|1HhzKXbgpEbo2I#D$Vi?d#@9-a>4M;{ z+(0xaJuHJPFgC zX+5x(tty7YxA$3N>#qCL;Y7(b0rYAx-f~Iz%jHc#SA5A>u042o$Y-AvDx+l9)U~x8 zXB)|{%ghma2$IWlwN3K6<3D3@kg02R*qAc~2dk%+jSS+uXTxAx{bg!#4yGHtgds4X zA-&1pquL(tJu!*UtpIV*o1h8#T=mHa1;J9A78oj&b+or zkC6dpI6Q+c7k&EW?D-CklSNEt-ppB%ViB>2xsnnEE7--vUpe(@WljFe?D^kT96_y> zQ8jA*E)L1Vy4{IG{@O|?lv*w|kIqg(VXEnPn~u4#Zn?*K)Uu;;IB-F|h{tQb1*4ZQ zZJwN(N?!R0b7NL7>zLGNhEaHkg2~3FP9YXn3oEBD#S2H@ zmtG?F&O%CiyB-SJFj5uuVz$!^4Rf{>;KYW#Io)iREOm|^+DW~pd1Y<89oXM@J506y`A&&ds-MZ14BolQegO3(w?b~@eGm}g&c*b-x z>GtSpoppG3zFM?Owav(zM|HC@9X-K^8C6JGo`^xA=TJ_xoUa6o&J|WMWCwQw;hLW7 z-^IdRpPpTduCS>GV82@dgkU;=qT)%j9GAD!;u3o=Z{%_$oUWqg_fM+lLL}@(qX_eE z&qR)v#Swu|`~J5s`M77;Z%fy^bj9D1-FS88@2vW4y5j86>S{)#vZeVw=L9!RQ>)a~ zLK>*6bC~G*$XBPW|6q4-6S(HnsY|6lW9n@Y89aj%0(dATGW!BEf z)~x2%$ML|Rg&+`o*%;og>{}f7roUyrtNfA<jx+2%&g*1s zp2~1duCwJN4}z_yN4Cw)YO;c!ekAG6F<^V;`O!YLc!@}$wtOLZ7xTtco>3f3fx7_s z8as(>u-9~?Xw~NU!a3M^Tlnpu29O#hY-89WBEC@^m_%+s|)55pu=gM7#fO#yauzGGeFZ zT*Fz5qB=f4adf|Ud3UTef#=x1r>mZQhi4VUK3kDE)Q~VWRd#uUk|Zf1pnFq&A$FN{ zJ%67!yIq8^?LKmq9fbV~Uz*(O0rZ-MNZa6*+3O3%*f7#*S~j+kReMz7ASgEsK_h+b zls%!Q7*ik%455%w$`O;47TlCYQ_C?WIlY6&+X_J@WP4*4mQnG+XH0 za5Q|Ovw8X1Ms?6}L;(;gB;*nTG&HTCt_F*6T+ zuJME#O#!xXf0V!WymI=HnS#Q;tkuHZcr#!xd|N6=lhE zCswANkM9P@_7vpq5f6Lc2{5-X->7dr+k>UIjXay-_lwDf!=r`od+c0o)Su_a7K|Ma zBnplxWjWSVDhBvgSeA-mc5jZei$U>YJV2!f#5)kwn1TPe$v#A?KVS9{RWl}FGPiOe$ zZAM0my{X%CIC(uB6C0Lzc}yE_RnV^9*4}YahIBqTqC!r_v(x`wXgd>J|nG^q(GCrn% z5ccRZM~48d>hrMD?$r6_rYhQGCz6aTmodwZJM7m~(t=D$GUwwaMAbbPf0{$p3H7Gp zv|%$li#eobKNhq;_5&7il!@1geX?Lv+}E;{=|Ma^eWw1QPFAw> z4h=~WN(21~EMlhs;duk?&6#zFCBtU?t^6P#wij976h?;8xvg2J>*^1J@75k5diGtA zq;`vTlT=4r!$xDfS{g<)3_3;QbAdKDaON9_p)WP8#;%cZzTZ_H>p3yiu=G z4xDn8ZIbEwl>HH>)kpGxMv44;Vb9_Ti4D52>|Pg;^D526A^IwvYr8nCC5a6H;R$_O 
zo)GD+tNYaV^*Gse<%_J~wTq)8Y41&K0;mpp7g~QsAqwWva!tcFNaFCvo+M3*>23Te!t`?*P}Woaoc9W#$Ksm>+VvA+AeiDbYU`S;Zkho*BFKEu+>Q{D< z{duYWx&W&5Y+o2Ym=^YQV%Uy=@4P?{GqFuP#2%Q<7UgR4hT7V08DR}pB5z+@|2Z5@ zvMSvTl=5wyI2PQlJxJh|jX#*<+hNA`|1$)V+Yn~FO7vgF!KL{tW+7TZU&=jsfqngy z?z@>MJM$G$#^bcbK+^i8;s;QiIzqEW}QDwdG?dkR5?d>z{!C&6rZ+iu78&6IXG0KWMLAXmQ#RxK_%d~+Qi zRBe9$mVMe?<^|Vac(?ZYam^2OpO$y;z@b95)5g_7;1u`x7Ui{wSLTM+j9j%U+F^rw zU2X(!PCM2YWLSxj*J_EJ5{$j~(X5#*t3bQndC2 zsJ)Ocm0lP4b#sU@^Z4_Wynas1O-SUpme#7_IKLJ~YX6MqQFV72y$lA+Jd3X!eseCG z)}Uxq*SCh#GjU7x=9l-qWF>Zo|B$|DnW=SeHU z`w2N^OZX90RkFjwuy=H(V=M^Li5!hGQxDflqosE~lUeW}>e0t4`0+6PE;mWX!+BlB ztzo3i*wxvZU(laD>T5NZtm}(To%-wf@%f+cIRRbZP6Kn>z(fVA6-OsLt1XFg8$%BE z=N~GT(~cv*E80+1tn6@a^gY8hwmMNB>?yh85*c2?(! ztj1>n*P-|e$Z>?G(=a>d8!ynkkP?V#0kjN~aVWZtC=K`113DK%kT9_W-lpWGK3T&j z`Re`Fz7G`Ze#q}}vYcfJjlg?A7Hd9@rUf0QTl2{;inIB%Yxy+DOSe79$DZ|5L3+Bs z0^aUaFYwoIY5~A76zVr3tgF26pktSs07b0j)v;B|tDL!{Mm9Yw&3R zE?-Cf4Fw0D=~lgG@1OLug6gqqP}pTgv&4q-{=#(73Z$9V>s~MGb=`k2d?4ubv+5{X z+$*o3AnT?xft${P594y!xae`Bl?PBOfeZbtE5|MGLzcs!0IZORfuRU$T^8-4VBAYD z23}43-_)`rBgIe2z--b%wPYWimcewkd2e{umI$al3t{uJF3y{XGBtR}iz{BEZCw&d z+@~)-{&$CF1UDULElS+)`jNnoM>ucXvHb1d70@7lLaFu_ zrM46z3Je{C#^&;Ac~sy|H8mYMGM!T|zI}falrpBmcDvdV6G6fFcxB{j#y#<^J$nUI zZGn5V>wM0MoG}#jJE!2x6E`|Z z@H=_L=1_rMLRy(iKUpX1Clu?W{b2f@1%n+P)`yy`bLQM>efRbG6SbO8xO3ngS#R$z zlkm8pOOZ+|-us7&gBc#F%PRY_Ut*Sja&&%_E<1p?2RZqaYFExQOwk2 zJf3KlXo5X{-l6hYZ8he_gyz1s*dqJqk8T!at7)W(V$S#J-!76uoaZeaGqN;HDo#+AW+X}o| znz(n)n*rBAHYEvfDq(QDibbzQ42`|}q&+51wJ7}2nS3KdjXOq->47cTYXQ<#Z__U9 zyG@sA+U_Kl9=?8J`gwLpXQ|t-uSO7WCjixLTAWOFPfMg(m=!5#uHf?3(}2kl<3x?% zU^1Jw9_ky%;SPifKK{j$=7~QN9jT>Ul_LQ^QVFz$&ur3GNTIW54jf0kE*=yay8k+C z0X0I{9+s6+Xd^~lP{*wwscID#hGcz~Yqat4E+Kv=-Hk5d@5R?@9ww>TO@BaEi&r|X z>-Ly{TD6a#oMI-Sg$!bM$XsL=Z64U{ZuAR)h0D$D?6uf4^IqP{)F$ehmtcYS3X0_Q z-%h-fCY2tGW#rdGm@vND6Ki2{mS;aQo4cuZ-*8}S8(U2W-y+BLza{K(CVjA3SV1*9 z5jJFYebJ-nb43A>P1P zVZSPK1nHc(#&g%vCED9UsZeN`3oU#rS^{QW>U{2W_Mpur8vd9MSgNo-@*+{`)19eJulnA8#&$7AOE! 
zlJaG5>#SOp+@V-L+ZODlXxB&;)B}${%`vzPQM>KHZ!>tQTt?m+p&(UZ?Iw`R(Ts#Q}qBJ}}4BA|Gd>#)uMUvcvJ zWcXc-Nh1VuF-c!=qY;^ZQoa^27qJTaWZ8DcJ@b*tv(RsX^#fOf4RtcR_Jyd-iItLQ zzmL2|5}9-jb8)$@MbW*H6Oyzl>S$n~=LLxJv}fn@GHtiM!?k6p%U0bii-OkD=i!gn z7P(Nu?tT7B{g)o;`uG^_b9vw5zKV)pGHGk9FmHr^V3w8Pra71{@R6k#W9nJm5iISm zP%4;MswA5hTj*k1NE5nGb9or7V9qo%`u#XLDjviR;}M99gQQ>U>~W{`byv%ZilAY; zPMkoqe8~Zp1xsJY!p?|Yw#tZlsaNA~ZlbXi_OX5Vz5Psybj+E7Ym2nEx4%1$U|vOq zFZ(meotWX8>x5D5n8$wE4Bi_RUViUS4O|9(e{~HFIyx+$pIWBJ!GO{C@{8@YwaG;$ zgEwyC-1vG{Z%8T93_Efd=VSg+ZdDQ> zxtDQhk&KduZyzh8w7V^>7@FU?VPW|962e!`OJ{b&OpVSc-I@~c;(S^JA`+h~MP+g$ zrUs&w3CF3VbcwJ0aJ7Y)_O0)VaBux0lNfx>IkI7wn?y2wmFR(->boy%e&1IGczy&v z^FzcZn@;vLpKk$MEG!7MfTu|28e5`TcrkjGzI_cdn(wwU;&qtOEz4&#iPT$7e*uus z7V8jGKKPQNlsg~tMjz_dH7*+I$a>`@)9rs{H%y2hq=UOc=E^Z5a##`-s$17FZ?0xs zZJCj~jAaV?h(K(To=)Yb?$IlE+Yg84%^e^3=UbV->YnK=(^5uxowgtzl2&pM?|+R1 zdY5C!RWF;2>1LujuH>T?-9laHxe{L_$sP8iXQ~_6Depp@{W@~HD>qwCbh#Yke4X#tN6)PjZrvgUUyUINuaQUW3qEFY_l-A^so@MC{iNFK5Tn65AA zb;{1jSUQWXwq60u_@eu5_RyA$4BMAf)a2L!gdZOzx9R-WuZm22v`*WdN8fy+yCA~a z)?Y(P4JurE8oJx6r{mSmI}bYib{2|_!{D1tH$DgNiXHzR4Ov)he#dY*c#)bY89;bw z-^=s)tk&~33BR_Z`EmI`U`SpcQ+wgfX4KX`(~eYPdD8yKWyH=?Y>eiW^9@b0wKZFq zz_2g}}))(cRdeBhr%Xr6nmWq4BeNyJeB!m51x%Y}UbhUm>Lwezn~ zM)MHUdYZk+k65W;zCX!oH^BxWZ*ujUM^$yVwD?}x^-lY@MS_f7OIl+XiqPK8a`f|F zuwcD33w=;q%~9;95acvLc?fXcm?$3vQkv%5DKgECMbcR*8_&T78mKNMo54RhYS+(p zSWqMl(V^e&(bNxPANlPr)d9dPHmYOe)o*DIhcWHq+-^O)Cs$rTQ773bj+4B;wZtt6L^>MB zZEUer*T*Y%o4icF5wo-)k?Drx`M^3poITu44coVdLSYqr(l2JK29$(+UQsI%(-?nS zuKQoKl)sM7_4Um9Wy>Ba&%`iQ?e{8lNW5O25v*U@xN&D)nZ9LJdL%H9{}faz_#ii2 zX(mB;bYOMoV>$cp8G7G4P9L!T4EI*f5i}5be9)8Rdr~D zH)xu-9ckq0F)ZK97^}NuLENTuoCQKXKR%8|tqg0wDONWKC$vE83wCVf{$Rf|e1}P6V=3PF3~Paz zZA<7thOckWW*H}+G{nSbXDAtRo?mjnm1zqA&Lt9R%sS_fBK64eBWgN$io)_o3y;GO1r z9k|o@x2M;-642eF)rz4UB5hE{>~*jCxo-!z69!MU=&9;ar?^h4^@<-aTn}+~n3v|# zsaqdQokSFxuDkIZ9Hv3Ncg6NMHitdNyS-efF9qXM_IE-v9k_pxVOYuW%T|+UJ=icD z#`W-_&JHFqlCIiz#Mz^LHM}V03rcpLf7>s9p+KV^L?xM;raBzDL1gOP?6!jIwR4FA zM9>-V?e+SPur_68jsAFx!(tov9-41AEdkTe_%wZ3bnMH0GgiKJR)dLN^15T~ZNkAz zjw9dDTeNC;3PU#>9d)L}TK-a;ZT0+nm)nZ%at2-ev?6&&kD6CQk96rD9U;T`T7@pi;Pf8HU@S5o_(fn~$YzTz) z*wN*URsxsd9H&slZP<^(6>mc6RSV zntdGt%U5;Ws=!}j(CF0O(~`Q;Dc`Y7dp-FC3}|nr>%s5$=K2D2Uyf(yqkC`2$ji*B z1ETvAw{M|c7FxYsiU-S*g%{E0Ni$qFZcP%^Toc6nkf9cKbFhUV`as` zB1diS&BlaGpIYOMZh zJ!0M-YIBPzh%%jeEKsF;JBMPbmnvX}J`Cu+wnl2u8w&R7qrHKUqoc2-=*~+!TI{M? 
z@y}%DQM*e?*w8C5A>(nUZl|TbubsY=7GHwZ6S^yQiGEAU7{+>gPN*o9Ym*l@4@(rO9<=`n6x1hBuOiQcI1^ZftX* zs%9c}B%*J{@T_lLl_qru=%totlwi~;`GoRUo9&sL$6i0}p#zIf3UR6>6RsZJ6O3Zp z;|+1j%i?p&A9T&`A19JBc7N%UHpYQ{EP_|_vZ26jO)BFa_c$ey+UQ}db(bMX9}zw2 z*IKEby6f_@D>VpPS2Mjcs#6*Zq>Ai;Hdn)zr%o_xzrKG?MW2&_n7jL>h$u`8o;fKL zbg@>i@K|iR!u%E&N_*da|4;yK$9UKbWPl)LB_$jNUWXeW&Uz!1z9Ru;IO z;y#)a5CIU1VthY%vK(Z48a5W@p8EwIEgxh?z@=zJ!!2J>NYmLmipM zqH+)Et#_(a^axMFM5ci+wtY1{_F&d*hq2G4>$_+wG(k0dDl#I3>F1ho>joo|u(jEi zQ0!i!>-TKf6%w5De#=yNr30>&IsRQDmJ+WJ?yjMEc;)C$O1|>(ri-%pM!|Z_xP=|= zV1ldhQ`b8Irr0>sH1ekOKa6>lc92M4NEG+sW}nb4YAJ=#c3-<8-&Xt(R_*fPua z)nxrAe}V2xmzLDfZ6BxxM}%_CpM=x;&qWsk4Q zybzvkL+JXAjPzb3#}1E++H5bEBF6Pg3v5_bC^DBE{fR=Y8a15;OUAnG@CYPcKe%^a;GNkT% zjQ4EzicMAy^qnNcf6ecZyi*Cn%cCad{I)JgGV?h+p^|wJE~u{W@^LSERnacoa-Od% zI=KZg?$mh;s+z1suXigWOW+rwbd()snCOSu8fbgF0!dvYoP!NZBv+WsOJ@!ZhDIJ( z#wlUjg+O2z3E7nLi_>?c5ZIK`)EZB)n5Z)0AxgdDFKWd|>SPas8`HPFp&wlzWM=gr z!(B~oAMTybubmfYJ-c=Ox)MWf@_dw!#go>AfhloJTIfO-L5Rv;eZCZXP<=#Dw(lu;eSt=?dlPXwv_y{E@A zJ%ItU`O<=!jA@)Rbc*qpYol@9{^ZxdbKX04{bQ)hWEp!nmDI|(T9b=% zEoD6~>;mkBF#I{^!hUq)KzGI2jChmf{gzvmpcUo31{coVWoxsqNBD&Npu5$#wWJ_$ zmN)VkNMflAQ0nO8&vxFhFSEFnIe2xXS+F*5Xv3<|y^zX0a{m`p5JR8oO9d^Hm40q} z@9dW#3=!&*Y~huKC&yeeDeLF_YiPXXolj$kJ8?rzSF+DaK8@_`zL4Tu-4UgG(sd(s z3FHFb3EF#k*u+I1pMpxK-u<4?1$;>FRK<_K|N16))9&=!M+ZSlFkv&k{Y(vwx%l-) zHix+Sm7ExIQX2!%#xG+)kq!^l{VYny^D6?yahV_M-r&S=%*PTZae#D(uBaV(X5#h$k zqRQb0Kf%RPCd|V9W;fc&ACM)h^+8c#z3V88!pe&Ci=>AMw-zl80!iUwPL?9eB)r(j zqPtVC=H$zUZ)BtJ+jU9|gE7Ja*)jKo+uQg5?14Y@yQ;rxq$G6!lL9L^7_*}@!^;Sx9{6=9r=Z}sx)k|5UiuGGTV2Sc{ zi;`zmuoqLUw#;IzHuo0YlMB|zBw?q4vEWjZqnBEKjzNwL@{Oa|t>hs%K^LsqUKE|l zc0uW=-MFNeQ$@{livIk%M;g9SY{Gs)oYs^# zqIm}JrjXpZR(_n&q;J z?XT4~G@p73BJ40Yt+G#}DW-87Af&kwex58^AhA8qK){X&rAQ*_3LN7pKPnl7m?mhU zSFi{0P66PF$Oaz{^8&6ojff%pkA+D2K=9#~NcxG;Tg8%3N9{UT@i0clDaFF-0jm@2 zJ?cf_V2Aj222obdC?8JnSxRQW8RoHc3+ZVIFB66|L#h;#N~WHK0jM_h3jJ4;%{ZF& zv#vByd7({&O9(|z9+hN}(kB6f20@W}ow6OU1t;Nc!q6&VE>@Qx%`lq!hXU4MQ>S}j z#$MUK1LWtAZ==qT+amOC=iuautmQ#Eg14(j@#DXkSQAxApo>FGtdYW>kW6@a zq1)90hUow{GM73kJmTG8eOvGEKF-~ z?9P__uG=mC2$HXHS%_JY^%2v21&FH}k4nf4XnQOw?(lAa#b?MS>3R-hHeG%stw=s2 z9tpJXodY8IaY1R7ipbWXV6|WmW4WKOPX3=hV8Vjb-f&L+x=;KiI>n;~8p)5!$$A%z zl`gI~p?d3*#eN2uvgExw3gGFc%oJF3*O!ypy7$OleYlfL=#3Qd+Yf;L>V_t4#9+ok z;Sqd#S51zyHy~u13BO1epOTO~lpEMaA;$S#x|N{A9K(ANB9$9scCL37J26g8C&<*$ zD}aN({QO+l5uvOkgo|(PSG=A~C*`WaOm|$yl0J4=*v_S@k--jR)*az>1!``|;n^A< zJw4~xjv?BU+Nq;Dz(-1FNxn!~2To(D%hDT>BjJ(w-)?EcwS4Ip;#}%)mx)W*jpg0b z?Xhl4A1fHM6IHeJ;@9xf=a@@p%53My575b_kD!BCl~5RAU&2y112X;5zC$jhfFj-Y znva@#FmT%dF1RV-cFI(LqBwo(HP4qIO0(>>1a^K@l`nPr1rmQ|Th3BZpW}|X8&#UN z4e>J>$LUmV+QB!#c@BhSHA6pN5WgTx2o>`-$wZ;N)8$_?oWhOm-SqX8)BVi4EMbLS z8IdvU{@(qA+m6h|r5}~*ZZS-L@p9+Xi2B?%HD|-d=kv(M(;T!a+a6SE+>7u-LaJg~iA3dAh6$Hu-5%WkRUaCH9JY&F`liU&?$>b2#md zgeH$iYt;r(I*A`1_1ZJ=-;cHi6uGkuhCbw0--BAioZh$>hWgnnGakq<-^@M)4Z`iFb4{!D=$Pu zt~ac2z}?4r#Sd;yuhrQx_SPNjq_XJDF<&ki5C;NBA_ET9e_O#E{R0FpZW9D{tF7YN zmOyCKpFjcb)vdUV3}5=dx?l+@4ek2vVR`{yRMP{R9BDwFi_l?-iKdgvbH!SkgF3<< zwZ+OJ0&)7rQ2Qj)HeB3-uZNHGYj$Drn0S+~BlZ#V699AJ0Rb(lu2H-A`h}s7m#M6E z{6nid!8Ck11N{!>^Kb{7+9d9cx&^RoY7?^9vI}sC-V?2ko@`wHN!qjP1xtGm$YjX` zV#x000c?Qno+{@MPAaA*u0mk~Heu6^K8x6{@oC|cN*NmleU!*YodebonnW5NHl8=+ z!FllI_f?jYVp_1r`c-=&%}$SYgc$XAXAL=7By4_3T5f^~9qRbP3@tT;H9!N>rzFi1NUf_T^T7V;0IPS}8-SFI(X-VX0DAVfQDIsQo z`ricjYTP5=(=KBXbH}M2@7#t{U|O8IYAP_f%+n-BVej%$<>u`_HnmDiN-7SdiNo2{ z4D(QrW&`1N4mCTK_=Ty#V(+>e7}nGxeb4o^ys7cwDi^vLo)*~{v}flPi-*a6as_U0 zG;L86h;d;;ZXOvP|C2-DlIqA-WiFR^69%)3=uzC3UH=%T8bv>9_TxyzA;VX=tOt-V z>)huTr@v^2Q|<2Jd7UR8gtlhhXOk)^`6#xD=CeS`H`FPU7IcsSRl|E77RDUh+KZTK 
zED)!pzc_q`AmV`=FS_HEo5%c05Sv(vzLXF3`(R|4$9ywPh062De_U==0wk~mFz^Px z-OBBiXfmBRNY{e;Z92Z;Gtk%6@NBLPMAW=gR5h_xpJ-)&T;`1qHtZd*^z4m9Hq~1g z1|#GK3+VWAHgN(OLv`>=n*f%Uq|ans`x3XoXY~Gtf0Wiq1IMLRC>-X8DxWRR*(kId zC~&orUH~9XR^G+S49A|Gy$NvDutkp^A8T_H#%m%^=^-);N~2`+lj*2N$FVH|;3!Dp zceeT7!CVFVK z9Q`XpY>e~0_inRv-md7{^ls;-G?NYK5_!FIPwugn{$t?;2V+3uN)Lb|2f1vS@XEk# z)caiM2Mk6%7X z9$}eRBrhFhpswz`(8zeS*NIRWc^Z>CkgX^P!H3u=8&B3I*G@$JyyqQFlZbTMTt%;X zIaoZY%}=(Z@|;*6)Ostvx%K4Y#YY_3B2+TwxvY&F<2TIa20Jxvz4kr{wdQRbLy<#r z-#gycpRDrK!@J*9tZ;6FmP-zRbWX)5mz+F%t2fCLllB8&Y`)nUBE`dyhoyFh-vMlu zr7mdng65o`jy}XSTlVL|XZDRU>AF!WUq07dd@O{x+TbC zD#(e@9*xsd)vaBthxaxLqG~F8)XTmft%}AzUtcZeq=WY_PCkGB>EQ^`PCfENHhA&c z4V~2;*ba^gzkPSK}( zA#i1-@DKMMmgWx`zA)I|A~Wngar1$~K8|K=jo8T_Rzn5~B5@PNFq6*ruD-MKokf?( zC{Q~)?h1+ag}b5M+>c-}{YxqbJ3X~Cu`%F`0v47w$~3UuM{)wd^Nl!x6;_ zcrZBZMZ&t(x|-K)uDAYp{+W~O0PA-u2u=v~Va7X59IBb6)8=J5y_6qsk*-@OJH?^H zO~c7mrn`-eayRGFLuTKh{FN#SMHcpBsv{ZEHXTCk1-akAHkIlQ8)lcZv$Htyq!K=K z*omBF4C`ZauKyJ$yWKDodmS@liorC*6QaQmeNoZa=Z%iuChhH3tzv}^>(@^jJx{|S zc@r!!C9|zJp4XLeXnp}PmNxWb>OTRwQL9K^(2)lzoEZk`jC^z;nx>Ajz1>^MK2@j= z`at7&n|phQahYv`PHkqrQZ;L@%+ z4!wciS%Hv;Y?bF@$F5GaC@=-Z7xh!Rjh&}h=AHkx+UPxBGqE}%7+u7sUc>ycm`q)3 zI8jU8=XkEHcxYwvb6y`~-`5m|p>J2k$zoMMhO28Vxw18S%|{7+I_zssA7^maFVWJt zCDT5}wH=U>QfI_n?xZ%&RqS3ip;B&#ZjzKFv#LW}gi^P!6yJsx&VzCM@{`cU@`SXD<)f7a=N{ZIU7}7RpXF&Rj6Q~nr%=JI z-GdRi`2`P51rET3Dg*N?g5^fN*@nf-Imdead2E@HOX;(2{o7ys8s#p8*jQr&1B?W3 zliN&#jIag}96HoF7IrdhdCT?4~GQBpo>*+bu zxx--aLWg>2J8`X)EziSAC8^2kA~BkSSl$PKAO@EB6}Kkw8f+$Tt6dvvt$6;aOnk5T zxPNyip$$EV!H72U#K~F|9Ngyr>PjN1kcBnT(!d?1lWJ0HArM7zS30>)FcKdo6Xv(EfH zf)-#?JBxfrygl@+w#QNq`X#mPVy0a0sm(Aj^hcI?{M}Ykm%dY*wCSn7S^2A@M`t*2 zfeK9OOqZ6=oZAi%3T0=|MX^pIs^Oy|hx)hHn>c@b@`=UJ9s7wol9IenJB~gnx_n+B zUfoVmy$O}Wg5oT%7uRMSiuZe!RMiF7+Az_lxV+kdBqDW#qDVALsy6H9R!Z0Xs!^4# znv$1{<8p+l47BJCv|we_2{uxeQuon=FY7s4vhzpn45E34=ANzBnB1Jq9rhxFam;5%pjd(Mq-cHWi09+GlLOQ#%Z`dJCZW>4+q|7V zYGj29lUzcV$Yq-NE9r>P+r-lKJo~KwzURua+y^g(L?Pf|)I<^9%_Ik;yI_3&1g(LG z<^fQlw)ESUo&ArK514@2RQ?ds>T*V87pjm&4AKOJn*%WTAcmxVxqmzSmBZ6$Um{j3v6SBdUEdgY+n$>iYs+U=0Vz&KHG-)J zql=TZ>*9Yx?Z8v)!0N~ekk&9KUwH*RyiEN0@~iLRdmEZqe+bYVKY{njb9a_ z*y;FF9i?=f$rSCN$fQ-l-?T301lbMYJrprs}=2$XjZmry*bT7yU?zL4a6}N>MwXg(5hb z>LQU6Pu0_)={%oz{k!h}-2wkapqHMqCP5n=RFV!io!J1ZguTPeG-kfVr7~dh|883t zEkAJFv&)3XjhR&|hl~0BICz(@2DZTag7V#agbQ43G>IVTph(6WlM7SMjkO=%Odt2N zbbgozeuRE4cz};ybTUYK_|MQkWdyxFsI^WKJ1;~M7G^@LRSW_?cD`lZLU?Pz@u9r9_0v6cOHW8=oEqo zq{IH^!MJXm^RM;2gICMGsjt%zJQw@2)2T1 z2R=3)0CQ7KP}PM#0eppWF77alvYC>&_Z*!4Mtm5EZEzM&r6q)6*rmnGR#4xmHN5)? 
zwBQc7bKwy)D=x?0&gAy^-$;*Ht<%{wef<#C`MfqR;o1|{{lkoL)o0+-mOsWk)0F9_ z_$C(q87IqRu*$r^6x5l2T&S>9GrmO%@N0zhpAet6dR4UX%U?-M6{Ay44Eh3v;}$Tk z{Nx>l7#RCYvvTG$G|&R-zf{1u>YlETcb}bm+oc3YtN+bW{x<&_>9f3?Ja3OJ|s%7{@)64sp5u${}sU#ZLpro_c8>$2AB6Gxz3I2Q>$L zGS8iRqc&QFAVP{*)<^-dZl8lZEvD&hJR_`JM)8pSq3=r!14=d))ZvM$*_>(I-Lgtz zz*MovzW(bal0OEiY0PhDMaxVfVF4dy&uzmYoPm}B^d0e~k-UZm)8ciZLV#;M`1dPO zw*i?=7t|A?fHqHmr=_}ZV&%7VJ(cW1pmE1(68{&ul!RCl1U7MkMZB%>0g=0jMNk(U zE|MY{ixz5vwY?wT%2}?XaA)vv7=8i(hb3LCG4P2#j(P=E+y9G@~|!ggCU+x=K9eCFWkRCCf9md5SlYc_h2J9anNvAAR3j;F0!DSWfWg#PI5Uvq%u?rS**l9WKIyQ3Jz z9l=aR=mkq%ol-w{v+l!s^4NTU2u?bcVXq-{PXum~N>$dGJZm2K_gLSV0TuHKgy!V# zk6VcYX*OGqNcy$Ujy)}L`(dV@w!Vs*auzrn`BN@sIF;wcri-OeUXX*W8)FEnO2=(YA$FGDlzxgm}t`VbMncWW@Iu1M$o8(l}B~&L4_QX&1`T7l_&P z#KIF(r*w4oi_Pqp?ehnxNNzTKrRaJl@2q3m zi49gOi5TKp^~kWpKv50`A?ycko^CvH^)gnU#RtnUsDGGQzRtMWt5&G Iq>NtwANH_Qg#Z8m literal 0 HcmV?d00001 From e3420264cca007a0dbb6ef4ad897404a2b1d4ce4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Fri, 8 Oct 2021 11:28:16 +0300 Subject: [PATCH 19/33] store: discard unneeded information directly (#4750) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Discard unneeded data directly by calling Discard instead of copying to `io.Discard`. The latter has a `sync.Pool` underneath from which it retrieves byte slices into which data is read into, and after that it is immediately discarded. So, save some time by just discarding unneeded bytes directly. Comparison: ``` name old time/op new time/op delta BlockSeries/concurrency:_1-16 8.81ms ± 3% 8.35ms ± 7% -5.26% (p=0.000 n=69+76) BlockSeries/concurrency:_2-16 4.76ms ± 5% 4.36ms ± 5% -8.41% (p=0.000 n=80+74) BlockSeries/concurrency:_4-16 2.83ms ± 4% 2.70ms ± 6% -4.82% (p=0.000 n=77+80) BlockSeries/concurrency:_8-16 2.24ms ± 7% 2.21ms ± 5% -1.20% (p=0.002 n=80+78) BlockSeries/concurrency:_16-16 2.36ms ± 7% 2.24ms ± 8% -5.29% (p=0.000 n=78+76) BlockSeries/concurrency:_32-16 3.53ms ±10% 3.42ms ± 9% -3.23% (p=0.000 n=79+80) name old alloc/op new alloc/op delta BlockSeries/concurrency:_1-16 5.19MB ± 8% 5.17MB ± 5% ~ (p=0.243 n=79+76) BlockSeries/concurrency:_2-16 5.34MB ± 6% 5.27MB ± 8% -1.31% (p=0.006 n=79+79) BlockSeries/concurrency:_4-16 5.28MB ±10% 5.28MB ± 9% ~ (p=0.641 n=80+79) BlockSeries/concurrency:_8-16 5.33MB ±12% 5.39MB ± 8% ~ (p=0.143 n=80+77) BlockSeries/concurrency:_16-16 6.39MB ± 9% 6.16MB ±12% -3.66% (p=0.000 n=75+78) BlockSeries/concurrency:_32-16 9.20MB ±18% 9.03MB ±18% ~ (p=0.061 n=79+80) name old allocs/op new allocs/op delta BlockSeries/concurrency:_1-16 31.6k ± 4% 31.7k ± 3% ~ (p=0.325 n=80+76) BlockSeries/concurrency:_2-16 31.9k ± 2% 30.9k ± 3% -3.37% (p=0.000 n=80+75) BlockSeries/concurrency:_4-16 32.4k ± 3% 31.9k ± 4% -1.39% (p=0.000 n=80+80) BlockSeries/concurrency:_8-16 32.2k ± 6% 32.5k ± 4% +0.96% (p=0.011 n=78+80) BlockSeries/concurrency:_16-16 35.0k ± 7% 33.7k ± 8% -3.70% (p=0.000 n=78+76) BlockSeries/concurrency:_32-16 51.6k ± 8% 50.6k ±10% -1.81% (p=0.012 n=80+80) ``` Signed-off-by: Giedrius Statkevičius --- pkg/store/bucket.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index d181f047315..39d78b257f7 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -2495,7 +2495,7 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, a readOffset = int(pIdxs[0].offset) // Save a few allocations. 
-			written  int64
+			written  int
 			diff     uint32
 			chunkLen int
 			n        int
@@ -2504,11 +2504,11 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, a
 	for i, pIdx := range pIdxs {
 		// Fast forward range reader to the next chunk start in case of sparse (for our purposes) byte range.
 		for readOffset < int(pIdx.offset) {
-			written, err = io.CopyN(ioutil.Discard, bufReader, int64(pIdx.offset)-int64(readOffset))
+			written, err = bufReader.Discard(int(pIdx.offset) - int(readOffset))
 			if err != nil {
 				return errors.Wrap(err, "fast forward range reader")
 			}
-			readOffset += int(written)
+			readOffset += written
 		}
 		// Presume chunk length to be reasonably large for common use cases.
 		// However, declaration for EstimatedMaxChunkSize warns us some chunks could be larger in some rare cases.

From dde2cae91808adcaa31391c58f5db5a2b9f0cc01 Mon Sep 17 00:00:00 2001
From: aymericDD
Date: Fri, 8 Oct 2021 19:32:08 +0200
Subject: [PATCH 20/33] store: validate --block-sync-concurrency parameter
 (#4753)

* store: validate block sync concurrency parameter

Must be greater than or equal to 1 to avoid blocking the program.

Signed-off-by: Aymeric

* docs: update docs (#4753)

Signed-off-by: Aymeric

Co-authored-by: Aymeric
---
 CHANGELOG.md             |  1 +
 cmd/thanos/store.go      |  2 +-
 docs/components/store.md |  1 +
 pkg/store/bucket.go      | 17 +++++++++++++++++
 pkg/store/bucket_test.go | 26 ++++++++++++++++++++++++++
 5 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d8d67abc400..a2695830e66 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
 ### Fixed
 
 - [#4663](https://github.com/thanos-io/thanos/pull/4663) Fetcher: Fix discovered data races
+- [#4753](https://github.com/thanos-io/thanos/pull/4753) Store: valide block sync concurrency parameter
 
 ### Added
 
diff --git a/cmd/thanos/store.go b/cmd/thanos/store.go
index 19b6dbf8370..25310f9ef3b 100644
--- a/cmd/thanos/store.go
+++ b/cmd/thanos/store.go
@@ -113,7 +113,7 @@ func (sc *storeConfig) registerFlag(cmd extkingpin.FlagClause) {
 	cmd.Flag("sync-block-duration", "Repeat interval for syncing the blocks between local and remote view.").
 		Default("3m").DurationVar(&sc.syncInterval)
 
-	cmd.Flag("block-sync-concurrency", "Number of goroutines to use when constructing index-cache.json blocks from object storage.").
+	cmd.Flag("block-sync-concurrency", "Number of goroutines to use when constructing index-cache.json blocks from object storage. Must be equal or greater than 1.").
 		Default("20").IntVar(&sc.blockSyncConcurrency)
 
 	cmd.Flag("block-meta-fetch-concurrency", "Number of goroutines to use when fetching block metadata from object storage.").
diff --git a/docs/components/store.md b/docs/components/store.md
index 3ae037beb90..cdc86050524 100644
--- a/docs/components/store.md
+++ b/docs/components/store.md
@@ -34,6 +34,7 @@ Flags:
       --block-sync-concurrency=20
                                  Number of goroutines to use when constructing
                                  index-cache.json blocks from object storage.
+                                 Must be equal or greater than 1.
       --chunk-pool-size=2GB      Maximum size of concurrently allocatable
                                  bytes reserved strictly to reuse for chunks
                                  in memory.
diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go
index 39d78b257f7..3af0c179ec6 100644
--- a/pkg/store/bucket.go
+++ b/pkg/store/bucket.go
@@ -92,6 +92,12 @@ const (
 	// Labels for metrics.
labelEncode = "encode" labelDecode = "decode" + + minBlockSyncConcurrency = 1 +) + +var ( + errBlockSyncConcurrencyNotValid = errors.New("the block sync concurrency must be equal or greater than 1.") ) type bucketStoreMetrics struct { @@ -298,6 +304,13 @@ type BucketStore struct { enableSeriesResponseHints bool } +func (b *BucketStore) validate() error { + if b.blockSyncConcurrency < minBlockSyncConcurrency { + return errBlockSyncConcurrencyNotValid + } + return nil +} + type noopCache struct{} func (noopCache) StorePostings(context.Context, ulid.ULID, labels.Label, []byte) {} @@ -407,6 +420,10 @@ func NewBucketStore( s.indexReaderPool = indexheader.NewReaderPool(s.logger, lazyIndexReaderEnabled, lazyIndexReaderIdleTimeout, indexReaderPoolMetrics) s.metrics = newBucketStoreMetrics(s.reg) // TODO(metalmatze): Might be possible via Option too + if err := s.validate(); err != nil { + return nil, errors.Wrap(err, "validate config") + } + if err := os.MkdirAll(dir, 0750); err != nil { return nil, errors.Wrap(err, "create dir") } diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index e635ab22c1e..adc57017409 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -556,6 +556,32 @@ func TestGapBasedPartitioner_Partition(t *testing.T) { } } +func TestBucketStoreConfig_validate(t *testing.T) { + tests := map[string]struct { + config *BucketStore + expected error + }{ + "should pass on valid config": { + config: &BucketStore{ + blockSyncConcurrency: 1, + }, + expected: nil, + }, + "should fail on blockSyncConcurrency < 1": { + config: &BucketStore{ + blockSyncConcurrency: 0, + }, + expected: errBlockSyncConcurrencyNotValid, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + testutil.Equals(t, testData.expected, testData.config.validate()) + }) + } +} + func TestBucketStore_Info(t *testing.T) { defer testutil.TolerantVerifyLeak(t) From d5156d8e10f8f6bfc880ccf98b0cbf37dd0b1304 Mon Sep 17 00:00:00 2001 From: ian woolf Date: Sun, 10 Oct 2021 17:27:32 +0800 Subject: [PATCH 21/33] pkg/block: childSources in addNodeBySources do not need to be assigned every time (#4758) Signed-off-by: ian woolf --- pkg/block/fetcher.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/block/fetcher.go b/pkg/block/fetcher.go index f9f45202d99..77091ecbffc 100644 --- a/pkg/block/fetcher.go +++ b/pkg/block/fetcher.go @@ -660,9 +660,9 @@ func (f *DeduplicateFilter) DuplicateIDs() []ulid.ULID { func addNodeBySources(root, add *Node) bool { var rootNode *Node + childSources := add.Compaction.Sources for _, node := range root.Children { parentSources := node.Compaction.Sources - childSources := add.Compaction.Sources // Block exists with same sources, add as child. 
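 		// contains checks inclusion in one direction only, so testing both
 		// directions verifies that the parent and child source sets are identical.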
if contains(parentSources, childSources) && contains(childSources, parentSources) { From 7dee6fa1568256e282b1f07a9c90ef092d70ffcb Mon Sep 17 00:00:00 2001 From: Matej Gera <38492574+matej-g@users.noreply.github.com> Date: Tue, 12 Oct 2021 11:28:57 +0200 Subject: [PATCH 22/33] Tests: Attempt to fix flaky test in reloader on directories changes (#4765) * Fix by catching up on missed steps Signed-off-by: Matej Gera * Review feedback - simplify mutex unlock Signed-off-by: Matej Gera --- pkg/reloader/reloader_test.go | 165 ++++++++++++++++++---------------- 1 file changed, 87 insertions(+), 78 deletions(-) diff --git a/pkg/reloader/reloader_test.go b/pkg/reloader/reloader_test.go index 6659d20cc7f..25a0af5ae9b 100644 --- a/pkg/reloader/reloader_test.go +++ b/pkg/reloader/reloader_test.go @@ -247,6 +247,84 @@ func TestReloader_DirectoriesApply(t *testing.T) { testutil.Ok(t, os.Symlink(path.Join(dir2, "rule3-source.yaml"), path.Join(dir2, "rule3-001.yaml"))) testutil.Ok(t, ioutil.WriteFile(path.Join(dir2, "rule-dir", "rule4.yaml"), []byte("rule4"), os.ModePerm)) + stepFunc := func(rel int) { + t.Log("Performing step number", rel) + switch rel { + case 0: + // Create rule2.yaml. + // + // dir + // ├─ rule-dir -> dir2/rule-dir + // ├─ rule1.yaml + // └─ rule2.yaml (*) + // dir2 + // ├─ rule-dir + // │ └─ rule4.yaml + // ├─ rule3-001.yaml -> rule3-source.yaml + // └─ rule3-source.yaml + testutil.Ok(t, ioutil.WriteFile(path.Join(dir, "rule2.yaml"), []byte("rule2"), os.ModePerm)) + case 1: + // Update rule1.yaml. + // + // dir + // ├─ rule-dir -> dir2/rule-dir + // ├─ rule1.yaml (*) + // └─ rule2.yaml + // dir2 + // ├─ rule-dir + // │ └─ rule4.yaml + // ├─ rule3-001.yaml -> rule3-source.yaml + // └─ rule3-source.yaml + testutil.Ok(t, os.Rename(tempRule1File, path.Join(dir, "rule1.yaml"))) + case 2: + // Create dir/rule3.yaml (symlink to rule3-001.yaml). + // + // dir + // ├─ rule-dir -> dir2/rule-dir + // ├─ rule1.yaml + // ├─ rule2.yaml + // └─ rule3.yaml -> dir2/rule3-001.yaml (*) + // dir2 + // ├─ rule-dir + // │ └─ rule4.yaml + // ├─ rule3-001.yaml -> rule3-source.yaml + // └─ rule3-source.yaml + testutil.Ok(t, os.Symlink(path.Join(dir2, "rule3-001.yaml"), path.Join(dir2, "rule3.yaml"))) + testutil.Ok(t, os.Rename(path.Join(dir2, "rule3.yaml"), path.Join(dir, "rule3.yaml"))) + case 3: + // Update the symlinked file and replace the symlink file to trigger fsnotify. + // + // dir + // ├─ rule-dir -> dir2/rule-dir + // ├─ rule1.yaml + // ├─ rule2.yaml + // └─ rule3.yaml -> dir2/rule3-002.yaml (*) + // dir2 + // ├─ rule-dir + // │ └─ rule4.yaml + // ├─ rule3-002.yaml -> rule3-source.yaml (*) + // └─ rule3-source.yaml (*) + testutil.Ok(t, os.Rename(tempRule3File, path.Join(dir2, "rule3-source.yaml"))) + testutil.Ok(t, os.Symlink(path.Join(dir2, "rule3-source.yaml"), path.Join(dir2, "rule3-002.yaml"))) + testutil.Ok(t, os.Symlink(path.Join(dir2, "rule3-002.yaml"), path.Join(dir2, "rule3.yaml"))) + testutil.Ok(t, os.Rename(path.Join(dir2, "rule3.yaml"), path.Join(dir, "rule3.yaml"))) + testutil.Ok(t, os.Remove(path.Join(dir2, "rule3-001.yaml"))) + case 4: + // Update rule4.yaml in the symlinked directory. 
+ // + // dir + // ├─ rule-dir -> dir2/rule-dir + // ├─ rule1.yaml + // ├─ rule2.yaml + // └─ rule3.yaml -> rule3-source.yaml + // dir2 + // ├─ rule-dir + // │ └─ rule4.yaml (*) + // └─ rule3-source.yaml + testutil.Ok(t, os.Rename(tempRule4File, path.Join(dir2, "rule-dir", "rule4.yaml"))) + } + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) g := sync.WaitGroup{} g.Add(1) @@ -267,90 +345,21 @@ func TestReloader_DirectoriesApply(t *testing.T) { reloadsMtx.Lock() rel := reloads + reloadsMtx.Unlock() if init && rel <= reloadsSeen { - reloadsMtx.Unlock() continue } - reloadsMtx.Unlock() - init = true - reloadsSeen = rel - t.Log("Performing step number", rel) - switch rel { - case 0: - // Create rule2.yaml. - // - // dir - // ├─ rule-dir -> dir2/rule-dir - // ├─ rule1.yaml - // └─ rule2.yaml (*) - // dir2 - // ├─ rule-dir - // │ └─ rule4.yaml - // ├─ rule3-001.yaml -> rule3-source.yaml - // └─ rule3-source.yaml - testutil.Ok(t, ioutil.WriteFile(path.Join(dir, "rule2.yaml"), []byte("rule2"), os.ModePerm)) - case 1: - // Update rule1.yaml. - // - // dir - // ├─ rule-dir -> dir2/rule-dir - // ├─ rule1.yaml (*) - // └─ rule2.yaml - // dir2 - // ├─ rule-dir - // │ └─ rule4.yaml - // ├─ rule3-001.yaml -> rule3-source.yaml - // └─ rule3-source.yaml - testutil.Ok(t, os.Rename(tempRule1File, path.Join(dir, "rule1.yaml"))) - case 2: - // Create dir/rule3.yaml (symlink to rule3-001.yaml). - // - // dir - // ├─ rule-dir -> dir2/rule-dir - // ├─ rule1.yaml - // ├─ rule2.yaml - // └─ rule3.yaml -> dir2/rule3-001.yaml (*) - // dir2 - // ├─ rule-dir - // │ └─ rule4.yaml - // ├─ rule3-001.yaml -> rule3-source.yaml - // └─ rule3-source.yaml - testutil.Ok(t, os.Symlink(path.Join(dir2, "rule3-001.yaml"), path.Join(dir2, "rule3.yaml"))) - testutil.Ok(t, os.Rename(path.Join(dir2, "rule3.yaml"), path.Join(dir, "rule3.yaml"))) - case 3: - // Update the symlinked file and replace the symlink file to trigger fsnotify. - // - // dir - // ├─ rule-dir -> dir2/rule-dir - // ├─ rule1.yaml - // ├─ rule2.yaml - // └─ rule3.yaml -> dir2/rule3-002.yaml (*) - // dir2 - // ├─ rule-dir - // │ └─ rule4.yaml - // ├─ rule3-002.yaml -> rule3-source.yaml (*) - // └─ rule3-source.yaml (*) - testutil.Ok(t, os.Rename(tempRule3File, path.Join(dir2, "rule3-source.yaml"))) - testutil.Ok(t, os.Symlink(path.Join(dir2, "rule3-source.yaml"), path.Join(dir2, "rule3-002.yaml"))) - testutil.Ok(t, os.Symlink(path.Join(dir2, "rule3-002.yaml"), path.Join(dir2, "rule3.yaml"))) - testutil.Ok(t, os.Rename(path.Join(dir2, "rule3.yaml"), path.Join(dir, "rule3.yaml"))) - testutil.Ok(t, os.Remove(path.Join(dir2, "rule3-001.yaml"))) - case 4: - // Update rule4.yaml in the symlinked directory. - // - // dir - // ├─ rule-dir -> dir2/rule-dir - // ├─ rule1.yaml - // ├─ rule2.yaml - // └─ rule3.yaml -> rule3-source.yaml - // dir2 - // ├─ rule-dir - // │ └─ rule4.yaml (*) - // └─ rule3-source.yaml - testutil.Ok(t, os.Rename(tempRule4File, path.Join(dir2, "rule-dir", "rule4.yaml"))) + // Catch up if reloader is step(s) ahead. + for skipped := rel - reloadsSeen - 1; skipped > 0; skipped-- { + stepFunc(rel - skipped) } + stepFunc(rel) + + init = true + reloadsSeen = rel + if rel > 4 { // All good. 
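 				// All five steps (0-4) have been applied; stop waiting for reloads.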
return From d2b5bc0500103020b0a30c45c4bec768ad65d9d2 Mon Sep 17 00:00:00 2001 From: Matej Gera <38492574+matej-g@users.noreply.github.com> Date: Wed, 13 Oct 2021 07:30:45 +0200 Subject: [PATCH 23/33] Query: Fix panic on stores endpoint (#4754) * Do not panic on missing component type - Skip an endpoint from stores if component type is nil - Simplify - pass only endpoint status func instead of endpoints Signed-off-by: Matej Gera * Add tests Signed-off-by: Matej Gera * Update CHANGELOG Signed-off-by: Matej Gera --- CHANGELOG.md | 8 ++-- cmd/thanos/query.go | 2 +- pkg/api/query/v1.go | 14 ++++--- pkg/api/query/v1_test.go | 87 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 100 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a2695830e66..87d8c82db3f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,11 +10,6 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re ## Unreleased -### Fixed - -- [#4663](https://github.com/thanos-io/thanos/pull/4663) Fetcher: Fix discovered data races -- [#4753](https://github.com/thanos-io/thanos/pull/4753) Store: valide block sync concurrency parameter - ### Added - [#4680](https://github.com/thanos-io/thanos/pull/4680) Query: add `exemplar.partial-response` flag to control partial response. @@ -24,6 +19,9 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re ### Fixed - [#4508](https://github.com/thanos-io/thanos/pull/4508) Adjust and rename `ThanosSidecarUnhealthy` to `ThanosSidecarNoConnectionToStartedPrometheus`; Remove `ThanosSidecarPrometheusDown` alert; Remove unused `thanos_sidecar_last_heartbeat_success_time_seconds` metrics. +- [#4663](https://github.com/thanos-io/thanos/pull/4663) Fetcher: Fix discovered data races. +- [#4754](https://github.com/thanos-io/thanos/pull/4754) Query: Fix possible panic on stores endpoint. +- [#4753](https://github.com/thanos-io/thanos/pull/4753) Store: validate block sync concurrency parameter ## [v0.23.1](https://github.com/thanos-io/thanos/tree/release-0.23) - 2021.10.1 diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 8b280229b2b..373cd579134 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -569,7 +569,7 @@ func runQuery( api := v1.NewQueryAPI( logger, - endpoints, + endpoints.GetEndpointStatus, engineFactory(promql.NewEngine, engineOpts, dynamicLookbackDelta), queryableCreator, // NOTE: Will share the same replica label as the query for now. diff --git a/pkg/api/query/v1.go b/pkg/api/query/v1.go index 4f3866b62b7..fd9ee99b683 100644 --- a/pkg/api/query/v1.go +++ b/pkg/api/query/v1.go @@ -93,8 +93,8 @@ type QueryAPI struct { enableExemplarPartialResponse bool disableCORS bool - replicaLabels []string - endpointSet *query.EndpointSet + replicaLabels []string + endpointStatus func() []query.EndpointStatus defaultRangeQueryStep time.Duration defaultInstantQueryMaxSourceResolution time.Duration @@ -106,7 +106,7 @@ type QueryAPI struct { // NewQueryAPI returns an initialized QueryAPI type. 
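 // It takes a func returning the current endpoint statuses rather than the
 // EndpointSet itself, which keeps the API decoupled from endpoint discovery
 // and easy to stub in tests.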
func NewQueryAPI( logger log.Logger, - endpointSet *query.EndpointSet, + endpointStatus func() []query.EndpointStatus, qe func(int64) *promql.Engine, c query.QueryableCreator, ruleGroups rules.UnaryClient, @@ -146,7 +146,7 @@ func NewQueryAPI( enableMetricMetadataPartialResponse: enableMetricMetadataPartialResponse, enableExemplarPartialResponse: enableExemplarPartialResponse, replicaLabels: replicaLabels, - endpointSet: endpointSet, + endpointStatus: endpointStatus, defaultRangeQueryStep: defaultRangeQueryStep, defaultInstantQueryMaxSourceResolution: defaultInstantQueryMaxSourceResolution, defaultMetadataTimeRange: defaultMetadataTimeRange, @@ -715,7 +715,11 @@ func (qapi *QueryAPI) labelNames(r *http.Request) (interface{}, []error, *api.Ap func (qapi *QueryAPI) stores(_ *http.Request) (interface{}, []error, *api.ApiError) { statuses := make(map[string][]query.EndpointStatus) - for _, status := range qapi.endpointSet.GetEndpointStatus() { + for _, status := range qapi.endpointStatus() { + // Don't consider an endpoint if we cannot retrieve component type. + if status.ComponentType == nil { + continue + } statuses[status.ComponentType.String()] = append(statuses[status.ComponentType.String()], status) } return statuses, nil, nil diff --git a/pkg/api/query/v1_test.go b/pkg/api/query/v1_test.go index a9f0648f425..218498fe819 100644 --- a/pkg/api/query/v1_test.go +++ b/pkg/api/query/v1_test.go @@ -1201,6 +1201,93 @@ func TestMetadataEndpoints(t *testing.T) { } } +func TestStoresEndpoint(t *testing.T) { + apiWithNotEndpoints := &QueryAPI{ + endpointStatus: func() []query.EndpointStatus { + return []query.EndpointStatus{} + }, + } + apiWithValidEndpoints := &QueryAPI{ + endpointStatus: func() []query.EndpointStatus { + return []query.EndpointStatus{ + { + Name: "endpoint-1", + ComponentType: component.Store, + }, + { + Name: "endpoint-2", + ComponentType: component.Store, + }, + { + Name: "endpoint-3", + ComponentType: component.Sidecar, + }, + } + }, + } + apiWithInvalidEndpoint := &QueryAPI{ + endpointStatus: func() []query.EndpointStatus { + return []query.EndpointStatus{ + { + Name: "endpoint-1", + ComponentType: component.Store, + }, + { + Name: "endpoint-2", + }, + } + }, + } + + testCases := []endpointTestCase{ + { + endpoint: apiWithNotEndpoints.stores, + method: http.MethodGet, + response: map[string][]query.EndpointStatus{}, + }, + { + endpoint: apiWithValidEndpoints.stores, + method: http.MethodGet, + response: map[string][]query.EndpointStatus{ + "store": { + { + Name: "endpoint-1", + ComponentType: component.Store, + }, + { + Name: "endpoint-2", + ComponentType: component.Store, + }, + }, + "sidecar": { + { + Name: "endpoint-3", + ComponentType: component.Sidecar, + }, + }, + }, + }, + { + endpoint: apiWithInvalidEndpoint.stores, + method: http.MethodGet, + response: map[string][]query.EndpointStatus{ + "store": { + { + Name: "endpoint-1", + ComponentType: component.Store, + }, + }, + }, + }, + } + + for i, test := range testCases { + if ok := testEndpoint(t, test, strings.TrimSpace(fmt.Sprintf("#%d %s", i, test.query.Encode())), reflect.DeepEqual); !ok { + return + } + } +} + func TestParseTime(t *testing.T) { ts, err := time.Parse(time.RFC3339Nano, "2015-06-03T13:21:58.555Z") if err != nil { From 1af2000a84563ecadd265de5b4338e74cca13dd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?= Date: Wed, 13 Oct 2021 16:57:04 +0300 Subject: [PATCH 24/33] store: get buf from chunk pool (#4755) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Get the `buf` from the chunkPool. With lots of concurrent `loadChunks`, the `make()` has a cost. Benchmark diff: ``` name old time/op new time/op delta BlockSeries/concurrency:_1-16 8.35ms ± 7% 7.80ms ± 6% -6.59% (p=0.000 n=76+75) BlockSeries/concurrency:_2-16 4.36ms ± 5% 4.30ms ± 4% -1.42% (p=0.000 n=74+79) BlockSeries/concurrency:_4-16 2.70ms ± 6% 2.63ms ± 5% -2.48% (p=0.000 n=80+77) BlockSeries/concurrency:_8-16 2.21ms ± 5% 2.23ms ± 7% ~ (p=0.055 n=78+78) BlockSeries/concurrency:_16-16 2.24ms ± 8% 2.22ms ± 8% ~ (p=0.265 n=76+78) BlockSeries/concurrency:_32-16 3.42ms ± 9% 3.39ms ±11% ~ (p=0.367 n=80+80) name old alloc/op new alloc/op delta BlockSeries/concurrency:_1-16 5.17MB ± 5% 4.95MB ± 7% -4.16% (p=0.000 n=76+78) BlockSeries/concurrency:_2-16 5.27MB ± 8% 5.10MB ± 7% -3.16% (p=0.000 n=79+78) BlockSeries/concurrency:_4-16 5.28MB ± 9% 4.92MB ± 8% -6.88% (p=0.000 n=79+79) BlockSeries/concurrency:_8-16 5.39MB ± 8% 5.14MB ± 9% -4.71% (p=0.000 n=77+80) BlockSeries/concurrency:_16-16 6.16MB ±12% 5.89MB ±12% -4.39% (p=0.000 n=78+78) BlockSeries/concurrency:_32-16 9.03MB ±18% 8.88MB ±18% ~ (p=0.137 n=80+80) name old allocs/op new allocs/op delta BlockSeries/concurrency:_1-16 31.7k ± 3% 31.1k ± 3% -1.93% (p=0.000 n=76+80) BlockSeries/concurrency:_2-16 30.9k ± 3% 31.3k ± 4% +1.58% (p=0.000 n=75+80) BlockSeries/concurrency:_4-16 31.9k ± 4% 31.5k ± 4% -1.21% (p=0.000 n=80+80) BlockSeries/concurrency:_8-16 32.5k ± 4% 32.5k ± 4% ~ (p=0.805 n=80+78) BlockSeries/concurrency:_16-16 33.7k ± 8% 33.9k ± 7% ~ (p=0.412 n=76+77) BlockSeries/concurrency:_32-16 50.6k ±10% 50.7k ±11% ~ (p=0.918 n=80+80) ``` Signed-off-by: Giedrius Statkevičius --- pkg/store/bucket.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index 3af0c179ec6..be0e4bec97b 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -2508,7 +2508,7 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, a r.stats.chunksFetchedSizeSum += int(part.End - part.Start) var ( - buf = make([]byte, EstimatedMaxChunkSize) + buf []byte readOffset = int(pIdxs[0].offset) // Save a few allocations. @@ -2518,6 +2518,14 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, res []seriesEntry, a n int ) + bufPooled, err := r.block.chunkPool.Get(EstimatedMaxChunkSize) + if err == nil { + buf = *bufPooled + } else { + buf = make([]byte, EstimatedMaxChunkSize) + } + defer r.block.chunkPool.Put(&buf) + for i, pIdx := range pIdxs { // Fast forward range reader to the next chunk start in case of sparse (for our purposes) byte range. 
	for readOffset < int(pIdx.offset) {

From f8927b9a656d6920afb8ce7a8b6e728a8a377531 Mon Sep 17 00:00:00 2001
From: as5764 <31504606+as5764@users.noreply.github.com>
Date: Wed, 13 Oct 2021 23:37:33 +0530
Subject: [PATCH 25/33] Added darwinbox logo for thanos webpage (#4771)

Co-authored-by: Anurag Sharma
---
 website/data/adopters.yml          |   5 ++++-
 website/static/logos/darwinbox.png | Bin 0 -> 11874 bytes
 2 files changed, 4 insertions(+), 1 deletion(-)
 create mode 100644 website/static/logos/darwinbox.png

diff --git a/website/data/adopters.yml b/website/data/adopters.yml
index 4274e443dfb..86762b08431 100644
--- a/website/data/adopters.yml
+++ b/website/data/adopters.yml
@@ -158,4 +158,7 @@ adopters:
   logo: itau-unibanco.png
 - name: LabyrinthLabs
   url: https://lablabs.io
-  logo: lablabs.png
\ No newline at end of file
+  logo: lablabs.png
+- name: Darwinbox Digital Solutions
+  url: https://darwinbox.com
+  logo: darwinbox.png
\ No newline at end of file
diff --git a/website/static/logos/darwinbox.png b/website/static/logos/darwinbox.png
new file mode 100644
index 0000000000000000000000000000000000000000..02be75ac9e5bb50e2e70598e87d69ce05d3d3a13
GIT binary patch
literal 11874
[11874 bytes of binary PNG data for website/static/logos/darwinbox.png omitted]
From: ian woolf
Date: Thu, 14 Oct 2021 16:50:57 +0800
Subject: [PATCH 26/33] add block-viewer.global.sync-block-timeout flag to set
 timeout of syncing block metas (#4764)

Signed-off-by: ian woolf
---
 CHANGELOG.md               | 1 +
 cmd/thanos/compact.go      | 7 +++++--
 docs/components/compact.md | 4 ++++
 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 87d8c82db3f..a843f4839bc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
 - [#4680](https://github.com/thanos-io/thanos/pull/4680) Query: add `exemplar.partial-response` flag to control partial response.
 - [#4679](https://github.com/thanos-io/thanos/pull/4679) Added `enable-feature` flag to enable negative offsets and @ modifier, similar to Prometheus.
 - [#4696](https://github.com/thanos-io/thanos/pull/4696) Query: add cache name to tracing spans.
+- [#4764](https://github.com/thanos-io/thanos/pull/4764) Compactor: add `block-viewer.global.sync-block-timeout` flag to set the timeout for syncing block metas.
 
 ### Fixed
 
diff --git a/cmd/thanos/compact.go b/cmd/thanos/compact.go
index 2ae488de6db..b4ff8f56902 100644
--- a/cmd/thanos/compact.go
+++ b/cmd/thanos/compact.go
@@ -537,14 +537,14 @@ func runCompact(
 	}
 
 	g.Add(func() error {
-		iterCtx, iterCancel := context.WithTimeout(ctx, conf.waitInterval)
+		iterCtx, iterCancel := context.WithTimeout(ctx, conf.blockViewerSyncBlockTimeout)
 		_, _, _ = f.Fetch(iterCtx)
 		iterCancel()
 
 		// For /global state make sure to fetch periodically.
 		return runutil.Repeat(conf.blockViewerSyncBlockInterval, ctx.Done(), func() error {
 			return runutil.RetryWithLog(logger, time.Minute, ctx.Done(), func() error {
-				iterCtx, iterCancel := context.WithTimeout(ctx, conf.waitInterval)
+				iterCtx, iterCancel := context.WithTimeout(ctx, conf.blockViewerSyncBlockTimeout)
 				defer iterCancel()
 
 				_, _, err := f.Fetch(iterCtx)
@@ -576,6 +576,7 @@ type compactConfig struct {
 	blockSyncConcurrency         int
 	blockMetaFetchConcurrency    int
 	blockViewerSyncBlockInterval time.Duration
+	blockViewerSyncBlockTimeout  time.Duration
 	cleanupBlocksInterval        time.Duration
 	compactionConcurrency        int
 	downsampleConcurrency        int
@@ -634,6 +635,8 @@ func (cc *compactConfig) registerFlag(cmd extkingpin.FlagClause) {
 		Default("32").IntVar(&cc.blockMetaFetchConcurrency)
 	cmd.Flag("block-viewer.global.sync-block-interval", "Repeat interval for syncing the blocks between local and remote view for /global Block Viewer UI.").
 		Default("1m").DurationVar(&cc.blockViewerSyncBlockInterval)
+	cmd.Flag("block-viewer.global.sync-block-timeout", "Maximum time for syncing the blocks between local and remote view for /global Block Viewer UI.").
+		Default("5m").DurationVar(&cc.blockViewerSyncBlockTimeout)
 	cmd.Flag("compact.cleanup-interval", "How often we should clean up partially uploaded blocks and blocks with deletion mark in the background when --wait has been enabled. Setting it to \"0s\" disables it - the cleaning will only happen at the end of an iteration.").
 		Default("5m").DurationVar(&cc.cleanupBlocksInterval)

diff --git a/docs/components/compact.md b/docs/components/compact.md
index ce1a796b36e..8b662a88e75 100644
--- a/docs/components/compact.md
+++ b/docs/components/compact.md
@@ -284,6 +284,10 @@ Flags:
                                  Repeat interval for syncing the blocks between
                                  local and remote view for /global Block Viewer
                                  UI.
+      --block-viewer.global.sync-block-timeout=5m
+                                 Maximum time for syncing the blocks between
+                                 local and remote view for /global Block Viewer
+                                 UI.
       --bucket-web-label=BUCKET-WEB-LABEL
                                  Prometheus label to use as timeline title in
                                  the bucket web UI

From 41d2e962c69ceb1b789bcc8f4109207b2306ccf0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Nov=C3=A1k?=
Date: Tue, 19 Oct 2021 12:05:55 +0200
Subject: [PATCH 27/33] Add capability to use custom AWS STS Endpoint (#4736)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Add configurable AWS STS Endpoint

Signed-off-by: Tomáš Novák

* Edit Docs

Signed-off-by: Tomáš Novák

* Edit Changelog

Signed-off-by: Tomáš Novák

* Fix formatting

Signed-off-by: Tomáš Novák

* Update CHANGELOG.md

Co-authored-by: Bartlomiej Plotka
Signed-off-by: Tomáš Novák

Co-authored-by: Bartlomiej Plotka
---
 CHANGELOG.md          | 1 +
 docs/storage.md       | 7 +++++++
 pkg/objstore/s3/s3.go | 6 ++++--
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a843f4839bc..5ceefc193ae 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
 - [#4680](https://github.com/thanos-io/thanos/pull/4680) Query: add `exemplar.partial-response` flag to control partial response.
 - [#4679](https://github.com/thanos-io/thanos/pull/4679) Added `enable-feature` flag to enable negative offsets and @ modifier, similar to Prometheus.
 - [#4696](https://github.com/thanos-io/thanos/pull/4696) Query: add cache name to tracing spans.
+- [#4736](https://github.com/thanos-io/thanos/pull/4736) S3: Add capability to use custom AWS STS Endpoint.
 - [#4764](https://github.com/thanos-io/thanos/pull/4764) Compactor: add `block-viewer.global.sync-block-timeout` flag to set the timeout for syncing block metas.
 
 ### Fixed
 
diff --git a/docs/storage.md b/docs/storage.md
index bd5558f76da..f40ed976b10 100644
--- a/docs/storage.md
+++ b/docs/storage.md
@@ -88,6 +88,7 @@ config:
   kms_key_id: ""
   kms_encryption_context: {}
   encryption_key: ""
+  sts_endpoint: ""
 ```
 
 At a minimum, you will need to provide a value for the `bucket`, `endpoint`, `access_key`, and `secret_key` keys. The rest of the keys are optional.
@@ -226,6 +227,12 @@ With this policy you should be able to run set `THANOS_TEST_OBJSTORE_SKIP=GCS,AZ
 
 Details about AWS policies: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
 
+##### STS Endpoint
+
+If you want to use IAM credentials retrieved from an instance profile, Thanos needs to authenticate through AWS STS. For this purpose you can specify your own STS endpoint.
+
+By default Thanos uses the endpoint https://sts.amazonaws.com, or the endpoint corresponding to the configured AWS region.
+
 #### GCS
 
 To configure Google Cloud Storage bucket as an object store you need to set `bucket` with GCS bucket name and configure Google Application credentials.

diff --git a/pkg/objstore/s3/s3.go b/pkg/objstore/s3/s3.go
index 8d50156d18b..321d8fe57db 100644
--- a/pkg/objstore/s3/s3.go
+++ b/pkg/objstore/s3/s3.go
@@ -84,8 +84,9 @@ type Config struct {
 	ListObjectsVersion string `yaml:"list_objects_version"`
 	// PartSize used for multipart upload. Only used if uploaded object size is known and larger than configured PartSize.
 	// NOTE we need to make sure this number does not produce more parts than 10 000.
- PartSize uint64 `yaml:"part_size"` - SSEConfig SSEConfig `yaml:"sse_config"` + PartSize uint64 `yaml:"part_size"` + SSEConfig SSEConfig `yaml:"sse_config"` + STSEndpoint string `yaml:"sts_endpoint"` } // SSEConfig deals with the configuration of SSE for Minio. The following options are valid: @@ -228,6 +229,7 @@ func NewBucketWithConfig(logger log.Logger, config Config, component string) (*B Client: &http.Client{ Transport: http.DefaultTransport, }, + Endpoint: config.STSEndpoint, }), } } From c68f4e3a715c782e9b921a6af7e6f7a2f55068db Mon Sep 17 00:00:00 2001 From: Akansha Tiwari <36307100+akanshat@users.noreply.github.com> Date: Wed, 20 Oct 2021 00:47:20 +0530 Subject: [PATCH 28/33] caching_bucket: Refactor caching keys (#4774) * refactor caching keys Signed-off-by: akanshat * restructure caching keys Signed-off-by: akanshat * add types to caching key constants Signed-off-by: akanshat * implement stringer interface on BucketCacheKey Signed-off-by: akanshat * add tests for ParseBucketCacheKey Signed-off-by: akanshat --- pkg/store/cache/caching_bucket.go | 112 +++++++++++++++---- pkg/store/cache/caching_bucket_test.go | 149 +++++++++++++++++++++++-- 2 files changed, 229 insertions(+), 32 deletions(-) diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go index 2f55f2e0cd0..0a53af4e736 100644 --- a/pkg/store/cache/caching_bucket.go +++ b/pkg/store/cache/caching_bucket.go @@ -11,6 +11,7 @@ import ( "io" "io/ioutil" "strconv" + "strings" "sync" "time" @@ -31,7 +32,12 @@ const ( originBucket = "bucket" ) -var errObjNotFound = errors.Errorf("object not found") +var ( + errObjNotFound = errors.Errorf("object not found") + ErrInvalidBucketCacheKeyFormat = errors.New("key has invalid format") + ErrInvalidBucketCacheKeyVerb = errors.New("key has invalid verb") + ErrParseKeyInt = errors.New("failed to parse integer in key") +) // CachingBucket implementation that provides some caching features, based on passed configuration. type CachingBucket struct { @@ -130,8 +136,8 @@ func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) er } cb.operationRequests.WithLabelValues(objstore.OpIter, cfgName).Inc() - - key := cachingKeyIter(dir) + iterVerb := BucketCacheKey{Verb: IterVerb, Name: dir} + key := iterVerb.String() data := cfg.cache.Fetch(ctx, []string{key}) if data[key] != nil { list, err := cfg.codec.Decode(data[key]) @@ -176,7 +182,8 @@ func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) cb.operationRequests.WithLabelValues(objstore.OpExists, cfgName).Inc() - key := cachingKeyExists(name) + existsVerb := BucketCacheKey{Verb: ExistsVerb, Name: name} + key := existsVerb.String() hits := cfg.cache.Fetch(ctx, []string{key}) if ex := hits[key]; ex != nil { @@ -218,8 +225,10 @@ func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, e cb.operationRequests.WithLabelValues(objstore.OpGet, cfgName).Inc() - contentKey := cachingKeyContent(name) - existsKey := cachingKeyExists(name) + contentVerb := BucketCacheKey{Verb: ContentVerb, Name: name} + contentKey := contentVerb.String() + existsVerb := BucketCacheKey{Verb: ExistsVerb, Name: name} + existsKey := existsVerb.String() hits := cfg.cache.Fetch(ctx, []string{contentKey, existsKey}) if hits[contentKey] != nil { @@ -286,7 +295,8 @@ func (cb *CachingBucket) Attributes(ctx context.Context, name string) (objstore. 
} func (cb *CachingBucket) cachedAttributes(ctx context.Context, name, cfgName string, cache cache.Cache, ttl time.Duration) (objstore.ObjectAttributes, error) { - key := cachingKeyAttributes(name) + attrVerb := BucketCacheKey{Verb: AttributesVerb, Name: name} + key := attrVerb.String() cb.operationRequests.WithLabelValues(objstore.OpAttributes, cfgName).Inc() @@ -357,8 +367,8 @@ func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset end = attrs.Size } totalRequestedBytes += (end - off) - - k := cachingKeyObjectSubrange(name, off, end) + objectSubrange := BucketCacheKey{Verb: SubrangeVerb, Name: name, Start: off, End: end} + k := objectSubrange.String() keys = append(keys, k) offsetKeys[off] = k } @@ -482,24 +492,86 @@ func mergeRanges(input []rng, limit int64) []rng { return input[:last+1] } -func cachingKeyAttributes(name string) string { - return fmt.Sprintf("attrs:%s", name) -} +// VerbType is the type of operation whose result has been stored in the caching bucket's cache. +type VerbType string + +const ( + ExistsVerb VerbType = "exists" + ContentVerb VerbType = "content" + IterVerb VerbType = "iter" + AttributesVerb VerbType = "attrs" + SubrangeVerb VerbType = "subrange" +) -func cachingKeyObjectSubrange(name string, start, end int64) string { - return fmt.Sprintf("subrange:%s:%d:%d", name, start, end) +type BucketCacheKey struct { + Verb VerbType + Name string + Start int64 + End int64 } -func cachingKeyIter(name string) string { - return fmt.Sprintf("iter:%s", name) +// String returns the string representation of BucketCacheKey. +func (ck BucketCacheKey) String() string { + if ck.Start == 0 && ck.End == 0 { + return fmt.Sprintf("%s:%s", ck.Verb, ck.Name) + } + + return fmt.Sprintf("%s:%s:%d:%d", ck.Verb, ck.Name, ck.Start, ck.End) } -func cachingKeyExists(name string) string { - return fmt.Sprintf("exists:%s", name) +// IsValidVerb checks if the VerbType matches the predefined verbs. +func IsValidVerb(v VerbType) bool { + switch v { + case + ExistsVerb, + ContentVerb, + IterVerb, + AttributesVerb, + SubrangeVerb: + return true + } + return false } -func cachingKeyContent(name string) string { - return fmt.Sprintf("content:%s", name) +// ParseBucketCacheKey parses a string and returns BucketCacheKey. +func ParseBucketCacheKey(key string) (BucketCacheKey, error) { + ck := BucketCacheKey{} + slice := strings.Split(key, ":") + if len(slice) < 2 { + return ck, ErrInvalidBucketCacheKeyFormat + } + + verb := VerbType(slice[0]) + if !IsValidVerb(verb) { + return BucketCacheKey{}, ErrInvalidBucketCacheKeyVerb + } + + if verb == SubrangeVerb { + if len(slice) != 4 { + return BucketCacheKey{}, ErrInvalidBucketCacheKeyFormat + } + + start, err := strconv.ParseInt(slice[2], 10, 64) + if err != nil { + return BucketCacheKey{}, ErrParseKeyInt + } + + end, err := strconv.ParseInt(slice[3], 10, 64) + if err != nil { + return BucketCacheKey{}, ErrParseKeyInt + } + + ck.Start = start + ck.End = end + } else { + if len(slice) != 2 { + return BucketCacheKey{}, ErrInvalidBucketCacheKeyFormat + } + } + + ck.Verb = verb + ck.Name = slice[1] + return ck, nil } // Reader implementation that uses in-memory subranges. diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index 902be08f37f..35875716925 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -125,9 +125,12 @@ func TestChunksCaching(t *testing.T) { expectedCachedBytes: 7 * subrangeSize, init: func() { // Delete first 3 subranges. 
- delete(cache.cache, cachingKeyObjectSubrange(name, 0*subrangeSize, 1*subrangeSize)) - delete(cache.cache, cachingKeyObjectSubrange(name, 1*subrangeSize, 2*subrangeSize)) - delete(cache.cache, cachingKeyObjectSubrange(name, 2*subrangeSize, 3*subrangeSize)) + objectSubrange := BucketCacheKey{Verb: SubrangeVerb, Name: name, Start: 0 * subrangeSize, End: 1 * subrangeSize} + delete(cache.cache, objectSubrange.String()) + objectSubrange = BucketCacheKey{Verb: SubrangeVerb, Name: name, Start: 1 * subrangeSize, End: 2 * subrangeSize} + delete(cache.cache, objectSubrange.String()) + objectSubrange = BucketCacheKey{Verb: SubrangeVerb, Name: name, Start: 2 * subrangeSize, End: 3 * subrangeSize} + delete(cache.cache, objectSubrange.String()) }, }, @@ -140,9 +143,12 @@ func TestChunksCaching(t *testing.T) { expectedCachedBytes: 7 * subrangeSize, init: func() { // Delete last 3 subranges. - delete(cache.cache, cachingKeyObjectSubrange(name, 7*subrangeSize, 8*subrangeSize)) - delete(cache.cache, cachingKeyObjectSubrange(name, 8*subrangeSize, 9*subrangeSize)) - delete(cache.cache, cachingKeyObjectSubrange(name, 9*subrangeSize, 10*subrangeSize)) + objectSubrange := BucketCacheKey{Verb: SubrangeVerb, Name: name, Start: 7 * subrangeSize, End: 8 * subrangeSize} + delete(cache.cache, objectSubrange.String()) + objectSubrange = BucketCacheKey{Verb: SubrangeVerb, Name: name, Start: 8 * subrangeSize, End: 9 * subrangeSize} + delete(cache.cache, objectSubrange.String()) + objectSubrange = BucketCacheKey{Verb: SubrangeVerb, Name: name, Start: 9 * subrangeSize, End: 10 * subrangeSize} + delete(cache.cache, objectSubrange.String()) }, }, @@ -155,9 +161,12 @@ func TestChunksCaching(t *testing.T) { expectedCachedBytes: 7 * subrangeSize, init: func() { // Delete 3 subranges in the middle. 
- delete(cache.cache, cachingKeyObjectSubrange(name, 3*subrangeSize, 4*subrangeSize)) - delete(cache.cache, cachingKeyObjectSubrange(name, 4*subrangeSize, 5*subrangeSize)) - delete(cache.cache, cachingKeyObjectSubrange(name, 5*subrangeSize, 6*subrangeSize)) + objectSubrange := BucketCacheKey{Verb: SubrangeVerb, Name: name, Start: 3 * subrangeSize, End: 4 * subrangeSize} + delete(cache.cache, objectSubrange.String()) + objectSubrange = BucketCacheKey{Verb: SubrangeVerb, Name: name, Start: 4 * subrangeSize, End: 5 * subrangeSize} + delete(cache.cache, objectSubrange.String()) + objectSubrange = BucketCacheKey{Verb: SubrangeVerb, Name: name, Start: 5 * subrangeSize, End: 6 * subrangeSize} + delete(cache.cache, objectSubrange.String()) }, }, @@ -174,7 +183,8 @@ func TestChunksCaching(t *testing.T) { if i > 0 && i%3 == 0 { continue } - delete(cache.cache, cachingKeyObjectSubrange(name, i*subrangeSize, (i+1)*subrangeSize)) + objectSubrange := BucketCacheKey{Verb: SubrangeVerb, Name: name, Start: i * subrangeSize, End: (i + 1) * subrangeSize} + delete(cache.cache, objectSubrange.String()) } }, }, @@ -194,7 +204,8 @@ func TestChunksCaching(t *testing.T) { if i == 3 || i == 5 || i == 7 { continue } - delete(cache.cache, cachingKeyObjectSubrange(name, i*subrangeSize, (i+1)*subrangeSize)) + objectSubrange := BucketCacheKey{Verb: SubrangeVerb, Name: name, Start: i * subrangeSize, End: (i + 1) * subrangeSize} + delete(cache.cache, objectSubrange.String()) } }, }, @@ -213,7 +224,8 @@ func TestChunksCaching(t *testing.T) { if i == 5 || i == 6 || i == 7 { continue } - delete(cache.cache, cachingKeyObjectSubrange(name, i*subrangeSize, (i+1)*subrangeSize)) + objectSubrange := BucketCacheKey{Verb: SubrangeVerb, Name: name, Start: i * subrangeSize, End: (i + 1) * subrangeSize} + delete(cache.cache, objectSubrange.String()) } }, }, @@ -661,3 +673,116 @@ func verifyObjectAttrs(t *testing.T, cb *CachingBucket, file string, expectedLen } func matchAll(string) bool { return true } + +func TestParseBucketCacheKey(t *testing.T) { + testcases := []struct { + key string + expected BucketCacheKey + expectedErr error + }{ + { + key: "exists:name", + expected: BucketCacheKey{ + Verb: ExistsVerb, + Name: "name", + Start: 0, + End: 0, + }, + expectedErr: nil, + }, + { + key: "content:name", + expected: BucketCacheKey{ + Verb: ContentVerb, + Name: "name", + Start: 0, + End: 0, + }, + expectedErr: nil, + }, + { + key: "iter:name", + expected: BucketCacheKey{ + Verb: IterVerb, + Name: "name", + Start: 0, + End: 0, + }, + expectedErr: nil, + }, + { + key: "attrs:name", + expected: BucketCacheKey{ + Verb: AttributesVerb, + Name: "name", + Start: 0, + End: 0, + }, + expectedErr: nil, + }, + { + key: "subrange:name:10:20", + expected: BucketCacheKey{ + Verb: SubrangeVerb, + Name: "name", + Start: 10, + End: 20, + }, + expectedErr: nil, + }, + // Any VerbType other than SubrangeVerb should not have a "start" and "end". + { + key: "iter:name:10:20", + expected: BucketCacheKey{}, + expectedErr: ErrInvalidBucketCacheKeyFormat, + }, + // Key must always have a name. + { + key: "iter", + expected: BucketCacheKey{}, + expectedErr: ErrInvalidBucketCacheKeyFormat, + }, + // Invalid VerbType should return an error. + { + key: "random:name", + expected: BucketCacheKey{}, + expectedErr: ErrInvalidBucketCacheKeyVerb, + }, + // Start must be an integer. + { + key: "subrange:name:random:10", + expected: BucketCacheKey{}, + expectedErr: ErrParseKeyInt, + }, + // End must be an integer. 
+ { + key: "subrange:name:10:random", + expected: BucketCacheKey{}, + expectedErr: ErrParseKeyInt, + }, + // SubrangeVerb must have start and end. + { + key: "subrange:name", + expected: BucketCacheKey{}, + expectedErr: ErrInvalidBucketCacheKeyFormat, + }, + // SubrangeVerb must have start and end both. + { + key: "subrange:name:10", + expected: BucketCacheKey{}, + expectedErr: ErrInvalidBucketCacheKeyFormat, + }, + // Key must not be an empty string. + { + key: "", + expected: BucketCacheKey{}, + expectedErr: ErrInvalidBucketCacheKeyFormat, + }, + } + + for _, tc := range testcases { + res, err := ParseBucketCacheKey(tc.key) + testutil.Equals(t, tc.expectedErr, err) + testutil.Equals(t, tc.expected, res) + } +} From 6723c86f3a9ba81566d6e20994a3e7ba8566be4b Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Wed, 20 Oct 2021 01:56:13 -0700 Subject: [PATCH 29/33] Ignore compaction group with only 1 block (#4789) * ignore compaction group with only 1 block Signed-off-by: Ben Ye * fix e2e tests Signed-off-by: yeya24 * remove comments Signed-off-by: Ben Ye --- pkg/compact/compact.go | 4 ++++ pkg/compact/compact_e2e_test.go | 10 ++++------ test/e2e/compact_test.go | 4 ++-- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/pkg/compact/compact.go b/pkg/compact/compact.go index 547eae57f2c..2493a4a471c 100644 --- a/pkg/compact/compact.go +++ b/pkg/compact/compact.go @@ -1030,6 +1030,10 @@ func (c *BucketCompactor) Compact(ctx context.Context) (rerr error) { var groupErrs errutil.MultiError groupLoop: for _, g := range groups { + // Ignore groups with only one block because there is nothing to compact. + if len(g.IDs()) == 1 { + continue + } select { case groupErr := <-errChan: groupErrs.Add(groupErr) diff --git a/pkg/compact/compact_e2e_test.go b/pkg/compact/compact_e2e_test.go index 997b635a740..8b7843842ba 100644 --- a/pkg/compact/compact_e2e_test.go +++ b/pkg/compact/compact_e2e_test.go @@ -332,15 +332,13 @@ func testGroupCompactE2e(t *testing.T, mergeFunc storage.VerticalChunkSeriesMerg testutil.Equals(t, 4, MetricCount(grouper.compactionRunsStarted)) testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(DefaultGroupKey(metas[0].Thanos)))) testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(DefaultGroupKey(metas[7].Thanos)))) - // TODO(bwplotka): Looks like we do some unnecessary loops. Not a major problem but investigate. - testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(DefaultGroupKey(metas[4].Thanos)))) - testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(DefaultGroupKey(metas[5].Thanos)))) + testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(DefaultGroupKey(metas[4].Thanos)))) + testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionRunsStarted.WithLabelValues(DefaultGroupKey(metas[5].Thanos)))) testutil.Equals(t, 4, MetricCount(grouper.compactionRunsCompleted)) testutil.Equals(t, 2.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(DefaultGroupKey(metas[0].Thanos)))) testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(DefaultGroupKey(metas[7].Thanos)))) - // TODO(bwplotka): Looks like we do some unnecessary loops. Not a major problem but investigate. 
-	testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(DefaultGroupKey(metas[4].Thanos))))
-	testutil.Equals(t, 3.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(DefaultGroupKey(metas[5].Thanos))))
+	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(DefaultGroupKey(metas[4].Thanos))))
+	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionRunsCompleted.WithLabelValues(DefaultGroupKey(metas[5].Thanos))))
 	testutil.Equals(t, 4, MetricCount(grouper.compactionFailures))
 	testutil.Equals(t, 1.0, promtest.ToFloat64(grouper.compactionFailures.WithLabelValues(DefaultGroupKey(metas[0].Thanos))))
 	testutil.Equals(t, 0.0, promtest.ToFloat64(grouper.compactionFailures.WithLabelValues(DefaultGroupKey(metas[7].Thanos))))
diff --git a/test/e2e/compact_test.go b/test/e2e/compact_test.go
index 46400797dd1..1daede5ea90 100644
--- a/test/e2e/compact_test.go
+++ b/test/e2e/compact_test.go
@@ -643,8 +643,8 @@ func testCompactWithStoreGateway(t *testing.T, penaltyDedup bool) {
 	testutil.Ok(t, c.WaitSumMetrics(e2e.Equals(0), "thanos_compact_group_compactions_total"))
 	testutil.Ok(t, c.WaitSumMetrics(e2e.Equals(0), "thanos_compact_group_vertical_compactions_total"))
 	testutil.Ok(t, c.WaitSumMetrics(e2e.Equals(1), "thanos_compact_group_compactions_failures_total"))
-	testutil.Ok(t, c.WaitSumMetrics(e2e.Equals(3), "thanos_compact_group_compaction_runs_started_total"))
-	testutil.Ok(t, c.WaitSumMetrics(e2e.Equals(2), "thanos_compact_group_compaction_runs_completed_total"))
+	testutil.Ok(t, c.WaitSumMetrics(e2e.Equals(2), "thanos_compact_group_compaction_runs_started_total"))
+	testutil.Ok(t, c.WaitSumMetrics(e2e.Equals(1), "thanos_compact_group_compaction_runs_completed_total"))
 
 	// However, the blocks have been cleaned because that happens concurrently.
 	testutil.Ok(t, c.WaitSumMetrics(e2e.Equals(2), "thanos_compact_aborted_partial_uploads_deletion_attempts_total"))

From d2d53e575b489a8cbfc9e1723d0e3f62a68faf39 Mon Sep 17 00:00:00 2001
From: Oleg Zaytsev
Date: Wed, 20 Oct 2021 22:56:15 +0200
Subject: [PATCH 30/33] Fix data race in `BucketedBytes` pool (#4792)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Fix data race in BucketedBytes pool

The previous test didn't detect the data race: we copied the bytes header
to the bytes.Buffer, so when appending to the slice we were not modifying
the original one. However, the usage of this in bucketChunkReader.save()
actually modifies the referenced slice, so the test was modified to check
that it can be done safely.

The race condition happened because we were reading the referenced slice's
capacity after putting it back into the pool, when someone else might have
already retrieved and modified it.
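In short, the capacity has to be read before the slice is handed back. A
minimal sketch of that ordering, assuming a simplified single-bucket pool
with illustrative names rather than the exact BucketedBytes implementation:

```
package pool

import "sync"

type bucketedBytes struct {
	mtx       sync.Mutex
	bkt       sync.Pool // a single bucket is enough to illustrate the ordering
	usedTotal uint64
}

func (p *bucketedBytes) put(b *[]byte) {
	sz := uint64(cap(*b)) // capture the capacity while we still own the slice
	*b = (*b)[:0]
	p.bkt.Put(b) // from here on, *b may be fetched and mutated by another goroutine

	p.mtx.Lock()
	defer p.mtx.Unlock()
	// Guard against underflow in case callers grew the slice beyond its bucket.
	if sz >= p.usedTotal {
		p.usedTotal = 0
	} else {
		p.usedTotal -= sz
	}
}
```

The real pool keys its buckets by size, but the ownership rule is the same:
nothing derived from *b may be read once Put has published the slice.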
Before modifying the implementation, this was the data race reported:

==================
WARNING: DATA RACE
Read at 0x00c0000bc900 by goroutine 36:
  github.com/thanos-io/thanos/pkg/pool.(*BucketedBytes).Put()
      /Users/oleg/w/github.com/thanos-io/thanos/pkg/pool/pool.go:124 +0x1f9
  github.com/thanos-io/thanos/pkg/pool.TestRacePutGet.func1()
      /Users/oleg/w/github.com/thanos-io/thanos/pkg/pool/pool_test.go:108 +0xfa
  github.com/thanos-io/thanos/pkg/pool.TestRacePutGet·dwrap·3()
      /Users/oleg/w/github.com/thanos-io/thanos/pkg/pool/pool_test.go:119 +0x65

Previous write at 0x00c0000bc900 by goroutine 27:
  github.com/thanos-io/thanos/pkg/pool.TestRacePutGet.func1()
      /Users/oleg/w/github.com/thanos-io/thanos/pkg/pool/pool_test.go:94 +0x1fa
  github.com/thanos-io/thanos/pkg/pool.TestRacePutGet·dwrap·3()
      /Users/oleg/w/github.com/thanos-io/thanos/pkg/pool/pool_test.go:119 +0x65

Goroutine 36 (running) created at:
  github.com/thanos-io/thanos/pkg/pool.TestRacePutGet()
      /Users/oleg/w/github.com/thanos-io/thanos/pkg/pool/pool_test.go:119 +0x257
  testing.tRunner()
      /usr/local/Cellar/go/1.17/libexec/src/testing/testing.go:1259 +0x22f
  testing.(*T).Run·dwrap·21()
      /usr/local/Cellar/go/1.17/libexec/src/testing/testing.go:1306 +0x47

Goroutine 27 (running) created at:
  github.com/thanos-io/thanos/pkg/pool.TestRacePutGet()
      /Users/oleg/w/github.com/thanos-io/thanos/pkg/pool/pool_test.go:119 +0x257
  testing.tRunner()
      /usr/local/Cellar/go/1.17/libexec/src/testing/testing.go:1259 +0x22f
  testing.(*T).Run·dwrap·21()
      /usr/local/Cellar/go/1.17/libexec/src/testing/testing.go:1306 +0x47
==================

Signed-off-by: Oleg Zaytsev

* Update CHANGELOG.md

Signed-off-by: Oleg Zaytsev

* goimports fix

Signed-off-by: Oleg Zaytsev
---
 CHANGELOG.md          |  1 +
 pkg/pool/pool.go      |  9 ++++---
 pkg/pool/pool_test.go | 56 +++++++++++++++++++++++--------------------
 3 files changed, 35 insertions(+), 31 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5ceefc193ae..e28c8a1242e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
 - [#4663](https://github.com/thanos-io/thanos/pull/4663) Fetcher: Fix discovered data races.
 - [#4754](https://github.com/thanos-io/thanos/pull/4754) Query: Fix possible panic on stores endpoint.
 - [#4753](https://github.com/thanos-io/thanos/pull/4753) Store: validate block sync concurrency parameter
+- [#4792](https://github.com/thanos-io/thanos/pull/4792) Store: Fix data race in BucketedBytes pool.
 
 ## [v0.23.1](https://github.com/thanos-io/thanos/tree/release-0.23) - 2021.10.1
 
diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go
index cbd034e9e7c..a7eb98c8540 100644
--- a/pkg/pool/pool.go
+++ b/pkg/pool/pool.go
@@ -107,8 +107,9 @@ func (p *BucketedBytes) Put(b *[]byte) {
 		return
 	}
 
+	sz := cap(*b)
 	for i, bktSize := range p.sizes {
-		if cap(*b) > bktSize {
+		if sz > bktSize {
 			continue
 		}
 		*b = (*b)[:0]
@@ -118,13 +119,11 @@ func (p *BucketedBytes) Put(b *[]byte) {
 
 	p.mtx.Lock()
 	defer p.mtx.Unlock()
-
 	// We could assume here that our users will not make the slices larger
 	// but lets be on the safe side to avoid an underflow of p.usedTotal.
-	sz := uint64(cap(*b))
-	if sz >= p.usedTotal {
+	if uint64(sz) >= p.usedTotal {
 		p.usedTotal = 0
 	} else {
-		p.usedTotal -= sz
+		p.usedTotal -= uint64(sz)
 	}
 }

diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_test.go
index a4140361d2a..14c8350acb4 100644
--- a/pkg/pool/pool_test.go
+++ b/pkg/pool/pool_test.go
@@ -4,8 +4,7 @@
 package pool
 
 import (
-	"bytes"
-	"fmt"
+	"strings"
 	"sync"
 	"testing"
 	"time"
@@ -71,52 +70,57 @@ func TestRacePutGet(t *testing.T) {
 
 	s := sync.WaitGroup{}
 
-	// Start two goroutines: they always Get and Put two byte slices
-	// to which they write 'foo' / 'barbazbaz' and check if the data is still
+	const goroutines = 100
+
+	// Start multiple goroutines: they always Get and Put two byte slices
+	// to which they write their contents and check if the data is still
 	// there after writing it, before putting it back.
-	errs := make(chan error, 2)
-	stop := make(chan bool, 2)
+	errs := make(chan error, goroutines)
+	stop := make(chan struct{})
 
-	f := func(txt string) {
+	f := func(txt string, grow bool) {
 		defer s.Done()
 		for {
 			select {
 			case <-stop:
 				return
 			default:
-				c, err := chunkPool.Get(3)
-				if err != nil {
-					errs <- errors.Wrapf(err, "goroutine %s", txt)
-					return
-				}
-
-				buf := bytes.NewBuffer(*c)
-
-				_, err = fmt.Fprintf(buf, "%s", txt)
+				c, err := chunkPool.Get(len(txt))
 				if err != nil {
 					errs <- errors.Wrapf(err, "goroutine %s", txt)
 					return
 				}
 
-				if buf.String() != txt {
+				*c = append(*c, txt...)
+				if string(*c) != txt {
 					errs <- errors.New("expected to get the data just written")
 					return
 				}
+				if grow {
+					*c = append(*c, txt...)
+					*c = append(*c, txt...)
+					if string(*c) != txt+txt+txt {
+						errs <- errors.New("expected to get the data just written")
+						return
+					}
+				}
 
-				b := buf.Bytes()
-				chunkPool.Put(&b)
+				chunkPool.Put(c)
 			}
 		}
 	}
 
-	s.Add(2)
-	go f("foo")
-	go f("barbazbaz")
-
-	time.Sleep(5 * time.Second)
-	stop <- true
-	stop <- true
+	for i := 0; i < goroutines; i++ {
+		s.Add(1)
+		// make sure we start multiple goroutines with same len buf requirements, to hit same pools
+		s := strings.Repeat(string(byte(i)), i%10)
+		// some of the goroutines will append more elements to the provided slice
+		grow := i%2 == 0
+		go f(s, grow)
+	}
 
+	time.Sleep(1 * time.Second)
+	close(stop)
 	s.Wait()
 
 	select {
 	case err := <-errs:

From 5c958e274e35875393d7c4a986cb16c0970971bf Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Thu, 21 Oct 2021 15:01:20 +0200
Subject: [PATCH 31/33] Added PromQL compatibility automatic e2e test suite. (#4781)

* Added PromQL compatibility automatic e2e test suite.

Signed-off-by: Bartlomiej Plotka

* addressed comments.

Signed-off-by: Bartlomiej Plotka
---
 test/e2e/compatibility_test.go | 107 +++++++++++++++++++++++++++++++++
 1 file changed, 107 insertions(+)
 create mode 100644 test/e2e/compatibility_test.go

diff --git a/test/e2e/compatibility_test.go b/test/e2e/compatibility_test.go
new file mode 100644
index 00000000000..0ccdc685971
--- /dev/null
+++ b/test/e2e/compatibility_test.go
@@ -0,0 +1,107 @@
+// Copyright (c) The Thanos Authors.
+// Licensed under the Apache License 2.0.
+
+package e2e_test
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/efficientgo/e2e"
+	e2edb "github.com/efficientgo/e2e/db"
+	"github.com/thanos-io/thanos/pkg/testutil"
+	"github.com/thanos-io/thanos/test/e2e/e2ethanos"
+)
+
+// Test requires at least ~11m, so run this with `-test.timeout 9999m`.
+func TestPromQLCompliance(t *testing.T) {
+	t.Skip("This is an interactive test; it requires time to build up (scrape) the data. The data is also obtained from remote PromLabs servers.")
+
+	e, err := e2e.NewDockerEnvironment("compatibility")
+	testutil.Ok(t, err)
+	t.Cleanup(e.Close)
+
+	// Start separate receive + Querier.
+	receiverRunnable, err := e2ethanos.NewIngestingReceiver(e, "receive")
+	testutil.Ok(t, err)
+	queryReceive := e2edb.NewThanosQuerier(e, "query_receive", []string{receiverRunnable.InternalEndpoint("grpc")})
+	testutil.Ok(t, e2e.StartAndWaitReady(receiverRunnable, queryReceive))
+
+	// Start reference Prometheus.
+	prom := e2edb.NewPrometheus(e, "prom")
+	testutil.Ok(t, prom.SetConfig(`
+global:
+  scrape_interval: 5s
+  evaluation_interval: 5s
+  external_labels:
+    prometheus: 1
+
+remote_write:
+  - url: "`+e2ethanos.RemoteWriteEndpoint(receiverRunnable.InternalEndpoint("remote-write"))+`"
+
+scrape_configs:
+- job_name: 'demo'
+  static_configs:
+    - targets:
+      - 'demo.promlabs.com:10000'
+      - 'demo.promlabs.com:10001'
+      - 'demo.promlabs.com:10002'
+`,
+	))
+	testutil.Ok(t, e2e.StartAndWaitReady(prom))
+
+	// Start separate sidecar + Querier.
+	sidecar := e2edb.NewThanosSidecar(e, "sidecar", prom)
+	querySidecar := e2edb.NewThanosQuerier(e, "query_sidecar", []string{sidecar.InternalEndpoint("grpc")})
+	testutil.Ok(t, e2e.StartAndWaitReady(sidecar, querySidecar))
+
+	// Start noop promql-compliance-tester. See https://github.com/prometheus/compliance/tree/main/promql on how to build local docker image.
+	compliance := e.Runnable("promql-compliance-tester").Init(e2e.StartOptions{
+		Image:   "promql-compliance-tester:latest",
+		Command: e2e.NewCommandWithoutEntrypoint("tail", "-f", "/dev/null"),
+	})
+	testutil.Ok(t, e2e.StartAndWaitReady(compliance))
+
+	// Wait 10 minutes for Prometheus to scrape relevant data.
+	time.Sleep(10 * time.Minute)
+
+	t.Run("receive", func(t *testing.T) {
+		testutil.Ok(t, ioutil.WriteFile(filepath.Join(compliance.Dir(), "receive.yaml"),
+			[]byte(promLabelsPromQLConfig(prom, queryReceive, []string{"prometheus", "receive", "tenant_id"})), os.ModePerm))
+
+		stdout, stderr, err := compliance.Exec(e2e.NewCommand("-config-file", filepath.Join(compliance.InternalDir(), "receive.yaml")))
+		testutil.Ok(t, err)
+		fmt.Println(stdout, stderr)
+	})
+	t.Run("sidecar", func(t *testing.T) {
+		testutil.Ok(t, ioutil.WriteFile(filepath.Join(compliance.Dir(), "sidecar.yaml"),
+			[]byte(promLabelsPromQLConfig(prom, querySidecar, []string{"prometheus"})), os.ModePerm))
+
+		stdout, stderr, err := compliance.Exec(e2e.NewCommand("-config-file", filepath.Join(compliance.InternalDir(), "sidecar.yaml")))
+		testutil.Ok(t, err)
+		fmt.Println(stdout, stderr)
+	})
+}
+
+func promLabelsPromQLConfig(reference *e2edb.Prometheus, target e2e.Runnable, dropLabels []string) string {
+	return `reference_target_config:
+  query_url: '` + reference.InternalEndpoint("http") + `'
+
+test_target_config:
+  query_url: '` + target.InternalEndpoint("http") + `'
+
+query_tweaks:
+  - note: 'Thanos requires adding "external_labels" to distinguish Prometheus servers, leading to extra labels in query results that need to be stripped before comparing results.'
+    no_bug: true
+    drop_result_labels:
+` + func() (ret string) {
+		for _, l := range dropLabels {
+			ret += `
+      - ` + l
+		}
+		return ret
+	}()
+}

From 04297643c7e6188e4ddfe6b0c31d38c848ac176c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Giedrius=20Statkevi=C4=8Dius?=
Date: Thu, 21 Oct 2021 21:51:34 +0300
Subject: [PATCH 32/33] query: fix deadlock in endpointset (#4795)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Avoid RLock()ing twice as described here:
https://github.com/thanos-io/thanos/issues/4766#issuecomment-948743455
(due to https://stackoverflow.com/questions/30547916/goroutine-blocks-when-calling-rwmutex-rlock-twice-after-an-rwmutex-unlock/30549188).

Fix it by removing HasClients() and simply replacing it with
`er.clients != nil`.

Signed-off-by: Giedrius Statkevičius
---
 pkg/query/endpointset.go      | 17 ++++---------
 pkg/query/endpointset_test.go | 45 +++++++++++++++++++++++++++++++++++
 2 files changed, 50 insertions(+), 12 deletions(-)

diff --git a/pkg/query/endpointset.go b/pkg/query/endpointset.go
index 727299db1f1..a768b68e921 100644
--- a/pkg/query/endpointset.go
+++ b/pkg/query/endpointset.go
@@ -687,46 +687,39 @@ func (er *endpointRef) ComponentType() component.Component {
 	return component.FromString(er.metadata.ComponentType)
 }
 
-func (er *endpointRef) HasClients() bool {
-	er.mtx.RLock()
-	defer er.mtx.RUnlock()
-
-	return er.clients != nil
-}
-
 func (er *endpointRef) HasStoreAPI() bool {
 	er.mtx.RLock()
 	defer er.mtx.RUnlock()
 
-	return er.HasClients() && er.clients.store != nil
+	return er.clients != nil && er.clients.store != nil
 }
 
 func (er *endpointRef) HasRulesAPI() bool {
 	er.mtx.RLock()
 	defer er.mtx.RUnlock()
 
-	return er.HasClients() && er.clients.rule != nil
+	return er.clients != nil && er.clients.rule != nil
 }
 
 func (er *endpointRef) HasTargetsAPI() bool {
 	er.mtx.RLock()
 	defer er.mtx.RUnlock()
 
-	return er.HasClients() && er.clients.target != nil
+	return er.clients != nil && er.clients.target != nil
 }
 
 func (er *endpointRef) HasMetricMetadataAPI() bool {
 	er.mtx.RLock()
 	defer er.mtx.RUnlock()
 
-	return er.HasClients() && er.clients.metricMetadata != nil
+	return er.clients != nil && er.clients.metricMetadata != nil
 }
 
 func (er *endpointRef) HasExemplarsAPI() bool {
 	er.mtx.RLock()
 	defer er.mtx.RUnlock()
 
-	return er.HasClients() && er.clients.exemplar != nil
+	return er.clients != nil && er.clients.exemplar != nil
 }
 
 func (er *endpointRef) LabelSets() []labels.Labels {

diff --git a/pkg/query/endpointset_test.go b/pkg/query/endpointset_test.go
index 5dc7eefa450..f6904e8223d 100644
--- a/pkg/query/endpointset_test.go
+++ b/pkg/query/endpointset_test.go
@@ -12,6 +12,7 @@ import (
 	"testing"
 	"time"
 
+	"golang.org/x/sync/errgroup"
 	"google.golang.org/grpc"
 
 	"github.com/pkg/errors"
@@ -1185,3 +1186,47 @@ func assertRegisteredAPIs(t *testing.T, expectedAPIs *APIs, er *endpointRef) {
 	testutil.Equals(t, expectedAPIs.metricMetadata, er.HasMetricMetadataAPI())
 	testutil.Equals(t, expectedAPIs.exemplars, er.HasExemplarsAPI())
 }
+
+// Regression test for: https://github.com/thanos-io/thanos/issues/4766.
+func TestDeadlockLocking(t *testing.T) { + t.Parallel() + + mockEndpointRef := &endpointRef{ + addr: "mockedStore", + metadata: &endpointMetadata{ + &infopb.InfoResponse{}, + }, + clients: &endpointClients{}, + } + + g := &errgroup.Group{} + deadline := time.Now().Add(3 * time.Second) + + g.Go(func() error { + for { + if time.Now().After(deadline) { + break + } + mockEndpointRef.Update(&endpointMetadata{ + InfoResponse: &infopb.InfoResponse{}, + }) + } + return nil + }) + + g.Go(func() error { + for { + if time.Now().After(deadline) { + break + } + mockEndpointRef.HasStoreAPI() + mockEndpointRef.HasExemplarsAPI() + mockEndpointRef.HasMetricMetadataAPI() + mockEndpointRef.HasRulesAPI() + mockEndpointRef.HasTargetsAPI() + } + return nil + }) + + testutil.Ok(t, g.Wait()) +} From 18049504408c5ae09deb76961b92baf7f96b0e93 Mon Sep 17 00:00:00 2001 From: Aditi Ahuja <48997495+metonymic-smokey@users.noreply.github.com> Date: Fri, 22 Oct 2021 10:42:42 +0530 Subject: [PATCH 33/33] fixed log message (#4796) Signed-off-by: metonymic-smokey --- cmd/thanos/compact.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/thanos/compact.go b/cmd/thanos/compact.go index b4ff8f56902..6e01c21110e 100644 --- a/cmd/thanos/compact.go +++ b/cmd/thanos/compact.go @@ -448,7 +448,7 @@ func runCompact( // TODO(bwplotka): Find a way to avoid syncing if no op was done. if err := sy.SyncMetas(ctx); err != nil { - return errors.Wrap(err, "sync before first pass of downsampling") + return errors.Wrap(err, "sync before retention") } if err := compact.ApplyRetentionPolicyByResolution(ctx, logger, bkt, sy.Metas(), retentionByResolution, compactMetrics.blocksMarked.WithLabelValues(metadata.DeletionMarkFilename, "")); err != nil {