diff --git a/quesma/clickhouse/util.go b/quesma/clickhouse/util.go index ece0c1e9e..885a92b41 100644 --- a/quesma/clickhouse/util.go +++ b/quesma/clickhouse/util.go @@ -156,3 +156,51 @@ func TimestampGroupBy(timestampField model.Expr, typ DateTimeType, groupByInterv return model.NewLiteral("invalid") // maybe create new type InvalidExpr? } } + +func TimestampGroupByWithTimezone(timestampField model.Expr, typ DateTimeType, + groupByInterval time.Duration, timezone string) model.Expr { + + // If no timezone, or timezone is default (UTC), we just return TimestampGroupBy(...) + if timezone == "" { + return TimestampGroupBy(timestampField, typ, groupByInterval) + } + + createAExp := func(innerFuncName string, interval, offsetMultiplier int64) model.Expr { + var offset model.Expr + offset = model.NewFunction( + "timeZoneOffset", + model.NewFunction( + "toTimezone", + timestampField, model.NewLiteral("'"+timezone+"'"), + ), + ) + if offsetMultiplier != 1 { + offset = model.NewInfixExpr(offset, "*", model.NewLiteral(offsetMultiplier)) + } + + unixTsWithOffset := model.NewInfixExpr( + model.NewFunction(innerFuncName, timestampField), + "+", + offset, + ) + + groupByExpr := model.NewInfixExpr( + model.NewParenExpr(unixTsWithOffset), + " / ", // TODO nasty hack to make our string-based tests pass. Operator should not contain spaces obviously + model.NewLiteral(interval), + ) + + return model.NewFunction("toInt64", groupByExpr) + } + + switch typ { + case DateTime64: + // e.g: (toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone("timestamp",'Europe/Warsaw'))*1000) / 600000 + return createAExp("toUnixTimestamp64Milli", groupByInterval.Milliseconds(), 1000) + case DateTime: + return createAExp("toUnixTimestamp", groupByInterval.Milliseconds()/1000, 1) + default: + logger.Error().Msgf("invalid timestamp fieldname: %s", timestampField) + return model.NewLiteral("invalid") // maybe create new type InvalidExpr? + } +} diff --git a/quesma/model/bucket_aggregations/date_histogram.go b/quesma/model/bucket_aggregations/date_histogram.go index 1b37cbb14..9592ca245 100644 --- a/quesma/model/bucket_aggregations/date_histogram.go +++ b/quesma/model/bucket_aggregations/date_histogram.go @@ -4,6 +4,7 @@ package bucket_aggregations import ( "context" + "fmt" "quesma/clickhouse" "quesma/kibana" "quesma/logger" @@ -26,14 +27,15 @@ type DateHistogram struct { ctx context.Context field model.Expr // name of the field, e.g. 
timestamp interval string + timezone string minDocCount int intervalType DateHistogramIntervalType fieldDateTimeType clickhouse.DateTimeType } -func NewDateHistogram(ctx context.Context, field model.Expr, interval string, +func NewDateHistogram(ctx context.Context, field model.Expr, interval, timezone string, minDocCount int, intervalType DateHistogramIntervalType, fieldDateTimeType clickhouse.DateTimeType) *DateHistogram { - return &DateHistogram{ctx: ctx, field: field, interval: interval, + return &DateHistogram{ctx: ctx, field: field, interval: interval, timezone: timezone, minDocCount: minDocCount, intervalType: intervalType, fieldDateTimeType: fieldDateTimeType} } @@ -66,6 +68,13 @@ func (query *DateHistogram) TranslateSqlResponseToJson(rows []model.QueryResultR rows = query.NewRowsTransformer().Transform(query.ctx, rows) } + // key is in `query.timezone` time, and we need it to be UTC + wantedTimezone, err := time.LoadLocation(query.timezone) + if err != nil { + logger.ErrorWithCtx(query.ctx).Msgf("time.LoadLocation error: %v", err) + wantedTimezone = time.UTC + } + var response []model.JsonMap for _, row := range rows { var key int64 @@ -76,11 +85,16 @@ func (query *DateHistogram) TranslateSqlResponseToJson(rows []model.QueryResultR key = query.getKey(row) * intervalInMilliseconds } - intervalStart := time.UnixMilli(key).UTC().Format("2006-01-02T15:04:05.000") + ts := time.UnixMilli(key).UTC() + intervalStartNotUTC := time.Date(ts.Year(), ts.Month(), ts.Day(), ts.Hour(), ts.Minute(), ts.Second(), ts.Nanosecond(), wantedTimezone) + + _, timezoneOffsetInSeconds := intervalStartNotUTC.Zone() + key -= int64(timezoneOffsetInSeconds * 1000) // seconds -> milliseconds + response = append(response, model.JsonMap{ "key": key, "doc_count": row.LastColValue(), // used to be [level], but because some columns are duplicated, it doesn't work in 100% cases now - "key_as_string": intervalStart, + "key_as_string": time.UnixMilli(key).UTC().Format("2006-01-02T15:04:05.000"), }) } @@ -134,17 +148,22 @@ func (query *DateHistogram) generateSQLForFixedInterval() model.Expr { logger.ErrorWithCtx(query.ctx).Msgf("invalid date type for DateHistogram %+v. Using DateTime64 as default.", query) dateTimeType = defaultDateTimeType } - return clickhouse.TimestampGroupBy(query.field, dateTimeType, interval) + return clickhouse.TimestampGroupByWithTimezone(query.field, dateTimeType, interval, query.timezone) } func (query *DateHistogram) generateSQLForCalendarInterval() model.Expr { exprForBiggerIntervals := func(toIntervalStartFuncName string) model.Expr { // returned expr as string: - // "1000 * toInt64(toUnixTimestamp(toStartOf[Week|Month|Quarter|Year](timestamp)))" - toStartOf := model.NewFunction(toIntervalStartFuncName, query.field) - toUnixTimestamp := model.NewFunction("toUnixTimestamp", toStartOf) - toInt64 := model.NewFunction("toInt64", toUnixTimestamp) - return model.NewInfixExpr(toInt64, "*", model.NewLiteral(1000)) + // a) "1000 * toInt64(toUnixTimestamp(toStartOf[Week|Month|Quarter|Year](timestamp)))" (with no timezone offset) + // b) as above, but replace timestamp -> toTimeZone(timestamp, timezone) (with timezone present) + timestampFieldWithOffset := query.field + if query.timezone != "" { + timestampFieldWithOffset = model.NewFunction("toTimezone", query.field, model.NewLiteral(fmt.Sprintf("'%s'", query.timezone))) + } + toStartOf := model.NewFunction(toIntervalStartFuncName, timestampFieldWithOffset) // toStartOfMonth(...) or toStartOfWeek(...) 
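For illustration, a minimal self-contained sketch (standard library only) of the key adjustment that TranslateSqlResponseToJson performs above, assuming a Europe/Warsaw time_zone and its +1h winter offset; the sample key 1706875200000 is taken from the expected buckets later in this diff, where key 1706875200000 / 2024-02-02T12:00:00.000 becomes 1706871600000 / 2024-02-02T11:00:00.000 after the adjustment:

package main

import (
	"fmt"
	"time"
)

// Sketch of the key adjustment in TranslateSqlResponseToJson: the SQL produces bucket
// keys shifted into the requested timezone, so we read that zone's offset for the
// bucket's wall-clock time and subtract it to get a UTC key.
func main() {
	key := int64(1706875200000) // 2024-02-02T12:00:00.000, local-time-as-UTC bucket key

	wantedTimezone, err := time.LoadLocation("Europe/Warsaw")
	if err != nil {
		wantedTimezone = time.UTC
	}

	ts := time.UnixMilli(key).UTC()
	// Reinterpret the same wall-clock fields in the requested zone, just to read its offset.
	intervalStartNotUTC := time.Date(ts.Year(), ts.Month(), ts.Day(), ts.Hour(),
		ts.Minute(), ts.Second(), ts.Nanosecond(), wantedTimezone)
	_, timezoneOffsetInSeconds := intervalStartNotUTC.Zone() // +3600 for CET in February

	key -= int64(timezoneOffsetInSeconds * 1000) // seconds -> milliseconds
	fmt.Println(key, time.UnixMilli(key).UTC().Format("2006-01-02T15:04:05.000"))
	// prints: 1706871600000 2024-02-02T11:00:00.000
}

This is why the expected "key" and "key_as_string" values in aggregation_requests.go shift by one hour in the hunks further down.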
+ toUnixTimestamp := model.NewFunction("toUnixTimestamp", toStartOf) // toUnixTimestamp(toStartOf...) + toInt64 := model.NewFunction("toInt64", toUnixTimestamp) // toInt64(toUnixTimestamp(...)) + return model.NewInfixExpr(toInt64, "*", model.NewLiteral(1000)) // toInt64(...)*1000 } // calendar_interval: minute/hour/day are the same as fixed_interval: 1m/1h/1d diff --git a/quesma/model/equal.go b/quesma/model/equal.go index 55b09e107..eeed81ee4 100644 --- a/quesma/model/equal.go +++ b/quesma/model/equal.go @@ -46,6 +46,18 @@ func PartlyImplementedIsEqual(a, b Expr) bool { } return true } + case ParenExpr: + if bTyped, ok := b.(ParenExpr); ok { + if len(aTyped.Exprs) != len(bTyped.Exprs) { + return false + } + for i := range aTyped.Exprs { + if !PartlyImplementedIsEqual(aTyped.Exprs[i], bTyped.Exprs[i]) { + return false + } + } + return true + } } return false } diff --git a/quesma/model/expr_string_renderer.go b/quesma/model/expr_string_renderer.go index 7fc7026fb..917506823 100644 --- a/quesma/model/expr_string_renderer.go +++ b/quesma/model/expr_string_renderer.go @@ -66,19 +66,7 @@ func (v *renderer) VisitFunction(e FunctionExpr) interface{} { } func (v *renderer) VisitLiteral(l LiteralExpr) interface{} { - - if l.Value == "*" { - return "*" - } - - switch l.Value.(type) { - case string: - return fmt.Sprintf("%s", l.Value) - case float64: - return fmt.Sprintf("%f", l.Value) - default: - return fmt.Sprintf("%v", l.Value) - } + return fmt.Sprintf("%v", l.Value) } func (v *renderer) VisitInfix(e InfixExpr) interface{} { diff --git a/quesma/model/expr_test.go b/quesma/model/expr_test.go index b9c562092..9664b2f78 100644 --- a/quesma/model/expr_test.go +++ b/quesma/model/expr_test.go @@ -12,5 +12,5 @@ func TestParenExpr(t *testing.T) { NewInfixExpr( NewFunction("floor", NewLiteral(1.5)), "+", NewLiteral(2.5))), "/", NewLiteral(3.5)) - assert.Equal(t, "(floor(1.500000)+2.500000)/3.500000", AsString(parenExpr)) + assert.Equal(t, "(floor(1.5)+2.5)/3.5", AsString(parenExpr)) } diff --git a/quesma/queryparser/aggregation_parser.go b/quesma/queryparser/aggregation_parser.go index 05314b86a..9fdf51672 100644 --- a/quesma/queryparser/aggregation_parser.go +++ b/quesma/queryparser/aggregation_parser.go @@ -731,6 +731,7 @@ func (cw *ClickhouseQueryTranslator) tryBucketAggregation(currentAggr *aggrQuery } field := cw.parseFieldField(dateHistogram, "date_histogram") minDocCount := cw.parseMinDocCount(dateHistogram) + timezone := cw.parseStringField(dateHistogram, "time_zone", "") interval, intervalType := cw.extractInterval(dateHistogram) dateTimeType := cw.Table.GetDateTimeTypeFromExpr(cw.Ctx, field) @@ -738,7 +739,7 @@ func (cw *ClickhouseQueryTranslator) tryBucketAggregation(currentAggr *aggrQuery logger.WarnWithCtx(cw.Ctx).Msgf("invalid date time type for field %s", field) } - dateHistogramAggr := bucket_aggregations.NewDateHistogram(cw.Ctx, field, interval, minDocCount, intervalType, dateTimeType) + dateHistogramAggr := bucket_aggregations.NewDateHistogram(cw.Ctx, field, interval, timezone, minDocCount, intervalType, dateTimeType) currentAggr.Type = dateHistogramAggr sqlQuery := dateHistogramAggr.GenerateSQL() @@ -1080,6 +1081,16 @@ func (cw *ClickhouseQueryTranslator) parseFloatField(queryMap QueryMap, fieldNam return defaultValue } +func (cw *ClickhouseQueryTranslator) parseStringField(queryMap QueryMap, fieldName string, defaultValue string) string { + if valueRaw, exists := queryMap[fieldName]; exists { + if asString, ok := valueRaw.(string); ok { + return asString + } + 
logger.WarnWithCtx(cw.Ctx).Msgf("%s is not a string, but %T, value: %v. Using default: %s", fieldName, valueRaw, valueRaw, defaultValue) + } + return defaultValue +} + // parseFieldFieldMaybeScript is basically almost a copy of parseFieldField above, but it also handles a basic script, if "field" is missing. func (cw *ClickhouseQueryTranslator) parseFieldFieldMaybeScript(shouldBeMap any, aggregationType string) (field model.Expr, isFromScript bool) { Map, ok := shouldBeMap.(QueryMap) diff --git a/quesma/queryparser/aggregation_parser_test.go b/quesma/queryparser/aggregation_parser_test.go index 3380f653a..dcaf1ac6c 100644 --- a/quesma/queryparser/aggregation_parser_test.go +++ b/quesma/queryparser/aggregation_parser_test.go @@ -5,6 +5,7 @@ package queryparser import ( "cmp" "context" + "fmt" "github.com/jinzhu/copier" "github.com/stretchr/testify/assert" "quesma/clickhouse" @@ -168,8 +169,7 @@ var aggregationTests = []struct { "min": 1706881636029 }, "field": "timestamp", - "fixed_interval": "3h", - "time_zone": "Europe/Warsaw" + "fixed_interval": "3h" } } }, @@ -418,8 +418,7 @@ var aggregationTests = []struct { "max": 1707818397034, "min": 1707213597034 }, - "field": "order_date", - "time_zone": "Europe/Warsaw" + "field": "order_date" } } }, @@ -542,8 +541,7 @@ var aggregationTests = []struct { "date_histogram": { "field": "order_date", "fixed_interval": "12h", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } }, @@ -649,9 +647,9 @@ var aggregationTests = []struct { "size": 0 }`, []string{ - `SELECT floor("bytes"/1782.000000)*1782.000000, count() FROM ` + tableName + ` ` + - `GROUP BY floor("bytes"/1782.000000)*1782.000000 ` + - `ORDER BY floor("bytes"/1782.000000)*1782.000000`, + `SELECT floor("bytes"/1782)*1782, count() FROM ` + tableName + ` ` + + `GROUP BY floor("bytes"/1782)*1782 ` + + `ORDER BY floor("bytes"/1782)*1782`, `SELECT count() FROM ` + tableName, }, }, @@ -723,17 +721,26 @@ func sortAggregations(aggregations []*model.Query) { } func allAggregationTests() []testdata.AggregationTestCase { - const lowerBoundTestNr = 80 + const lowerBoundTestNr = 90 allTests := make([]testdata.AggregationTestCase, 0, lowerBoundTestNr) - allTests = append(allTests, testdata.AggregationTests...) - allTests = append(allTests, testdata.AggregationTests2...) - allTests = append(allTests, opensearch_visualize.AggregationTests...) - allTests = append(allTests, dashboard_1.AggregationTests...) - allTests = append(allTests, testdata.PipelineAggregationTests...) - allTests = append(allTests, opensearch_visualize.PipelineAggregationTests...) - allTests = append(allTests, kibana_visualize.AggregationTests...) - allTests = append(allTests, clients.KunkkaTests...) - allTests = append(allTests, clients.OpheliaTests...) 
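For reference, a trimmed self-contained analogue of the new parseStringField helper above (the logger warning is omitted, and QueryMap is assumed to be the map[string]interface{} alias used in queryparser), showing how the optional time_zone string is read with an empty-string default, which keeps the untimezoned TimestampGroupBy path:

package main

import "fmt"

type QueryMap = map[string]interface{}

// Trimmed analogue of parseStringField: return the field as a string if it is
// present and of the right type, otherwise fall back to the given default.
func parseStringField(queryMap QueryMap, fieldName string, defaultValue string) string {
	if valueRaw, exists := queryMap[fieldName]; exists {
		if asString, ok := valueRaw.(string); ok {
			return asString
		}
	}
	return defaultValue
}

func main() {
	dateHistogram := QueryMap{"field": "timestamp", "fixed_interval": "3h", "time_zone": "Europe/Warsaw"}
	fmt.Println(parseStringField(dateHistogram, "time_zone", ""))                         // Europe/Warsaw
	fmt.Println(parseStringField(QueryMap{"field": "timestamp"}, "time_zone", "") == "") // true -> no offset applied
}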
+ + add := func(testsToAdd []testdata.AggregationTestCase, testFilename string) { + for i, test := range testsToAdd { + test.TestName = fmt.Sprintf("%s(file:%s,nr:%d)", test.TestName, testFilename, i) + allTests = append(allTests, test) + } + } + + add(testdata.AggregationTests, "agg_req") + add(testdata.AggregationTests2, "agg_req_2") + add(opensearch_visualize.AggregationTests, "opensearch-visualize/agg_req") + add(dashboard_1.AggregationTests, "dashboard-1/agg_req") + add(testdata.PipelineAggregationTests, "pipeline_agg_req") + add(opensearch_visualize.PipelineAggregationTests, "opensearch-visualize/pipeline_agg_req") + add(kibana_visualize.AggregationTests, "kibana-visualize/agg_r") + add(clients.KunkkaTests, "clients/kunkka") + add(clients.OpheliaTests, "clients/ophelia") + return allTests } @@ -778,10 +785,10 @@ func TestAggregationParserExternalTestcases(t *testing.T) { cw := ClickhouseQueryTranslator{ClickhouseLM: lm, Table: &table, Ctx: context.Background(), SchemaRegistry: s, Config: cfg} for i, test := range allAggregationTests() { t.Run(test.TestName+"("+strconv.Itoa(i)+")", func(t *testing.T) { - if test.TestName == "Max/Sum bucket with some null buckets. Reproduce: Visualize -> Vertical Bar: Metrics: Max (Sum) Bucket (Aggregation: Date Histogram, Metric: Min)" { + if test.TestName == "Max/Sum bucket with some null buckets. Reproduce: Visualize -> Vertical Bar: Metrics: Max (Sum) Bucket (Aggregation: Date Histogram, Metric: Min)(file:opensearch-visualize/pipeline_agg_req,nr:18)" { t.Skip("Needs to be fixed by keeping last key for every aggregation. Now we sometimes don't know it. Hard to reproduce, leaving it for separate PR") } - if test.TestName == "complex sum_bucket. Reproduce: Visualize -> Vertical Bar: Metrics: Sum Bucket (Bucket: Date Histogram, Metric: Average), Buckets: X-Asis: Histogram" { + if test.TestName == "complex sum_bucket. Reproduce: Visualize -> Vertical Bar: Metrics: Sum Bucket (Bucket: Date Histogram, Metric: Average), Buckets: X-Asis: Histogram(file:opensearch-visualize/pipeline_agg_req,nr:22)" { t.Skip("Waiting for fix. Now we handle only the case where pipeline agg is at the same nesting level as its parent. Should be quick to fix.") } if i == 27 || i == 29 || i == 30 { @@ -790,7 +797,7 @@ func TestAggregationParserExternalTestcases(t *testing.T) { if strings.HasPrefix(test.TestName, "dashboard-1") { t.Skip("Those 2 tests have nested histograms with min_doc_count=0. Some work done long time ago (Krzysiek)") } - if test.TestName == "Range with subaggregations. Reproduce: Visualize -> Pie chart -> Aggregation: Top Hit, Buckets: Aggregation: Range" { + if test.TestName == "Range with subaggregations. Reproduce: Visualize -> Pie chart -> Aggregation: Top Hit, Buckets: Aggregation: Range(file:opensearch-visualize/agg_req,nr:1)" { t.Skip("Need a (most likely) small fix to top_hits.") } if i == 20 { @@ -801,15 +808,15 @@ func TestAggregationParserExternalTestcases(t *testing.T) { } if test.TestName == "it's the same input as in previous test, but with the original output from Elastic."+ "Skipped for now, as our response is different in 2 things: key_as_string date (probably not important) + we don't return 0's (e.g. 
doc_count: 0)."+ - "If we need clients/kunkka/test_0, used to be broken before aggregations merge fix" { + "If we need clients/kunkka/test_0, used to be broken before aggregations merge fix(file:clients/kunkka,nr:1)" { t.Skip("Unskip and remove the previous test after those fixes.") } - if test.TestName == "clients/kunkka/test_1, used to be broken before aggregations merge fix" { + if test.TestName == "clients/kunkka/test_1, used to be broken before aggregations merge fix(file:clients/kunkka,nr:2)" { t.Skip("Small details left for this test to be correct. I'll (Krzysiek) fix soon after returning to work") } - if test.TestName == "Ophelia Test 3: 5x terms + a lot of other aggregations" || - test.TestName == "Ophelia Test 6: triple terms + other aggregations + order by another aggregations" || - test.TestName == "Ophelia Test 7: 5x terms + a lot of other aggregations + different order bys" { + if test.TestName == "Ophelia Test 3: 5x terms + a lot of other aggregations(file:clients/ophelia,nr:2)" || + test.TestName == "Ophelia Test 6: triple terms + other aggregations + order by another aggregations(file:clients/ophelia,nr:5)" || + test.TestName == "Ophelia Test 7: 5x terms + a lot of other aggregations + different order bys(file:clients/ophelia,nr:6)" { t.Skip("Very similar to 2 previous tests, results have like 500-1000 lines. They are almost finished though. Maybe I'll fix soon, but not in this PR") } diff --git a/quesma/queryparser/pancake_aggregation_parser_buckets.go b/quesma/queryparser/pancake_aggregation_parser_buckets.go index 60f3a1eb7..f7308b727 100644 --- a/quesma/queryparser/pancake_aggregation_parser_buckets.go +++ b/quesma/queryparser/pancake_aggregation_parser_buckets.go @@ -72,6 +72,7 @@ func (cw *ClickhouseQueryTranslator) pancakeTryBucketAggregation(aggregation *pa } field := cw.parseFieldField(dateHistogram, "date_histogram") minDocCount := cw.parseMinDocCount(dateHistogram) + timezone := cw.parseStringField(dateHistogram, "time_zone", "") interval, intervalType := cw.extractInterval(dateHistogram) dateTimeType := cw.Table.GetDateTimeTypeFromExpr(cw.Ctx, field) @@ -79,7 +80,8 @@ func (cw *ClickhouseQueryTranslator) pancakeTryBucketAggregation(aggregation *pa return false, fmt.Errorf("invalid date time type for field %s", field) } - dateHistogramAggr := bucket_aggregations.NewDateHistogram(cw.Ctx, field, interval, minDocCount, intervalType, dateTimeType) + dateHistogramAggr := bucket_aggregations.NewDateHistogram( + cw.Ctx, field, interval, timezone, minDocCount, intervalType, dateTimeType) aggregation.queryType = dateHistogramAggr sqlQuery := dateHistogramAggr.GenerateSQL() diff --git a/quesma/queryparser/pancake_sql_query_generation_test.go b/quesma/queryparser/pancake_sql_query_generation_test.go index 91cff6f36..60885cf22 100644 --- a/quesma/queryparser/pancake_sql_query_generation_test.go +++ b/quesma/queryparser/pancake_sql_query_generation_test.go @@ -58,11 +58,11 @@ func TestPancakeQueryGeneration(t *testing.T) { if filters(test.TestName) { t.Skip("Fix filters") } - if test.TestName == "Max/Sum bucket with some null buckets. Reproduce: Visualize -> Vertical Bar: Metrics: Max (Sum) Bucket (Aggregation: Date Histogram, Metric: Min)" { + if test.TestName == "Max/Sum bucket with some null buckets. Reproduce: Visualize -> Vertical Bar: Metrics: Max (Sum) Bucket (Aggregation: Date Histogram, Metric: Min)(file:opensearch-visualize/pipeline_agg_req,nr:18)" { t.Skip("Need fix with date keys in pipeline aggregations.") } - if test.TestName == "complex sum_bucket. 
Reproduce: Visualize -> Vertical Bar: Metrics: Sum Bucket (Bucket: Date Histogram, Metric: Average), Buckets: X-Asis: Histogram" { + if test.TestName == "complex sum_bucket. Reproduce: Visualize -> Vertical Bar: Metrics: Sum Bucket (Bucket: Date Histogram, Metric: Average), Buckets: X-Asis: Histogram(file:opensearch-visualize/pipeline_agg_req,nr:22)" { t.Skip("error: filter(s)/range/dataRange aggregation must be the last bucket aggregation") } @@ -172,12 +172,12 @@ func TestPancakeQueryGeneration(t *testing.T) { // We generate correct SQL, but result JSON did not match func incorrectResult(testName string) bool { - t1 := testName == "date_range aggregation" // we use relative time - t2 := testName == "complex filters" // almost, we differ in doc 0 counts + t1 := testName == "date_range aggregation(file:agg_req,nr:22)" // we use relative time + t2 := testName == "complex filters(file:agg_req,nr:18)" // almost, we differ in doc 0 counts // to be deleted after pancakes t3 := testName == "clients/kunkka/test_0, used to be broken before aggregations merge fix"+ "Output more or less works, but is different and worse than what Elastic returns."+ - "If it starts failing, maybe that's a good thing" + "If it starts failing, maybe that's a good thing(file:clients/kunkka,nr:0)" // below test is replacing it // testName == "it's the same input as in previous test, but with the original output from Elastic."+ // "Skipped for now, as our response is different in 2 things: key_as_string date (probably not important) + we don't return 0's (e.g. doc_count: 0)."+ @@ -187,23 +187,23 @@ func incorrectResult(testName string) bool { // TODO remove after fix func topHits(testName string) bool { - t1 := testName == "Range with subaggregations. Reproduce: Visualize -> Pie chart -> Aggregation: Top Hit, Buckets: Aggregation: Range" // also range + t1 := testName == "Range with subaggregations. Reproduce: Visualize -> Pie chart -> Aggregation: Top Hit, Buckets: Aggregation: Range(file:opensearch-visualize/agg_req,nr:1)" // also range return t1 } // TODO remove after fix func topMetrics(testName string) bool { - t1 := testName == "Kibana Visualize -> Last Value. Used to panic" // also filter - t2 := testName == "simplest top_metrics, no sort" - t3 := testName == "simplest top_metrics, with sort" - t4 := testName == "very long: multiple top_metrics + histogram" // also top_metrics + t1 := testName == "Kibana Visualize -> Last Value. 
Used to panic(file:agg_req,nr:31)" // also filter + t2 := testName == "simplest top_metrics, no sort(file:agg_req,nr:38)" + t3 := testName == "simplest top_metrics, with sort(file:agg_req,nr:39)" + t4 := testName == "very long: multiple top_metrics + histogram(file:agg_req,nr:10)" // also top_metrics return t1 || t2 || t3 || t4 } // TODO remove after fix func filters(testName string) bool { // this works, but is very suboptimal and didn't update the test case - t1 := testName == "clients/kunkka/test_1, used to be broken before aggregations merge fix" // multi level filters + t1 := testName == "clients/kunkka/test_1, used to be broken before aggregations merge fix(file:clients/kunkka,nr:2)" // multi level filters return t1 } diff --git a/quesma/testdata/aggregation_requests.go b/quesma/testdata/aggregation_requests.go index d87df74c4..c0190e731 100644 --- a/quesma/testdata/aggregation_requests.go +++ b/quesma/testdata/aggregation_requests.go @@ -515,18 +515,18 @@ var AggregationTests = []AggregationTestCase{ "buckets": [ { "doc_count": 2, - "key": 1706875200000, - "key_as_string": "2024-02-02T12:00:00.000" + "key": 1706871600000, + "key_as_string": "2024-02-02T11:00:00.000" }, { "doc_count": 27, - "key": 1706886000000, - "key_as_string": "2024-02-02T15:00:00.000" + "key": 1706882400000, + "key_as_string": "2024-02-02T14:00:00.000" }, { "doc_count": 34, - "key": 1706896800000, - "key_as_string": "2024-02-02T18:00:00.000" + "key": 1706893200000, + "key_as_string": "2024-02-02T17:00:00.000" } ] }, @@ -538,13 +538,13 @@ var AggregationTests = []AggregationTestCase{ "buckets": [ { "doc_count": 0, - "key": 1706875200000, - "key_as_string": "2024-02-02T12:00:00.000" + "key": 1706871600000, + "key_as_string": "2024-02-02T11:00:00.000" }, { "doc_count": 2, - "key": 1706886000000, - "key_as_string": "2024-02-02T15:00:00.000" + "key": 1706882400000, + "key_as_string": "2024-02-02T14:00:00.000" } ] }, @@ -665,14 +665,17 @@ var AggregationTests = []AggregationTestCase{ `GROUP BY "FlightDelayType" ` + `ORDER BY count() DESC, "FlightDelayType" ` + `LIMIT 10) ` + - `SELECT "FlightDelayType", toInt64(toUnixTimestamp64Milli("timestamp") / 10800000), count() ` + + `SELECT "FlightDelayType", toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone(` + + `"timestamp",'Europe/Warsaw'))*1000) / 10800000), count() ` + `FROM ` + TableName + ` ` + `INNER JOIN "cte_1" ON "FlightDelayType" = "cte_1_1" ` + `WHERE (("timestamp">=parseDateTime64BestEffort('2024-02-02T13:47:16.029Z') AND ` + `"timestamp"<=parseDateTime64BestEffort('2024-02-09T13:47:16.029Z')) ` + `AND "FlightDelayType" IS NOT NULL) ` + - `GROUP BY "FlightDelayType", toInt64(toUnixTimestamp64Milli("timestamp") / 10800000), cte_1_cnt ` + - `ORDER BY cte_1_cnt DESC, "FlightDelayType", toInt64(toUnixTimestamp64Milli("timestamp") / 10800000)`, + `GROUP BY "FlightDelayType", toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone(` + + `"timestamp",'Europe/Warsaw'))*1000) / 10800000), cte_1_cnt ` + + `ORDER BY cte_1_cnt DESC, "FlightDelayType", toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone(` + + `"timestamp",'Europe/Warsaw'))*1000) / 10800000)`, `SELECT "FlightDelayType", count() FROM ` + TableName + ` ` + `WHERE (("timestamp">=parseDateTime64BestEffort('2024-02-02T13:47:16.029Z') ` + `AND "timestamp"<=parseDateTime64BestEffort('2024-02-09T13:47:16.029Z')) ` + @@ -696,14 +699,15 @@ var AggregationTests = []AggregationTestCase{ "FlightDelayType" AS "aggr__0__key_0", sum(count(*)) OVER (PARTITION BY 
"aggr__0__key_0") AS "aggr__0__count", sum(count()) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__order_1", - toInt64(toUnixTimestamp64Milli("timestamp") / 10800000) AS - "aggr__0__1__key_0", count(*) AS "aggr__0__1__count" + toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__1__key_0", + count(*) AS "aggr__0__1__count" FROM ` + TableName + ` WHERE ("timestamp">=parseDateTime64BestEffort('2024-02-02T13:47:16.029Z') AND "timestamp"<=parseDateTime64BestEffort('2024-02-09T13:47:16.029Z')) GROUP BY "FlightDelayType" AS "aggr__0__key_0", - toInt64(toUnixTimestamp64Milli("timestamp") / 10800000) AS - "aggr__0__1__key_0")) + toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__1__key_0")) WHERE "aggr__0__order_1_rank"<=11 ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, }, @@ -2075,13 +2079,13 @@ var AggregationTests = []AggregationTestCase{ "buckets": [ { "doc_count": 22, - "key": 1707480000000, - "key_as_string": "2024-02-09T12:00:00.000" + "key": 1707476400000, + "key_as_string": "2024-02-09T11:00:00.000" }, { "doc_count": 80, - "key": 1707490800000, - "key_as_string": "2024-02-09T15:00:00.000" + "key": 1707487200000, + "key_as_string": "2024-02-09T14:00:00.000" } ] }, @@ -2093,13 +2097,13 @@ var AggregationTests = []AggregationTestCase{ "buckets": [ { "doc_count": 17, - "key": 1707480000000, - "key_as_string": "2024-02-09T12:00:00.000" + "key": 1707476400000, + "key_as_string": "2024-02-09T11:00:00.000" }, { "doc_count": 32, - "key": 1707490800000, - "key_as_string": "2024-02-09T15:00:00.000" + "key": 1707487200000, + "key_as_string": "2024-02-09T14:00:00.000" } ] }, @@ -2111,13 +2115,13 @@ var AggregationTests = []AggregationTestCase{ "buckets": [ { "doc_count": 5, - "key": 1707480000000, - "key_as_string": "2024-02-09T12:00:00.000" + "key": 1707476400000, + "key_as_string": "2024-02-09T11:00:00.000" }, { "doc_count": 11, - "key": 1707490800000, - "key_as_string": "2024-02-09T15:00:00.000" + "key": 1707487200000, + "key_as_string": "2024-02-09T14:00:00.000" } ] }, @@ -2256,14 +2260,17 @@ var AggregationTests = []AggregationTestCase{ `GROUP BY "severity" ` + `ORDER BY count() DESC, "severity" ` + `LIMIT 3) ` + - `SELECT "severity", toInt64(toUnixTimestamp64Milli("@timestamp") / 10800000), count() ` + + `SELECT "severity", toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone(` + + `"@timestamp",'Europe/Warsaw'))*1000) / 10800000), count() ` + `FROM ` + TableName + ` ` + `INNER JOIN "cte_1" ON "severity" = "cte_1_1" ` + `WHERE (("host.name" iLIKE '%prometheus%' AND ("@timestamp">=parseDateTime64BestEffort('2024-02-02T16:36:49.940Z') ` + `AND "@timestamp"<=parseDateTime64BestEffort('2024-02-09T16:36:49.940Z'))) ` + `AND "severity" IS NOT NULL) ` + - `GROUP BY "severity", toInt64(toUnixTimestamp64Milli("@timestamp") / 10800000), cte_1_cnt ` + - `ORDER BY cte_1_cnt DESC, "severity", toInt64(toUnixTimestamp64Milli("@timestamp") / 10800000)`, + `GROUP BY "severity", toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone(` + + `"@timestamp",'Europe/Warsaw'))*1000) / 10800000), cte_1_cnt ` + + `ORDER BY cte_1_cnt DESC, "severity", toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone(` + + `"@timestamp",'Europe/Warsaw'))*1000) / 10800000)`, `SELECT "severity", count() ` + `FROM ` + TableName + ` ` + `WHERE (("host.name" iLIKE '%prometheus%' ` + @@ -2289,15 
+2296,16 @@ var AggregationTests = []AggregationTestCase{ "severity" AS "aggr__0__key_0", sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", sum(count()) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__order_1", - toInt64(toUnixTimestamp64Milli("@timestamp") / 10800000) AS - "aggr__0__1__key_0", count(*) AS "aggr__0__1__count" + toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( + "@timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__1__key_0", + count(*) AS "aggr__0__1__count" FROM ` + TableName + ` WHERE ("host.name" iLIKE '%prometheus%' AND ("@timestamp">= parseDateTime64BestEffort('2024-02-02T16:36:49.940Z') AND "@timestamp"<= parseDateTime64BestEffort('2024-02-09T16:36:49.940Z'))) GROUP BY "severity" AS "aggr__0__key_0", - toInt64(toUnixTimestamp64Milli("@timestamp") / 10800000) AS - "aggr__0__1__key_0")) + toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( + "@timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__1__key_0")) WHERE "aggr__0__order_1_rank"<=4 ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, }, @@ -2468,8 +2476,8 @@ var AggregationTests = []AggregationTestCase{ ] }, "doc_count": 2, - "key": 1707480000000, - "key_as_string": "2024-02-09T12:00:00.000" + "key": 1707476400000, + "key_as_string": "2024-02-09T11:00:00.000" }, { "4": { @@ -2497,8 +2505,8 @@ var AggregationTests = []AggregationTestCase{ ] }, "doc_count": 1, - "key": 1707739200000, - "key_as_string": "2024-02-12T12:00:00.000" + "key": 1707735600000, + "key_as_string": "2024-02-12T11:00:00.000" }, { "4": { @@ -2526,8 +2534,8 @@ var AggregationTests = []AggregationTestCase{ ] }, "doc_count": 1, - "key": 1707782400000, - "key_as_string": "2024-02-13T00:00:00.000" + "key": 1707778800000, + "key_as_string": "2024-02-12T23:00:00.000" } ] }, @@ -2571,33 +2579,46 @@ var AggregationTests = []AggregationTestCase{ }, ExpectedPancakeResults: make([]model.QueryResultRow, 0), ExpectedSQLs: []string{ - `SELECT toInt64(toUnixTimestamp64Milli("order_date") / 43200000), maxOrNull("order_date") AS "windowed_order_date", ` + + `SELECT toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone(` + + `"order_date",'Europe/Warsaw'))*1000) / 43200000), ` + + `maxOrNull("order_date") AS "windowed_order_date", ` + `maxOrNull("order_date") AS "windowed_order_date" FROM ` + `(SELECT "order_date", "order_date", ROW_NUMBER() OVER ` + - `(PARTITION BY toInt64(toUnixTimestamp64Milli("order_date") / 43200000) ` + + `(PARTITION BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone(` + + `"order_date",'Europe/Warsaw'))*1000) / 43200000) ` + `ORDER BY "order_date" ASC) AS "row_number", "taxful_total_price" FROM ` + TableName + " " + `WHERE (("order_date">=parseDateTime64BestEffort('2024-02-06T09:59:57.034Z') AND ` + `"order_date"<=parseDateTime64BestEffort('2024-02-13T09:59:57.034Z')) AND "taxful_total_price" > '250')) ` + `WHERE ((("order_date">=parseDateTime64BestEffort('2024-02-06T09:59:57.034Z') AND ` + `"order_date"<=parseDateTime64BestEffort('2024-02-13T09:59:57.034Z')) AND "taxful_total_price" > '250') AND "row_number"<=10) ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("order_date") / 43200000) ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("order_date") / 43200000)`, - `SELECT toInt64(toUnixTimestamp64Milli("order_date") / 43200000), maxOrNull("taxful_total_price") AS "windowed_taxful_total_price", ` + + `GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone(` + + 
`"order_date",'Europe/Warsaw'))*1000) / 43200000) ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone(` + + `"order_date",'Europe/Warsaw'))*1000) / 43200000)`, + `SELECT toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone(` + + `"order_date",'Europe/Warsaw'))*1000) / 43200000), ` + + `maxOrNull("taxful_total_price") AS "windowed_taxful_total_price", ` + `maxOrNull("order_date") AS "windowed_order_date" FROM ` + `(SELECT "taxful_total_price", "order_date", ROW_NUMBER() OVER ` + - `(PARTITION BY toInt64(toUnixTimestamp64Milli("order_date") / 43200000) ` + + `(PARTITION BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone(` + + `"order_date",'Europe/Warsaw'))*1000) / 43200000) ` + `ORDER BY "order_date" ASC) AS "row_number" FROM ` + TableName + " " + `WHERE (("order_date">=parseDateTime64BestEffort('2024-02-06T09:59:57.034Z') AND ` + `"order_date"<=parseDateTime64BestEffort('2024-02-13T09:59:57.034Z')) AND "taxful_total_price" > '250')) ` + `WHERE ((("order_date">=parseDateTime64BestEffort('2024-02-06T09:59:57.034Z') AND ` + `"order_date"<=parseDateTime64BestEffort('2024-02-13T09:59:57.034Z')) AND "taxful_total_price" > '250') AND "row_number"<=10) ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("order_date") / 43200000) ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("order_date") / 43200000)`, - `SELECT toInt64(toUnixTimestamp64Milli("order_date") / 43200000), count() FROM ` + TableName + " " + + `GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone(` + + `"order_date",'Europe/Warsaw'))*1000) / 43200000) ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone(` + + `"order_date",'Europe/Warsaw'))*1000) / 43200000)`, + `SELECT toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone(` + + `"order_date",'Europe/Warsaw'))*1000) / 43200000), count() FROM ` + TableName + " " + `WHERE (("order_date">=parseDateTime64BestEffort('2024-02-06T09:59:57.034Z') AND ` + `"order_date"<=parseDateTime64BestEffort('2024-02-13T09:59:57.034Z')) AND "taxful_total_price" > '250') ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("order_date") / 43200000) ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("order_date") / 43200000)`, + `GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone(` + + `"order_date",'Europe/Warsaw'))*1000) / 43200000) ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone(` + + `"order_date",'Europe/Warsaw'))*1000) / 43200000)`, `SELECT count() FROM ` + TableName + ` WHERE (("order_date">=parseDateTime64BestEffort('2024-02-06T09:59:57.034Z') ` + `AND "order_date"<=parseDateTime64BestEffort('2024-02-13T09:59:57.034Z')) AND "taxful_total_price" > '250')`, }, @@ -2859,8 +2880,7 @@ var AggregationTests = []AggregationTestCase{ "date_histogram": { "field": "@timestamp", "fixed_interval": "30s", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } }, @@ -3359,8 +3379,7 @@ var AggregationTests = []AggregationTestCase{ "max": 1708969256351, "min": 1708364456351 }, - "field": "order_date", - "time_zone": "Europe/Warsaw" + "field": "order_date" } } }, @@ -3656,7 +3675,6 @@ var AggregationTests = []AggregationTestCase{ "date_histogram": { "field": "order_date", "fixed_interval": "12h", - "time_zone": "Europe/Warsaw", "extended_bounds": { "min": 1708627654149, "max": 1709232454149 @@ -3670,8 +3688,7 @@ var AggregationTests = []AggregationTestCase{ { "query_string": { "query": 
"products.product_name:*watch*", - "analyze_wildcard": true, - "time_zone": "Europe/Warsaw" + "analyze_wildcard": true } } ], @@ -3896,8 +3913,7 @@ var AggregationTests = []AggregationTestCase{ }, "date_histogram": { "calendar_interval": "1d", - "field": "order_date", - "time_zone": "Europe/Warsaw" + "field": "order_date" } } }, @@ -5094,16 +5110,16 @@ var AggregationTests = []AggregationTestCase{ `FROM ` + TableName + ` ` + `WHERE ("timestamp">=parseDateTime64BestEffort('2024-04-16T12:15:11.790Z') ` + `AND "timestamp"<=parseDateTime64BestEffort('2024-04-16T12:30:11.790Z'))`, - `SELECT count(if(("bytes_gauge">=0.000000 AND "bytes_gauge"<1000.000000),1,NULL)), ` + - `count(if(("bytes_gauge">=1000.000000 AND "bytes_gauge"<2000.000000),1,NULL)), ` + - `count(if("bytes_gauge">=-5.500000,1,NULL)), ` + - `count(if("bytes_gauge"<6.555000,1,NULL)), ` + + `SELECT count(if(("bytes_gauge">=0 AND "bytes_gauge"<1000),1,NULL)), ` + + `count(if(("bytes_gauge">=1000 AND "bytes_gauge"<2000),1,NULL)), ` + + `count(if("bytes_gauge">=-5.5,1,NULL)), ` + + `count(if("bytes_gauge"<6.555,1,NULL)), ` + `count(), count() FROM ` + TableName + ` WHERE ("timestamp">=parseDateTime64BestEffort('2024-04-16T12:15:11.790Z') ` + `AND "timestamp"<=parseDateTime64BestEffort('2024-04-16T12:30:11.790Z'))`, - `SELECT count(if(("bytes_gauge">=0.000000 AND "bytes_gauge"<1000.000000),1,NULL)), ` + - `count(if(("bytes_gauge">=1000.000000 AND "bytes_gauge"<2000.000000),1,NULL)), ` + - `count(if("bytes_gauge">=-5.500000,1,NULL)), ` + - `count(if("bytes_gauge"<6.555000,1,NULL)), ` + + `SELECT count(if(("bytes_gauge">=0 AND "bytes_gauge"<1000),1,NULL)), ` + + `count(if(("bytes_gauge">=1000 AND "bytes_gauge"<2000),1,NULL)), ` + + `count(if("bytes_gauge">=-5.5,1,NULL)), ` + + `count(if("bytes_gauge"<6.555,1,NULL)), ` + `count(), count() FROM ` + TableName + ` WHERE ("timestamp">=parseDateTime64BestEffort('2024-04-16T12:15:11.790Z') ` + `AND "timestamp"<=parseDateTime64BestEffort('2024-04-16T12:30:11.790Z'))`, }, @@ -5686,20 +5702,20 @@ var AggregationTests = []AggregationTestCase{ `FROM ` + TableName + ` ` + `WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-10T13:47:56.077Z') ` + `AND "timestamp"<=parseDateTime64BestEffort('2024-05-10T14:02:56.077Z'))`, - `SELECT floor("bytes"/100.000000)*100.000000, count() ` + + `SELECT floor("bytes"/100)*100, count() ` + `FROM ` + TableName + ` ` + `WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-10T13:47:56.077Z') ` + `AND "timestamp"<=parseDateTime64BestEffort('2024-05-10T14:02:56.077Z')) ` + - `GROUP BY floor("bytes"/100.000000)*100.000000 ` + - `ORDER BY floor("bytes"/100.000000)*100.000000`, + `GROUP BY floor("bytes"/100)*100 ` + + `ORDER BY floor("bytes"/100)*100`, }, ExpectedPancakeSQL: ` - SELECT floor("bytes"/100.000000)*100.000000 AS "aggr__2__key_0", + SELECT floor("bytes"/100)*100 AS "aggr__2__key_0", count(*) AS "aggr__2__count" FROM ` + TableName + ` WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-10T13:47:56.077Z') AND "timestamp"<=parseDateTime64BestEffort('2024-05-10T14:02:56.077Z')) - GROUP BY floor("bytes"/100.000000)*100.000000 AS "aggr__2__key_0" + GROUP BY floor("bytes"/100)*100 AS "aggr__2__key_0" ORDER BY "aggr__2__key_0" ASC`, }, { // [26] @@ -5714,8 +5730,7 @@ var AggregationTests = []AggregationTestCase{ "date_histogram": { "field": "timestamp", "fixed_interval": "30s", - "min_doc_count": 0, - "time_zone": "Europe/Warsaw" + "min_doc_count": 0 } } }, @@ -5998,28 +6013,28 @@ var AggregationTests = []AggregationTestCase{ {{Cols: 
[]model.QueryResultCol{model.NewQueryResultCol("hits", uint64(6))}}}, { {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("rspContentLen" / 2000.000000) * 2000.000000`, 0.0), + model.NewQueryResultCol(`floor("rspContentLen" / 2000) * 2000`, 0.0), model.NewQueryResultCol("message", "a"), model.NewQueryResultCol("doc_count", 2), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("rspContentLen" / 2000.000000) * 2000.000000`, 0.0), + model.NewQueryResultCol(`floor("rspContentLen" / 2000) * 2000`, 0.0), model.NewQueryResultCol("message", "b"), model.NewQueryResultCol("doc_count", 1), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("rspContentLen" / 2000.000000) * 2000.000000`, 4000.0), + model.NewQueryResultCol(`floor("rspContentLen" / 2000) * 2000`, 4000.0), model.NewQueryResultCol("message", "c"), model.NewQueryResultCol("doc_count", 1), }}, }, { {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("rspContentLen" / 2000.000000) * 2000.000000`, 0.0), + model.NewQueryResultCol(`floor("rspContentLen" / 2000) * 2000`, 0.0), model.NewQueryResultCol("doc_count", 3), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("rspContentLen" / 2000.000000) * 2000.000000`, 4000.0), + model.NewQueryResultCol(`floor("rspContentLen" / 2000) * 2000`, 4000.0), model.NewQueryResultCol("doc_count", 1), }}, }, @@ -6053,14 +6068,14 @@ var AggregationTests = []AggregationTestCase{ ExpectedSQLs: []string{ `SELECT count() ` + `FROM ` + TableName, - `SELECT floor("rspContentLen" / 2000.000000) * 2000.000000, "message", count() ` + + `SELECT floor("rspContentLen" / 2000) * 2000, "message", count() ` + `FROM ` + TableName + ` ` + - `GROUP BY floor("rspContentLen" / 2000.000000) * 2000.000000, "message" ` + - `ORDER BY floor("rspContentLen" / 2000.000000) * 2000.000000, "message"`, - `SELECT floor("rspContentLen" / 2000.000000) * 2000.000000, count() ` + + `GROUP BY floor("rspContentLen" / 2000) * 2000, "message" ` + + `ORDER BY floor("rspContentLen" / 2000) * 2000, "message"`, + `SELECT floor("rspContentLen" / 2000) * 2000, count() ` + `FROM ` + TableName + ` ` + - `GROUP BY floor("rspContentLen" / 2000.000000) * 2000.000000 ` + - `ORDER BY floor("rspContentLen" / 2000.000000) * 2000.000000`, + `GROUP BY floor("rspContentLen" / 2000) * 2000 ` + + `ORDER BY floor("rspContentLen" / 2000) * 2000`, }, ExpectedPancakeSQL: ` SELECT "aggr__0__key_0", "aggr__0__count", "aggr__0__2__parent_count", @@ -6074,13 +6089,13 @@ var AggregationTests = []AggregationTestCase{ "aggr__0__2__order_1" DESC, "aggr__0__2__key_0" ASC) AS "aggr__0__2__order_1_rank" FROM ( - SELECT floor("rspContentLen"/2000.000000)*2000.000000 AS "aggr__0__key_0", + SELECT floor("rspContentLen"/2000)*2000 AS "aggr__0__key_0", sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__2__parent_count", "message" AS "aggr__0__2__key_0", count(*) AS "aggr__0__2__count", count() AS "aggr__0__2__order_1" FROM ` + TableName + ` - GROUP BY floor("rspContentLen"/2000.000000)*2000.000000 AS "aggr__0__key_0", + GROUP BY floor("rspContentLen"/2000)*2000 AS "aggr__0__key_0", "message" AS "aggr__0__2__key_0")) WHERE "aggr__0__2__order_1_rank"<=5 ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__2__order_1_rank" ASC`, @@ -6851,8 +6866,7 @@ var AggregationTests = []AggregationTestCase{ "date_histogram": { "calendar_interval": "1d", "field": "@timestamp", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } 
} }, @@ -7162,8 +7176,8 @@ var AggregationTests = []AggregationTestCase{ "variance_sampling": "NaN" }, "doc_count": 1, - "key": 1716333600000, - "key_as_string": "2024-05-21T23:20:00.000" + "key": 1716326400000, + "key_as_string": "2024-05-21T21:20:00.000" }, { "1": { @@ -7211,8 +7225,8 @@ var AggregationTests = []AggregationTestCase{ "variance_sampling": 2856099.6964285714 }, "doc_count": 8, - "key": 1716377400000, - "key_as_string": "2024-05-22T11:30:00.000" + "key": 1716370200000, + "key_as_string": "2024-05-22T09:30:00.000" } ] } @@ -7348,7 +7362,7 @@ var AggregationTests = []AggregationTestCase{ `FROM ` + TableName + ` ` + `WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-21T21:35:34.210Z') ` + `AND "timestamp"<=parseDateTime64BestEffort('2024-05-22T12:35:34.210Z'))`, - `SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 600000), ` + + `SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone("timestamp",'Europe/Warsaw'))*1000) / 600000), ` + `count("bytes"), ` + `minOrNull("bytes"), ` + `maxOrNull("bytes"), ` + @@ -7362,9 +7376,11 @@ var AggregationTests = []AggregationTestCase{ `FROM ` + TableName + ` ` + `WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-21T21:35:34.210Z') ` + `AND "timestamp"<=parseDateTime64BestEffort('2024-05-22T12:35:34.210Z')) ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000) ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000)`, - `SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 600000), ` + + `GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone(` + + `"timestamp",'Europe/Warsaw'))*1000) / 600000) ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone(` + + `"timestamp",'Europe/Warsaw'))*1000) / 600000)`, + `SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone("timestamp",'Europe/Warsaw'))*1000) / 600000), ` + `count("bytes"), ` + `minOrNull("bytes"), ` + `maxOrNull("bytes"), ` + @@ -7378,19 +7394,25 @@ var AggregationTests = []AggregationTestCase{ `FROM ` + TableName + ` ` + `WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-21T21:35:34.210Z') ` + `AND "timestamp"<=parseDateTime64BestEffort('2024-05-22T12:35:34.210Z')) ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000) ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000)`, - `SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 600000), ` + + `GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone(` + + `"timestamp",'Europe/Warsaw'))*1000) / 600000) ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone(` + + `"timestamp",'Europe/Warsaw'))*1000) / 600000)`, + `SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone(` + + `"timestamp",'Europe/Warsaw'))*1000) / 600000), ` + `count() ` + `FROM ` + TableName + ` ` + `WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-21T21:35:34.210Z') ` + `AND "timestamp"<=parseDateTime64BestEffort('2024-05-22T12:35:34.210Z')) ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000) ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000)`, + `GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone(` + + `"timestamp",'Europe/Warsaw'))*1000) / 600000) ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone(` + + `"timestamp",'Europe/Warsaw'))*1000) / 600000)`, }, ExpectedPancakeSQL: ` - SELECT 
toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS "aggr__0__key_0" - , count(*) AS "aggr__0__count", count("bytes") AS "metric__0__1_col_0", + SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 600000) AS "aggr__0__key_0", + count(*) AS "aggr__0__count", count("bytes") AS "metric__0__1_col_0", minOrNull("bytes") AS "metric__0__1_col_1", maxOrNull("bytes") AS "metric__0__1_col_2", avgOrNull("bytes") AS "metric__0__1_col_3", @@ -7413,8 +7435,8 @@ var AggregationTests = []AggregationTestCase{ FROM ` + TableName + ` WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-21T21:35:34.210Z') AND "timestamp"<=parseDateTime64BestEffort('2024-05-22T12:35:34.210Z')) - GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS - "aggr__0__key_0" + GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 600000) AS "aggr__0__key_0" ORDER BY "aggr__0__key_0" ASC`, }, { // [33] @@ -7934,7 +7956,7 @@ var AggregationTests = []AggregationTestCase{ "aggregations": { "0": { "meta": { - "bucketSize": 3600.000000, + "bucketSize": 3600, "intervalString": "3600s", "seriesId": "61ca57f1-469d-11e7-af02-69e470af7417", "timeField": "timestamp" diff --git a/quesma/testdata/aggregation_requests_2.go b/quesma/testdata/aggregation_requests_2.go index 0a8a796a7..ea0ec310d 100644 --- a/quesma/testdata/aggregation_requests_2.go +++ b/quesma/testdata/aggregation_requests_2.go @@ -12,8 +12,6 @@ import ( var AggregationTests2 = []AggregationTestCase{ { // [42] - // FIXME results for this test are not 100% correct for day/week intervals (maybe others too) - // see https://github.com/QuesmaOrg/quesma/issues/307 TestName: "histogram with all possible calendar_intervals", QueryRequestJson: ` { @@ -33,8 +31,7 @@ var AggregationTests2 = []AggregationTestCase{ "date_histogram": { "calendar_interval": "minute", "field": "@timestamp", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } }, "hour1": { @@ -49,8 +46,7 @@ var AggregationTests2 = []AggregationTestCase{ "date_histogram": { "calendar_interval": "hour", "field": "@timestamp", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } }, "day1": { @@ -65,8 +61,7 @@ var AggregationTests2 = []AggregationTestCase{ "date_histogram": { "calendar_interval": "day", "field": "@timestamp", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } }, "week1": { @@ -81,8 +76,7 @@ var AggregationTests2 = []AggregationTestCase{ "date_histogram": { "calendar_interval": "week", "field": "@timestamp", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } }, "month1": { @@ -97,8 +91,7 @@ var AggregationTests2 = []AggregationTestCase{ "date_histogram": { "calendar_interval": "month", "field": "@timestamp", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } }, "quarter1": { @@ -113,8 +106,7 @@ var AggregationTests2 = []AggregationTestCase{ "date_histogram": { "calendar_interval": "quarter", "field": "@timestamp", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } }, "year1": { @@ -129,8 +121,7 @@ var AggregationTests2 = []AggregationTestCase{ "date_histogram": { "calendar_interval": "year", "field": "@timestamp", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } }, @@ -285,16 +276,16 @@ var AggregationTests2 = []AggregationTestCase{ } ] }, - "day2": { + "day1": { "buckets": [ { - "key_as_string": 
"2024-06-10T00:00:00.000", - "key": 1717977600000, + "key_as_string": "2024-06-09T22:00:00.000", + "key": 1717970400000, "doc_count": 33 } ] }, - "day1": { + "day2": { "buckets": [ { "key_as_string": "2024-06-10T00:00:00.000", @@ -308,15 +299,15 @@ var AggregationTests2 = []AggregationTestCase{ ExpectedResults: [][]model.QueryResultRow{ {{Cols: []model.QueryResultCol{model.NewQueryResultCol("hits", uint64(33))}}}, {{Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000)`, int64(1717980400000/86400000)), + model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000)`, int64(1717977600000/86400000)), model.NewQueryResultCol("count()", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000)`, int64(1717980400000/86400000)), + model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000)`, int64(1717977600000/86400000)), model.NewQueryResultCol("count()", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000)`, int64(1718024400000/3600000)), + model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000)`, int64(1718031600000/3600000)), model.NewQueryResultCol("count()", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ @@ -325,11 +316,11 @@ var AggregationTests2 = []AggregationTestCase{ }}}, { {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli("@timestamp") / 60000)`, int64(1718025840000/60000)), + model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli("@timestamp") / 60000)`, int64(1718033040000/60000)), model.NewQueryResultCol("count()", uint64(9)), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli("@timestamp") / 60000)`, int64(1718025900000/60000)), + model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli("@timestamp") / 60000)`, int64(1718033100000/60000)), model.NewQueryResultCol("count()", uint64(24)), }}, }, @@ -344,7 +335,7 @@ var AggregationTests2 = []AggregationTestCase{ }}, }, {{Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli(toStartOfMonth("@timestamp")))`, int64(1717192800000)), + model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli(toStartOfMonth("@timestamp")))`, int64(1717200000000)), model.NewQueryResultCol("count()", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ @@ -352,7 +343,7 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("count()", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli(toStartOfQuarter("@timestamp")))`, int64(1711922400000)), + model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli(toStartOfQuarter("@timestamp")))`, int64(1711929600000)), model.NewQueryResultCol("count()", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ @@ -360,7 +351,7 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("count()", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli(toStartOfWeek("@timestamp")))`, int64(1717970400000)), + model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli(toStartOfWeek("@timestamp")))`, int64(1717977600000)), model.NewQueryResultCol("count()", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ @@ -368,7 +359,7 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("count()", 
uint64(33)), }}}, {{Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli(toStartOfYear("@timestamp")))`, int64(1704063600000)), + model.NewQueryResultCol(`toInt64(toUnixTimestamp64Milli(toStartOfYear("@timestamp")))`, int64(1704067200000)), model.NewQueryResultCol("count()", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ @@ -388,7 +379,7 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__day2__count", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ - model.NewQueryResultCol("aggr__hour1__key_0", int64(1718024400000/3600000)), + model.NewQueryResultCol("aggr__hour1__key_0", int64(1718031600000/3600000)), model.NewQueryResultCol("aggr__hour1__count", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ @@ -397,11 +388,11 @@ var AggregationTests2 = []AggregationTestCase{ }}}, { {Cols: []model.QueryResultCol{ - model.NewQueryResultCol("aggr__minute1__key_0", int64(1718025840000/60000)), + model.NewQueryResultCol("aggr__minute1__key_0", int64(1718033040000/60000)), model.NewQueryResultCol("aggr__minute1__count", uint64(9)), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol("aggr__minute1__key_0", int64(1718025900000/60000)), + model.NewQueryResultCol("aggr__minute1__key_0", int64(1718033100000/60000)), model.NewQueryResultCol("aggr__minute1__count", uint64(24)), }}, }, @@ -416,7 +407,7 @@ var AggregationTests2 = []AggregationTestCase{ }}, }, {{Cols: []model.QueryResultCol{ - model.NewQueryResultCol("aggr__month1__key_0", int64(1717192800000)), + model.NewQueryResultCol("aggr__month1__key_0", int64(1717200000000)), model.NewQueryResultCol("aggr__month1__count", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ @@ -424,7 +415,7 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__month2__count", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ - model.NewQueryResultCol("aggr__quarter1__key_0", int64(1711922400000)), + model.NewQueryResultCol("aggr__quarter1__key_0", int64(1711929600000)), model.NewQueryResultCol("aggr__quarter1__count", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ @@ -432,7 +423,7 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__quarter2__count", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ - model.NewQueryResultCol("aggr__week1__key_0", int64(1717970400000)), + model.NewQueryResultCol("aggr__week1__key_0", int64(1717977600000)), model.NewQueryResultCol("aggr__week1__count", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ @@ -440,7 +431,7 @@ var AggregationTests2 = []AggregationTestCase{ model.NewQueryResultCol("aggr__week2__count", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ - model.NewQueryResultCol("aggr__year1__key_0", int64(1704063600000)), + model.NewQueryResultCol("aggr__year1__key_0", int64(1704067200000)), model.NewQueryResultCol("aggr__year1__count", uint64(33)), }}}, {{Cols: []model.QueryResultCol{ @@ -450,69 +441,70 @@ var AggregationTests2 = []AggregationTestCase{ }, ExpectedSQLs: []string{ `SELECT count() FROM ` + TableName, - `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000), count() ` + + `SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 86400000), count() ` + `FROM ` + TableName + ` ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000) ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000)`, + `GROUP BY 
toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 86400000) ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 86400000)`, `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000), count() ` + `FROM ` + TableName + ` ` + `GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000) ` + `ORDER BY toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000)`, - `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000), count() ` + + `SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 3600000), count() ` + `FROM ` + TableName + ` ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000) ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000)`, + `GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 3600000) ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 3600000)`, `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000), count() ` + `FROM ` + TableName + ` ` + `GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000) ` + `ORDER BY toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000)`, - `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 60000), count() ` + + `SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 60000), count() ` + `FROM ` + TableName + ` ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("@timestamp") / 60000)`, + `GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 60000) ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 60000)`, `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 60000), count() ` + `FROM ` + TableName + ` ` + `GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) ` + `ORDER BY toInt64(toUnixTimestamp64Milli("@timestamp") / 60000)`, - `SELECT toInt64(toUnixTimestamp(toStartOfMonth("@timestamp")))*1000, count() ` + + `SELECT toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("@timestamp",'Europe/Warsaw'))))*1000, count() ` + `FROM ` + TableName + ` ` + - `GROUP BY toInt64(toUnixTimestamp(toStartOfMonth("@timestamp")))*1000 ` + - `ORDER BY toInt64(toUnixTimestamp(toStartOfMonth("@timestamp")))*1000`, + `GROUP BY toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 ` + + `ORDER BY toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("@timestamp",'Europe/Warsaw'))))*1000`, `SELECT toInt64(toUnixTimestamp(toStartOfMonth("@timestamp")))*1000, count() ` + `FROM ` + TableName + ` ` + `GROUP BY toInt64(toUnixTimestamp(toStartOfMonth("@timestamp")))*1000 ` + `ORDER BY toInt64(toUnixTimestamp(toStartOfMonth("@timestamp")))*1000`, - `SELECT toInt64(toUnixTimestamp(toStartOfQuarter("@timestamp")))*1000, count() ` + + `SELECT toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone("@timestamp",'Europe/Warsaw'))))*1000, count() ` + `FROM ` + TableName + ` ` + - `GROUP BY toInt64(toUnixTimestamp(toStartOfQuarter("@timestamp")))*1000 ` + - `ORDER BY toInt64(toUnixTimestamp(toStartOfQuarter("@timestamp")))*1000`, + `GROUP BY 
toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 ` + + `ORDER BY toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone("@timestamp",'Europe/Warsaw'))))*1000`, `SELECT toInt64(toUnixTimestamp(toStartOfQuarter("@timestamp")))*1000, count() ` + `FROM ` + TableName + ` ` + `GROUP BY toInt64(toUnixTimestamp(toStartOfQuarter("@timestamp")))*1000 ` + `ORDER BY toInt64(toUnixTimestamp(toStartOfQuarter("@timestamp")))*1000`, - `SELECT toInt64(toUnixTimestamp(toStartOfWeek("@timestamp")))*1000, count() ` + + `SELECT toInt64(toUnixTimestamp(toStartOfWeek(toTimezone("@timestamp",'Europe/Warsaw'))))*1000, count() ` + `FROM ` + TableName + ` ` + - `GROUP BY toInt64(toUnixTimestamp(toStartOfWeek("@timestamp")))*1000 ` + - `ORDER BY toInt64(toUnixTimestamp(toStartOfWeek("@timestamp")))*1000`, + `GROUP BY toInt64(toUnixTimestamp(toStartOfWeek(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 ` + + `ORDER BY toInt64(toUnixTimestamp(toStartOfWeek(toTimezone("@timestamp",'Europe/Warsaw'))))*1000`, `SELECT toInt64(toUnixTimestamp(toStartOfWeek("@timestamp")))*1000, count() ` + `FROM ` + TableName + ` ` + `GROUP BY toInt64(toUnixTimestamp(toStartOfWeek("@timestamp")))*1000 ` + `ORDER BY toInt64(toUnixTimestamp(toStartOfWeek("@timestamp")))*1000`, - `SELECT toInt64(toUnixTimestamp(toStartOfYear("@timestamp")))*1000, count() ` + + `SELECT toInt64(toUnixTimestamp(toStartOfYear(toTimezone("@timestamp",'Europe/Warsaw'))))*1000, count() ` + `FROM ` + TableName + ` ` + - `GROUP BY toInt64(toUnixTimestamp(toStartOfYear("@timestamp")))*1000 ` + - `ORDER BY toInt64(toUnixTimestamp(toStartOfYear("@timestamp")))*1000`, + `GROUP BY toInt64(toUnixTimestamp(toStartOfYear(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 ` + + `ORDER BY toInt64(toUnixTimestamp(toStartOfYear(toTimezone("@timestamp",'Europe/Warsaw'))))*1000`, `SELECT toInt64(toUnixTimestamp(toStartOfYear("@timestamp")))*1000, count() ` + `FROM ` + TableName + ` ` + `GROUP BY toInt64(toUnixTimestamp(toStartOfYear("@timestamp")))*1000 ` + `ORDER BY toInt64(toUnixTimestamp(toStartOfYear("@timestamp")))*1000`, }, ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000) AS - "aggr__day1__key_0", count(*) AS "aggr__day1__count" + SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( + "@timestamp",'Europe/Warsaw'))*1000) / 86400000) AS "aggr__day1__key_0", + count(*) AS "aggr__day1__count" FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000) AS - "aggr__day1__key_0" + GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( + "@timestamp",'Europe/Warsaw'))*1000) / 86400000) AS "aggr__day1__key_0" ORDER BY "aggr__day1__key_0" ASC`, ExpectedAdditionalPancakeSQLs: []string{ `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000) AS @@ -521,11 +513,12 @@ var AggregationTests2 = []AggregationTestCase{ GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 86400000) AS "aggr__day2__key_0" ORDER BY "aggr__day2__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000) AS - "aggr__hour1__key_0", count(*) AS "aggr__hour1__count" + `SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( + "@timestamp",'Europe/Warsaw'))*1000) / 3600000) AS "aggr__hour1__key_0", + count(*) AS "aggr__hour1__count" FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000) AS - "aggr__hour1__key_0" + GROUP BY 
toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( + "@timestamp",'Europe/Warsaw'))*1000) / 3600000) AS "aggr__hour1__key_0" ORDER BY "aggr__hour1__key_0" ASC`, `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000) AS "aggr__hour2__key_0", count(*) AS "aggr__hour2__count" @@ -533,11 +526,12 @@ var AggregationTests2 = []AggregationTestCase{ GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000) AS "aggr__hour2__key_0" ORDER BY "aggr__hour2__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS - "aggr__minute1__key_0", count(*) AS "aggr__minute1__count" + `SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( + "@timestamp",'Europe/Warsaw'))*1000) / 60000) AS "aggr__minute1__key_0", + count(*) AS "aggr__minute1__count" FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS - "aggr__minute1__key_0" + GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( + "@timestamp",'Europe/Warsaw'))*1000) / 60000) AS "aggr__minute1__key_0" ORDER BY "aggr__minute1__key_0" ASC`, `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS "aggr__minute2__key_0", count(*) AS "aggr__minute2__count" @@ -545,11 +539,11 @@ var AggregationTests2 = []AggregationTestCase{ GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS "aggr__minute2__key_0" ORDER BY "aggr__minute2__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp(toStartOfMonth("@timestamp")))*1000 AS - "aggr__month1__key_0", count(*) AS "aggr__month1__count" + `SELECT toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 + AS "aggr__month1__key_0", count(*) AS "aggr__month1__count" FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp(toStartOfMonth("@timestamp")))*1000 AS - "aggr__month1__key_0" + GROUP BY toInt64(toUnixTimestamp(toStartOfMonth(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 + AS "aggr__month1__key_0" ORDER BY "aggr__month1__key_0" ASC`, `SELECT toInt64(toUnixTimestamp(toStartOfMonth("@timestamp")))*1000 AS "aggr__month2__key_0", count(*) AS "aggr__month2__count" @@ -557,10 +551,10 @@ var AggregationTests2 = []AggregationTestCase{ GROUP BY toInt64(toUnixTimestamp(toStartOfMonth("@timestamp")))*1000 AS "aggr__month2__key_0" ORDER BY "aggr__month2__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp(toStartOfQuarter("@timestamp")))*1000 AS - "aggr__quarter1__key_0", count(*) AS "aggr__quarter1__count" + `SELECT toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 + AS "aggr__quarter1__key_0", count(*) AS "aggr__quarter1__count" FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp(toStartOfQuarter("@timestamp")))*1000 AS + GROUP BY toInt64(toUnixTimestamp(toStartOfQuarter(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 AS "aggr__quarter1__key_0" ORDER BY "aggr__quarter1__key_0" ASC`, `SELECT toInt64(toUnixTimestamp(toStartOfQuarter("@timestamp")))*1000 AS @@ -569,11 +563,11 @@ var AggregationTests2 = []AggregationTestCase{ GROUP BY toInt64(toUnixTimestamp(toStartOfQuarter("@timestamp")))*1000 AS "aggr__quarter2__key_0" ORDER BY "aggr__quarter2__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp(toStartOfWeek("@timestamp")))*1000 AS + `SELECT toInt64(toUnixTimestamp(toStartOfWeek(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 AS "aggr__week1__key_0", count(*) AS "aggr__week1__count" FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp(toStartOfWeek("@timestamp")))*1000 AS - "aggr__week1__key_0" + 
GROUP BY toInt64(toUnixTimestamp(toStartOfWeek(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 + AS "aggr__week1__key_0" ORDER BY "aggr__week1__key_0" ASC`, `SELECT toInt64(toUnixTimestamp(toStartOfWeek("@timestamp")))*1000 AS "aggr__week2__key_0", count(*) AS "aggr__week2__count" @@ -581,11 +575,11 @@ var AggregationTests2 = []AggregationTestCase{ GROUP BY toInt64(toUnixTimestamp(toStartOfWeek("@timestamp")))*1000 AS "aggr__week2__key_0" ORDER BY "aggr__week2__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp(toStartOfYear("@timestamp")))*1000 AS - "aggr__year1__key_0", count(*) AS "aggr__year1__count" + `SELECT toInt64(toUnixTimestamp(toStartOfYear(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 + AS "aggr__year1__key_0", count(*) AS "aggr__year1__count" FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp(toStartOfYear("@timestamp")))*1000 AS - "aggr__year1__key_0" + GROUP BY toInt64(toUnixTimestamp(toStartOfYear(toTimezone("@timestamp",'Europe/Warsaw'))))*1000 + AS "aggr__year1__key_0" ORDER BY "aggr__year1__key_0" ASC`, `SELECT toInt64(toUnixTimestamp(toStartOfYear("@timestamp")))*1000 AS "aggr__year2__key_0", count(*) AS "aggr__year2__count" @@ -1855,16 +1849,14 @@ var AggregationTests2 = []AggregationTestCase{ "date_histogram": { "field": "@timestamp", "fixed_interval": "30s", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 }, "aggs": { "3": { "date_histogram": { "field": "@timestamp", "fixed_interval": "40s", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } } @@ -2175,12 +2167,12 @@ var AggregationTests2 = []AggregationTestCase{ `FROM ` + TableName + ` ` + `WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-10T13:47:56.077Z') ` + `AND "timestamp"<=parseDateTime64BestEffort('2024-05-10T14:02:56.077Z'))`, - `SELECT floor("bytes"/100.000000)*100.000000, count() ` + + `SELECT floor("bytes"/100)*100, count() ` + `FROM ` + TableName + ` ` + `WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-10T13:47:56.077Z') ` + `AND "timestamp"<=parseDateTime64BestEffort('2024-05-10T14:02:56.077Z')) ` + - `GROUP BY floor("bytes"/100.000000)*100.000000 ` + - `ORDER BY floor("bytes"/100.000000)*100.000000`, + `GROUP BY floor("bytes"/100)*100 ` + + `ORDER BY floor("bytes"/100)*100`, }, ExpectedPancakeSQL: ` SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__3__key_0", @@ -2193,15 +2185,15 @@ var AggregationTests2 = []AggregationTestCase{ dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY "aggr__2__3__key_0" ASC) AS "aggr__2__3__order_1_rank" FROM ( - SELECT floor("bytes"/100.000000)*100.000000 AS "aggr__2__key_0", + SELECT floor("bytes"/100)*100 AS "aggr__2__key_0", sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - floor("bytes2"/5.000000)*5.000000 AS "aggr__2__3__key_0", + floor("bytes2"/5)*5 AS "aggr__2__3__key_0", count(*) AS "aggr__2__3__count" FROM ` + TableName + ` WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-10T13:47:56.077Z') AND "timestamp"<=parseDateTime64BestEffort('2024-05-10T14:02:56.077Z')) - GROUP BY floor("bytes"/100.000000)*100.000000 AS "aggr__2__key_0", - floor("bytes2"/5.000000)*5.000000 AS "aggr__2__3__key_0")) + GROUP BY floor("bytes"/100)*100 AS "aggr__2__key_0", + floor("bytes2"/5)*5 AS "aggr__2__3__key_0")) ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__3__order_1_rank" ASC`, }, { // [50] TODO: what about nulls in histogram? Maybe they should be treated like in terms? 
@@ -2380,12 +2372,12 @@ var AggregationTests2 = []AggregationTestCase{ `FROM ` + TableName + ` ` + `WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-10T13:47:56.077Z') ` + `AND "timestamp"<=parseDateTime64BestEffort('2024-05-10T14:02:56.077Z'))`, - `SELECT floor("bytes"/100.000000)*100.000000, count() ` + + `SELECT floor("bytes"/100)*100, count() ` + `FROM ` + TableName + ` ` + `WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-10T13:47:56.077Z') ` + `AND "timestamp"<=parseDateTime64BestEffort('2024-05-10T14:02:56.077Z')) ` + - `GROUP BY floor("bytes"/100.000000)*100.000000 ` + - `ORDER BY floor("bytes"/100.000000)*100.000000`, + `GROUP BY floor("bytes"/100)*100 ` + + `ORDER BY floor("bytes"/100)*100`, }, ExpectedPancakeSQL: ` SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__3__key_0", @@ -2398,15 +2390,15 @@ var AggregationTests2 = []AggregationTestCase{ dense_rank() OVER (PARTITION BY "aggr__2__key_0" ORDER BY "aggr__2__3__key_0" ASC) AS "aggr__2__3__order_1_rank" FROM ( - SELECT floor("bytes"/100.000000)*100.000000 AS "aggr__2__key_0", + SELECT floor("bytes"/100)*100 AS "aggr__2__key_0", sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", - floor("bytes2"/5.000000)*5.000000 AS "aggr__2__3__key_0", + floor("bytes2"/5)*5 AS "aggr__2__3__key_0", count(*) AS "aggr__2__3__count" FROM ` + TableName + ` WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-10T13:47:56.077Z') AND "timestamp"<=parseDateTime64BestEffort('2024-05-10T14:02:56.077Z')) - GROUP BY floor("bytes"/100.000000)*100.000000 AS "aggr__2__key_0", - floor("bytes2"/5.000000)*5.000000 AS "aggr__2__3__key_0")) + GROUP BY floor("bytes"/100)*100 AS "aggr__2__key_0", + floor("bytes2"/5)*5 AS "aggr__2__3__key_0")) ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__3__order_1_rank" ASC`, }, { // [51] diff --git a/quesma/testdata/clients/kunkka.go b/quesma/testdata/clients/kunkka.go index 9df066724..be30b89f5 100644 --- a/quesma/testdata/clients/kunkka.go +++ b/quesma/testdata/clients/kunkka.go @@ -21,8 +21,7 @@ var KunkkaTests = []testdata.AggregationTestCase{ "0": { "date_histogram": { "field": "@timestamp", - "calendar_interval": "1h", - "time_zone": "Europe/Warsaw" + "calendar_interval": "1h" }, "aggs": { "1": { @@ -347,8 +346,8 @@ var KunkkaTests = []testdata.AggregationTestCase{ "doc_count": 0 }, "doc_count": 2, - "key": 1718794800000, - "key_as_string": "2024-06-19T11:00:00.000" + "key": 1718787600000, + "key_as_string": "2024-06-19T09:00:00.000" }, { "1": { @@ -361,8 +360,8 @@ var KunkkaTests = []testdata.AggregationTestCase{ "doc_count": 0 }, "doc_count": 3, - "key": 1718798400000, - "key_as_string": "2024-06-19T12:00:00.000" + "key": 1718791200000, + "key_as_string": "2024-06-19T10:00:00.000" }, { "1": { @@ -375,8 +374,8 @@ var KunkkaTests = []testdata.AggregationTestCase{ "doc_count": 1 }, "doc_count": 2, - "key": 1718802000000, - "key_as_string": "2024-06-19T13:00:00.000" + "key": 1718794800000, + "key_as_string": "2024-06-19T11:00:00.000" } ] } @@ -482,15 +481,16 @@ var KunkkaTests = []testdata.AggregationTestCase{ `ORDER BY toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000)`, }, ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000) AS - "aggr__0__key_0", count(*) AS "aggr__0__count", + SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( + "@timestamp", 'Europe/Warsaw'))*1000) / 3600000) AS "aggr__0__key_0", + count(*) AS "aggr__0__count", sumOrNull("spent") AS "metric__0__1_col_0", countIf(` + fullTextFieldName + ` iLIKE 
'%started%') AS "aggr__0__2-bucket__count", sumOrNullIf("multiplier", ` + fullTextFieldName + ` iLIKE '%started%') AS "metric__0__2-bucket__2-metric_col_0" FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 3600000) AS - "aggr__0__key_0" + GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( + "@timestamp", 'Europe/Warsaw'))*1000) / 3600000) AS "aggr__0__key_0" ORDER BY "aggr__0__key_0" ASC`, }, { // [2] diff --git a/quesma/testdata/kibana-visualize/aggregation_requests.go b/quesma/testdata/kibana-visualize/aggregation_requests.go index c8e006604..281dff5a6 100644 --- a/quesma/testdata/kibana-visualize/aggregation_requests.go +++ b/quesma/testdata/kibana-visualize/aggregation_requests.go @@ -130,8 +130,8 @@ var AggregationTests = []testdata.AggregationTestCase{ "sum_other_doc_count": 1 }, "doc_count": 4, - "key": 1716834210000, - "key_as_string": "2024-05-27T18:23:30.000" + "key": 1716827010000, + "key_as_string": "2024-05-27T16:23:30.000" }, { "1": { @@ -157,8 +157,8 @@ var AggregationTests = []testdata.AggregationTestCase{ "sum_other_doc_count": 12 }, "doc_count": 16, - "key": 1716834270000, - "key_as_string": "2024-05-27T18:24:30.000" + "key": 1716827070000, + "key_as_string": "2024-05-27T16:24:30.000" } ] } @@ -274,19 +274,19 @@ var AggregationTests = []testdata.AggregationTestCase{ `FROM ` + testdata.TableName + ` ` + `WHERE ("@timestamp">=parseDateTime64BestEffort('2024-05-27T11:59:56.627Z') ` + `AND "@timestamp"<=parseDateTime64BestEffort('2024-05-27T12:14:56.627Z'))`, - `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 30000), ` + + `SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 30000), ` + `"severity", "source", count() ` + `FROM ` + testdata.TableName + ` ` + `WHERE ("@timestamp">=parseDateTime64BestEffort('2024-05-27T11:59:56.627Z') ` + `AND "@timestamp"<=parseDateTime64BestEffort('2024-05-27T12:14:56.627Z')) ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 30000), ` + `"severity", "source" ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("@timestamp") / 30000), ` + `"severity", "source"`, - `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 30000), count() ` + + `GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 30000), "severity", "source" ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 30000), "severity", "source"`, + `SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 30000), count() ` + `FROM ` + testdata.TableName + ` ` + `WHERE ("@timestamp">=parseDateTime64BestEffort('2024-05-27T11:59:56.627Z') ` + `AND "@timestamp"<=parseDateTime64BestEffort('2024-05-27T12:14:56.627Z')) ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("@timestamp") / 30000)`, + `GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 30000) ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone("@timestamp",'Europe/Warsaw'))*1000) / 30000)`, }, ExpectedPancakeSQL: ` SELECT "aggr__0__key_0", "aggr__0__count", "aggr__0__1__parent_count", @@ -302,8 +302,8 @@ var AggregationTests = []testdata.AggregationTestCase{ "aggr__0__1__order_2" DESC, "aggr__0__1__key_0" ASC, "aggr__0__1__key_1" 
ASC ) AS "aggr__0__1__order_1_rank" FROM ( - SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__0__key_0", + SELECT toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset( + toTimezone("@timestamp", 'Europe/Warsaw'))*1000) / 30000) AS "aggr__0__key_0", sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__count", sum(count(*)) OVER (PARTITION BY "aggr__0__key_0") AS "aggr__0__1__parent_count", "severity" AS "aggr__0__1__key_0", @@ -312,9 +312,9 @@ var AggregationTests = []testdata.AggregationTestCase{ FROM ` + TableName + ` WHERE ("@timestamp">=parseDateTime64BestEffort('2024-05-27T11:59:56.627Z') AND "@timestamp"<=parseDateTime64BestEffort('2024-05-27T12:14:56.627Z')) - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__0__key_0", "severity" AS "aggr__0__1__key_0", - "source" AS "aggr__0__1__key_1")) + GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset( + toTimezone("@timestamp", 'Europe/Warsaw'))*1000) / 30000) AS "aggr__0__key_0", + "severity" AS "aggr__0__1__key_0", "source" AS "aggr__0__1__key_1")) WHERE "aggr__0__1__order_1_rank"<=3 ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`, }, @@ -335,8 +335,7 @@ var AggregationTests = []testdata.AggregationTestCase{ "min": 1716811173493 }, "field": "@timestamp", - "fixed_interval": "30s", - "time_zone": "Europe/Warsaw" + "fixed_interval": "30s" } } }, @@ -566,8 +565,7 @@ var AggregationTests = []testdata.AggregationTestCase{ "min": 1716833578178 }, "field": "@timestamp", - "fixed_interval": "30s", - "time_zone": "Europe/Warsaw" + "fixed_interval": "30s" } }, "2": { @@ -873,8 +871,7 @@ var AggregationTests = []testdata.AggregationTestCase{ "date_histogram": { "field": "@timestamp", "fixed_interval": "30s", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } }, diff --git a/quesma/testdata/opensearch-visualize/aggregation_requests.go b/quesma/testdata/opensearch-visualize/aggregation_requests.go index 3de616c78..59721fe43 100644 --- a/quesma/testdata/opensearch-visualize/aggregation_requests.go +++ b/quesma/testdata/opensearch-visualize/aggregation_requests.go @@ -147,7 +147,7 @@ var AggregationTests = []testdata.AggregationTestCase{ `SELECT count(DISTINCT "ftd_session_time") ` + `FROM ` + testdata.TableName + ` ` + `WHERE (("epoch_time">='2024-04-27T14:25:59.383Z' AND "epoch_time"<='2024-04-27T14:40:59.383Z') AND "ftd_session_time">=-100)`, - `SELECT count(if("ftd_session_time"<1000.000000,1,NULL)), count(if("ftd_session_time">=-100.000000,1,NULL)), count() ` + + `SELECT count(if("ftd_session_time"<1000,1,NULL)), count(if("ftd_session_time">=-100,1,NULL)), count() ` + `FROM ` + testdata.TableName + ` ` + `WHERE ("epoch_time">='2024-04-27T14:25:59.383Z' AND "epoch_time"<='2024-04-27T14:40:59.383Z')`, }, @@ -508,8 +508,8 @@ var AggregationTests = []testdata.AggregationTestCase{ `FROM ` + testdata.TableName + ` ` + `WHERE (("epoch_time">='2024-04-28T14:34:22.674Z' AND "epoch_time"<='2024-04-28T14:49:22.674Z') ` + `AND "epoch_time_original">=1000)`, - `SELECT count(if(("epoch_time_original">=0.000000 AND "epoch_time_original"<1000.000000),1,NULL)), ` + - `count(if("epoch_time_original">=1000.000000,1,NULL)), count() ` + + `SELECT count(if(("epoch_time_original">=0 AND "epoch_time_original"<1000),1,NULL)), ` + + `count(if("epoch_time_original">=1000,1,NULL)), count() ` + `FROM ` + testdata.TableName + ` ` + `WHERE ("epoch_time">='2024-04-28T14:34:22.674Z' AND "epoch_time"<='2024-04-28T14:49:22.674Z')`, }, @@ -674,8 +674,8 
@@ var AggregationTests = []testdata.AggregationTestCase{ `FROM ` + testdata.TableName + ` ` + `WHERE (("epoch_time">='2024-04-18T04:40:12.252Z' AND "epoch_time"<='2024-05-03T04:40:12.252Z') ` + `AND ("properties::exoestimation_connection_speedinkbps">=1000 AND "properties::exoestimation_connection_speedinkbps"<2000))`, - `SELECT count(if(("properties::exoestimation_connection_speedinkbps">=0.000000 AND "properties::exoestimation_connection_speedinkbps"<1000.000000),1,NULL)), ` + - `count(if(("properties::exoestimation_connection_speedinkbps">=1000.000000 AND "properties::exoestimation_connection_speedinkbps"<2000.000000),1,NULL)), ` + + `SELECT count(if(("properties::exoestimation_connection_speedinkbps">=0 AND "properties::exoestimation_connection_speedinkbps"<1000),1,NULL)), ` + + `count(if(("properties::exoestimation_connection_speedinkbps">=1000 AND "properties::exoestimation_connection_speedinkbps"<2000),1,NULL)), ` + `count() ` + `FROM ` + testdata.TableName + ` ` + `WHERE ("epoch_time">='2024-04-18T04:40:12.252Z' AND "epoch_time"<='2024-05-03T04:40:12.252Z')`, @@ -1423,8 +1423,8 @@ var AggregationTests = []testdata.AggregationTestCase{ ] }, "doc_count": 9, - "key": 1714860000000, - "key_as_string": "2024-05-04T22:00:00.000" + "key": 1714852800000, + "key_as_string": "2024-05-04T20:00:00.000" }, { "1": { @@ -1440,8 +1440,8 @@ var AggregationTests = []testdata.AggregationTestCase{ ] }, "doc_count": 12, - "key": 1714863600000, - "key_as_string": "2024-05-04T23:00:00.000" + "key": 1714856400000, + "key_as_string": "2024-05-04T21:00:00.000" } ] } @@ -1492,25 +1492,26 @@ var AggregationTests = []testdata.AggregationTestCase{ }, ExpectedSQLs: []string{ `SELECT count() FROM ` + testdata.TableName, - `SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 3600000), ` + - `countIf("AvgTicketPrice"<=0.000000)/count(*)*100, ` + - `countIf("AvgTicketPrice"<=50000.000000)/count(*)*100 ` + + `SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone("timestamp",'Europe/Warsaw'))*1000) / 3600000), ` + + `countIf("AvgTicketPrice"<=0)/count(*)*100, ` + + `countIf("AvgTicketPrice"<=50000)/count(*)*100 ` + `FROM ` + testdata.TableName + ` ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 3600000) ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("timestamp") / 3600000)`, - `SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 3600000), count() ` + + `GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone("timestamp",'Europe/Warsaw'))*1000) / 3600000) ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone("timestamp",'Europe/Warsaw'))*1000) / 3600000)`, + `SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone("timestamp",'Europe/Warsaw'))*1000) / 3600000), count() ` + `FROM ` + testdata.TableName + ` ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 3600000) ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("timestamp") / 3600000)`, + `GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone("timestamp",'Europe/Warsaw'))*1000) / 3600000) ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone("timestamp",'Europe/Warsaw'))*1000) / 3600000)`, }, ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 3600000) AS - "aggr__2__key_0", count(*) AS "aggr__2__count", - countIf("AvgTicketPrice"<=0.000000)/count(*)*100 AS "metric__2__1_col_0", - countIf("AvgTicketPrice"<=50000.000000)/count(*)*100 AS "metric__2__1_col_1" + 
SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 3600000) AS "aggr__2__key_0", + count(*) AS "aggr__2__count", + countIf("AvgTicketPrice"<=0)/count(*)*100 AS "metric__2__1_col_0", + countIf("AvgTicketPrice"<=50000)/count(*)*100 AS "metric__2__1_col_1" FROM ` + TableName + ` - GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 3600000) AS - "aggr__2__key_0" + GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 3600000) AS "aggr__2__key_0" ORDER BY "aggr__2__key_0" ASC`, }, { // [8] diff --git a/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go b/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go index fdc7d1e8f..404a7ddaa 100644 --- a/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go +++ b/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go @@ -786,16 +786,16 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ `SELECT count() ` + `FROM ` + testdata.TableName, `NoDBQuery`, - `SELECT floor("bytes"/200.000000)*200.000000, count() ` + + `SELECT floor("bytes"/200)*200, count() ` + `FROM ` + testdata.TableName + ` ` + - `GROUP BY floor("bytes"/200.000000)*200.000000 ` + - `ORDER BY floor("bytes"/200.000000)*200.000000`, + `GROUP BY floor("bytes"/200)*200 ` + + `ORDER BY floor("bytes"/200)*200`, }, ExpectedPancakeSQL: ` - SELECT floor("bytes"/200.000000)*200.000000 AS "aggr__2__key_0", + SELECT floor("bytes"/200)*200 AS "aggr__2__key_0", count(*) AS "aggr__2__count" FROM __quesma_table_name - GROUP BY floor("bytes"/200.000000)*200.000000 AS "aggr__2__key_0" + GROUP BY floor("bytes"/200)*200 AS "aggr__2__key_0" ORDER BY "aggr__2__key_0" ASC`, }, { // [5] @@ -824,8 +824,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "date_histogram": { "field": "timestamp", - "fixed_interval": "10m", - "time_zone": "Europe/Warsaw" + "fixed_interval": "10m" } } }, @@ -1070,8 +1069,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "date_histogram": { "field": "timestamp", - "fixed_interval": "10m", - "time_zone": "Europe/Warsaw" + "fixed_interval": "10m" } } }, @@ -1515,16 +1513,16 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ `SELECT count() ` + `FROM ` + testdata.TableName, `NoDBQuery`, - `SELECT floor("bytes"/200.000000)*200.000000, count() ` + + `SELECT floor("bytes"/200)*200, count() ` + `FROM ` + testdata.TableName + ` ` + - `GROUP BY floor("bytes"/200.000000)*200.000000 ` + - `ORDER BY floor("bytes"/200.000000)*200.000000`, + `GROUP BY floor("bytes"/200)*200 ` + + `ORDER BY floor("bytes"/200)*200`, }, ExpectedPancakeSQL: ` - SELECT floor("bytes"/200.000000)*200.000000 AS "aggr__2__key_0", + SELECT floor("bytes"/200)*200 AS "aggr__2__key_0", count(*) AS "aggr__2__count" FROM __quesma_table_name - GROUP BY floor("bytes"/200.000000)*200.000000 AS "aggr__2__key_0" + GROUP BY floor("bytes"/200)*200 AS "aggr__2__key_0" ORDER BY "aggr__2__key_0" ASC`, }, { // [8] @@ -1689,16 +1687,16 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ `SELECT count() ` + `FROM ` + testdata.TableName, `NoDBQuery`, - `SELECT floor("bytes"/200.000000)*200.000000, count() ` + + `SELECT floor("bytes"/200)*200, count() ` + `FROM ` + testdata.TableName + ` ` + - `GROUP BY floor("bytes"/200.000000)*200.000000 ` + - `ORDER BY floor("bytes"/200.000000)*200.000000`, + `GROUP BY floor("bytes"/200)*200 ` + + `ORDER BY 
floor("bytes"/200)*200`, }, ExpectedPancakeSQL: ` - SELECT floor("bytes"/200.000000)*200.000000 AS "aggr__2__key_0", + SELECT floor("bytes"/200)*200 AS "aggr__2__key_0", count(*) AS "aggr__2__count" FROM __quesma_table_name - GROUP BY floor("bytes"/200.000000)*200.000000 AS "aggr__2__key_0" + GROUP BY floor("bytes"/200)*200 AS "aggr__2__key_0" ORDER BY "aggr__2__key_0" ASC`, }, { // [9] @@ -1727,8 +1725,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "date_histogram": { "field": "timestamp", - "fixed_interval": "10m", - "time_zone": "Europe/Warsaw" + "fixed_interval": "10m" } } }, @@ -2037,8 +2034,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "value": 2.0 }, "doc_count": 2, - "key": 1714869000000, - "key_as_string": "2024-05-05T00:30:00.000" + "key": 1714861800000, + "key_as_string": "2024-05-04T22:30:00.000" }, { "1": { @@ -2048,8 +2045,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "value": 2.0 }, "doc_count": 0, - "key": 1714869600000, - "key_as_string": "2024-05-05T00:40:00.000" + "key": 1714862400000, + "key_as_string": "2024-05-04T22:40:00.000" }, { "1": { @@ -2059,8 +2056,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "value": 2.0 }, "doc_count": 0, - "key": 1714878600000, - "key_as_string": "2024-05-05T03:10:00.000" + "key": 1714871400000, + "key_as_string": "2024-05-05T01:10:00.000" }, { "1": { @@ -2070,8 +2067,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "value": 4.0 }, "doc_count": 2, - "key": 1714879200000, - "key_as_string": "2024-05-05T03:20:00.000" + "key": 1714872000000, + "key_as_string": "2024-05-05T01:20:00.000" }, { "1": { @@ -2081,8 +2078,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "value": 10.0 }, "doc_count": 6, - "key": 1714879800000, - "key_as_string": "2024-05-05T03:30:00.000" + "key": 1714872600000, + "key_as_string": "2024-05-05T01:30:00.000" }, { "1": { @@ -2092,8 +2089,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "value": 12.0 }, "doc_count": 2, - "key": 1714880400000, - "key_as_string": "2024-05-05T03:40:00.000" + "key": 1714873200000, + "key_as_string": "2024-05-05T01:40:00.000" }, { "1": { @@ -2103,8 +2100,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "value": 14.0 }, "doc_count": 2, - "key": 1714881000000, - "key_as_string": "2024-05-05T03:50:00.000" + "key": 1714873800000, + "key_as_string": "2024-05-05T01:50:00.000" }, { "1": { @@ -2114,8 +2111,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "value": 14.0 }, "doc_count": 0, - "key": 1714881600000, - "key_as_string": "2024-05-05T04:00:00.000" + "key": 1714874400000, + "key_as_string": "2024-05-05T02:00:00.000" }, { "1": { @@ -2125,8 +2122,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "value": 16.0 }, "doc_count": 2, - "key": 1714882200000, - "key_as_string": "2024-05-05T04:10:00.000" + "key": 1714875000000, + "key_as_string": "2024-05-05T02:10:00.000" }, { "1": { @@ -2136,8 +2133,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "value": 16.0 }, "doc_count": 0, - "key": 1714882800000, - "key_as_string": "2024-05-05T04:20:00.000" + "key": 1714875600000, + "key_as_string": "2024-05-05T02:20:00.000" } ] } @@ -2246,17 +2243,18 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ `SELECT count() FROM ` + testdata.TableName, `NoDBQuery`, `NoDBQuery`, - `SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 600000), count() ` + + `SELECT 
toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone("timestamp",'Europe/Warsaw'))*1000) / 600000), count() ` + `FROM ` + testdata.TableName + ` ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000) ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000)`, + `GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone("timestamp",'Europe/Warsaw'))*1000) / 600000) ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone("timestamp",'Europe/Warsaw'))*1000) / 600000)`, }, ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS "aggr__2__key_0", + SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 600000) AS "aggr__2__key_0", count(*) AS "aggr__2__count" FROM __quesma_table_name - GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 600000) AS - "aggr__2__key_0" + GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 600000) AS "aggr__2__key_0" ORDER BY "aggr__2__key_0" ASC`, }, { // [11] @@ -2276,8 +2274,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "date_histogram": { "field": "timestamp", "fixed_interval": "10m", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } }, @@ -2433,8 +2430,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "date_histogram": { "field": "timestamp", "fixed_interval": "10m", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } }, @@ -2924,8 +2920,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "date_histogram": { "field": "timestamp", "fixed_interval": "10m", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } }, @@ -3745,55 +3740,55 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ {}, // NoDBQuery { {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 0.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), model.NewQueryResultCol("client_ip", "255.205.14.152"), model.NewQueryResultCol(`sumOrNull("bytes")`, 13.0), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 0.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), model.NewQueryResultCol("client_ip", "252.177.62.191"), model.NewQueryResultCol(`sumOrNull("bytes")`, 7.0), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), model.NewQueryResultCol("client_ip", "246.106.125.113"), model.NewQueryResultCol(`sumOrNull("bytes")`, 7.0), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), model.NewQueryResultCol("client_ip", "236.212.255.77"), model.NewQueryResultCol(`sumOrNull("bytes")`, 18.0), }}, }, { {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 0.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), model.NewQueryResultCol("client_ip", "255.205.14.152"), model.NewQueryResultCol(`count()`, 1), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 0.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), model.NewQueryResultCol("client_ip", 
"252.177.62.191"), model.NewQueryResultCol(`count()`, 1), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), model.NewQueryResultCol("client_ip", "246.106.125.113"), model.NewQueryResultCol(`count()`, 1), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), model.NewQueryResultCol("client_ip", "236.212.255.77"), model.NewQueryResultCol(`count()`, 1), }}, }, { {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 0.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), model.NewQueryResultCol(`count()`, 73), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), model.NewQueryResultCol(`count()`, 25), }}, }, @@ -3837,28 +3832,28 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ `FROM ` + testdata.TableName, `NoDBQuery`, `WITH cte_1 AS ` + - `(SELECT floor("bytes"/200.000000)*200.000000 AS "cte_1_1", "clientip" AS "cte_1_2", count() AS "cte_1_cnt" ` + + `(SELECT floor("bytes"/200)*200 AS "cte_1_1", "clientip" AS "cte_1_2", count() AS "cte_1_cnt" ` + `FROM ` + testdata.TableName + ` ` + `WHERE "clientip" IS NOT NULL ` + - `GROUP BY floor("bytes"/200.000000)*200.000000, "clientip" ` + - `ORDER BY floor("bytes"/200.000000)*200.000000, "clientip" DESC ` + - `LIMIT 2 BY floor("bytes"/200.000000)*200.000000) ` + - `SELECT floor("bytes"/200.000000)*200.000000, "clientip", sumOrNull("bytes") ` + + `GROUP BY floor("bytes"/200)*200, "clientip" ` + + `ORDER BY floor("bytes"/200)*200, "clientip" DESC ` + + `LIMIT 2 BY floor("bytes"/200)*200) ` + + `SELECT floor("bytes"/200)*200, "clientip", sumOrNull("bytes") ` + `FROM ` + testdata.TableName + ` ` + - `INNER JOIN "cte_1" ON floor("bytes"/200.000000)*200.000000 = "cte_1_1" AND "clientip" = "cte_1_2" ` + + `INNER JOIN "cte_1" ON floor("bytes"/200)*200 = "cte_1_1" AND "clientip" = "cte_1_2" ` + `WHERE "clientip" IS NOT NULL ` + - `GROUP BY floor("bytes"/200.000000)*200.000000, "clientip", cte_1_cnt ` + - `ORDER BY floor("bytes"/200.000000)*200.000000, "clientip" DESC`, - `SELECT floor("bytes"/200.000000)*200.000000, "clientip", count() ` + + `GROUP BY floor("bytes"/200)*200, "clientip", cte_1_cnt ` + + `ORDER BY floor("bytes"/200)*200, "clientip" DESC`, + `SELECT floor("bytes"/200)*200, "clientip", count() ` + `FROM ` + testdata.TableName + ` ` + `WHERE "clientip" IS NOT NULL ` + - `GROUP BY floor("bytes"/200.000000)*200.000000, "clientip" ` + - `ORDER BY floor("bytes"/200.000000)*200.000000, "clientip" DESC ` + - `LIMIT 2 BY floor("bytes"/200.000000)*200.000000`, - `SELECT floor("bytes"/200.000000)*200.000000, count() ` + + `GROUP BY floor("bytes"/200)*200, "clientip" ` + + `ORDER BY floor("bytes"/200)*200, "clientip" DESC ` + + `LIMIT 2 BY floor("bytes"/200)*200`, + `SELECT floor("bytes"/200)*200, count() ` + `FROM ` + testdata.TableName + ` ` + - `GROUP BY floor("bytes"/200.000000)*200.000000 ` + - `ORDER BY floor("bytes"/200.000000)*200.000000`, + `GROUP BY floor("bytes"/200)*200 ` + + `ORDER BY floor("bytes"/200)*200`, }, ExpectedPancakeSQL: ` SELECT "aggr__2__key_0", "aggr__2__count", "aggr__2__1-bucket__parent_count", @@ -3872,7 +3867,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ dense_rank() OVER (PARTITION BY 
"aggr__2__key_0" ORDER BY "aggr__2__1-bucket__key_0" DESC) AS "aggr__2__1-bucket__order_1_rank" FROM ( - SELECT floor("bytes"/200.000000)*200.000000 AS "aggr__2__key_0", + SELECT floor("bytes"/200)*200 AS "aggr__2__key_0", sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__count", sum(count(*)) OVER (PARTITION BY "aggr__2__key_0") AS "aggr__2__1-bucket__parent_count", @@ -3880,7 +3875,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ count(*) AS "aggr__2__1-bucket__count", sumOrNull("bytes") AS "metric__2__1-bucket__1-metric_col_0" FROM __quesma_table_name - GROUP BY floor("bytes"/200.000000)*200.000000 AS "aggr__2__key_0", + GROUP BY floor("bytes"/200)*200 AS "aggr__2__key_0", "clientip" AS "aggr__2__1-bucket__key_0")) WHERE "aggr__2__1-bucket__order_1_rank"<=3 ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__1-bucket__order_1_rank" ASC`, @@ -4071,8 +4066,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "date_histogram": { "field": "timestamp", "fixed_interval": "10m", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } }, @@ -5389,8 +5383,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "date_histogram": { "field": "timestamp", "fixed_interval": "12h", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } }, @@ -5727,216 +5720,216 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ {}, // NoDBQuery { {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 0.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, nil), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 0.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, 6920.0), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, 1000.0), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714989600000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, nil), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, nil), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 600.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 600.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, 27400.0), }}, }, { {Cols: []model.QueryResultCol{ - 
model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 0.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), model.NewQueryResultCol(`count()`, 6), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 0.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), model.NewQueryResultCol(`count()`, 9), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), model.NewQueryResultCol(`count()`, 1), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714989600000/43200000)), model.NewQueryResultCol(`count()`, 2), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), model.NewQueryResultCol(`count()`, 3), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 600.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 600.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), model.NewQueryResultCol(`count()`, 1), }}, }, { {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 0.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), model.NewQueryResultCol(`count()`, 15), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), model.NewQueryResultCol(`count()`, 6), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 600.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 600.0), model.NewQueryResultCol(`count()`, 1), }}, }, {}, // NoDBQuery { {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1000.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, 43320.0), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1000.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715205600000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, 44080.0), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1200.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715162400000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, 50040.0), 
}}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1400.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1400.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, nil), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1400.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1400.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, nil), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1600.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, nil), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1600.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715248800000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, nil), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1800.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, nil), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1800.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, 72640.0), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1800.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714989600000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, nil), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1800.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), model.NewQueryResultCol(`avgOrNull("memory")`, nil), }}, }, { {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1000.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), model.NewQueryResultCol(`count()`, 1), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1000.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715205600000/43200000)), model.NewQueryResultCol(`count()`, 1), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1200.0), 
model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715162400000/43200000)), model.NewQueryResultCol(`count()`, 1), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1400.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1400.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), model.NewQueryResultCol(`count()`, 1), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1400.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1400.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), model.NewQueryResultCol(`count()`, 2), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1600.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), model.NewQueryResultCol(`count()`, 3), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1600.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715248800000/43200000)), model.NewQueryResultCol(`count()`, 1), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1800.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), model.NewQueryResultCol(`count()`, 2), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1800.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), model.NewQueryResultCol(`count()`, 6), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1800.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714989600000/43200000)), model.NewQueryResultCol(`count()`, 8), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1800.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), model.NewQueryResultCol(`count()`, 7), }}, }, { {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1000.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), model.NewQueryResultCol(`count()`, 2), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1200.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1200.0), model.NewQueryResultCol(`count()`, 1), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1400.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1400.0), model.NewQueryResultCol(`count()`, 3), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1600.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), 
model.NewQueryResultCol(`count()`, 4), }}, {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200.000000)*200.000000`, 1800.0), + model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), model.NewQueryResultCol(`count()`, 23), }}, }, @@ -5953,47 +5946,47 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ `SELECT count() ` + `FROM ` + testdata.TableName, `NoDBQuery`, - `SELECT floor("bytes"/200.000000)*200.000000, ` + + `SELECT floor("bytes"/200)*200, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000), " + `avgOrNull("memory") ` + `FROM ` + testdata.TableName + ` ` + `WHERE "bytes">=0 AND "bytes"<1000 ` + - `GROUP BY floor("bytes"/200.000000)*200.000000, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000) " + - `ORDER BY floor("bytes"/200.000000)*200.000000, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", - `SELECT floor("bytes"/200.000000)*200.000000, ` + + `GROUP BY floor("bytes"/200)*200, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000) " + + `ORDER BY floor("bytes"/200)*200, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", + `SELECT floor("bytes"/200)*200, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000), " + `count() ` + `FROM ` + testdata.TableName + ` ` + `WHERE "bytes">=0 AND "bytes"<1000 ` + - `GROUP BY floor("bytes"/200.000000)*200.000000, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000) " + - `ORDER BY floor("bytes"/200.000000)*200.000000, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", - `SELECT floor("bytes"/200.000000)*200.000000, ` + + `GROUP BY floor("bytes"/200)*200, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000) " + + `ORDER BY floor("bytes"/200)*200, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", + `SELECT floor("bytes"/200)*200, ` + `count() ` + `FROM ` + testdata.TableName + ` ` + `WHERE "bytes">=0 AND "bytes"<1000 ` + - `GROUP BY floor("bytes"/200.000000)*200.000000 ` + - `ORDER BY floor("bytes"/200.000000)*200.000000`, + `GROUP BY floor("bytes"/200)*200 ` + + `ORDER BY floor("bytes"/200)*200`, `NoDBQuery`, - `SELECT floor("bytes"/200.000000)*200.000000, ` + + `SELECT floor("bytes"/200)*200, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000), " + `avgOrNull("memory") ` + `FROM ` + testdata.TableName + ` ` + `WHERE "bytes">=1000 AND "bytes"<2000 ` + - `GROUP BY floor("bytes"/200.000000)*200.000000, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000) " + - `ORDER BY floor("bytes"/200.000000)*200.000000, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", - `SELECT floor("bytes"/200.000000)*200.000000, ` + + `GROUP BY floor("bytes"/200)*200, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000) " + + `ORDER BY floor("bytes"/200)*200, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", + `SELECT floor("bytes"/200)*200, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000), " + `count() ` + `FROM ` + testdata.TableName + ` ` + `WHERE "bytes">=1000 AND "bytes"<2000 ` + - `GROUP BY floor("bytes"/200.000000)*200.000000, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000) " + - `ORDER BY floor("bytes"/200.000000)*200.000000, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", - `SELECT floor("bytes"/200.000000)*200.000000, ` + + `GROUP BY floor("bytes"/200)*200, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000) " + + `ORDER BY floor("bytes"/200)*200, ` + "toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", + `SELECT floor("bytes"/200)*200, ` + `count() ` + `FROM 
` + testdata.TableName + ` ` + `WHERE "bytes">=1000 AND "bytes"<2000 ` + - `GROUP BY floor("bytes"/200.000000)*200.000000 ` + - `ORDER BY floor("bytes"/200.000000)*200.000000`, + `GROUP BY floor("bytes"/200)*200 ` + + `ORDER BY floor("bytes"/200)*200`, `SELECT count(if("bytes">=0 AND "bytes"<1000,1,NULL)), ` + `count(if("bytes">=1000 AND "bytes"<2000,1,NULL)), ` + `count() ` + diff --git a/quesma/testdata/opensearch_requests.go b/quesma/testdata/opensearch_requests.go index 53a83f36d..a3dc1c13c 100644 --- a/quesma/testdata/opensearch_requests.go +++ b/quesma/testdata/opensearch_requests.go @@ -87,12 +87,14 @@ var OpensearchSearchTests = []SearchTestCase{ "SELECT count() FROM " + TableName + ` ` + `WHERE ("-@timestamp".=parseDateTime64BestEffort('2024-04-04T13:..:18.149Z') ` + `AND "-@timestamp".=parseDateTime64BestEffort('2024-04-04T13:..:18.149Z'))`, - `SELECT toInt64(toUnixTimestamp64Milli("-@timestamp") / 30000), count() ` + + `SELECT toInt64((toUnixTimestamp64Milli("-@timestamp")` + + `\+timeZoneOffset(toTimezone("-@timestamp",'Europe/Warsaw'))\*1000) / 30000), ` + + `count() ` + `FROM ` + TableName + ` ` + `WHERE ("-@timestamp".=parseDateTime64BestEffort('2024-04-04T13:..:18.149Z') ` + `AND "-@timestamp".=parseDateTime64BestEffort('2024-04-04T13:..:18.149Z')) ` + - `GROUP BY toInt64(toUnixTimestamp64Milli("-@timestamp") / 30000) ` + - `ORDER BY toInt64(toUnixTimestamp64Milli("-@timestamp") / 30000)`, + `GROUP BY toInt64((toUnixTimestamp64Milli("-@timestamp")\+timeZoneOffset(toTimezone("-@timestamp",'Europe/Warsaw'))\*1000) / 30000) ` + + `ORDER BY toInt64((toUnixTimestamp64Milli("-@timestamp")\+timeZoneOffset(toTimezone("-@timestamp",'Europe/Warsaw'))\*1000) / 30000)`, `SELECT.*"-@bytes".*FROM ` + TableName + ` ` + `WHERE ("-@timestamp".=parseDateTime64BestEffort('2024-04-04T13:..:18.149Z') ` + `AND "-@timestamp".=parseDateTime64BestEffort('2024-04-04T13:..:18.149Z')) ` + @@ -111,8 +113,7 @@ var OpensearchSearchTests = []SearchTestCase{ "date_histogram": { "field": "-@timestamp", "fixed_interval": "30s", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } }, diff --git a/quesma/testdata/requests.go b/quesma/testdata/requests.go index 4c2fd4d1a..7d1c0c59a 100644 --- a/quesma/testdata/requests.go +++ b/quesma/testdata/requests.go @@ -550,8 +550,7 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ "date_histogram": { "field": "@timestamp", "fixed_interval": "30s", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } }, @@ -2406,8 +2405,7 @@ var TestSearchFilter = []SearchTestCase{ "date_histogram": { "field": "@timestamp", "fixed_interval": "30s", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } }, @@ -2461,8 +2459,7 @@ var TestSearchFilter = []SearchTestCase{ "date_histogram": { "field": "@timestamp", "fixed_interval": "30s", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } }, diff --git a/quesma/testdata/requests_with_special_characters.go b/quesma/testdata/requests_with_special_characters.go index eacf7d861..20621c662 100644 --- a/quesma/testdata/requests_with_special_characters.go +++ b/quesma/testdata/requests_with_special_characters.go @@ -30,8 +30,7 @@ var AggregationTestsWithSpecialCharactersInFieldNames = []AggregationTestCase{ "date_histogram": { "field": "-@timestamp", "fixed_interval": "12h", - "min_doc_count": 1, - "time_zone": "Europe/Warsaw" + "min_doc_count": 1 } } },