From c0a483de3a3c78897fb21bdd17139c17e5d8d413 Mon Sep 17 00:00:00 2001 From: Yahor Yuzefovich Date: Thu, 23 Apr 2020 07:35:33 -0700 Subject: [PATCH 1/5] roachtest: fail tpcdsvec test with an error In `tpcdsvec` test we run all the queries even if we hit an error. Previously, if an error occurred, we would just fail the test, and now we will be failing with an error that is a "combination" of all the errors that occurred. Release note: None --- pkg/cmd/roachtest/tpcdsvec.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pkg/cmd/roachtest/tpcdsvec.go b/pkg/cmd/roachtest/tpcdsvec.go index 72417cca17a0..6a98899e5d71 100644 --- a/pkg/cmd/roachtest/tpcdsvec.go +++ b/pkg/cmd/roachtest/tpcdsvec.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/cmd/cmpconn" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/workload/tpcds" + "github.com/cockroachdb/errors" ) func registerTPCDSVec(r *testRegistry) { @@ -149,7 +150,7 @@ func registerTPCDSVec(r *testRegistry) { } noStatsRunTimes := make(map[int]float64) - encounteredErrors := false + var errToReport error // We will run all queries in two scenarios: without stats and with // auto stats. The idea is that the plans are likely to be different, // so we will be testing different execution scenarios. 
We additionally @@ -175,7 +176,7 @@ func registerTPCDSVec(r *testRegistry) { ctx, 3*timeout, conns, "", query, false, /* ignoreSQLErrors */ ); err != nil { t.Status(fmt.Sprintf("encountered an error: %s\n", err)) - encounteredErrors = true + errToReport = errors.CombineErrors(errToReport, err) } else { runTimeInSeconds := timeutil.Since(start).Seconds() t.Status( @@ -198,8 +199,8 @@ func registerTPCDSVec(r *testRegistry) { createStatsFromTables(t, clusterConn, tpcdsTables) } } - if encounteredErrors { - t.FailNow() + if errToReport != nil { + t.Fatal(errToReport) } } From 0b2fd2baa536e13757b01c206a0d58e3c831a42a Mon Sep 17 00:00:00 2001 From: Yahor Yuzefovich Date: Wed, 22 Apr 2020 20:41:46 -0700 Subject: [PATCH 2/5] colexec, coldata: fix compiler warnings in template files This commit fixes all compiler warnings that I see in Goland. To get there it does the following: 1. renames `Vec._TemplateType` to `Vec.TemplateType` so that the method is considered exported 2. pulls out declaration of local variables outside of templated `if` blocks 3. breaks up the chained function call to parse flags in `pkg/workload` and a few other places so that there is an allocation of a struct and we can call a method on it that has a pointer receiver. It shouldn't matter for the performance though. 
Release note: None --- pkg/ccl/importccl/read_import_workload.go | 3 ++- pkg/ccl/workloadccl/allccl/all_test.go | 3 ++- pkg/col/coldata/unknown_vec.go | 2 +- pkg/col/coldata/vec.go | 4 ++-- pkg/col/coldata/vec_tmpl.go | 14 ++++++------- pkg/sql/colexec/and_or_projection_tmpl.go | 20 ++++++++++--------- pkg/sql/colexec/any_not_null_agg_tmpl.go | 4 ++-- pkg/sql/colexec/avg_agg_tmpl.go | 4 ++-- pkg/sql/colexec/const_tmpl.go | 2 +- pkg/sql/colexec/distinct_tmpl.go | 6 +++--- .../cmd/execgen/any_not_null_agg_gen.go | 2 +- .../execgen/cmd/execgen/avg_agg_gen.go | 2 +- .../colexec/execgen/cmd/execgen/const_gen.go | 2 +- .../execgen/cmd/execgen/distinct_gen.go | 2 +- .../cmd/execgen/hash_aggregator_gen.go | 2 +- .../execgen/cmd/execgen/hash_utils_gen.go | 2 +- .../execgen/cmd/execgen/mergejoinbase_gen.go | 2 +- .../execgen/cmd/execgen/mergejoiner_gen.go | 2 +- .../execgen/cmd/execgen/projection_ops_gen.go | 2 ++ .../execgen/cmd/execgen/rowstovec_gen.go | 2 +- .../execgen/cmd/execgen/select_in_gen.go | 2 +- .../colexec/execgen/cmd/execgen/sort_gen.go | 2 +- .../execgen/cmd/execgen/sum_agg_gen.go | 2 +- .../execgen/cmd/execgen/values_differ_gen.go | 2 +- .../colexec/execgen/cmd/execgen/vec_gen.go | 2 +- pkg/sql/colexec/hash_aggregator_tmpl.go | 4 ++-- pkg/sql/colexec/hash_utils_tmpl.go | 7 ++++--- pkg/sql/colexec/hashtable_tmpl.go | 14 +++++++------ pkg/sql/colexec/mergejoinbase_tmpl.go | 4 ++-- pkg/sql/colexec/mergejoiner_tmpl.go | 20 +++++++++---------- pkg/sql/colexec/proj_const_ops_tmpl.go | 16 +++++++++++---- pkg/sql/colexec/rowstovec_tmpl.go | 2 +- pkg/sql/colexec/select_in_tmpl.go | 4 ++-- pkg/sql/colexec/selection_ops_tmpl.go | 18 +++++++++-------- pkg/sql/colexec/sort_tmpl.go | 2 +- pkg/sql/colexec/sum_agg_tmpl.go | 4 ++-- pkg/sql/colexec/values_differ_tmpl.go | 4 ++-- pkg/workload/workload.go | 3 ++- 38 files changed, 107 insertions(+), 87 deletions(-) diff --git a/pkg/ccl/importccl/read_import_workload.go b/pkg/ccl/importccl/read_import_workload.go index 
b19b5040409d..657ec83997e5 100644 --- a/pkg/ccl/importccl/read_import_workload.go +++ b/pkg/ccl/importccl/read_import_workload.go @@ -138,7 +138,8 @@ func (w *workloadReader) readFiles( } gen := meta.New() if f, ok := gen.(workload.Flagser); ok { - if err := f.Flags().Parse(conf.Flags); err != nil { + flags := f.Flags() + if err := flags.Parse(conf.Flags); err != nil { return errors.Wrapf(err, `parsing parameters %s`, strings.Join(conf.Flags, ` `)) } } diff --git a/pkg/ccl/workloadccl/allccl/all_test.go b/pkg/ccl/workloadccl/allccl/all_test.go index f50bcb8e6cfc..dc1b22e36e66 100644 --- a/pkg/ccl/workloadccl/allccl/all_test.go +++ b/pkg/ccl/workloadccl/allccl/all_test.go @@ -127,7 +127,8 @@ func TestAllRegisteredSetup(t *testing.T) { case `roachmart`: // TODO(dan): It'd be nice to test this with the default flags. For now, // this is better than nothing. - if err := gen.(workload.Flagser).Flags().Parse([]string{ + flags := gen.(workload.Flagser).Flags() + if err := flags.Parse([]string{ `--users=10`, `--orders=100`, `--partition=false`, }); err != nil { t.Fatal(err) diff --git a/pkg/col/coldata/unknown_vec.go b/pkg/col/coldata/unknown_vec.go index 395cb2fae911..88c3e0482195 100644 --- a/pkg/col/coldata/unknown_vec.go +++ b/pkg/col/coldata/unknown_vec.go @@ -71,7 +71,7 @@ func (u unknown) SetCol(interface{}) { panic("Vec is of unknown type and should not be accessed") } -func (u unknown) _TemplateType() []interface{} { +func (u unknown) TemplateType() []interface{} { panic("Vec is of unknown type and should not be accessed") } diff --git a/pkg/col/coldata/vec.go b/pkg/col/coldata/vec.go index a933d2204d29..69f72b4bd3cd 100644 --- a/pkg/col/coldata/vec.go +++ b/pkg/col/coldata/vec.go @@ -90,7 +90,7 @@ type Vec interface { // TemplateType returns an []interface{} and is used for operator templates. // Do not call this from normal code - it'll always panic. 
- _TemplateType() []interface{} + TemplateType() []interface{} // Append uses SliceArgs to append elements of a source Vec into this Vec. // It is logically equivalent to: @@ -226,7 +226,7 @@ func (m *memColumn) Col() interface{} { return m.col } -func (m *memColumn) _TemplateType() []interface{} { +func (m *memColumn) TemplateType() []interface{} { panic("don't call this from non template code") } diff --git a/pkg/col/coldata/vec_tmpl.go b/pkg/col/coldata/vec_tmpl.go index 9394090021be..1663b7836ee7 100644 --- a/pkg/col/coldata/vec_tmpl.go +++ b/pkg/col/coldata/vec_tmpl.go @@ -55,8 +55,8 @@ func (m *memColumn) Append(args SliceArgs) { switch args.ColType { // {{range .}} case _TYPES_T: - fromCol := args.Src._TemplateType() - toCol := m._TemplateType() + fromCol := args.Src.TemplateType() + toCol := m.TemplateType() // NOTE: it is unfortunate that we always append whole slice without paying // attention to whether the values are NULL. However, if we do start paying // attention, the performance suffers dramatically, so we choose to copy @@ -153,8 +153,8 @@ func (m *memColumn) Copy(args CopySliceArgs) { switch args.ColType { // {{range .}} case _TYPES_T: - fromCol := args.Src._TemplateType() - toCol := m._TemplateType() + fromCol := args.Src.TemplateType() + toCol := m.TemplateType() if args.Sel != nil { sel := args.Sel if args.SelOnDest { @@ -177,7 +177,7 @@ func (m *memColumn) Window(colType coltypes.T, start int, end int) Vec { switch colType { // {{range .}} case _TYPES_T: - col := m._TemplateType() + col := m.TemplateType() return &memColumn{ t: colType, col: execgen.WINDOW(col, start, end), @@ -195,7 +195,7 @@ func SetValueAt(v Vec, elem interface{}, rowIdx int, colType coltypes.T) { switch colType { // {{range .}} case _TYPES_T: - target := v._TemplateType() + target := v.TemplateType() newVal := elem.(_GOTYPE) execgen.SET(target, rowIdx, newVal) // {{end}} @@ -210,7 +210,7 @@ func GetValueAt(v Vec, rowIdx int, colType coltypes.T) interface{} { switch colType 
{ // {{range .}} case _TYPES_T: - target := v._TemplateType() + target := v.TemplateType() return execgen.UNSAFEGET(target, rowIdx) // {{end}} default: diff --git a/pkg/sql/colexec/and_or_projection_tmpl.go b/pkg/sql/colexec/and_or_projection_tmpl.go index 4ed74506d794..b2482e86c6d3 100644 --- a/pkg/sql/colexec/and_or_projection_tmpl.go +++ b/pkg/sql/colexec/and_or_projection_tmpl.go @@ -131,10 +131,12 @@ func (o *_OP_LOWERProjOp) Next(ctx context.Context) coldata.Batch { // // knownResult indicates the boolean value which if present on the left side // fully determines the result of the logical operation. + var ( + knownResult bool + isLeftNull, isRightNull bool + ) // {{ if _IS_OR_OP }} - knownResult := true - // {{ else }} - knownResult := false + knownResult = true // {{ end }} leftCol := batch.ColVec(o.leftIdx) leftColVals := leftCol.Bool() @@ -234,9 +236,9 @@ func (o *_OP_LOWERProjOp) Next(ctx context.Context) coldata.Batch { func _ADD_TUPLE_FOR_RIGHT(_L_HAS_NULLS bool) { // */}} // {{define "addTupleForRight" -}} // {{if _L_HAS_NULLS}} - isLeftNull := leftNulls.NullAt(i) + isLeftNull = leftNulls.NullAt(i) // {{else}} - isLeftNull := false + isLeftNull = false // {{end}} if isLeftNull || leftColVals[i] != knownResult { // We add the tuple into the selection vector if the left value is NULL or @@ -280,18 +282,18 @@ func _SET_VALUES(_IS_OR_OP bool, _L_HAS_NULLS bool, _R_HAS_NULLS bool) { // */}} func _SET_SINGLE_VALUE(_IS_OR_OP bool, _L_HAS_NULLS bool, _R_HAS_NULLS bool) { // */}} // {{ define "setSingleValue" -}} // {{ if _L_HAS_NULLS }} - isLeftNull := leftNulls.NullAt(idx) + isLeftNull = leftNulls.NullAt(idx) // {{ else }} - isLeftNull := false + isLeftNull = false // {{ end }} leftVal := leftColVals[idx] if !isLeftNull && leftVal == knownResult { outputColVals[idx] = leftVal } else { // {{ if _R_HAS_NULLS }} - isRightNull := rightNulls.NullAt(idx) + isRightNull = rightNulls.NullAt(idx) // {{ else }} - isRightNull := false + isRightNull = false // {{ end }} 
rightVal := rightColVals[idx] // {{ if _IS_OR_OP }} diff --git a/pkg/sql/colexec/any_not_null_agg_tmpl.go b/pkg/sql/colexec/any_not_null_agg_tmpl.go index 08905465ff9d..da409c7af10f 100644 --- a/pkg/sql/colexec/any_not_null_agg_tmpl.go +++ b/pkg/sql/colexec/any_not_null_agg_tmpl.go @@ -90,7 +90,7 @@ type anyNotNull_TYPEAgg struct { func (a *anyNotNull_TYPEAgg) Init(groups []bool, vec coldata.Vec) { a.groups = groups a.vec = vec - a.col = vec._TemplateType() + a.col = vec.TemplateType() a.nulls = vec.Nulls() a.Reset() } @@ -131,7 +131,7 @@ func (a *anyNotNull_TYPEAgg) Compute(b coldata.Batch, inputIdxs []uint32) { return } vec, sel := b.ColVec(int(inputIdxs[0])), b.Selection() - col, nulls := vec._TemplateType(), vec.Nulls() + col, nulls := vec.TemplateType(), vec.Nulls() a.allocator.PerformOperation( []coldata.Vec{a.vec}, diff --git a/pkg/sql/colexec/avg_agg_tmpl.go b/pkg/sql/colexec/avg_agg_tmpl.go index 7d3fcff6bee0..b3a1b29fee40 100644 --- a/pkg/sql/colexec/avg_agg_tmpl.go +++ b/pkg/sql/colexec/avg_agg_tmpl.go @@ -97,7 +97,7 @@ var _ aggregateFunc = &avg_TYPEAgg{} func (a *avg_TYPEAgg) Init(groups []bool, v coldata.Vec) { a.groups = groups - a.scratch.vec = v._TemplateType() + a.scratch.vec = v.TemplateType() a.scratch.nulls = v.Nulls() a.Reset() } @@ -141,7 +141,7 @@ func (a *avg_TYPEAgg) Compute(b coldata.Batch, inputIdxs []uint32) { return } vec, sel := b.ColVec(int(inputIdxs[0])), b.Selection() - col, nulls := vec._TemplateType(), vec.Nulls() + col, nulls := vec.TemplateType(), vec.Nulls() if nulls.MaybeHasNulls() { if sel != nil { sel = sel[:inputLen] diff --git a/pkg/sql/colexec/const_tmpl.go b/pkg/sql/colexec/const_tmpl.go index 9f579908ed0f..388754ce6b6b 100644 --- a/pkg/sql/colexec/const_tmpl.go +++ b/pkg/sql/colexec/const_tmpl.go @@ -108,7 +108,7 @@ func (c const_TYPEOp) Next(ctx context.Context) coldata.Batch { return coldata.ZeroBatch } vec := batch.ColVec(c.outputIdx) - col := vec._TemplateType() + col := vec.TemplateType() if vec.MaybeHasNulls() { 
// We need to make sure that there are no left over null values in the // output vector. diff --git a/pkg/sql/colexec/distinct_tmpl.go b/pkg/sql/colexec/distinct_tmpl.go index ec296f4fcf56..689ad8be464d 100644 --- a/pkg/sql/colexec/distinct_tmpl.go +++ b/pkg/sql/colexec/distinct_tmpl.go @@ -236,7 +236,7 @@ func (p *sortedDistinct_TYPEOp) Next(ctx context.Context) coldata.Batch { if vec.MaybeHasNulls() { nulls = vec.Nulls() } - col := vec._TemplateType() + col := vec.TemplateType() // We always output the first row. lastVal := p.lastVal @@ -311,7 +311,7 @@ func (p partitioner_TYPE) partitionWithOrder( nulls = colVec.Nulls() } - col := colVec._TemplateType() + col := colVec.TemplateType() col = execgen.SLICE(col, 0, n) outputCol = outputCol[:n] outputCol[0] = true @@ -336,7 +336,7 @@ func (p partitioner_TYPE) partition(colVec coldata.Vec, outputCol []bool, n int) nulls = colVec.Nulls() } - col := colVec._TemplateType() + col := colVec.TemplateType() col = execgen.SLICE(col, 0, n) outputCol = outputCol[:n] outputCol[0] = true diff --git a/pkg/sql/colexec/execgen/cmd/execgen/any_not_null_agg_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/any_not_null_agg_gen.go index ca1dc98c53be..fe202040e76f 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/any_not_null_agg_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/any_not_null_agg_gen.go @@ -33,7 +33,7 @@ func genAnyNotNullAgg(wr io.Writer) error { s = strings.Replace(s, "_GOTYPE", "{{.LTyp.GoTypeName}}", -1) s = strings.Replace(s, "_TYPES_T", "coltypes.{{.LTyp}}", -1) s = strings.Replace(s, "_TYPE", "{{.LTyp}}", -1) - s = strings.Replace(s, "_TemplateType", "{{.LTyp}}", -1) + s = strings.Replace(s, "TemplateType", "{{.LTyp}}", -1) findAnyNotNull := makeFunctionRegex("_FIND_ANY_NOT_NULL", 4) s = findAnyNotNull.ReplaceAllString(s, `{{template "findAnyNotNull" buildDict "Global" . 
"LTyp" .LTyp "HasNulls" $4}}`) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/avg_agg_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/avg_agg_gen.go index 94437d3e8641..45c975ebfec0 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/avg_agg_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/avg_agg_gen.go @@ -69,7 +69,7 @@ func genAvgAgg(wr io.Writer) error { s = strings.Replace(s, "_GOTYPE", "{{.Type.GoTypeName}}", -1) s = strings.Replace(s, "_TYPES_T", "coltypes.{{.Type}}", -1) s = strings.Replace(s, "_TYPE", "{{.Type}}", -1) - s = strings.Replace(s, "_TemplateType", "{{.Type}}", -1) + s = strings.Replace(s, "TemplateType", "{{.Type}}", -1) assignDivRe := makeFunctionRegex("_ASSIGN_DIV_INT64", 3) s = assignDivRe.ReplaceAllString(s, makeTemplateFunctionCall("AssignDivInt64", 3)) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/const_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/const_gen.go index feaff38b43f3..ea14462d5b1b 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/const_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/const_gen.go @@ -33,7 +33,7 @@ func genConstOps(wr io.Writer) error { s = strings.Replace(s, "_GOTYPE", "{{.GoTypeName}}", -1) s = strings.Replace(s, "_TYPES_T", "coltypes.{{.}}", -1) s = strings.Replace(s, "_TYPE", "{{.}}", -1) - s = strings.Replace(s, "_TemplateType", "{{.}}", -1) + s = strings.Replace(s, "TemplateType", "{{.}}", -1) s = replaceManipulationFuncs("", s) // Now, generate the op, from the template. 
diff --git a/pkg/sql/colexec/execgen/cmd/execgen/distinct_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/distinct_gen.go index 5fe2029020d3..91db94f01e9e 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/distinct_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/distinct_gen.go @@ -34,7 +34,7 @@ func genDistinctOps(wr io.Writer) error { s = strings.Replace(s, "_GOTYPESLICE", "{{.LTyp.GoTypeSliceName}}", -1) s = strings.Replace(s, "_TYPES_T", "coltypes.{{.LTyp}}", -1) s = strings.Replace(s, "_TYPE", "{{.LTyp}}", -1) - s = strings.Replace(s, "_TemplateType", "{{.LTyp}}", -1) + s = strings.Replace(s, "TemplateType", "{{.LTyp}}", -1) assignNeRe := makeFunctionRegex("_ASSIGN_NE", 3) s = assignNeRe.ReplaceAllString(s, makeTemplateFunctionCall("Global.Assign", 3)) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/hash_aggregator_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/hash_aggregator_gen.go index eade36cd8b3f..7ffeccaa1ed9 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/hash_aggregator_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/hash_aggregator_gen.go @@ -29,7 +29,7 @@ func genHashAggregator(wr io.Writer) error { s := string(t) - s = strings.Replace(s, "_TemplateType", "{{.LTyp}}", -1) + s = strings.Replace(s, "TemplateType", "{{.LTyp}}", -1) s = strings.Replace(s, "_TYPES_T", "coltypes.{{.LTyp}}", -1) s = replaceManipulationFuncs(".Global.LTyp", s) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/hash_utils_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/hash_utils_gen.go index 76676f30de61..21df0e36725e 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/hash_utils_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/hash_utils_gen.go @@ -29,7 +29,7 @@ func genHashUtils(wr io.Writer) error { s = strings.Replace(s, "_TYPES_T", "coltypes.{{.LTyp}}", -1) s = strings.Replace(s, "_TYPE", "{{.LTyp}}", -1) - s = strings.Replace(s, "_TemplateType", "{{.LTyp}}", -1) + s = strings.Replace(s, "TemplateType", "{{.LTyp}}", -1) assignHash := makeFunctionRegex("_ASSIGN_HASH", 2) s = 
assignHash.ReplaceAllString(s, makeTemplateFunctionCall("Global.UnaryAssign", 2)) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/mergejoinbase_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/mergejoinbase_gen.go index 862109dd8091..6c949d6a9793 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/mergejoinbase_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/mergejoinbase_gen.go @@ -32,7 +32,7 @@ func genMergeJoinBase(wr io.Writer) error { // Replace the template variables. s = strings.Replace(s, "_GOTYPE", "{{.LTyp.GoTypeName}}", -1) s = strings.Replace(s, "_TYPES_T", "coltypes.{{.LTyp}}", -1) - s = strings.Replace(s, "_TemplateType", "{{.LTyp}}", -1) + s = strings.Replace(s, "TemplateType", "{{.LTyp}}", -1) assignEqRe := makeFunctionRegex("_ASSIGN_EQ", 3) s = assignEqRe.ReplaceAllString(s, makeTemplateFunctionCall("Assign", 3)) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/mergejoiner_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/mergejoiner_gen.go index 02c688254575..b3559290c7d9 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/mergejoiner_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/mergejoiner_gen.go @@ -64,7 +64,7 @@ func genMergeJoinOps(wr io.Writer, jti joinTypeInfo) error { s = strings.Replace(s, "_GOTYPESLICE", "{{.LTyp.GoTypeSliceName}}", -1) s = strings.Replace(s, "_GOTYPE", "{{.LTyp.GoTypeName}}", -1) s = strings.Replace(s, "_TYPES_T", "coltypes.{{.LTyp}}", -1) - s = strings.Replace(s, "_TemplateType", "{{.LTyp}}", -1) + s = strings.Replace(s, "TemplateType", "{{.LTyp}}", -1) s = strings.Replace(s, "_L_SEL_IND", "{{$sel.LSelString}}", -1) s = strings.Replace(s, "_R_SEL_IND", "{{$sel.RSelString}}", -1) s = strings.Replace(s, "_IS_L_SEL", "{{$sel.IsLSel}}", -1) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/projection_ops_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/projection_ops_gen.go index 1fdee14a4fd9..bbd6ca5b51a4 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/projection_ops_gen.go +++ 
b/pkg/sql/colexec/execgen/cmd/execgen/projection_ops_gen.go @@ -79,11 +79,13 @@ func replaceProjConstTmplVariables(tmpl string, isConstLeft bool) string { tmpl = strings.Replace(tmpl, "_CONST_SIDE", "L", -1) tmpl = strings.Replace(tmpl, "_IS_CONST_LEFT", "true", -1) tmpl = strings.Replace(tmpl, "_OP_CONST_NAME", "proj{{.Name}}{{.LTyp}}Const{{.RTyp}}Op", -1) + tmpl = strings.Replace(tmpl, "_NON_CONST_GOTYPESLICE", "{{.RTyp.GoTypeSliceName}}", -1) tmpl = replaceManipulationFuncs(".RTyp", tmpl) } else { tmpl = strings.Replace(tmpl, "_CONST_SIDE", "R", -1) tmpl = strings.Replace(tmpl, "_IS_CONST_LEFT", "false", -1) tmpl = strings.Replace(tmpl, "_OP_CONST_NAME", "proj{{.Name}}{{.LTyp}}{{.RTyp}}ConstOp", -1) + tmpl = strings.Replace(tmpl, "_NON_CONST_GOTYPESLICE", "{{.LTyp.GoTypeSliceName}}", -1) tmpl = replaceManipulationFuncs(".LTyp", tmpl) } return replaceProjTmplVariables(tmpl) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/rowstovec_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/rowstovec_gen.go index 593b5c8e8dbd..2309372a3d7d 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/rowstovec_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/rowstovec_gen.go @@ -57,7 +57,7 @@ func genRowsToVec(wr io.Writer) error { s := string(f) // Replace the template variables. 
- s = strings.Replace(s, "_TemplateType", "{{.ExecType.String}}", -1) + s = strings.Replace(s, "TemplateType", "{{.ExecType.String}}", -1) s = strings.Replace(s, "_GOTYPE", "{{.ExecType.GoTypeName}}", -1) s = strings.Replace(s, "_FAMILY", "types.{{.Family}}", -1) s = strings.Replace(s, "_WIDTH", "{{.Width}}", -1) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/select_in_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/select_in_gen.go index 0ff43a38e9c3..de824c988bce 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/select_in_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/select_in_gen.go @@ -33,7 +33,7 @@ func genSelectIn(wr io.Writer) error { s = assignEq.ReplaceAllString(s, makeTemplateFunctionCall("Assign", 3)) s = strings.Replace(s, "_GOTYPE", "{{.LGoType}}", -1) s = strings.Replace(s, "_TYPE", "{{.LTyp}}", -1) - s = strings.Replace(s, "_TemplateType", "{{.LTyp}}", -1) + s = strings.Replace(s, "TemplateType", "{{.LTyp}}", -1) s = replaceManipulationFuncs(".LTyp", s) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/sort_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/sort_gen.go index 98753785d367..3c0be61e43ee 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/sort_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/sort_gen.go @@ -52,7 +52,7 @@ func genSortOps(wr io.Writer) error { s = strings.Replace(s, "_TYPE", "{{$typ}}", -1) s = strings.Replace(s, "_DIR_ENUM", "{{.Dir}}", -1) s = strings.Replace(s, "_DIR", "{{.DirString}}", -1) - s = strings.Replace(s, "_TemplateType", "{{.LTyp}}", -1) + s = strings.Replace(s, "TemplateType", "{{.LTyp}}", -1) s = strings.Replace(s, "_ISNULL", "{{$isNull}}", -1) s = strings.Replace(s, "_HANDLES_NULLS", "{{if .Nulls}}WithNulls{{else}}{{end}}", -1) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/sum_agg_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/sum_agg_gen.go index 4e2bbb6ec7a5..9ff9a100b5e5 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/sum_agg_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/sum_agg_gen.go @@ -32,7 +32,7 @@ 
func genSumAgg(wr io.Writer) error { s = strings.Replace(s, "_GOTYPE", "{{.LTyp.GoTypeName}}", -1) s = strings.Replace(s, "_TYPES_T", "coltypes.{{.LTyp}}", -1) s = strings.Replace(s, "_TYPE", "{{.LTyp}}", -1) - s = strings.Replace(s, "_TemplateType", "{{.LTyp}}", -1) + s = strings.Replace(s, "TemplateType", "{{.LTyp}}", -1) assignAddRe := makeFunctionRegex("_ASSIGN_ADD", 3) s = assignAddRe.ReplaceAllString(s, makeTemplateFunctionCall("Global.Assign", 3)) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/values_differ_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/values_differ_gen.go index 35b2339797f3..aea995abd9a4 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/values_differ_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/values_differ_gen.go @@ -33,7 +33,7 @@ func genValuesDiffer(wr io.Writer) error { s = strings.Replace(s, "_GOTYPE", "{{.LTyp.GoTypeName}}", -1) s = strings.Replace(s, "_TYPES_T", "coltypes.{{.LTyp}}", -1) s = strings.Replace(s, "_TYPE", "{{.LTyp}}", -1) - s = strings.Replace(s, "_TemplateType", "{{.LTyp}}", -1) + s = strings.Replace(s, "TemplateType", "{{.LTyp}}", -1) assignNeRe := makeFunctionRegex("_ASSIGN_NE", 3) s = assignNeRe.ReplaceAllString(s, makeTemplateFunctionCall("Assign", 3)) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/vec_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/vec_gen.go index c8092617ba96..4ef3da3eb54f 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/vec_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/vec_gen.go @@ -32,7 +32,7 @@ func genVec(wr io.Writer) error { // Replace the template variables. 
s = strings.Replace(s, "_GOTYPE", "{{.LTyp.GoTypeName}}", -1) s = strings.Replace(s, "_TYPES_T", "coltypes.{{.LTyp}}", -1) - s = strings.Replace(s, "_TemplateType", "{{.LTyp}}", -1) + s = strings.Replace(s, "TemplateType", "{{.LTyp}}", -1) s = replaceManipulationFuncs(".LTyp", s) copyWithSel := makeFunctionRegex("_COPY_WITH_SEL", 6) diff --git a/pkg/sql/colexec/hash_aggregator_tmpl.go b/pkg/sql/colexec/hash_aggregator_tmpl.go index cf157f6e135a..f389c52a488d 100644 --- a/pkg/sql/colexec/hash_aggregator_tmpl.go +++ b/pkg/sql/colexec/hash_aggregator_tmpl.go @@ -155,8 +155,8 @@ func (v hashAggFuncs) match( switch typeconv.FromColumnType(&keyTyp) { // {{range .}} case _TYPES_T: - lhsCol := lhs._TemplateType() - rhsCol := rhs._TemplateType() + lhsCol := lhs.TemplateType() + rhsCol := rhs.TemplateType() if lhsHasNull { lhsNull := lhs.Nulls().NullAt(v.keyIdx) if rhsHasNull { diff --git a/pkg/sql/colexec/hash_utils_tmpl.go b/pkg/sql/colexec/hash_utils_tmpl.go index 5c8a516e4cb8..67ba26dd9dad 100644 --- a/pkg/sql/colexec/hash_utils_tmpl.go +++ b/pkg/sql/colexec/hash_utils_tmpl.go @@ -81,12 +81,13 @@ func _REHASH_BODY( // {{ else }} _ = execgen.UNSAFEGET(keys, nKeys-1) // {{ end }} + var selIdx int for i := 0; i < nKeys; i++ { cancelChecker.check(ctx) // {{ if .HasSel }} - selIdx := sel[i] + selIdx = sel[i] // {{ else }} - selIdx := i + selIdx = i // {{ end }} // {{ if .HasNulls }} if nulls.NullAt(selIdx) { @@ -121,7 +122,7 @@ func rehash( switch typeconv.FromColumnType(t) { // {{range $hashType := .}} case _TYPES_T: - keys, nulls := col._TemplateType(), col.Nulls() + keys, nulls := col.TemplateType(), col.Nulls() if col.MaybeHasNulls() { if sel != nil { _REHASH_BODY(ctx, buckets, keys, nulls, nKeys, sel, true, true) diff --git a/pkg/sql/colexec/hashtable_tmpl.go b/pkg/sql/colexec/hashtable_tmpl.go index cb5da33751d7..2390871ab923 100644 --- a/pkg/sql/colexec/hashtable_tmpl.go +++ b/pkg/sql/colexec/hashtable_tmpl.go @@ -74,8 +74,10 @@ func _CHECK_COL_BODY( _USE_BUILD_SEL 
bool, ) { // */}} // {{define "checkColBody" -}} - probeIsNull := false - buildIsNull := false + var ( + probeIdx, buildIdx int + probeIsNull, buildIsNull bool + ) // Early bounds check. _ = ht.probeScratch.toCheck[nToCheck-1] for i := uint64(0); i < nToCheck; i++ { @@ -89,18 +91,18 @@ func _CHECK_COL_BODY( // found. // {{if .UseProbeSel}} - probeIdx := probeSel[toCheck] + probeIdx = probeSel[toCheck] // {{else}} - probeIdx := int(toCheck) + probeIdx = int(toCheck) // {{end}} /* {{if .ProbeHasNulls }} */ probeIsNull = probeVec.Nulls().NullAt(probeIdx) /* {{end}} */ // {{if .UseBuildSel}} - buildIdx := buildSel[keyID-1] + buildIdx = buildSel[keyID-1] // {{else}} - buildIdx := int(keyID - 1) + buildIdx = int(keyID - 1) // {{end}} /* {{if .BuildHasNulls }} */ diff --git a/pkg/sql/colexec/mergejoinbase_tmpl.go b/pkg/sql/colexec/mergejoinbase_tmpl.go index b75073426003..5c7eadc89532 100644 --- a/pkg/sql/colexec/mergejoinbase_tmpl.go +++ b/pkg/sql/colexec/mergejoinbase_tmpl.go @@ -110,13 +110,13 @@ func (o *mergeJoinBase) isBufferedGroupFinished( if bufferedGroup.firstTuple[colIdx].Nulls().NullAt(0) { return true } - bufferedCol := bufferedGroup.firstTuple[colIdx]._TemplateType() + bufferedCol := bufferedGroup.firstTuple[colIdx].TemplateType() prevVal := execgen.UNSAFEGET(bufferedCol, 0) var curVal _GOTYPE if batch.ColVec(int(colIdx)).MaybeHasNulls() && batch.ColVec(int(colIdx)).Nulls().NullAt(tupleToLookAtIdx) { return true } - col := batch.ColVec(int(colIdx))._TemplateType() + col := batch.ColVec(int(colIdx)).TemplateType() curVal = execgen.UNSAFEGET(col, tupleToLookAtIdx) var match bool _ASSIGN_EQ(match, prevVal, curVal) diff --git a/pkg/sql/colexec/mergejoiner_tmpl.go b/pkg/sql/colexec/mergejoiner_tmpl.go index 4d5e22a46079..18c0c168d59b 100644 --- a/pkg/sql/colexec/mergejoiner_tmpl.go +++ b/pkg/sql/colexec/mergejoiner_tmpl.go @@ -123,8 +123,8 @@ func _PROBE_SWITCH( switch colType { // {{range $mjOverload := $.Global.MJOverloads }} case _TYPES_T: - lKeys := 
lVec._TemplateType() - rKeys := rVec._TemplateType() + lKeys := lVec.TemplateType() + rKeys := rVec.TemplateType() var lGroup, rGroup group for o.groups.nextGroupInCol(&lGroup, &rGroup) { curLIdx := lGroup.rowStartIdx @@ -634,9 +634,9 @@ func _LEFT_SWITCH(_JOIN_TYPE joinTypeInfo, _HAS_SELECTION bool, _HAS_NULLS bool) case _TYPES_T: var srcCol _GOTYPESLICE if src != nil { - srcCol = src._TemplateType() + srcCol = src.TemplateType() } - outCol := out._TemplateType() + outCol := out.TemplateType() var val _GOTYPE var srcStartIdx int @@ -826,8 +826,8 @@ func (o *mergeJoin_JOIN_TYPE_STRINGOp) buildLeftBufferedGroup( switch colType { // {{ range $.MJOverloads }} case _TYPES_T: - srcCol := src._TemplateType() - outCol := out._TemplateType() + srcCol := src.TemplateType() + outCol := out.TemplateType() var val _GOTYPE // Loop over every row in the group. for ; o.builderState.left.curSrcStartIdx < batchLength; o.builderState.left.curSrcStartIdx++ { @@ -921,9 +921,9 @@ func _RIGHT_SWITCH(_JOIN_TYPE joinTypeInfo, _HAS_SELECTION bool, _HAS_NULLS bool case _TYPES_T: var srcCol _GOTYPESLICE if src != nil { - srcCol = src._TemplateType() + srcCol = src.TemplateType() } - outCol := out._TemplateType() + outCol := out.TemplateType() // Loop over every group. for ; o.builderState.right.groupsIdx < len(rightGroups); o.builderState.right.groupsIdx++ { @@ -1123,8 +1123,8 @@ func (o *mergeJoin_JOIN_TYPE_STRINGOp) buildRightBufferedGroup( switch colType { // {{range $.MJOverloads }} case _TYPES_T: - srcCol := src._TemplateType() - outCol := out._TemplateType() + srcCol := src.TemplateType() + outCol := out.TemplateType() // Optimization in the case that group length is 1, use assign // instead of copy. 
diff --git a/pkg/sql/colexec/proj_const_ops_tmpl.go b/pkg/sql/colexec/proj_const_ops_tmpl.go index 4858ddce8206..bb4839a23a55 100644 --- a/pkg/sql/colexec/proj_const_ops_tmpl.go +++ b/pkg/sql/colexec/proj_const_ops_tmpl.go @@ -66,6 +66,9 @@ var _ duration.Duration // Dummy import to pull in "coltypes" package. var _ coltypes.T +// _NON_CONST_GOTYPESLICE is a template Go type slice variable. +type _NON_CONST_GOTYPESLICE interface{} + // _ASSIGN is the template function for assigning the first input to the result // of computation an operation on the second and the third inputs. func _ASSIGN(_, _, _ interface{}) { @@ -104,10 +107,11 @@ func (p _OP_CONST_NAME) Next(ctx context.Context) coldata.Batch { return coldata.ZeroBatch } vec := batch.ColVec(p.colIdx) + var col _NON_CONST_GOTYPESLICE // {{if _IS_CONST_LEFT}} - col := vec._R_TYP() + col = vec._R_TYP() // {{else}} - col := vec._L_TYP() + col = vec._L_TYP() // {{end}} projVec := batch.ColVec(p.outputIdx) if projVec.MaybeHasNulls() { @@ -225,10 +229,14 @@ func GetProjection_CONST_SIDEConstOperator( colIdx: colIdx, outputIdx: outputIdx, } + var ( + c interface{} + err error + ) // {{if _IS_CONST_LEFT}} - c, err := getDatumToPhysicalFn(leftType)(constArg) + c, err = getDatumToPhysicalFn(leftType)(constArg) // {{else}} - c, err := getDatumToPhysicalFn(rightType)(constArg) + c, err = getDatumToPhysicalFn(rightType)(constArg) // {{end}} if err != nil { return nil, err diff --git a/pkg/sql/colexec/rowstovec_tmpl.go b/pkg/sql/colexec/rowstovec_tmpl.go index bbcea313c480..d5685bea3b33 100644 --- a/pkg/sql/colexec/rowstovec_tmpl.go +++ b/pkg/sql/colexec/rowstovec_tmpl.go @@ -59,7 +59,7 @@ func _ROWS_TO_COL_VEC( rows sqlbase.EncDatumRows, vec coldata.Vec, columnIdx int, alloc *sqlbase.DatumAlloc, ) error { // */}} // {{define "rowsToColVec" -}} - col := vec._TemplateType() + col := vec.TemplateType() datumToPhysicalFn := getDatumToPhysicalFn(typ) for i := range rows { row := rows[i] diff --git 
a/pkg/sql/colexec/select_in_tmpl.go b/pkg/sql/colexec/select_in_tmpl.go index e7ce8bd9bfc6..f6cf342f16bc 100644 --- a/pkg/sql/colexec/select_in_tmpl.go +++ b/pkg/sql/colexec/select_in_tmpl.go @@ -210,7 +210,7 @@ func (si *selectInOp_TYPE) Next(ctx context.Context) coldata.Batch { } vec := batch.ColVec(si.colIdx) - col := vec._TemplateType() + col := vec.TemplateType() var idx int n := batch.Length() @@ -280,7 +280,7 @@ func (pi *projectInOp_TYPE) Next(ctx context.Context) coldata.Batch { } vec := batch.ColVec(pi.colIdx) - col := vec._TemplateType() + col := vec.TemplateType() projVec := batch.ColVec(pi.outputIdx) projCol := projVec.Bool() diff --git a/pkg/sql/colexec/selection_ops_tmpl.go b/pkg/sql/colexec/selection_ops_tmpl.go index a145a2f3b625..637a7d82b154 100644 --- a/pkg/sql/colexec/selection_ops_tmpl.go +++ b/pkg/sql/colexec/selection_ops_tmpl.go @@ -85,9 +85,9 @@ func _SEL_CONST_LOOP(_HAS_NULLS bool) { // */}} arg := execgen.UNSAFEGET(col, i) _ASSIGN_CMP(cmp, arg, p.constArg) // {{if _HAS_NULLS}} - isNull := nulls.NullAt(i) + isNull = nulls.NullAt(i) // {{else}} - isNull := false + isNull = false // {{end}} if cmp && !isNull { sel[idx] = i @@ -103,9 +103,9 @@ func _SEL_CONST_LOOP(_HAS_NULLS bool) { // */}} arg := execgen.UNSAFEGET(col, i) _ASSIGN_CMP(cmp, arg, p.constArg) // {{if _HAS_NULLS}} - isNull := nulls.NullAt(i) + isNull = nulls.NullAt(i) // {{else}} - isNull := false + isNull = false // {{end}} if cmp && !isNull { sel[idx] = i @@ -131,9 +131,9 @@ func _SEL_LOOP(_HAS_NULLS bool) { // */}} arg2 := _R_UNSAFEGET(col2, i) _ASSIGN_CMP(cmp, arg1, arg2) // {{if _HAS_NULLS}} - isNull := nulls.NullAt(i) + isNull = nulls.NullAt(i) // {{else}} - isNull := false + isNull = false // {{end}} if cmp && !isNull { sel[idx] = i @@ -157,9 +157,9 @@ func _SEL_LOOP(_HAS_NULLS bool) { // */}} arg2 := _R_UNSAFEGET(col2, i) _ASSIGN_CMP(cmp, arg1, arg2) // {{if _HAS_NULLS}} - isNull := nulls.NullAt(i) + isNull = nulls.NullAt(i) // {{else}} - isNull := false + isNull = false 
// {{end}} if cmp && !isNull { sel[idx] = i @@ -201,6 +201,7 @@ func (p *_OP_CONST_NAME) Next(ctx context.Context) coldata.Batch { // However, the scratch is not used in all of the selection operators, so // we add this to go around "unused" error. _ = decimalScratch + var isNull bool for { batch := p.input.Next(ctx) if batch.Length() == 0 { @@ -242,6 +243,7 @@ func (p *_OP_NAME) Next(ctx context.Context) coldata.Batch { // However, the scratch is not used in all of the selection operators, so // we add this to go around "unused" error. _ = decimalScratch + var isNull bool for { batch := p.input.Next(ctx) if batch.Length() == 0 { diff --git a/pkg/sql/colexec/sort_tmpl.go b/pkg/sql/colexec/sort_tmpl.go index a01021173e65..21b331f847b9 100644 --- a/pkg/sql/colexec/sort_tmpl.go +++ b/pkg/sql/colexec/sort_tmpl.go @@ -143,7 +143,7 @@ type sort_TYPE_DIR_HANDLES_NULLSOp struct { } func (s *sort_TYPE_DIR_HANDLES_NULLSOp) init(col coldata.Vec, order []int) { - s.sortCol = col._TemplateType() + s.sortCol = col.TemplateType() s.nulls = col.Nulls() s.order = order } diff --git a/pkg/sql/colexec/sum_agg_tmpl.go b/pkg/sql/colexec/sum_agg_tmpl.go index f7f25b2e33a3..3a95659a2d44 100644 --- a/pkg/sql/colexec/sum_agg_tmpl.go +++ b/pkg/sql/colexec/sum_agg_tmpl.go @@ -90,7 +90,7 @@ var _ aggregateFunc = &sum_TYPEAgg{} func (a *sum_TYPEAgg) Init(groups []bool, v coldata.Vec) { a.groups = groups - a.scratch.vec = v._TemplateType() + a.scratch.vec = v.TemplateType() a.scratch.nulls = v.Nulls() a.Reset() } @@ -132,7 +132,7 @@ func (a *sum_TYPEAgg) Compute(b coldata.Batch, inputIdxs []uint32) { return } vec, sel := b.ColVec(int(inputIdxs[0])), b.Selection() - col, nulls := vec._TemplateType(), vec.Nulls() + col, nulls := vec.TemplateType(), vec.Nulls() if nulls.MaybeHasNulls() { if sel != nil { sel = sel[:inputLen] diff --git a/pkg/sql/colexec/values_differ_tmpl.go b/pkg/sql/colexec/values_differ_tmpl.go index 7bf243586788..2ceff22a9657 100644 --- a/pkg/sql/colexec/values_differ_tmpl.go 
+++ b/pkg/sql/colexec/values_differ_tmpl.go @@ -75,8 +75,8 @@ func valuesDiffer( switch typeconv.FromColumnType(t) { // {{range .}} case _TYPES_T: - aCol := aColVec._TemplateType() - bCol := bColVec._TemplateType() + aCol := aColVec.TemplateType() + bCol := bColVec.TemplateType() aNulls := aColVec.Nulls() bNulls := bColVec.Nulls() aNull := aNulls.MaybeHasNulls() && aNulls.NullAt(aValueIdx) diff --git a/pkg/workload/workload.go b/pkg/workload/workload.go index 3aa257038f21..f82e6e0c6a97 100644 --- a/pkg/workload/workload.go +++ b/pkg/workload/workload.go @@ -411,7 +411,8 @@ func FromFlags(meta Meta, flags ...string) Generator { if !ok { panic(fmt.Sprintf(`generator %s does not accept flags: %v`, meta.Name, flags)) } - if err := f.Flags().Parse(flags); err != nil { + flagsStruct := f.Flags() + if err := flagsStruct.Parse(flags); err != nil { panic(fmt.Sprintf(`generator %s parsing flags %v: %v`, meta.Name, flags, err)) } } From 3146691e45a41ef9a00c865d0ec24588a5a5a044 Mon Sep 17 00:00:00 2001 From: Yahor Yuzefovich Date: Wed, 22 Apr 2020 13:49:16 -0700 Subject: [PATCH 3/5] colexec: remove one of the Go maps from hash aggregator This commit switches usage of `map` to iteration over `[]uint64` when building selection vectors in the hash aggregator. This is a lot more efficient when group sizes are relatively large with moderate hit when group sizes are small. This hit is reduced in a follow-up commit. 
Release note: None --- pkg/sql/colexec/hash_aggregator.go | 66 +++++++++++++++++++----------- 1 file changed, 41 insertions(+), 25 deletions(-) diff --git a/pkg/sql/colexec/hash_aggregator.go b/pkg/sql/colexec/hash_aggregator.go index fcc2d74173f9..e3ae0c706211 100644 --- a/pkg/sql/colexec/hash_aggregator.go +++ b/pkg/sql/colexec/hash_aggregator.go @@ -94,17 +94,33 @@ type hashAggregator struct { // // Instead of having a map from hashCode to []int (which could result // in having many int slices), we are using a constant number of such - // slices and have a map from hashCode to a "slot" in sels that does + // slices and have a "map" from hashCode to a "slot" in sels that does // the "translation." The key insight here is that we will have at most // batchTupleLimit (plus - possibly - constant excess) different // hashCodes at once. sels [][]int - // hashCodeToSelsSlot is a mapping from the hashCode to a slot in sels - // slice. New keys are added to this map when building the selections - // when new hashCode is encountered, and keys are deleted from the map - // in online aggregation phase once the tuples with the corresponding - // hash codes have been processed. - hashCodeToSelsSlot map[uint64]int + // hashCodeForSelsSlot stores the hashCode that corresponds to a slot + // in sels slice. For example, if we have tuples with the following + // hashCodes = {0, 2, 0, 0, 1, 2, 1}, then we will have: + // hashCodeForSelsSlot = {0, 2, 1} + // sels[0] = {0, 2, 3} + // sels[1] = {1, 5} + // sels[2] = {4, 6} + // Note that we're not using Golang's map for this purpose because + // although, in theory, it has O(1) amortized lookup cost, in practice, + // it is faster to do a linear search for a particular hashCode in this + // slice: given that we have at most coldata.BatchSize() number of + // different hashCodes - which is a constant - we get + // O(coldata.BatchSize()) = O(1) lookup cost. And now we have two cases + // to consider: + // 1. 
we have few distinct hashCodes (large group sizes), then the + // overhead of linear search will be significantly lower than of a + // lookup in map + // 2. we have many distinct hashCodes (small group sizes), then the + // map *might* outperform the linear search, but the time spent in + // other parts of the hash aggregator will dominate the total runtime, + // so this would not matter. + hashCodeForSelsSlot []uint64 // group is a boolean vector where "true" represent the beginning of a group // in the column. It is shared among all aggregation functions. Since @@ -227,7 +243,7 @@ func (op *hashAggregator) Init() { op.allocator, op.inputTypes, maxBufferedTuples, ) op.scratch.sels = make([][]int, maxBufferedTuples) - op.scratch.hashCodeToSelsSlot = make(map[uint64]int) + op.scratch.hashCodeForSelsSlot = make([]uint64, maxBufferedTuples) op.scratch.group = make([]bool, maxBufferedTuples) // Eventually, op.keyMapping will contain as many tuples as there are // groups in the input, but we don't know that number upfront, so we @@ -364,15 +380,24 @@ func (op *hashAggregator) buildSelectionForEachHashCode(ctx context.Context) { // they all are of zero length here (see the comment for op.scratch.sels // for context). - nextSelsSlot := 0 // We can use selIdx to index into op.scratch since op.scratch never has a // a selection vector. + op.scratch.hashCodeForSelsSlot = op.scratch.hashCodeForSelsSlot[:0] for selIdx, hashCode := range hashBuffer { - selsSlot, ok := op.scratch.hashCodeToSelsSlot[hashCode] - if !ok { - selsSlot = nextSelsSlot - op.scratch.hashCodeToSelsSlot[hashCode] = selsSlot - nextSelsSlot++ + selsSlot := -1 + for slot, hash := range op.scratch.hashCodeForSelsSlot { + if hash == hashCode { + // We have already seen a tuple with the same hashCode + // previously, so we will append into the same sels slot. 
+ selsSlot = slot + break + } + } + if selsSlot < 0 { + // This is the first tuple in hashBuffer with this hashCode, so we + // will add this tuple to the next available sels slot. + selsSlot = len(op.scratch.hashCodeForSelsSlot) + op.scratch.hashCodeForSelsSlot = append(op.scratch.hashCodeForSelsSlot, hashCode) } op.scratch.sels[selsSlot] = append(op.scratch.sels[selsSlot], selIdx) } @@ -382,14 +407,7 @@ func (op *hashAggregator) buildSelectionForEachHashCode(ctx context.Context) { // aggFunctions for each group if it doesn't not exist. Then it calls Compute() // on each aggregation function to perform aggregation. func (op *hashAggregator) onlineAgg() { - for _, hashCode := range op.hashBuffer { - selsSlot, ok := op.scratch.hashCodeToSelsSlot[hashCode] - if !ok { - // It is possible that multiple tuples have the same hashCode, and - // we process all such tuples when we encounter the first of these - // tuples. - continue - } + for selsSlot, hashCode := range op.scratch.hashCodeForSelsSlot { remaining := op.scratch.sels[selsSlot] var anyMatched bool @@ -459,7 +477,7 @@ func (op *hashAggregator) onlineAgg() { // Hack required to get aggregation function working. See '.scratch.group' // field comment in hashAggregator for more details. op.scratch.group[groupStartIdx] = true - aggFunc.init(op.scratch.group, op.output) + aggFunc.init(op.scratch.group, op.output.Batch) aggFunc.compute(op.scratch, op.aggCols) op.scratch.group[groupStartIdx] = false } @@ -467,8 +485,6 @@ func (op *hashAggregator) onlineAgg() { // We have processed all tuples with this hashCode, so we should reset // the length of the corresponding slice. op.scratch.sels[selsSlot] = op.scratch.sels[selsSlot][:0] - // We also need to delete the hashCode from the mapping. 
- delete(op.scratch.hashCodeToSelsSlot, hashCode) } } From f3cfbf47cf72c6b5c860c660369dc3e05fa9020e Mon Sep 17 00:00:00 2001 From: Yahor Yuzefovich Date: Wed, 22 Apr 2020 17:41:28 -0700 Subject: [PATCH 4/5] colexec: more improvements to hash aggregator This commit removes the buffering stage of the hash aggregator as well as removes the "append only" scratch batch that we're currently using. The removal of the buffering stage allows us to have smaller buffers without sacrificing the performance. The removal of the scratch batch allows us to avoid copying over the data from the input batch and using that input batch directly. We will be destructively modifying the selection vector on that batch, but such behavior is acceptable because the hash aggregator owns the output batch, and the input batch will not be propagated further. This commit also bumps `hashAggFuncsAllocSize` from 16 to 64 which gives us minor performance improvement in case of small group sizes. Release note: None --- .../cmd/execgen/hash_aggregator_gen.go | 3 + pkg/sql/colexec/hash_aggregator.go | 165 +++++------------- pkg/sql/colexec/hash_aggregator_tmpl.go | 44 +++++ 3 files changed, 94 insertions(+), 118 deletions(-) diff --git a/pkg/sql/colexec/execgen/cmd/execgen/hash_aggregator_gen.go b/pkg/sql/colexec/execgen/cmd/execgen/hash_aggregator_gen.go index eade36cd8b3f..2a794428f274 100644 --- a/pkg/sql/colexec/execgen/cmd/execgen/hash_aggregator_gen.go +++ b/pkg/sql/colexec/execgen/cmd/execgen/hash_aggregator_gen.go @@ -36,6 +36,9 @@ func genHashAggregator(wr io.Writer) error { assignCmpRe := makeFunctionRegex("_ASSIGN_NE", 3) s = assignCmpRe.ReplaceAllString(s, makeTemplateFunctionCall("Global.Assign", 3)) + populateSels := makeFunctionRegex("_POPULATE_SELS", 3) + s = populateSels.ReplaceAllString(s, `{{template "populateSels" buildDict "Global" . "BatchHasSelection" $3}}`) + matchLoop := makeFunctionRegex("_MATCH_LOOP", 8) s = matchLoop.ReplaceAllString( s, `{{template "matchLoop" buildDict "Global" . 
"LhsMaybeHasNulls" $7 "RhsMaybeHasNulls" $8}}`) diff --git a/pkg/sql/colexec/hash_aggregator.go b/pkg/sql/colexec/hash_aggregator.go index e3ae0c706211..419df85d4fb0 100644 --- a/pkg/sql/colexec/hash_aggregator.go +++ b/pkg/sql/colexec/hash_aggregator.go @@ -28,19 +28,14 @@ import ( type hashAggregatorState int const ( - // hashAggregatorBuffering is the state in which the hashAggregator is - // buffering up its inputs. - hashAggregatorBuffering hashAggregatorState = iota - // hashAggregatorAggregating is the state in which the hashAggregator is - // performing aggregation on its buffered inputs. After aggregation is done, - // the input buffer used in hashAggregatorBuffering phase is reset and ready - // to be reused. - hashAggregatorAggregating + // reading the batches from the input and performing aggregation on them, + // one at a time. After the input has been fully exhausted, hashAggregator + // transitions to hashAggregatorOutputting state. + hashAggregatorAggregating hashAggregatorState = iota // hashAggregatorOutputting is the state in which the hashAggregator is - // writing its aggregation results to output buffer after it has exhausted all - // inputs and finished aggregating. + // writing its aggregation results to output buffer after. hashAggregatorOutputting // hashAggregatorDone is the state in which the hashAggregator has finished @@ -50,12 +45,12 @@ const ( // hashAggregator is an operator that performs aggregation based on specified // grouping columns. This operator performs aggregation in online fashion. It -// buffers the input up to batchTupleLimit. Then the aggregator hashes each -// tuple and groups the tuples with same hash code into same group. Then -// aggregation function is lazily created for each group. The tuples in that -// group will be then passed into the aggregation function. After all input is -// exhausted, the operator begins to write the result into an output buffer. The -// output row ordering of this operator is arbitrary. 
+// reads the input one batch at a time, hashes each tuple from the batch and +// groups the tuples with same hash code into same group. Then aggregation +// function is lazily created for each group. The tuples in that group will be +// then passed into the aggregation function. After the input is exhausted, the +// operator begins to write the result into an output buffer. The output row +// ordering of this operator is arbitrary. type hashAggregator struct { OneInputNode @@ -75,16 +70,10 @@ type hashAggregator struct { // handle hash collisions. aggFuncMap hashAggFuncMap - // batchTupleLimit limits the number of tuples the aggregator will buffer - // before it starts to perform aggregation. - batchTupleLimit int - // state stores the current state of hashAggregator. state hashAggregatorState scratch struct { - *appendOnlyBufferedBatch - // sels stores the intermediate selection vector for each hash code. It // is maintained in such a way that when for a particular hashCode // there are no tuples in the batch, the corresponding int slice is of @@ -96,8 +85,7 @@ type hashAggregator struct { // in having many int slices), we are using a constant number of such // slices and have a "map" from hashCode to a "slot" in sels that does // the "translation." The key insight here is that we will have at most - // batchTupleLimit (plus - possibly - constant excess) different - // hashCodes at once. + // coldata.BatchSize() different hashCodes at once. sels [][]int // hashCodeForSelsSlot stores the hashCode that corresponds to a slot // in sels slice. For example, if we have tuples with the following @@ -205,9 +193,6 @@ func NewHashAggregator( groupTypes[i] = typs[colIdx] } - // We picked value this as the result of our benchmark. 
- tupleLimit := coldata.BatchSize() * 2 - inputPhysTypes, err := typeconv.FromColumnTypes(typs) return &hashAggregator{ OneInputNode: NewOneInputNode(input), @@ -218,9 +203,7 @@ func NewHashAggregator( aggTypes: aggTyps, aggFuncMap: make(hashAggFuncMap), - batchTupleLimit: tupleLimit, - - state: hashAggregatorBuffering, + state: hashAggregatorAggregating, inputTypes: typs, inputPhysTypes: inputPhysTypes, outputTypes: outputTypes, @@ -234,45 +217,29 @@ func (op *hashAggregator) Init() { op.input.Init() op.output.Batch = op.allocator.NewMemBatch(op.outputTypes) - // We allocate additional coldata.BatchSize for scratch buffer and hashBuffer - // to accommodate the case where sometimes number of buffered tuples exceeds - // op.batchTupleLimit. This is because we perform checks after appending the - // input tuples to the scratch buffer. - maxBufferedTuples := op.batchTupleLimit + coldata.BatchSize() - op.scratch.appendOnlyBufferedBatch = newAppendOnlyBufferedBatch( - op.allocator, op.inputTypes, maxBufferedTuples, - ) - op.scratch.sels = make([][]int, maxBufferedTuples) - op.scratch.hashCodeForSelsSlot = make([]uint64, maxBufferedTuples) - op.scratch.group = make([]bool, maxBufferedTuples) + op.scratch.sels = make([][]int, coldata.BatchSize()) + op.scratch.hashCodeForSelsSlot = make([]uint64, coldata.BatchSize()) + op.scratch.group = make([]bool, coldata.BatchSize()) // Eventually, op.keyMapping will contain as many tuples as there are // groups in the input, but we don't know that number upfront, so we // allocate it with some reasonably sized constant capacity. 
op.keyMapping = newAppendOnlyBufferedBatch( - op.allocator, op.groupTypes, op.batchTupleLimit, + op.allocator, op.groupTypes, coldata.BatchSize(), ) - - op.hashBuffer = make([]uint64, maxBufferedTuples) + op.hashBuffer = make([]uint64, coldata.BatchSize()) } func (op *hashAggregator) Next(ctx context.Context) coldata.Batch { for { switch op.state { - case hashAggregatorBuffering: - op.scratch.ResetInternalBatch() - op.scratch.SetLength(0) - - // Buffering up input batches. - if done := op.bufferBatch(ctx); done { + case hashAggregatorAggregating: + b := op.input.Next(ctx) + if b.Length() == 0 { op.state = hashAggregatorOutputting continue } - - op.buildSelectionForEachHashCode(ctx) - op.state = hashAggregatorAggregating - case hashAggregatorAggregating: - op.onlineAgg() - op.state = hashAggregatorBuffering + op.buildSelectionForEachHashCode(ctx, b) + op.onlineAgg(b) case hashAggregatorOutputting: curOutputIdx := 0 op.output.ResetInternalBatch() @@ -338,25 +305,8 @@ func (op *hashAggregator) Next(ctx context.Context) coldata.Batch { } } -// bufferBatch buffers up batches from input sources until number of tuples -// reaches batchTupleLimit. It returns true when the hash aggregator has -// consumed all batches from input. 
-func (op *hashAggregator) bufferBatch(ctx context.Context) bool { - for op.scratch.Length() < op.batchTupleLimit { - b := op.input.Next(ctx) - batchSize := b.Length() - if batchSize == 0 { - break - } - op.allocator.PerformOperation(op.scratch.ColVecs(), func() { - op.scratch.append(b, 0 /* startIdx */, batchSize) - }) - } - return op.scratch.Length() == 0 -} - -func (op *hashAggregator) buildSelectionForEachHashCode(ctx context.Context) { - nKeys := op.scratch.Length() +func (op *hashAggregator) buildSelectionForEachHashCode(ctx context.Context, b coldata.Batch) { + nKeys := b.Length() hashBuffer := op.hashBuffer[:nKeys] initHash(hashBuffer, nKeys, defaultInitHashValue) @@ -365,48 +315,25 @@ func (op *hashAggregator) buildSelectionForEachHashCode(ctx context.Context) { rehash(ctx, hashBuffer, &op.inputTypes[colIdx], - op.scratch.ColVec(int(colIdx)), + b.ColVec(int(colIdx)), nKeys, - nil, /* sel */ + b.Selection(), op.cancelChecker, - op.decimalScratch) + op.decimalScratch, + ) } if op.testingKnobs.numOfHashBuckets != 0 { finalizeHash(hashBuffer, nKeys, op.testingKnobs.numOfHashBuckets) } - // Note: we don't need to reset any of the slices in op.scratch.sels since - // they all are of zero length here (see the comment for op.scratch.sels - // for context). - - // We can use selIdx to index into op.scratch since op.scratch never has a - // a selection vector. - op.scratch.hashCodeForSelsSlot = op.scratch.hashCodeForSelsSlot[:0] - for selIdx, hashCode := range hashBuffer { - selsSlot := -1 - for slot, hash := range op.scratch.hashCodeForSelsSlot { - if hash == hashCode { - // We have already seen a tuple with the same hashCode - // previously, so we will append into the same sels slot. - selsSlot = slot - break - } - } - if selsSlot < 0 { - // This is the first tuple in hashBuffer with this hashCode, so we - // will add this tuple to the next available sels slot. 
- selsSlot = len(op.scratch.hashCodeForSelsSlot) - op.scratch.hashCodeForSelsSlot = append(op.scratch.hashCodeForSelsSlot, hashCode) - } - op.scratch.sels[selsSlot] = append(op.scratch.sels[selsSlot], selIdx) - } + op.populateSels(b, hashBuffer) } // onlineAgg probes aggFuncMap using the built sels map and lazily creates // aggFunctions for each group if it doesn't not exist. Then it calls Compute() // on each aggregation function to perform aggregation. -func (op *hashAggregator) onlineAgg() { +func (op *hashAggregator) onlineAgg(b coldata.Batch) { for selsSlot, hashCode := range op.scratch.hashCodeForSelsSlot { remaining := op.scratch.sels[selsSlot] @@ -416,16 +343,17 @@ func (op *hashAggregator) onlineAgg() { // aggregation. if aggFuncs, ok := op.aggFuncMap[hashCode]; ok { for _, aggFunc := range aggFuncs { - // We write the selection vector of matched tuples directly into the - // selection vector of op.scratch and selection vector of unmatched - // tuples into 'remaining'.'remaining' will reuse the underlying memory - // allocated for 'sel' to avoid extra allocation and copying. + // We write the selection vector of matched tuples directly + // into the selection vector of b and selection vector of + // unmatched tuples into 'remaining'.'remaining' will reuse the + // underlying memory allocated for 'sel' to avoid extra + // allocation and copying. anyMatched, remaining = aggFunc.match( - remaining, op.scratch, op.groupCols, op.groupTypes, op.keyMapping, + remaining, b, op.groupCols, op.groupTypes, op.keyMapping, op.scratch.group[:len(remaining)], false, /* firstDefiniteMatch */ ) if anyMatched { - aggFunc.compute(op.scratch, op.aggCols) + aggFunc.compute(b, op.aggCols) } } } else { @@ -453,7 +381,7 @@ func (op *hashAggregator) onlineAgg() { // .Append() we can use execgen.SET to improve the // performance. 
op.keyMapping.ColVec(keyIdx).Append(coldata.SliceArgs{ - Src: op.scratch.ColVec(int(colIdx)), + Src: b.ColVec(int(colIdx)), ColType: op.inputPhysTypes[colIdx], DestIdx: aggFunc.keyIdx, SrcStartIdx: groupStartIdx, @@ -470,7 +398,7 @@ func (op *hashAggregator) onlineAgg() { // to check if there is any match since 'remaining[0]' will always be // matched. _, remaining = aggFunc.match( - remaining, op.scratch, op.groupCols, op.groupTypes, op.keyMapping, + remaining, b, op.groupCols, op.groupTypes, op.keyMapping, op.scratch.group[:len(remaining)], true, /* firstDefiniteMatch */ ) @@ -478,7 +406,7 @@ func (op *hashAggregator) onlineAgg() { // field comment in hashAggregator for more details. op.scratch.group[groupStartIdx] = true aggFunc.init(op.scratch.group, op.output.Batch) - aggFunc.compute(op.scratch, op.aggCols) + aggFunc.compute(b, op.aggCols) op.scratch.group[groupStartIdx] = false } @@ -496,15 +424,12 @@ func (op *hashAggregator) reset(ctx context.Context) { } op.aggFuncMap = hashAggFuncMap{} - op.state = hashAggregatorBuffering + op.state = hashAggregatorAggregating op.output.ResetInternalBatch() op.output.SetLength(0) op.output.pendingOutput = false - op.scratch.ResetInternalBatch() - op.scratch.SetLength(0) - op.keyMapping.ResetInternalBatch() op.keyMapping.SetLength(0) } @@ -533,7 +458,11 @@ func (v *hashAggFuncs) compute(b coldata.Batch, aggCols [][]uint32) { } } -const hashAggFuncsAllocSize = 16 +// hashAggFuncsAllocSize determines the allocation size used by +// hashAggFuncsAlloc. This number was chosen after running benchmarks of 'sum' +// aggregation on ints and decimals with varying group sizes (powers of 2 from +// 1 to 4096). +const hashAggFuncsAllocSize = 64 // hashAggFuncsAlloc is a utility struct that batches allocations of // hashAggFuncs. 
diff --git a/pkg/sql/colexec/hash_aggregator_tmpl.go b/pkg/sql/colexec/hash_aggregator_tmpl.go index cf157f6e135a..dd51f9ffd9f4 100644 --- a/pkg/sql/colexec/hash_aggregator_tmpl.go +++ b/pkg/sql/colexec/hash_aggregator_tmpl.go @@ -61,6 +61,50 @@ func _ASSIGN_NE(_, _, _ interface{}) int { // */}} +// {{/* +func _POPULATE_SELS(b coldata.Batch, hashBuffer []uint64, _BATCH_HAS_SELECTION bool) { // */}} + // {{define "populateSels" -}} + for selIdx, hashCode := range hashBuffer { + selsSlot := -1 + for slot, hash := range op.scratch.hashCodeForSelsSlot { + if hash == hashCode { + // We have already seen a tuple with the same hashCode + // previously, so we will append into the same sels slot. + selsSlot = slot + break + } + } + if selsSlot < 0 { + // This is the first tuple in hashBuffer with this hashCode, so we + // will add this tuple to the next available sels slot. + selsSlot = len(op.scratch.hashCodeForSelsSlot) + op.scratch.hashCodeForSelsSlot = append(op.scratch.hashCodeForSelsSlot, hashCode) + } + // {{if .BatchHasSelection}} + op.scratch.sels[selsSlot] = append(op.scratch.sels[selsSlot], batchSelection[selIdx]) + // {{else}} + op.scratch.sels[selsSlot] = append(op.scratch.sels[selsSlot], selIdx) + // {{end}} + } + // {{end}} + // {{/* +} // */}} + +// populateSels populates intermediate selection vectors (stored in +// op.scratch.sels) for each hash code present in b. hashBuffer must contain +// the hash codes for all of the tuples in b. +func (op *hashAggregator) populateSels(b coldata.Batch, hashBuffer []uint64) { + // Note: we don't need to reset any of the slices in op.scratch.sels since + // they all are of zero length here (see the comment for op.scratch.sels + // for context). 
+ op.scratch.hashCodeForSelsSlot = op.scratch.hashCodeForSelsSlot[:0] + if batchSelection := b.Selection(); batchSelection != nil { + _POPULATE_SELS(b, hashBuffer, true) + } else { + _POPULATE_SELS(b, hashBuffer, false) + } +} + // {{/* func _MATCH_LOOP( sel []int, From 1444f9055be896e332ab12168e880e1a3da80522 Mon Sep 17 00:00:00 2001 From: Yahor Yuzefovich Date: Wed, 22 Apr 2020 15:58:42 -0700 Subject: [PATCH 5/5] colexec: remove some allocations In a recent PR (for logical types plumbing) I introduced some unnecessary allocations for unhandled type case - by taking a pointer from a value in `[]types.T` slice. This commit fixes that. Release note: None --- pkg/sql/colexec/hash_aggregator_tmpl.go | 6 ++---- pkg/sql/colexec/hashtable_tmpl.go | 2 +- pkg/sql/colexec/mergejoinbase_tmpl.go | 6 ++---- pkg/sql/colexec/orderedsynchronizer_tmpl.go | 2 +- pkg/sql/colexec/vec_comparators_tmpl.go | 2 +- 5 files changed, 7 insertions(+), 11 deletions(-) diff --git a/pkg/sql/colexec/hash_aggregator_tmpl.go b/pkg/sql/colexec/hash_aggregator_tmpl.go index dd51f9ffd9f4..c0b7c7c3eb8e 100644 --- a/pkg/sql/colexec/hash_aggregator_tmpl.go +++ b/pkg/sql/colexec/hash_aggregator_tmpl.go @@ -194,9 +194,7 @@ func (v hashAggFuncs) match( rhs := b.ColVec(int(colIdx)) rhsHasNull := rhs.MaybeHasNulls() - keyTyp := keyTypes[keyIdx] - - switch typeconv.FromColumnType(&keyTyp) { + switch typeconv.FromColumnType(&keyTypes[keyIdx]) { // {{range .}} case _TYPES_T: lhsCol := lhs._TemplateType() @@ -217,7 +215,7 @@ func (v hashAggFuncs) match( } // {{end}} default: - colexecerror.InternalError(fmt.Sprintf("unhandled type %s", &keyTyp)) + colexecerror.InternalError(fmt.Sprintf("unhandled type %s", keyTypes[keyIdx].String())) } } } diff --git a/pkg/sql/colexec/hashtable_tmpl.go b/pkg/sql/colexec/hashtable_tmpl.go index cb5da33751d7..bb5025e2577d 100644 --- a/pkg/sql/colexec/hashtable_tmpl.go +++ b/pkg/sql/colexec/hashtable_tmpl.go @@ -286,7 +286,7 @@ func (ht *hashTable) checkColForDistinctTuples( // 
{{end}} // {{end}} default: - colexecerror.InternalError(fmt.Sprintf("unhandled type %s", probeType)) + colexecerror.InternalError(fmt.Sprintf("unhandled type %s", probeType.Name())) } } diff --git a/pkg/sql/colexec/mergejoinbase_tmpl.go b/pkg/sql/colexec/mergejoinbase_tmpl.go index b75073426003..4d87c72bc8a4 100644 --- a/pkg/sql/colexec/mergejoinbase_tmpl.go +++ b/pkg/sql/colexec/mergejoinbase_tmpl.go @@ -97,9 +97,7 @@ func (o *mergeJoinBase) isBufferedGroupFinished( // Check all equality columns in the first row of batch to make sure we're in // the same group. for _, colIdx := range input.eqCols[:len(input.eqCols)] { - typ := input.sourceTypes[colIdx] - - switch typeconv.FromColumnType(&typ) { + switch typeconv.FromColumnType(&input.sourceTypes[colIdx]) { // {{ range . }} case _TYPES_T: // We perform this null check on every equality column of the first @@ -125,7 +123,7 @@ func (o *mergeJoinBase) isBufferedGroupFinished( } // {{end}} default: - colexecerror.InternalError(fmt.Sprintf("unhandled type %s", &typ)) + colexecerror.InternalError(fmt.Sprintf("unhandled type %s", input.sourceTypes[colIdx].String())) } } return false diff --git a/pkg/sql/colexec/orderedsynchronizer_tmpl.go b/pkg/sql/colexec/orderedsynchronizer_tmpl.go index e9f5c12d908f..3e4db339d0db 100644 --- a/pkg/sql/colexec/orderedsynchronizer_tmpl.go +++ b/pkg/sql/colexec/orderedsynchronizer_tmpl.go @@ -224,7 +224,7 @@ func (o *OrderedSynchronizer) Init() { o.out_TYPECols = append(o.out_TYPECols, outVec._TYPE()) // {{end}} default: - colexecerror.InternalError(fmt.Sprintf("unhandled type %s", &o.typs[i])) + colexecerror.InternalError(fmt.Sprintf("unhandled type %s", o.typs[i].String())) } } for i := range o.inputs { diff --git a/pkg/sql/colexec/vec_comparators_tmpl.go b/pkg/sql/colexec/vec_comparators_tmpl.go index 7e4f5a8f8e14..7eec91682b80 100644 --- a/pkg/sql/colexec/vec_comparators_tmpl.go +++ b/pkg/sql/colexec/vec_comparators_tmpl.go @@ -151,7 +151,7 @@ func GetVecComparator(t *types.T, numVecs 
int) vecComparator { } // {{end}} } - colexecerror.InternalError(fmt.Sprintf("unhandled type %v", t)) + colexecerror.InternalError(fmt.Sprintf("unhandled type %s", t.Name())) // This code is unreachable, but the compiler cannot infer that. return nil }