diff --git a/pkg/sql/catalog/lease/lease.go b/pkg/sql/catalog/lease/lease.go index 3fb9e232209f..0da30d8b52ab 100644 --- a/pkg/sql/catalog/lease/lease.go +++ b/pkg/sql/catalog/lease/lease.go @@ -527,14 +527,6 @@ func (l *descriptorSet) findVersion(version descpb.DescriptorVersion) *descripto } type descriptorState struct { - id descpb.ID - stopper *stop.Stopper - - // renewalInProgress is an atomic indicator for when a renewal for a - // lease has begun. This is atomic to prevent multiple routines from - // entering renewal initialization. - renewalInProgress int32 - mu struct { syncutil.Mutex @@ -559,6 +551,14 @@ type descriptorState struct { // ignored. acquisitionsInProgress int } + + stopper *stop.Stopper + id descpb.ID + + // renewalInProgress is an atomic indicator for when a renewal for a + // lease has begun. This is atomic to prevent multiple routines from + // entering renewal initialization. + renewalInProgress int32 } // ensureVersion ensures that the latest version >= minVersion. It will diff --git a/pkg/sql/colexec/colexecagg/hash_min_max_agg.eg.go b/pkg/sql/colexec/colexecagg/hash_min_max_agg.eg.go index 089ac2815052..1a3d2d7ada24 100644 --- a/pkg/sql/colexec/colexecagg/hash_min_max_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/hash_min_max_agg.eg.go @@ -92,16 +92,16 @@ func newMaxHashAggAlloc( } type minBoolHashAgg struct { + // col points to the output vector we are updating. + col coldata.Bools + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg bool - // col points to the output vector we are updating. - col coldata.Bools - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -238,16 +238,16 @@ func (a *minBoolHashAggAlloc) newAggFunc() AggregateFunc { } type minBytesHashAgg struct { + // col points to the output vector we are updating. + col *coldata.Bytes + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg []byte - // col points to the output vector we are updating. - col *coldata.Bytes - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -374,16 +374,16 @@ func (a *minBytesHashAggAlloc) newAggFunc() AggregateFunc { } type minDecimalHashAgg struct { + // col points to the output vector we are updating. + col coldata.Decimals + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg apd.Decimal - // col points to the output vector we are updating. 
- col coldata.Decimals - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -504,16 +504,16 @@ func (a *minDecimalHashAggAlloc) newAggFunc() AggregateFunc { } type minInt16HashAgg struct { + // col points to the output vector we are updating. + col coldata.Int16s + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg int16 - // col points to the output vector we are updating. - col coldata.Int16s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -656,16 +656,16 @@ func (a *minInt16HashAggAlloc) newAggFunc() AggregateFunc { } type minInt32HashAgg struct { + // col points to the output vector we are updating. + col coldata.Int32s + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg int32 - // col points to the output vector we are updating. - col coldata.Int32s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -808,16 +808,16 @@ func (a *minInt32HashAggAlloc) newAggFunc() AggregateFunc { } type minInt64HashAgg struct { + // col points to the output vector we are updating. + col coldata.Int64s + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg int64 - // col points to the output vector we are updating. - col coldata.Int64s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -960,16 +960,16 @@ func (a *minInt64HashAggAlloc) newAggFunc() AggregateFunc { } type minFloat64HashAgg struct { + // col points to the output vector we are updating. + col coldata.Float64s + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg float64 - // col points to the output vector we are updating. - col coldata.Float64s - // vec is the same as col before conversion from coldata.Vec. 
- vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -1128,16 +1128,16 @@ func (a *minFloat64HashAggAlloc) newAggFunc() AggregateFunc { } type minTimestampHashAgg struct { + // col points to the output vector we are updating. + col coldata.Times + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg time.Time - // col points to the output vector we are updating. - col coldata.Times - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -1272,16 +1272,16 @@ func (a *minTimestampHashAggAlloc) newAggFunc() AggregateFunc { } type minIntervalHashAgg struct { + // col points to the output vector we are updating. + col coldata.Durations + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg duration.Duration - // col points to the output vector we are updating. - col coldata.Durations - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -1402,16 +1402,16 @@ func (a *minIntervalHashAggAlloc) newAggFunc() AggregateFunc { } type minDatumHashAgg struct { + // col points to the output vector we are updating. + col coldata.DatumVec + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg interface{} - // col points to the output vector we are updating. - col coldata.DatumVec - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -1550,16 +1550,16 @@ func (a *minDatumHashAggAlloc) newAggFunc() AggregateFunc { } type maxBoolHashAgg struct { + // col points to the output vector we are updating. + col coldata.Bools + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg bool - // col points to the output vector we are updating. - col coldata.Bools - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. 
foundNonNullForCurrentGroup bool @@ -1696,16 +1696,16 @@ func (a *maxBoolHashAggAlloc) newAggFunc() AggregateFunc { } type maxBytesHashAgg struct { + // col points to the output vector we are updating. + col *coldata.Bytes + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg []byte - // col points to the output vector we are updating. - col *coldata.Bytes - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -1832,16 +1832,16 @@ func (a *maxBytesHashAggAlloc) newAggFunc() AggregateFunc { } type maxDecimalHashAgg struct { + // col points to the output vector we are updating. + col coldata.Decimals + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg apd.Decimal - // col points to the output vector we are updating. - col coldata.Decimals - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -1962,16 +1962,16 @@ func (a *maxDecimalHashAggAlloc) newAggFunc() AggregateFunc { } type maxInt16HashAgg struct { + // col points to the output vector we are updating. + col coldata.Int16s + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg int16 - // col points to the output vector we are updating. - col coldata.Int16s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -2114,16 +2114,16 @@ func (a *maxInt16HashAggAlloc) newAggFunc() AggregateFunc { } type maxInt32HashAgg struct { + // col points to the output vector we are updating. + col coldata.Int32s + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg int32 - // col points to the output vector we are updating. - col coldata.Int32s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. 
foundNonNullForCurrentGroup bool @@ -2266,16 +2266,16 @@ func (a *maxInt32HashAggAlloc) newAggFunc() AggregateFunc { } type maxInt64HashAgg struct { + // col points to the output vector we are updating. + col coldata.Int64s + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg int64 - // col points to the output vector we are updating. - col coldata.Int64s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -2418,16 +2418,16 @@ func (a *maxInt64HashAggAlloc) newAggFunc() AggregateFunc { } type maxFloat64HashAgg struct { + // col points to the output vector we are updating. + col coldata.Float64s + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg float64 - // col points to the output vector we are updating. - col coldata.Float64s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -2586,16 +2586,16 @@ func (a *maxFloat64HashAggAlloc) newAggFunc() AggregateFunc { } type maxTimestampHashAgg struct { + // col points to the output vector we are updating. + col coldata.Times + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg time.Time - // col points to the output vector we are updating. - col coldata.Times - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -2730,16 +2730,16 @@ func (a *maxTimestampHashAggAlloc) newAggFunc() AggregateFunc { } type maxIntervalHashAgg struct { + // col points to the output vector we are updating. + col coldata.Durations + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg duration.Duration - // col points to the output vector we are updating. - col coldata.Durations - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. 
foundNonNullForCurrentGroup bool @@ -2860,16 +2860,16 @@ func (a *maxIntervalHashAggAlloc) newAggFunc() AggregateFunc { } type maxDatumHashAgg struct { + // col points to the output vector we are updating. + col coldata.DatumVec + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec hashAggregateFuncBase allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg interface{} - // col points to the output vector we are updating. - col coldata.DatumVec - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool diff --git a/pkg/sql/colexec/colexecagg/min_max_agg_tmpl.go b/pkg/sql/colexec/colexecagg/min_max_agg_tmpl.go index 493c1c4662f8..ffc6c378160e 100644 --- a/pkg/sql/colexec/colexecagg/min_max_agg_tmpl.go +++ b/pkg/sql/colexec/colexecagg/min_max_agg_tmpl.go @@ -117,7 +117,12 @@ func newMax_AGGKINDAggAlloc( type _AGG_TYPE_AGGKINDAgg struct { // {{if eq "_AGGKIND" "Ordered"}} orderedAggregateFuncBase - // {{else}} + // {{end}} + // col points to the output vector we are updating. + col _GOTYPESLICE + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec + // {{if eq "_AGGKIND" "Hash"}} hashAggregateFuncBase // {{end}} allocator *colmem.Allocator @@ -125,10 +130,6 @@ type _AGG_TYPE_AGGKINDAgg struct { // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg _GOTYPE - // col points to the output vector we are updating. - col _GOTYPESLICE - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool diff --git a/pkg/sql/colexec/colexecagg/ordered_min_max_agg.eg.go b/pkg/sql/colexec/colexecagg/ordered_min_max_agg.eg.go index c91cac8a717e..c4a10a9e58d6 100644 --- a/pkg/sql/colexec/colexecagg/ordered_min_max_agg.eg.go +++ b/pkg/sql/colexec/colexecagg/ordered_min_max_agg.eg.go @@ -93,15 +93,15 @@ func newMaxOrderedAggAlloc( type minBoolOrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Bools + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg bool - // col points to the output vector we are updating. - col coldata.Bools - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -364,15 +364,15 @@ func (a *minBoolOrderedAggAlloc) newAggFunc() AggregateFunc { type minBytesOrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col *coldata.Bytes + // vec is the same as col before conversion from coldata.Vec. 
+ vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg []byte - // col points to the output vector we are updating. - col *coldata.Bytes - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -611,15 +611,15 @@ func (a *minBytesOrderedAggAlloc) newAggFunc() AggregateFunc { type minDecimalOrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Decimals + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg apd.Decimal - // col points to the output vector we are updating. - col coldata.Decimals - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -850,15 +850,15 @@ func (a *minDecimalOrderedAggAlloc) newAggFunc() AggregateFunc { type minInt16OrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Int16s + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg int16 - // col points to the output vector we are updating. - col coldata.Int16s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -1133,15 +1133,15 @@ func (a *minInt16OrderedAggAlloc) newAggFunc() AggregateFunc { type minInt32OrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Int32s + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg int32 - // col points to the output vector we are updating. - col coldata.Int32s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -1416,15 +1416,15 @@ func (a *minInt32OrderedAggAlloc) newAggFunc() AggregateFunc { type minInt64OrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Int64s + // vec is the same as col before conversion from coldata.Vec. 
+ vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg int64 - // col points to the output vector we are updating. - col coldata.Int64s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -1699,15 +1699,15 @@ func (a *minInt64OrderedAggAlloc) newAggFunc() AggregateFunc { type minFloat64OrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Float64s + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg float64 - // col points to the output vector we are updating. - col coldata.Float64s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -2014,15 +2014,15 @@ func (a *minFloat64OrderedAggAlloc) newAggFunc() AggregateFunc { type minTimestampOrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Times + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg time.Time - // col points to the output vector we are updating. - col coldata.Times - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -2281,15 +2281,15 @@ func (a *minTimestampOrderedAggAlloc) newAggFunc() AggregateFunc { type minIntervalOrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Durations + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg duration.Duration - // col points to the output vector we are updating. - col coldata.Durations - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -2520,15 +2520,15 @@ func (a *minIntervalOrderedAggAlloc) newAggFunc() AggregateFunc { type minDatumOrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.DatumVec + // vec is the same as col before conversion from coldata.Vec. 
+ vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg interface{} - // col points to the output vector we are updating. - col coldata.DatumVec - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -2781,15 +2781,15 @@ func (a *minDatumOrderedAggAlloc) newAggFunc() AggregateFunc { type maxBoolOrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Bools + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg bool - // col points to the output vector we are updating. - col coldata.Bools - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -3052,15 +3052,15 @@ func (a *maxBoolOrderedAggAlloc) newAggFunc() AggregateFunc { type maxBytesOrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col *coldata.Bytes + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg []byte - // col points to the output vector we are updating. - col *coldata.Bytes - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -3299,15 +3299,15 @@ func (a *maxBytesOrderedAggAlloc) newAggFunc() AggregateFunc { type maxDecimalOrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Decimals + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg apd.Decimal - // col points to the output vector we are updating. - col coldata.Decimals - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -3538,15 +3538,15 @@ func (a *maxDecimalOrderedAggAlloc) newAggFunc() AggregateFunc { type maxInt16OrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Int16s + // vec is the same as col before conversion from coldata.Vec. 
+ vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg int16 - // col points to the output vector we are updating. - col coldata.Int16s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -3821,15 +3821,15 @@ func (a *maxInt16OrderedAggAlloc) newAggFunc() AggregateFunc { type maxInt32OrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Int32s + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg int32 - // col points to the output vector we are updating. - col coldata.Int32s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -4104,15 +4104,15 @@ func (a *maxInt32OrderedAggAlloc) newAggFunc() AggregateFunc { type maxInt64OrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Int64s + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg int64 - // col points to the output vector we are updating. - col coldata.Int64s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -4387,15 +4387,15 @@ func (a *maxInt64OrderedAggAlloc) newAggFunc() AggregateFunc { type maxFloat64OrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Float64s + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg float64 - // col points to the output vector we are updating. - col coldata.Float64s - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -4702,15 +4702,15 @@ func (a *maxFloat64OrderedAggAlloc) newAggFunc() AggregateFunc { type maxTimestampOrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Times + // vec is the same as col before conversion from coldata.Vec. 
+ vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg time.Time - // col points to the output vector we are updating. - col coldata.Times - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -4969,15 +4969,15 @@ func (a *maxTimestampOrderedAggAlloc) newAggFunc() AggregateFunc { type maxIntervalOrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.Durations + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg duration.Duration - // col points to the output vector we are updating. - col coldata.Durations - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool @@ -5208,15 +5208,15 @@ func (a *maxIntervalOrderedAggAlloc) newAggFunc() AggregateFunc { type maxDatumOrderedAgg struct { orderedAggregateFuncBase + // col points to the output vector we are updating. + col coldata.DatumVec + // vec is the same as col before conversion from coldata.Vec. + vec coldata.Vec allocator *colmem.Allocator // curAgg holds the running min/max, so we can index into the slice once per // group, instead of on each iteration. // NOTE: if foundNonNullForCurrentGroup is false, curAgg is undefined. curAgg interface{} - // col points to the output vector we are updating. - col coldata.DatumVec - // vec is the same as col before conversion from coldata.Vec. - vec coldata.Vec // foundNonNullForCurrentGroup tracks if we have seen any non-null values // for the group that is currently being aggregated. foundNonNullForCurrentGroup bool diff --git a/pkg/sql/colexec/distinct.eg.go b/pkg/sql/colexec/distinct.eg.go index 3ccf747acbc5..3efcfadc3b32 100644 --- a/pkg/sql/colexec/distinct.eg.go +++ b/pkg/sql/colexec/distinct.eg.go @@ -269,22 +269,23 @@ func newPartitioner(t *types.T) (partitioner, error) { // true to the resultant bool column for every value that differs from the // previous one. type distinctBoolOp struct { + // outputCol is the boolean output column. It is shared by all of the + // other distinct operators in a distinct operator set. + outputCol []bool + + // lastVal is the last value seen by the operator, so that the distincting + // still works across batch boundaries. + lastVal bool + OneInputNode // distinctColIdx is the index of the column to distinct upon. distinctColIdx int - // outputCol is the boolean output column. It is shared by all of the - // other distinct operators in a distinct operator set. - outputCol []bool - // Set to true at runtime when we've seen the first row. Distinct always // outputs the first row that it sees. foundFirstRow bool - // lastVal is the last value seen by the operator, so that the distincting - // still works across batch boundaries. 
- lastVal bool lastValNull bool } @@ -748,22 +749,23 @@ func (p partitionerBool) partition(colVec coldata.Vec, outputCol []bool, n int) // true to the resultant bool column for every value that differs from the // previous one. type distinctBytesOp struct { + // outputCol is the boolean output column. It is shared by all of the + // other distinct operators in a distinct operator set. + outputCol []bool + + // lastVal is the last value seen by the operator, so that the distincting + // still works across batch boundaries. + lastVal []byte + OneInputNode // distinctColIdx is the index of the column to distinct upon. distinctColIdx int - // outputCol is the boolean output column. It is shared by all of the - // other distinct operators in a distinct operator set. - outputCol []bool - // Set to true at runtime when we've seen the first row. Distinct always // outputs the first row that it sees. foundFirstRow bool - // lastVal is the last value seen by the operator, so that the distincting - // still works across batch boundaries. - lastVal []byte lastValNull bool } @@ -1165,22 +1167,23 @@ func (p partitionerBytes) partition(colVec coldata.Vec, outputCol []bool, n int) // true to the resultant bool column for every value that differs from the // previous one. type distinctDecimalOp struct { + // outputCol is the boolean output column. It is shared by all of the + // other distinct operators in a distinct operator set. + outputCol []bool + + // lastVal is the last value seen by the operator, so that the distincting + // still works across batch boundaries. + lastVal apd.Decimal + OneInputNode // distinctColIdx is the index of the column to distinct upon. distinctColIdx int - // outputCol is the boolean output column. It is shared by all of the - // other distinct operators in a distinct operator set. - outputCol []bool - // Set to true at runtime when we've seen the first row. Distinct always // outputs the first row that it sees. foundFirstRow bool - // lastVal is the last value seen by the operator, so that the distincting - // still works across batch boundaries. - lastVal apd.Decimal lastValNull bool } @@ -1580,22 +1583,23 @@ func (p partitionerDecimal) partition(colVec coldata.Vec, outputCol []bool, n in // true to the resultant bool column for every value that differs from the // previous one. type distinctInt16Op struct { + // outputCol is the boolean output column. It is shared by all of the + // other distinct operators in a distinct operator set. + outputCol []bool + + // lastVal is the last value seen by the operator, so that the distincting + // still works across batch boundaries. + lastVal int16 + OneInputNode // distinctColIdx is the index of the column to distinct upon. distinctColIdx int - // outputCol is the boolean output column. It is shared by all of the - // other distinct operators in a distinct operator set. - outputCol []bool - // Set to true at runtime when we've seen the first row. Distinct always // outputs the first row that it sees. foundFirstRow bool - // lastVal is the last value seen by the operator, so that the distincting - // still works across batch boundaries. - lastVal int16 lastValNull bool } @@ -2083,22 +2087,23 @@ func (p partitionerInt16) partition(colVec coldata.Vec, outputCol []bool, n int) // true to the resultant bool column for every value that differs from the // previous one. type distinctInt32Op struct { + // outputCol is the boolean output column. It is shared by all of the + // other distinct operators in a distinct operator set. 
+ outputCol []bool + + // lastVal is the last value seen by the operator, so that the distincting + // still works across batch boundaries. + lastVal int32 + OneInputNode // distinctColIdx is the index of the column to distinct upon. distinctColIdx int - // outputCol is the boolean output column. It is shared by all of the - // other distinct operators in a distinct operator set. - outputCol []bool - // Set to true at runtime when we've seen the first row. Distinct always // outputs the first row that it sees. foundFirstRow bool - // lastVal is the last value seen by the operator, so that the distincting - // still works across batch boundaries. - lastVal int32 lastValNull bool } @@ -2586,22 +2591,23 @@ func (p partitionerInt32) partition(colVec coldata.Vec, outputCol []bool, n int) // true to the resultant bool column for every value that differs from the // previous one. type distinctInt64Op struct { + // outputCol is the boolean output column. It is shared by all of the + // other distinct operators in a distinct operator set. + outputCol []bool + + // lastVal is the last value seen by the operator, so that the distincting + // still works across batch boundaries. + lastVal int64 + OneInputNode // distinctColIdx is the index of the column to distinct upon. distinctColIdx int - // outputCol is the boolean output column. It is shared by all of the - // other distinct operators in a distinct operator set. - outputCol []bool - // Set to true at runtime when we've seen the first row. Distinct always // outputs the first row that it sees. foundFirstRow bool - // lastVal is the last value seen by the operator, so that the distincting - // still works across batch boundaries. - lastVal int64 lastValNull bool } @@ -3089,22 +3095,23 @@ func (p partitionerInt64) partition(colVec coldata.Vec, outputCol []bool, n int) // true to the resultant bool column for every value that differs from the // previous one. type distinctFloat64Op struct { + // outputCol is the boolean output column. It is shared by all of the + // other distinct operators in a distinct operator set. + outputCol []bool + + // lastVal is the last value seen by the operator, so that the distincting + // still works across batch boundaries. + lastVal float64 + OneInputNode // distinctColIdx is the index of the column to distinct upon. distinctColIdx int - // outputCol is the boolean output column. It is shared by all of the - // other distinct operators in a distinct operator set. - outputCol []bool - // Set to true at runtime when we've seen the first row. Distinct always // outputs the first row that it sees. foundFirstRow bool - // lastVal is the last value seen by the operator, so that the distincting - // still works across batch boundaries. - lastVal float64 lastValNull bool } @@ -3656,22 +3663,23 @@ func (p partitionerFloat64) partition(colVec coldata.Vec, outputCol []bool, n in // true to the resultant bool column for every value that differs from the // previous one. type distinctTimestampOp struct { + // outputCol is the boolean output column. It is shared by all of the + // other distinct operators in a distinct operator set. + outputCol []bool + + // lastVal is the last value seen by the operator, so that the distincting + // still works across batch boundaries. + lastVal time.Time + OneInputNode // distinctColIdx is the index of the column to distinct upon. distinctColIdx int - // outputCol is the boolean output column. It is shared by all of the - // other distinct operators in a distinct operator set. 
- outputCol []bool - // Set to true at runtime when we've seen the first row. Distinct always // outputs the first row that it sees. foundFirstRow bool - // lastVal is the last value seen by the operator, so that the distincting - // still works across batch boundaries. - lastVal time.Time lastValNull bool } @@ -4127,22 +4135,23 @@ func (p partitionerTimestamp) partition(colVec coldata.Vec, outputCol []bool, n // true to the resultant bool column for every value that differs from the // previous one. type distinctIntervalOp struct { + // outputCol is the boolean output column. It is shared by all of the + // other distinct operators in a distinct operator set. + outputCol []bool + + // lastVal is the last value seen by the operator, so that the distincting + // still works across batch boundaries. + lastVal duration.Duration + OneInputNode // distinctColIdx is the index of the column to distinct upon. distinctColIdx int - // outputCol is the boolean output column. It is shared by all of the - // other distinct operators in a distinct operator set. - outputCol []bool - // Set to true at runtime when we've seen the first row. Distinct always // outputs the first row that it sees. foundFirstRow bool - // lastVal is the last value seen by the operator, so that the distincting - // still works across batch boundaries. - lastVal duration.Duration lastValNull bool } @@ -4542,22 +4551,23 @@ func (p partitionerInterval) partition(colVec coldata.Vec, outputCol []bool, n i // true to the resultant bool column for every value that differs from the // previous one. type distinctDatumOp struct { + // outputCol is the boolean output column. It is shared by all of the + // other distinct operators in a distinct operator set. + outputCol []bool + + // lastVal is the last value seen by the operator, so that the distincting + // still works across batch boundaries. + lastVal interface{} + OneInputNode // distinctColIdx is the index of the column to distinct upon. distinctColIdx int - // outputCol is the boolean output column. It is shared by all of the - // other distinct operators in a distinct operator set. - outputCol []bool - // Set to true at runtime when we've seen the first row. Distinct always // outputs the first row that it sees. foundFirstRow bool - // lastVal is the last value seen by the operator, so that the distincting - // still works across batch boundaries. - lastVal interface{} lastValNull bool } diff --git a/pkg/sql/colexec/distinct_tmpl.go b/pkg/sql/colexec/distinct_tmpl.go index 36df5a956759..64dc7cf15347 100644 --- a/pkg/sql/colexec/distinct_tmpl.go +++ b/pkg/sql/colexec/distinct_tmpl.go @@ -182,22 +182,23 @@ func newPartitioner(t *types.T) (partitioner, error) { // true to the resultant bool column for every value that differs from the // previous one. type distinct_TYPEOp struct { + // outputCol is the boolean output column. It is shared by all of the + // other distinct operators in a distinct operator set. + outputCol []bool + + // lastVal is the last value seen by the operator, so that the distincting + // still works across batch boundaries. + lastVal _GOTYPE + OneInputNode // distinctColIdx is the index of the column to distinct upon. distinctColIdx int - // outputCol is the boolean output column. It is shared by all of the - // other distinct operators in a distinct operator set. - outputCol []bool - // Set to true at runtime when we've seen the first row. Distinct always // outputs the first row that it sees. 
foundFirstRow bool - // lastVal is the last value seen by the operator, so that the distincting - // still works across batch boundaries. - lastVal _GOTYPE lastValNull bool } diff --git a/pkg/sql/colexec/op_creation.go b/pkg/sql/colexec/op_creation.go index 2510318f81a7..2667eed34158 100644 --- a/pkg/sql/colexec/op_creation.go +++ b/pkg/sql/colexec/op_creation.go @@ -42,6 +42,16 @@ type NewColOperatorArgs struct { ExprHelper *ExprHelper Factory coldata.ColumnFactory TestingKnobs struct { + // SpillingCallbackFn will be called when the spilling from an in-memory to + // disk-backed operator occurs. It should only be set in tests. + SpillingCallbackFn func() + // NumForcedRepartitions specifies a number of "repartitions" that a + // disk-backed operator should be forced to perform. "Repartition" can mean + // different things depending on the operator (for example, for hash joiner + // it is dividing original partition into multiple new partitions; for + // sorter it is merging already created partitions into new one before + // proceeding to the next partition from the input). + NumForcedRepartitions int // UseStreamingMemAccountForBuffering specifies whether to use // StreamingMemAccount when creating buffering operators and should only be // set to 'true' in tests. The idea behind this flag is reducing the number @@ -50,19 +60,9 @@ type NewColOperatorArgs struct { // infrastructure (and so that we could use testMemAccount defined in // main_test.go). UseStreamingMemAccountForBuffering bool - // SpillingCallbackFn will be called when the spilling from an in-memory to - // disk-backed operator occurs. It should only be set in tests. - SpillingCallbackFn func() // DiskSpillingDisabled specifies whether only in-memory operators should // be created. DiskSpillingDisabled bool - // NumForcedRepartitions specifies a number of "repartitions" that a - // disk-backed operator should be forced to perform. "Repartition" can mean - // different things depending on the operator (for example, for hash joiner - // it is dividing original partition into multiple new partitions; for - // sorter it is merging already created partitions into new one before - // proceeding to the next partition from the input). - NumForcedRepartitions int // DelegateFDAcquisitions should be observed by users of a // PartitionedDiskQueue. During normal operations, these should acquire the // maximum number of file descriptors they will use from FDSemaphore up diff --git a/pkg/sql/execinfra/base.go b/pkg/sql/execinfra/base.go index ee12bec0f990..625a954abeb0 100644 --- a/pkg/sql/execinfra/base.go +++ b/pkg/sql/execinfra/base.go @@ -402,8 +402,6 @@ func (rb *rowSourceBase) consumerClosed(name string) { // RowChannel is a thin layer over a RowChannelMsg channel, which can be used to // transfer rows between goroutines. type RowChannel struct { - rowSourceBase - types []*types.T // The channel on which rows are delivered. @@ -412,6 +410,8 @@ type RowChannel struct { // dataChan is the same channel as C. dataChan chan RowChannelMsg + rowSourceBase + // numSenders is an atomic counter that keeps track of how many senders have // yet to call ProducerDone(). numSenders int32 diff --git a/pkg/sql/sem/tree/eval.go b/pkg/sql/sem/tree/eval.go index 1e4324484cfa..349a9a3839aa 100644 --- a/pkg/sql/sem/tree/eval.go +++ b/pkg/sql/sem/tree/eval.go @@ -1957,24 +1957,25 @@ var BinOps = map[BinaryOperator]binOpOverload{ // CmpOp is a comparison operator. 
type CmpOp struct { + types TypeList + LeftType *types.T RightType *types.T + // Datum return type is a union between *DBool and dNull. + Fn TwoArgFn + + // counter, if non-nil, should be incremented every time the + // operator is type checked. + counter telemetry.Counter + // If NullableArgs is false, the operator returns NULL // whenever either argument is NULL. NullableArgs bool - // Datum return type is a union between *DBool and dNull. - Fn TwoArgFn - Volatility Volatility - types TypeList isPreferred bool - - // counter, if non-nil, should be incremented every time the - // operator is type checked. - counter telemetry.Counter } func (op *CmpOp) params() TypeList { diff --git a/pkg/sql/sem/tree/select.go b/pkg/sql/sem/tree/select.go index cdfe0e2c859b..f05888fbcc0f 100644 --- a/pkg/sql/sem/tree/select.go +++ b/pkg/sql/sem/tree/select.go @@ -77,14 +77,14 @@ func (node *ParenSelect) Format(ctx *FmtCtx) { // SelectClause represents a SELECT statement. type SelectClause struct { - Distinct bool + From From DistinctOn DistinctOn Exprs SelectExprs - From From - Where *Where GroupBy GroupBy - Having *Where Window Window + Having *Where + Where *Where + Distinct bool TableSelect bool } diff --git a/pkg/sql/sessiondata/session_data.go b/pkg/sql/sessiondata/session_data.go index 1f09fb07e161..a392c02ab747 100644 --- a/pkg/sql/sessiondata/session_data.go +++ b/pkg/sql/sessiondata/session_data.go @@ -110,35 +110,30 @@ func (s *SessionData) GetLocation() *time.Location { // execution on the gateway node and don't need to be propagated to the remote // nodes. type LocalOnlySessionData struct { - // DefaultTxnPriority indicates the default priority of newly created - // transactions. - // NOTE: we'd prefer to use tree.UserPriority here, but doing so would - // introduce a package dependency cycle. - DefaultTxnPriority int - // DefaultReadOnly indicates the default read-only status of newly created - // transactions. - DefaultReadOnly bool - // DistSQLMode indicates whether to run queries using the distributed - // execution engine. - DistSQLMode DistSQLExecMode + // SaveTablesPrefix indicates that a table should be created with the + // given prefix for the output of each subexpression in a query. If + // SaveTablesPrefix is empty, no tables are created. + SaveTablesPrefix string + // RemoteAddr is used to generate logging events. + RemoteAddr net.Addr + // VectorizeRowCountThreshold indicates the row count above which the + // vectorized execution engine will be used if possible. + VectorizeRowCountThreshold uint64 // ExperimentalDistSQLPlanningMode indicates whether the experimental // DistSQL planning driven by the optimizer is enabled. ExperimentalDistSQLPlanningMode ExperimentalDistSQLPlanningMode - // PartiallyDistributedPlansDisabled indicates whether the partially - // distributed plans produced by distSQLSpecExecFactory are disabled. It - // should be set to 'true' only in tests that verify that the old and the - // new factories return exactly the same physical plans. - // TODO(yuzefovich): remove it when deleting old sql.execFactory. - PartiallyDistributedPlansDisabled bool + // DistSQLMode indicates whether to run queries using the distributed + // execution engine. + DistSQLMode DistSQLExecMode // OptimizerFKCascadesLimit is the maximum number of cascading operations that // are run for a single query. OptimizerFKCascadesLimit int - // OptimizerUseHistograms indicates whether we should use histograms for - // cardinality estimation in the optimizer. 
- OptimizerUseHistograms bool - // OptimizerUseMultiColStats indicates whether we should use multi-column - // statistics for cardinality estimation in the optimizer. - OptimizerUseMultiColStats bool + // ResultsBufferSize specifies the size at which the pgwire results buffer + // will self-flush. + ResultsBufferSize int64 + // NoticeDisplaySeverity indicates the level of Severity to send notices for the given + // session. + NoticeDisplaySeverity pgnotice.DisplaySeverity // SerialNormalizationMode indicates how to handle the SERIAL pseudo-type. SerialNormalizationMode SerialNormalizationMode // DatabaseIDToTempSchemaID stores the temp schema ID for every database that @@ -155,42 +150,50 @@ type LocalOnlySessionData struct { // idle in a transaction before the session is canceled. // If set to 0, there is no timeout. IdleInTransactionSessionTimeout time.Duration + // ReorderJoinsLimit indicates the number of joins at which the optimizer should + // stop attempting to reorder. + ReorderJoinsLimit int + // DefaultTxnPriority indicates the default priority of newly created + // transactions. + // NOTE: we'd prefer to use tree.UserPriority here, but doing so would + // introduce a package dependency cycle. + DefaultTxnPriority int + // DefaultReadOnly indicates the default read-only status of newly created + // transactions. + DefaultReadOnly bool + // PartiallyDistributedPlansDisabled indicates whether the partially + // distributed plans produced by distSQLSpecExecFactory are disabled. It + // should be set to 'true' only in tests that verify that the old and the + // new factories return exactly the same physical plans. + // TODO(yuzefovich): remove it when deleting old sql.execFactory. + PartiallyDistributedPlansDisabled bool + // OptimizerUseHistograms indicates whether we should use histograms for + // cardinality estimation in the optimizer. + OptimizerUseHistograms bool + // OptimizerUseMultiColStats indicates whether we should use multi-column + // statistics for cardinality estimation in the optimizer. + OptimizerUseMultiColStats bool // SafeUpdates causes errors when the client // sends syntax that may have unwanted side effects. SafeUpdates bool // PreferLookupJoinsForFKs causes foreign key operations to prefer lookup // joins. PreferLookupJoinsForFKs bool - // RemoteAddr is used to generate logging events. - RemoteAddr net.Addr // ZigzagJoinEnabled indicates whether the optimizer should try and plan a // zigzag join. ZigzagJoinEnabled bool - // ReorderJoinsLimit indicates the number of joins at which the optimizer should - // stop attempting to reorder. - ReorderJoinsLimit int // RequireExplicitPrimaryKeys indicates whether CREATE TABLE statements should // error out if no primary key is provided. RequireExplicitPrimaryKeys bool - // VectorizeRowCountThreshold indicates the row count above which the - // vectorized execution engine will be used if possible. - VectorizeRowCountThreshold uint64 // ForceSavepointRestart overrides the default SAVEPOINT behavior // for compatibility with certain ORMs. When this flag is set, // the savepoint name will no longer be compared against the magic // identifier `cockroach_restart` in order use a restartable // transaction. ForceSavepointRestart bool - // ResultsBufferSize specifies the size at which the pgwire results buffer - // will self-flush. - ResultsBufferSize int64 // AllowPrepareAsOptPlan must be set to allow use of // PREPARE name AS OPT PLAN '...' 
AllowPrepareAsOptPlan bool - // SaveTablesPrefix indicates that a table should be created with the - // given prefix for the output of each subexpression in a query. If - // SaveTablesPrefix is empty, no tables are created. - SaveTablesPrefix string // TempTablesEnabled indicates whether temporary tables can be created or not. TempTablesEnabled bool // HashShardedIndexesEnabled indicates whether hash sharded indexes can be created. @@ -204,9 +207,6 @@ type LocalOnlySessionData struct { // InsertFastPath is true if the fast path for insert (with VALUES input) may // be used. InsertFastPath bool - // NoticeDisplaySeverity indicates the level of Severity to send notices for the given - // session. - NoticeDisplaySeverity pgnotice.DisplaySeverity // AlterColumnTypeGeneralEnabled is true if ALTER TABLE ... ALTER COLUMN ... // TYPE x may be used for general conversions requiring online schema change/ AlterColumnTypeGeneralEnabled bool
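
All of the hunks above apply the same mechanical change: struct fields are regrouped so that fields of the same size class sit next to each other (slices, interfaces, and pointers first, then word-sized fields, with bool and other sub-word fields collected together). On 64-bit platforms this appears aimed at letting the Go compiler insert less alignment padding between fields, shrinking each instance of these structs. The sketch below is illustrative only — the type names and fields are invented for the example and are not taken from the patch — but it shows the layout effect the reordering relies on:

package main

import (
	"fmt"
	"unsafe"
)

// interleaved mixes 1-byte and 8-byte fields, so the compiler pads each
// bool out to the alignment of the following 8-byte field.
type interleaved struct {
	done  bool    // 1 byte + 7 bytes of padding
	count int64   // 8 bytes
	valid bool    // 1 byte + 7 bytes of padding
	total float64 // 8 bytes
}

// grouped keeps the 8-byte fields together and moves the bools to the
// end, leaving a single shared run of tail padding.
type grouped struct {
	count int64
	total float64
	done  bool
	valid bool
}

func main() {
	fmt.Println(unsafe.Sizeof(interleaved{})) // 32 on 64-bit platforms
	fmt.Println(unsafe.Sizeof(grouped{}))     // 24 on 64-bit platforms
}

The same grouping would explain the less obvious moves as well: SelectClause now lists its two bool fields (Distinct, TableSelect) last, and descriptorState places the small id field directly next to the renewalInProgress int32 counter so the two sub-word fields can share a single word.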