MultiBucketsAggregation.Bucket does not implement ToXContent anymore (elastic#117240) (elastic#117306)

This change makes some bucket implementations leaner; a short sketch of the resulting pattern follows the file summary below.
iverase authored Nov 22, 2024
1 parent 950b360 commit 27af37b
Showing 52 changed files with 157 additions and 389 deletions.
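
The same refactoring repeats across the files below: each bucket class drops its public toXContent override (and, where present, per-bucket state such as keyed) in favor of a private bucketToXContent helper that the enclosing aggregation calls from doXContentBody, passing rendering state like keyed or the DocValueFormat as parameters. The sketch below is a minimal, self-contained model of that pattern using hypothetical classes; it is not Elasticsearch code.

// Minimal model of the pattern in this commit, with hypothetical types:
// the bucket exposes only a private bucketToXContent helper instead of a
// public ToXContent-style override, and the owning aggregation drives the
// rendering, passing per-request state ("keyed") as a parameter rather than
// storing it in every bucket instance.
import java.util.List;

final class LeanBucketsSketch {

    static final class Bucket {
        final String key;
        final long docCount;

        Bucket(String key, long docCount) {
            this.key = key;
            this.docCount = docCount;
        }

        // Replaces a former public toXContent override; only the enclosing
        // aggregation invokes it.
        private void bucketToXContent(StringBuilder out, boolean keyed) {
            if (keyed) {
                out.append('"').append(key).append("\":");
            }
            out.append("{\"key\":\"").append(key).append("\",\"doc_count\":").append(docCount).append('}');
        }
    }

    static final class Aggregation {
        final List<Bucket> buckets;

        Aggregation(List<Bucket> buckets) {
            this.buckets = buckets;
        }

        // Plays the role of doXContentBody: decides once whether the output is
        // keyed and calls the private per-bucket helper for each bucket.
        String render(boolean keyed) {
            StringBuilder out = new StringBuilder(keyed ? "{\"buckets\":{" : "{\"buckets\":[");
            for (int i = 0; i < buckets.size(); i++) {
                if (i > 0) {
                    out.append(',');
                }
                buckets.get(i).bucketToXContent(out, keyed);
            }
            return out.append(keyed ? "}}" : "]}").toString();
        }
    }

    public static void main(String[] args) {
        Aggregation agg = new Aggregation(List.of(new Bucket("a", 3), new Bucket("b", 5)));
        System.out.println(agg.render(false)); // {"buckets":[{"key":"a","doc_count":3},{"key":"b","doc_count":5}]}
        System.out.println(agg.render(true));  // {"buckets":{"a":{"key":"a","doc_count":3},"b":{"key":"b","doc_count":5}}}
    }
}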
==== changed file (filename not captured) ====
@@ -81,14 +81,12 @@ public InternalAggregations getAggregations() {
return aggregations;
}

- @Override
- public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ private void bucketToXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(CommonFields.KEY.getPreferredName(), key);
builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
aggregations.toXContentInternal(builder, params);
builder.endObject();
- return builder;
}

@Override
@@ -237,7 +235,7 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) {
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
builder.startArray(CommonFields.BUCKETS.getPreferredName());
for (InternalBucket bucket : buckets) {
- bucket.toXContent(builder, params);
+ bucket.bucketToXContent(builder, params);
}
builder.endArray();
return builder;
==== changed file (filename not captured) ====
@@ -99,8 +99,7 @@ public Object getKey() {
return Instant.ofEpochMilli(key).atZone(ZoneOffset.UTC);
}

- @Override
- public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ private void bucketToXContent(XContentBuilder builder, Params params, DocValueFormat format) throws IOException {
String keyAsString = format.format(key).toString();
builder.startObject();
if (format != DocValueFormat.RAW) {
@@ -110,7 +109,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
aggregations.toXContentInternal(builder, params);
builder.endObject();
- return builder;
}

@Override
@@ -597,7 +595,7 @@ private BucketReduceResult mergeConsecutiveBuckets(
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
builder.startArray(CommonFields.BUCKETS.getPreferredName());
for (Bucket bucket : buckets) {
- bucket.toXContent(builder, params);
+ bucket.bucketToXContent(builder, params, format);
}
builder.endArray();
builder.field("interval", getInterval().toString());
==== changed file (filename not captured) ====
@@ -36,24 +36,21 @@ public class InternalTimeSeries extends InternalMultiBucketAggregation<InternalT
*/
public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket {
protected long bucketOrd;
- protected final boolean keyed;
protected final BytesRef key;
// TODO: make computing docCount optional
protected long docCount;
protected InternalAggregations aggregations;

- public InternalBucket(BytesRef key, long docCount, InternalAggregations aggregations, boolean keyed) {
+ public InternalBucket(BytesRef key, long docCount, InternalAggregations aggregations) {
this.key = key;
this.docCount = docCount;
this.aggregations = aggregations;
- this.keyed = keyed;
}

/**
* Read from a stream.
*/
- public InternalBucket(StreamInput in, boolean keyed) throws IOException {
- this.keyed = keyed;
+ public InternalBucket(StreamInput in) throws IOException {
key = in.readBytesRef();
docCount = in.readVLong();
aggregations = InternalAggregations.readFrom(in);
@@ -86,8 +83,7 @@ public InternalAggregations getAggregations() {
return aggregations;
}

- @Override
- public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ private void bucketToXContent(XContentBuilder builder, Params params, boolean keyed) throws IOException {
// Use map key in the xcontent response:
var key = getKey();
if (keyed) {
@@ -99,7 +95,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
aggregations.toXContentInternal(builder, params);
builder.endObject();
- return builder;
}

@Override
@@ -112,14 +107,13 @@ public boolean equals(Object other) {
}
InternalTimeSeries.InternalBucket that = (InternalTimeSeries.InternalBucket) other;
return Objects.equals(key, that.key)
- && Objects.equals(keyed, that.keyed)
&& Objects.equals(docCount, that.docCount)
&& Objects.equals(aggregations, that.aggregations);
}

@Override
public int hashCode() {
- return Objects.hash(getClass(), key, keyed, docCount, aggregations);
+ return Objects.hash(getClass(), key, docCount, aggregations);
}
}

@@ -143,7 +137,7 @@ public InternalTimeSeries(StreamInput in) throws IOException {
int size = in.readVInt();
List<InternalTimeSeries.InternalBucket> buckets = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
- buckets.add(new InternalTimeSeries.InternalBucket(in, keyed));
+ buckets.add(new InternalTimeSeries.InternalBucket(in));
}
this.buckets = buckets;
this.bucketMap = null;
@@ -162,7 +156,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th
builder.startArray(CommonFields.BUCKETS.getPreferredName());
}
for (InternalBucket bucket : buckets) {
- bucket.toXContent(builder, params);
+ bucket.bucketToXContent(builder, params, keyed);
}
if (keyed) {
builder.endObject();
@@ -252,14 +246,14 @@ public InternalTimeSeries create(List<InternalBucket> buckets) {

@Override
public InternalBucket createBucket(InternalAggregations aggregations, InternalBucket prototype) {
- return new InternalBucket(prototype.key, prototype.docCount, aggregations, prototype.keyed);
+ return new InternalBucket(prototype.key, prototype.docCount, aggregations);
}

private InternalBucket reduceBucket(List<InternalBucket> buckets, AggregationReduceContext context) {
InternalTimeSeries.InternalBucket reduced = null;
for (InternalTimeSeries.InternalBucket bucket : buckets) {
if (reduced == null) {
- reduced = new InternalTimeSeries.InternalBucket(bucket.key, bucket.docCount, bucket.aggregations, bucket.keyed);
+ reduced = new InternalTimeSeries.InternalBucket(bucket.key, bucket.docCount, bucket.aggregations);
} else {
reduced.docCount += bucket.docCount;
}
==== changed file (filename not captured) ====
@@ -83,8 +83,7 @@ public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throw
InternalTimeSeries.InternalBucket bucket = new InternalTimeSeries.InternalBucket(
BytesRef.deepCopyOf(spare), // Closing bucketOrds will corrupt the bytes ref, so need to make a deep copy here.
docCount,
- null,
- keyed
+ null
);
bucket.bucketOrd = ordsEnum.ord();
buckets.add(bucket);
==== changed file (filename not captured) ====
@@ -49,7 +49,7 @@ private List<InternalBucket> randomBuckets(boolean keyed, InternalAggregations a
}
try {
var key = TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef();
- bucketList.add(new InternalBucket(key, docCount, aggregations, keyed));
+ bucketList.add(new InternalBucket(key, docCount, aggregations));
} catch (IOException e) {
throw new UncheckedIOException(e);
}
@@ -108,29 +108,29 @@ public void testReduceSimple() {
InternalTimeSeries first = new InternalTimeSeries(
"ts",
List.of(
- new InternalBucket(new BytesRef("1"), 3, InternalAggregations.EMPTY, false),
- new InternalBucket(new BytesRef("10"), 6, InternalAggregations.EMPTY, false),
- new InternalBucket(new BytesRef("2"), 2, InternalAggregations.EMPTY, false),
- new InternalBucket(new BytesRef("9"), 5, InternalAggregations.EMPTY, false)
+ new InternalBucket(new BytesRef("1"), 3, InternalAggregations.EMPTY),
+ new InternalBucket(new BytesRef("10"), 6, InternalAggregations.EMPTY),
+ new InternalBucket(new BytesRef("2"), 2, InternalAggregations.EMPTY),
+ new InternalBucket(new BytesRef("9"), 5, InternalAggregations.EMPTY)
),
false,
Map.of()
);
InternalTimeSeries second = new InternalTimeSeries(
"ts",
List.of(
- new InternalBucket(new BytesRef("2"), 1, InternalAggregations.EMPTY, false),
- new InternalBucket(new BytesRef("3"), 3, InternalAggregations.EMPTY, false)
+ new InternalBucket(new BytesRef("2"), 1, InternalAggregations.EMPTY),
+ new InternalBucket(new BytesRef("3"), 3, InternalAggregations.EMPTY)
),
false,
Map.of()
);
InternalTimeSeries third = new InternalTimeSeries(
"ts",
List.of(
- new InternalBucket(new BytesRef("1"), 2, InternalAggregations.EMPTY, false),
- new InternalBucket(new BytesRef("3"), 4, InternalAggregations.EMPTY, false),
- new InternalBucket(new BytesRef("9"), 4, InternalAggregations.EMPTY, false)
+ new InternalBucket(new BytesRef("1"), 2, InternalAggregations.EMPTY),
+ new InternalBucket(new BytesRef("3"), 4, InternalAggregations.EMPTY),
+ new InternalBucket(new BytesRef("9"), 4, InternalAggregations.EMPTY)
),
false,
Map.of()
==== changed file (filename not captured) ====
@@ -176,19 +176,19 @@ public void testMultiBucketAggregationAsSubAggregation() throws IOException {
InternalDateHistogram byTimeStampBucket = ts.getBucketByKey("{dim1=aaa, dim2=xxx}").getAggregations().get("by_timestamp");
assertThat(
byTimeStampBucket.getBuckets(),
- contains(new InternalDateHistogram.Bucket(startTime, 2, false, null, InternalAggregations.EMPTY))
+ contains(new InternalDateHistogram.Bucket(startTime, 2, null, InternalAggregations.EMPTY))
);
assertThat(ts.getBucketByKey("{dim1=aaa, dim2=yyy}").docCount, equalTo(2L));
byTimeStampBucket = ts.getBucketByKey("{dim1=aaa, dim2=yyy}").getAggregations().get("by_timestamp");
assertThat(
byTimeStampBucket.getBuckets(),
- contains(new InternalDateHistogram.Bucket(startTime, 2, false, null, InternalAggregations.EMPTY))
+ contains(new InternalDateHistogram.Bucket(startTime, 2, null, InternalAggregations.EMPTY))
);
assertThat(ts.getBucketByKey("{dim1=bbb, dim2=zzz}").docCount, equalTo(4L));
byTimeStampBucket = ts.getBucketByKey("{dim1=bbb, dim2=zzz}").getAggregations().get("by_timestamp");
assertThat(
byTimeStampBucket.getBuckets(),
- contains(new InternalDateHistogram.Bucket(startTime, 4, false, null, InternalAggregations.EMPTY))
+ contains(new InternalDateHistogram.Bucket(startTime, 4, null, InternalAggregations.EMPTY))
);
};

==== changed file (filename not captured) ====
@@ -12,7 +12,6 @@
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.HasAggregations;
import org.elasticsearch.search.aggregations.InternalAggregations;
- import org.elasticsearch.xcontent.ToXContent;

import java.util.List;

@@ -24,7 +23,7 @@ public interface MultiBucketsAggregation extends Aggregation {
* A bucket represents a criteria to which all documents that fall in it adhere to. It is also uniquely identified
* by a key, and can potentially hold sub-aggregations computed over all documents in it.
*/
- interface Bucket extends HasAggregations, ToXContent {
+ interface Bucket extends HasAggregations {
/**
* @return The key associated with the bucket
*/
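
With ToXContent removed, the bucket contract is reduced to key access, doc count, and sub-aggregations. The following compilable paraphrase sketches that reduced shape; HasAggregations and Aggregations are stand-ins for the real Elasticsearch types, and javadoc plus any further members are omitted, so treat it as an approximation rather than the actual interface.

// Approximate paraphrase of the reduced Bucket contract; stand-in types only.
interface Aggregations {}

interface HasAggregations {
    Aggregations getAggregations(); // sub-aggregations computed over the bucket's documents
}

interface Bucket extends HasAggregations {
    Object getKey();          // the key identifying the bucket
    String getKeyAsString();  // the key formatted for responses
    long getDocCount();       // number of documents that fell into the bucket
    // XContent rendering is no longer part of this contract; each
    // InternalAggregation renders its own buckets (see the private
    // bucketToXContent helpers in the diffs above).
}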
==== changed file (filename not captured) ====
@@ -465,14 +465,6 @@ public int compareKey(InternalBucket other) {
return 0;
}

- @Override
- public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- /**
- * See {@link CompositeAggregation#bucketToXContent}
- */
- throw new UnsupportedOperationException("not implemented");
- }

InternalBucket finalizeSampling(SamplingContext samplingContext) {
return new InternalBucket(
sourceNames,
==== changed file (filename not captured) ====
@@ -215,15 +215,9 @@ public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throw
filters.size() + (otherBucketKey == null ? 0 : 1),
(offsetInOwningOrd, docCount, subAggregationResults) -> {
if (offsetInOwningOrd < filters.size()) {
- return new InternalFilters.InternalBucket(
- filters.get(offsetInOwningOrd).key(),
- docCount,
- subAggregationResults,
- keyed,
- keyedBucket
- );
+ return new InternalFilters.InternalBucket(filters.get(offsetInOwningOrd).key(), docCount, subAggregationResults);
}
- return new InternalFilters.InternalBucket(otherBucketKey, docCount, subAggregationResults, keyed, keyedBucket);
+ return new InternalFilters.InternalBucket(otherBucketKey, docCount, subAggregationResults);
},
buckets -> new InternalFilters(name, buckets, keyed, keyedBucket, metadata())
);
@@ -234,12 +228,12 @@ public InternalAggregation buildEmptyAggregation() {
InternalAggregations subAggs = buildEmptySubAggregations();
List<InternalFilters.InternalBucket> buckets = new ArrayList<>(filters.size() + (otherBucketKey == null ? 0 : 1));
for (QueryToFilterAdapter filter : filters) {
- InternalFilters.InternalBucket bucket = new InternalFilters.InternalBucket(filter.key(), 0, subAggs, keyed, keyedBucket);
+ InternalFilters.InternalBucket bucket = new InternalFilters.InternalBucket(filter.key(), 0, subAggs);
buckets.add(bucket);
}

if (otherBucketKey != null) {
- InternalFilters.InternalBucket bucket = new InternalFilters.InternalBucket(otherBucketKey, 0, subAggs, keyed, keyedBucket);
+ InternalFilters.InternalBucket bucket = new InternalFilters.InternalBucket(otherBucketKey, 0, subAggs);
buckets.add(bucket);
}

(Remaining changed files are not shown in this capture.)
