From 295ee9f59995559924ae849efd4c6c94300c47e7 Mon Sep 17 00:00:00 2001
From: Luca Cavanna
Date: Fri, 15 Mar 2019 12:22:04 +0100
Subject: [PATCH] Serialize top-level pipeline aggs as part of InternalAggregations

We currently convert pipeline aggregators to their corresponding
InternalAggregation instance as part of the final reduction phase.
They arrive at the coordinating node as part of QuerySearchResult
objects from the shards and, although we may incrementally reduce aggs
(hence perform some non-final reductions before the final one), all the
reduction phases happen on the same node.

With CCS minimizing roundtrips though, each cluster performs its own
non-final reduction, and then serializes the results back to the CCS
coordinating node, which performs the final reduction. This breaks the
assumption made up until now that all reductions happen on the same
node.

With #40101 we have made sure that top-level pipeline aggs are not
reduced as part of the non-final reduction. The next step is to make
sure that they don't get lost, meaning that each coordinating node
needs to send them back to the CCS coordinating node as part of the
top-level `InternalAggregations` object.

Closes #40059
---
 .../aggregations/InternalAggregations.java |  69 ++++++--
 .../InternalAggregationsTests.java         | 160 ++++++++++++++++++
 2 files changed, 216 insertions(+), 13 deletions(-)
 create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java

diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java
index 69adb79cb2b84..fe3e2105a8f8f 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java
@@ -18,10 +18,12 @@
  */
 package org.elasticsearch.search.aggregations;
 
+import org.elasticsearch.Version;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
 
 import java.io.IOException;
@@ -49,6 +51,8 @@ public final class InternalAggregations extends Aggregations implements Streamab
         }
     };
 
+    private List<SiblingPipelineAggregator> topLevelPipelineAggregators;
+
     private InternalAggregations() {
     }
 
@@ -60,18 +64,42 @@ public InternalAggregations(List<InternalAggregation> aggregations) {
     }
 
     /**
-     * Reduces the given list of aggregations
+     * Constructs a new aggregation providing its {@link InternalAggregation}s and {@link SiblingPipelineAggregator}s
+     */
+    public InternalAggregations(List<InternalAggregation> aggregations, List<SiblingPipelineAggregator> topLevelPipelineAggregators) {
+        super(aggregations);
+        this.topLevelPipelineAggregators = topLevelPipelineAggregators;
+    }
+
+    /**
+     * Returns the top-level pipeline aggregators.
+     * Note that top-level pipeline aggregators become normal aggregations once the final reduction has been performed, after which they
+     * become part of the list of {@link InternalAggregation}s.
      */
-    public static InternalAggregations reduce(List<InternalAggregations> aggregationsList, ReduceContext context) {
-        return reduce(aggregationsList, null, context);
+    List<SiblingPipelineAggregator> getTopLevelPipelineAggregators() {
+        return topLevelPipelineAggregators;
     }
     /**
-     * Reduces the given list of aggregations as well as the provided sibling pipeline aggregators.
-     * Note that sibling pipeline aggregators are ignored when non final reduction is performed.
+     * Reduces the given list of aggregations as well as the top-level pipeline aggregators extracted from the first
+     * {@link InternalAggregations} object found in the list.
+     * Note that top-level pipeline aggregators are reduced only as part of the final reduction phase, otherwise they are left untouched.
      */
     public static InternalAggregations reduce(List<InternalAggregations> aggregationsList,
-                                              List<SiblingPipelineAggregator> siblingPipelineAggregators,
+                                              ReduceContext context) {
+        if (aggregationsList.isEmpty()) {
+            return null;
+        }
+        InternalAggregations first = aggregationsList.get(0);
+        return reduce(aggregationsList, first.topLevelPipelineAggregators, context);
+    }
+
+    /**
+     * Reduces the given list of aggregations as well as the provided top-level pipeline aggregators.
+     * Note that top-level pipeline aggregators are reduced only as part of the final reduction phase, otherwise they are left untouched.
+     */
+    public static InternalAggregations reduce(List<InternalAggregations> aggregationsList,
+                                              List<SiblingPipelineAggregator> topLevelPipelineAggregators,
                                               ReduceContext context) {
         if (aggregationsList.isEmpty()) {
             return null;
         }
@@ -98,15 +126,14 @@ public static InternalAggregations reduce(List<InternalAggregations> aggregation
             reducedAggregations.add(first.reduce(aggregations, context));
         }
 
-        if (siblingPipelineAggregators != null) {
-            if (context.isFinalReduce()) {
-                for (SiblingPipelineAggregator pipelineAggregator : siblingPipelineAggregators) {
-                    InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(reducedAggregations), context);
-                    reducedAggregations.add(newAgg);
-                }
+        if (topLevelPipelineAggregators != null && context.isFinalReduce()) {
+            for (SiblingPipelineAggregator pipelineAggregator : topLevelPipelineAggregators) {
+                InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(reducedAggregations), context);
+                reducedAggregations.add(newAgg);
             }
+            return new InternalAggregations(reducedAggregations);
         }
-        return new InternalAggregations(reducedAggregations);
+        return new InternalAggregations(reducedAggregations, topLevelPipelineAggregators);
     }
 
     public static InternalAggregations readAggregations(StreamInput in) throws IOException {
@@ -121,11 +148,27 @@ public void readFrom(StreamInput in) throws IOException {
         if (aggregations.isEmpty()) {
             aggregationsAsMap = emptyMap();
         }
+        //TODO update version after backport
+        if (in.getVersion().onOrAfter(Version.V_8_0_0)) {
+            if (in.readBoolean()) {
+                this.topLevelPipelineAggregators = in.readList(
+                    stream -> (SiblingPipelineAggregator)in.readNamedWriteable(PipelineAggregator.class));
+            }
+        }
     }
 
     @Override
     @SuppressWarnings("unchecked")
     public void writeTo(StreamOutput out) throws IOException {
         out.writeNamedWriteableList((List<InternalAggregation>)aggregations);
+        //TODO update version after backport
+        if (out.getVersion().onOrAfter(Version.V_8_0_0)) {
+            if (topLevelPipelineAggregators == null) {
+                out.writeBoolean(false);
+            } else {
+                out.writeBoolean(true);
+                out.writeNamedWriteableList(topLevelPipelineAggregators);
+            }
+        }
     }
 }
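The two reduce overloads above are meant to be called once per reduction phase. As a rough illustration (not part of the patch, and modeled on the InternalAggregationsTests added below), the sketch that follows walks through a non-final reduction, which only carries the top-level pipeline aggregators along, and then the final reduction, which executes them. The class name TopLevelPipelineReductionSketch is a placeholder, the sample values are copied from the new test, and the class sits in the same package so it can read the package-private getTopLevelPipelineAggregators().

package org.elasticsearch.search.aggregations;

import java.util.Collections;
import java.util.List;

import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
import org.elasticsearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;

// Illustrative sketch only, not part of the patch.
class TopLevelPipelineReductionSketch {

    static void twoPhaseReduce() {
        // A shard-level terms aggregation and a sibling (top-level) pipeline aggregator,
        // built the same way as in InternalAggregationsTests below.
        InternalAggregation terms = new StringTerms("name", BucketOrder.key(true),
            10, 1, Collections.emptyList(), Collections.emptyMap(), DocValueFormat.RAW, 25, false, 10, Collections.emptyList(), 0);
        List<SiblingPipelineAggregator> topLevelPipelineAggs = Collections.singletonList(
            (SiblingPipelineAggregator) new MaxBucketPipelineAggregationBuilder("test", "test").create());

        // Non-final reduction, as performed by each cluster when CCS minimizes roundtrips:
        // the pipeline aggregators are not executed, they are carried along (and, with this
        // patch, serialized by writeTo so they reach the CCS coordinating node).
        InternalAggregations nonFinal = InternalAggregations.reduce(
            Collections.singletonList(new InternalAggregations(Collections.singletonList(terms), topLevelPipelineAggs)),
            new InternalAggregation.ReduceContext(null, null, false));
        assert nonFinal.getTopLevelPipelineAggregators().size() == 1;

        // Final reduction, as performed by the CCS coordinating node: the pipeline aggregators
        // taken from the first InternalAggregations in the list are executed and their output
        // becomes part of the aggregation list, so they are no longer carried separately.
        InternalAggregations finalReduced = InternalAggregations.reduce(
            Collections.singletonList(nonFinal), new InternalAggregation.ReduceContext(null, null, true));
        assert finalReduced.getTopLevelPipelineAggregators() == null;
        assert finalReduced.asList().size() == 2;
    }
}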
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java
new file mode 100644
index 0000000000000..433fcd895821d
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java
@@ -0,0 +1,160 @@
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.SearchModule;
+import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogramTests;
+import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.StringTermsTests;
+import org.elasticsearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder;
+import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
+import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValueTests;
+import org.elasticsearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder;
+import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
+import org.elasticsearch.search.aggregations.pipeline.SumBucketPipelineAggregationBuilder;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.VersionUtils;
+import org.hamcrest.Matchers;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.Collections;
+import java.util.List;
+
+import static java.util.Collections.emptyList;
+
+public class InternalAggregationsTests extends ESTestCase {
+
+    private final NamedWriteableRegistry registry = new NamedWriteableRegistry(
+        new SearchModule(Settings.EMPTY, false, emptyList()).getNamedWriteables());
+
+    public void testReduceEmptyAggs() {
+        List<InternalAggregations> aggs = Collections.emptyList();
+        List<SiblingPipelineAggregator> topLevelPipelineAggs = randomBoolean() ? Collections.emptyList() : null;
+        InternalAggregation.ReduceContext reduceContext = new InternalAggregation.ReduceContext(null, null, randomBoolean());
+        assertNull(InternalAggregations.reduce(aggs, topLevelPipelineAggs, reduceContext));
+    }
+
+    public void testNonFinalReduceTopLevelPipelineAggs() throws IOException {
+        InternalAggregation terms = new StringTerms("name", BucketOrder.key(true),
+            10, 1, Collections.emptyList(), Collections.emptyMap(), DocValueFormat.RAW, 25, false, 10, Collections.emptyList(), 0);
+        List<InternalAggregations> aggs = Collections.singletonList(new InternalAggregations(Collections.singletonList(terms)));
+        List<SiblingPipelineAggregator> topLevelPipelineAggs = new ArrayList<>();
+        MaxBucketPipelineAggregationBuilder maxBucketPipelineAggregationBuilder = new MaxBucketPipelineAggregationBuilder("test", "test");
+        topLevelPipelineAggs.add((SiblingPipelineAggregator)maxBucketPipelineAggregationBuilder.create());
+        InternalAggregation.ReduceContext reduceContext = new InternalAggregation.ReduceContext(null, null, false);
+        InternalAggregations reducedAggs = InternalAggregations.reduce(aggs, topLevelPipelineAggs, reduceContext);
+        assertEquals(1, reducedAggs.getTopLevelPipelineAggregators().size());
+        assertEquals(1, reducedAggs.aggregations.size());
+    }
+
+    public void testFinalReduceTopLevelPipelineAggs() throws IOException {
+        InternalAggregation terms = new StringTerms("name", BucketOrder.key(true),
+            10, 1, Collections.emptyList(), Collections.emptyMap(), DocValueFormat.RAW, 25, false, 10, Collections.emptyList(), 0);
+
+        MaxBucketPipelineAggregationBuilder maxBucketPipelineAggregationBuilder = new MaxBucketPipelineAggregationBuilder("test", "test");
+        SiblingPipelineAggregator siblingPipelineAggregator = (SiblingPipelineAggregator) maxBucketPipelineAggregationBuilder.create();
+        InternalAggregation.ReduceContext reduceContext = new InternalAggregation.ReduceContext(null, null, true);
+        final InternalAggregations reducedAggs;
+        if (randomBoolean()) {
+            InternalAggregations aggs = new InternalAggregations(Collections.singletonList(terms),
+                Collections.singletonList(siblingPipelineAggregator));
+            reducedAggs = InternalAggregations.reduce(Collections.singletonList(aggs), reduceContext);
+        } else {
+            InternalAggregations aggs = new InternalAggregations(Collections.singletonList(terms));
+            List<SiblingPipelineAggregator> topLevelPipelineAggs = Collections.singletonList(siblingPipelineAggregator);
+            reducedAggs = InternalAggregations.reduce(Collections.singletonList(aggs), topLevelPipelineAggs, reduceContext);
+        }
+        assertNull(reducedAggs.getTopLevelPipelineAggregators());
+        assertEquals(2, reducedAggs.aggregations.size());
+    }
+
+    public void testSerialization() throws Exception {
+        List<InternalAggregation> aggsList = new ArrayList<>();
+        if (randomBoolean()) {
+            StringTermsTests stringTermsTests = new StringTermsTests();
+            stringTermsTests.init();
+            stringTermsTests.setUp();
+            aggsList.add(stringTermsTests.createTestInstance());
+        }
+        if (randomBoolean()) {
+            InternalDateHistogramTests dateHistogramTests = new InternalDateHistogramTests();
+            dateHistogramTests.setUp();
+            aggsList.add(dateHistogramTests.createTestInstance());
+        }
+        if (randomBoolean()) {
+            InternalSimpleValueTests simpleValueTests = new InternalSimpleValueTests();
+            aggsList.add(simpleValueTests.createTestInstance());
+        }
+        List<SiblingPipelineAggregator> topLevelPipelineAggs = null;
+        if (randomBoolean()) {
+            topLevelPipelineAggs = new ArrayList<>();
+            if (randomBoolean()) {
+                topLevelPipelineAggs.add((SiblingPipelineAggregator)new MaxBucketPipelineAggregationBuilder("name1", "bucket1").create());
+            }
+            if (randomBoolean()) {
+                topLevelPipelineAggs.add((SiblingPipelineAggregator)new AvgBucketPipelineAggregationBuilder("name2", "bucket2").create());
+            }
+            if (randomBoolean()) {
+                topLevelPipelineAggs.add((SiblingPipelineAggregator)new SumBucketPipelineAggregationBuilder("name3", "bucket3").create());
+            }
+        }
+
+        Version version = VersionUtils.randomVersion(random());
+
+        InternalAggregations aggregations = new InternalAggregations(aggsList, topLevelPipelineAggs);
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            out.setVersion(version);
+            aggregations.writeTo(out);
+            try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(out.bytes().toBytesRef().bytes), registry)) {
+                in.setVersion(version);
+                InternalAggregations deserialized = InternalAggregations.readAggregations(in);
+                assertEquals(aggregations.aggregations, deserialized.aggregations);
+                if (aggregations.getTopLevelPipelineAggregators() == null) {
+                    assertNull(deserialized.getTopLevelPipelineAggregators());
+                } else {
+                    //TODO update version after backport
+                    if (version.onOrAfter(Version.V_8_0_0)) {
+                        assertNotNull(deserialized.getTopLevelPipelineAggregators());
+                        assertEquals(aggregations.getTopLevelPipelineAggregators().size(), deserialized.getTopLevelPipelineAggregators().size());
+                        for (int i = 0; i < aggregations.getTopLevelPipelineAggregators().size(); i++) {
+                            SiblingPipelineAggregator siblingPipelineAggregator1 = aggregations.getTopLevelPipelineAggregators().get(i);
+                            SiblingPipelineAggregator siblingPipelineAggregator2 = deserialized.getTopLevelPipelineAggregators().get(i);
+                            assertArrayEquals(siblingPipelineAggregator1.bucketsPaths(), siblingPipelineAggregator2.bucketsPaths());
+                            assertEquals(siblingPipelineAggregator1.name(), siblingPipelineAggregator2.name());
+                        }
+                    } else {
+                        assertNull(deserialized.getTopLevelPipelineAggregators());
+                    }
+                }
+            }
+        }
+    }
+
+    //TODO update version and rename after backport
+    public void testSerializationFromPre_8_0_0() throws IOException {
+        String aggsString = "AwZzdGVybXMFb0F0Q0EKCQVsZG5ncgAFeG56RWcFeUFxVmcABXBhQVVpBUtYc2VIAAVaclRESwVqUkxySAAFelp5d1AFRUREcEYABW1" +
+            "sckF0BU5wWWVFAAVJYVJmZgVURlJVbgAFT0RiU04FUWNwSVoABU1sb09HBUNzZHFlAAVWWmJHaQABAwGIDgNyYXcFAQAADmRhdGVfaGlzdG9ncmFt" +
+            "BVhHbVl4/wADAAKAurcDA1VUQwABAQAAAWmOhukAAQAAAWmR9dEAAAAAAAAAAAAAAANyYXcACAAAAWmQrDoAUQAAAAFpkRoXAEMAAAABaZGH9AAtA" +
+            "AAAAWmR9dEAJwAAAAFpkmOuAFwAAAABaZLRiwAYAAAAAWmTP2gAKgAAAAFpk61FABsADHNpbXBsZV92YWx1ZQVsWVNLVv8AB2RlY2ltYWwGIyMjLi" +
+            "MjQLZWZVy5zBYAAAAAAAAAAAAAAAAAAAAAAAAA";
+
+        byte[] aggsBytes = Base64.getDecoder().decode(aggsString);
+        try (NamedWriteableAwareStreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(aggsBytes), registry)) {
+            in.setVersion(VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(),
+                Version.max(Version.CURRENT.minimumCompatibilityVersion(), VersionUtils.getPreviousVersion(Version.CURRENT))));
+            InternalAggregations deserialized = InternalAggregations.readAggregations(in);
+            assertEquals(3, deserialized.aggregations.size());
+            assertThat(deserialized.aggregations.get(0), Matchers.instanceOf(StringTerms.class));
+            assertThat(deserialized.aggregations.get(1), Matchers.instanceOf(InternalDateHistogram.class));
+            assertThat(deserialized.aggregations.get(2), Matchers.instanceOf(InternalSimpleValue.class));
+        }
+    }
+}
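For completeness, the version-gated wire format added to readFrom/writeTo can be exercised with a round trip like the one below. This is a sketch rather than part of the change, mirroring testSerialization above; it assumes the Version.V_8_0_0 gate exactly as committed (the TODO notes the gate will move once the change is backported), and the class and method names (WireFormatSketch, roundTrip) are placeholders. The class sits in the same package so it can read the package-private getTopLevelPipelineAggregators().

package org.elasticsearch.search.aggregations;

import java.util.Collections;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;

// Illustrative sketch only, not part of the patch.
class WireFormatSketch {

    static InternalAggregations roundTrip(InternalAggregations aggs, Version version) throws Exception {
        NamedWriteableRegistry registry = new NamedWriteableRegistry(
            new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedWriteables());
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.setVersion(version);   // version of the node we pretend to talk to
            aggs.writeTo(out);         // pipeline aggregators are written only when version >= gate
            try (StreamInput in = new NamedWriteableAwareStreamInput(
                    StreamInput.wrap(out.bytes().toBytesRef().bytes), registry)) {
                in.setVersion(version);
                return InternalAggregations.readAggregations(in);
            }
        }
    }

    static void sketch() throws Exception {
        InternalAggregations aggs = new InternalAggregations(Collections.emptyList(), Collections.singletonList(
            (SiblingPipelineAggregator) new MaxBucketPipelineAggregationBuilder("test", "test").create()));
        // On or after the gate the top-level pipeline aggregators survive the round trip;
        // against an older node they are simply not written and come back as null.
        InternalAggregations onNewWire = roundTrip(aggs, Version.V_8_0_0);
        InternalAggregations onOldWire = roundTrip(aggs, Version.V_7_0_0);
        assert onNewWire.getTopLevelPipelineAggregators() != null;
        assert onOldWire.getTopLevelPipelineAggregators() == null;
    }
}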