From e7d352b489272a87b05019869857656de7fb82fc Mon Sep 17 00:00:00 2001 From: qwerty4030 Date: Thu, 11 May 2017 10:06:26 -0700 Subject: [PATCH] Compound order for histogram aggregations. (#22343) This commit adds support for histogram and date_histogram agg compound order by refactoring and reusing terms agg order code. The major change is that the Terms.Order and Histogram.Order classes have been replaced/refactored into a new class BucketOrder. This is a breaking change for the Java Transport API. For backward compatibility with previous ES versions the (date)histogram compound order will use the first order. Also the _term and _time aggregation order keys have been deprecated; replaced by _key. Relates to #20003: now that all these aggregations use the same order code, it should be easier to move validation to parse time (as a follow up PR). Relates to #14771: histogram and date_histogram aggregation order will now be validated at reduce time. Closes #23613: if a single BucketOrder that is not a tie-breaker is added with the Java Transport API, it will be converted into a CompoundOrder with a tie-breaker. --- .../search/aggregations/BucketOrder.java | 127 ++++ .../search/aggregations/InternalOrder.java | 595 ++++++++++++++++++ .../search/aggregations/KeyComparable.java | 41 ++ .../bucket/MultiBucketsAggregation.java | 27 - .../DateHistogramAggregationBuilder.java | 78 +-- .../histogram/DateHistogramAggregator.java | 10 +- .../DateHistogramAggregatorFactory.java | 5 +- .../bucket/histogram/Histogram.java | 81 --- .../HistogramAggregationBuilder.java | 78 +-- .../bucket/histogram/HistogramAggregator.java | 10 +- .../histogram/HistogramAggregatorFactory.java | 7 +- .../histogram/InternalDateHistogram.java | 26 +- .../bucket/histogram/InternalHistogram.java | 26 +- .../bucket/histogram/InternalOrder.java | 135 ---- .../terms/AbstractStringTermsAggregator.java | 3 +- .../bucket/terms/DoubleTerms.java | 7 +- .../bucket/terms/DoubleTermsAggregator.java | 3 +- .../GlobalOrdinalsStringTermsAggregator.java | 23 +- .../bucket/terms/InternalMappedTerms.java | 5 +- .../bucket/terms/InternalOrder.java | 385 ------------ .../bucket/terms/InternalTerms.java | 17 +- .../aggregations/bucket/terms/LongTerms.java | 7 +- .../bucket/terms/LongTermsAggregator.java | 6 +- .../bucket/terms/StringTerms.java | 7 +- .../bucket/terms/StringTermsAggregator.java | 6 +- .../aggregations/bucket/terms/Terms.java | 86 --- .../bucket/terms/TermsAggregationBuilder.java | 76 +-- .../bucket/terms/TermsAggregator.java | 69 +- .../bucket/terms/TermsAggregatorFactory.java | 20 +- .../bucket/terms/UnmappedTerms.java | 3 +- .../aggregations/support/AggregationPath.java | 29 +- .../query/MoreLikeThisQueryBuilderTests.java | 13 + .../FunctionScoreQueryBuilderTests.java | 9 + .../aggregations/InternalOrderTests.java | 158 +++++ .../aggregations/bucket/DateHistogramIT.java | 204 +++++- .../bucket/DateHistogramTests.java | 41 +- .../bucket/DiversifiedSamplerIT.java | 3 +- .../aggregations/bucket/DoubleTermsIT.java | 89 ++- .../aggregations/bucket/HistogramIT.java | 212 ++++++- .../aggregations/bucket/HistogramTests.java | 57 +- .../aggregations/bucket/LongTermsIT.java | 79 ++- .../aggregations/bucket/MinDocCountIT.java | 81 +-- .../aggregations/bucket/NaNSortingIT.java | 5 +- .../aggregations/bucket/ReverseNestedIT.java | 5 +- .../search/aggregations/bucket/SamplerIT.java | 3 +- .../aggregations/bucket/ShardSizeTermsIT.java | 31 +- .../aggregations/bucket/StringTermsIT.java | 111 ++-- .../bucket/TermsDocCountErrorIT.java | 78 +-- 
.../bucket/TermsShardMinDocCountIT.java | 5 +- .../aggregations/bucket/TermsTests.java | 22 +- .../histogram/InternalDateHistogramTests.java | 3 +- .../histogram/InternalHistogramTests.java | 3 +- .../bucket/terms/DoubleTermsTests.java | 3 +- .../bucket/terms/LongTermsTests.java | 3 +- .../bucket/terms/StringTermsTests.java | 3 +- .../bucket/terms/TermsAggregatorTests.java | 5 +- .../search/aggregations/metrics/AvgIT.java | 4 +- .../aggregations/metrics/ExtendedStatsIT.java | 5 +- .../metrics/HDRPercentileRanksIT.java | 6 +- .../metrics/HDRPercentilesIT.java | 6 +- .../search/aggregations/metrics/MaxIT.java | 4 +- .../search/aggregations/metrics/MinIT.java | 4 +- .../search/aggregations/metrics/StatsIT.java | 4 +- .../search/aggregations/metrics/SumIT.java | 4 +- .../metrics/TDigestPercentileRanksIT.java | 6 +- .../metrics/TDigestPercentilesIT.java | 6 +- .../aggregations/metrics/TopHitsIT.java | 8 +- .../aggregations/pipeline/AvgBucketIT.java | 10 +- .../pipeline/ExtendedStatsBucketIT.java | 12 +- .../aggregations/pipeline/MaxBucketIT.java | 10 +- .../aggregations/pipeline/MinBucketIT.java | 10 +- .../pipeline/PercentilesBucketIT.java | 13 +- .../aggregations/pipeline/StatsBucketIT.java | 10 +- .../aggregations/pipeline/SumBucketIT.java | 10 +- .../bucket/datehistogram-aggregation.asciidoc | 4 + .../bucket/histogram-aggregation.asciidoc | 4 + .../bucket/terms-aggregation.asciidoc | 30 +- .../bucket/datehistogram-aggregation.asciidoc | 7 + .../bucket/histogram-aggregation.asciidoc | 116 +--- .../bucket/terms-aggregation.asciidoc | 3 +- .../migration/migrate_6_0/java.asciidoc | 10 +- .../test/search.aggregation/10_histogram.yaml | 63 ++ .../test/search.aggregation/20_terms.yaml | 54 ++ .../test/AbstractSerializingTestCase.java | 4 +- .../test/AbstractWireSerializingTestCase.java | 6 +- .../org/elasticsearch/test/ESTestCase.java | 34 +- 86 files changed, 2260 insertions(+), 1431 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java create mode 100644 core/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java create mode 100644 core/src/main/java/org/elasticsearch/search/aggregations/KeyComparable.java delete mode 100644 core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java delete mode 100644 core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java create mode 100644 core/src/test/java/org/elasticsearch/search/aggregations/InternalOrderTests.java diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java b/core/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java new file mode 100644 index 0000000000000..482a90f08526c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.support.AggregationPath; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; + +/** + * {@link Bucket} Ordering strategy. + */ +public abstract class BucketOrder implements ToXContentObject, Writeable { + + /** + * Creates a bucket ordering strategy that sorts buckets by their document counts (ascending or descending). + * + * @param asc direction to sort by: {@code true} for ascending, {@code false} for descending. + */ + public static BucketOrder count(boolean asc) { + return asc ? InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC; + } + + /** + * Creates a bucket ordering strategy that sorts buckets by their keys (ascending or descending). This may be + * used as a tie-breaker to avoid non-deterministic ordering. + * + * @param asc direction to sort by: {@code true} for ascending, {@code false} for descending. + */ + public static BucketOrder key(boolean asc) { + return asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC; + } + + /** + * Creates a bucket ordering strategy which sorts buckets based on a single-valued sub-aggregation. + * + * @param path path to the sub-aggregation to sort on. + * @param asc direction to sort by: {@code true} for ascending, {@code false} for descending. + * @see AggregationPath + */ + public static BucketOrder aggregation(String path, boolean asc) { + return new InternalOrder.Aggregation(path, asc); + } + + /** + * Creates a bucket ordering strategy which sorts buckets based on a metric from a multi-valued sub-aggregation. + * + * @param path path to the sub-aggregation to sort on. + * @param metricName name of the value of the multi-value metric to sort on. + * @param asc direction to sort by: {@code true} for ascending, {@code false} for descending. + * @see AggregationPath + */ + public static BucketOrder aggregation(String path, String metricName, boolean asc) { + return new InternalOrder.Aggregation(path + "." + metricName, asc); + } + + /** + * Creates a bucket ordering strategy which sorts buckets based on multiple criteria. A tie-breaker may be added to + * avoid non-deterministic ordering. + * + * @param orders a list of {@link BucketOrder} objects to sort on, in order of priority. + */ + public static BucketOrder compound(List orders) { + return new InternalOrder.CompoundOrder(orders); + } + + /** + * Creates a bucket ordering strategy which sorts buckets based on multiple criteria. A tie-breaker may be added to + * avoid non-deterministic ordering. + * + * @param orders a list of {@link BucketOrder} parameters to sort on, in order of priority. + */ + public static BucketOrder compound(BucketOrder... orders) { + return compound(Arrays.asList(orders)); + } + + /** + * @return A comparator for the bucket based on the given aggregator. The comparator is used in two phases: + *

+ * - aggregation phase, where each shard builds a list of buckets to be sent to the coordinating node.
+ *   In this phase, the passed in aggregator will be the aggregator that aggregates the buckets on the
+ *   shard level.

+ * - reduce phase, where the coordinating node gathers all the buckets from all the shards and reduces them + * to a final bucket list. In this case, the passed in aggregator will be {@code null}. + */ + public abstract Comparator comparator(Aggregator aggregator); + + /** + * @return unique internal ID used for reading/writing this order from/to a stream. + * @see InternalOrder.Streams + */ + abstract byte id(); + + @Override + public abstract int hashCode(); + + @Override + public abstract boolean equals(Object obj); + + @Override + public void writeTo(StreamOutput out) throws IOException { + InternalOrder.Streams.writeOrder(this, out); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java b/core/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java new file mode 100644 index 0000000000000..e81c0b1890bdc --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java @@ -0,0 +1,595 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.util.Comparators; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; +import org.elasticsearch.search.aggregations.support.AggregationPath; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Objects; + +/** + * Implementations for {@link Bucket} ordering strategies. + */ +public class InternalOrder extends BucketOrder { + + private final byte id; + private final String key; + protected final boolean asc; + protected final Comparator comparator; + + /** + * Creates an ordering strategy that sorts {@link Bucket}s by some property. + * + * @param id unique ID for this ordering strategy. + * @param key key of the property to sort on. + * @param asc direction to sort by: {@code true} for ascending, {@code false} for descending. + * @param comparator determines how buckets will be ordered. 
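For reference, a minimal sketch of how the factory methods above (count, key, aggregation, compound) can be combined from the Java API after this change. The aggregation and field names ("page_views", "views", "avg_load", "load_time") are made up, and AggregationBuilders is assumed as the usual client-side entry point:

import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;

class BucketOrderUsageSketch {
    static HistogramAggregationBuilder pageViews() {
        return AggregationBuilders.histogram("page_views")                       // hypothetical names
                .field("views")
                .interval(50)
                .subAggregation(AggregationBuilders.avg("avg_load").field("load_time"))
                // sort primarily by the sub-aggregation value (descending), break ties on the bucket key
                .order(BucketOrder.compound(
                        BucketOrder.aggregation("avg_load", false),
                        BucketOrder.key(true)));
    }
}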
+ */ + public InternalOrder(byte id, String key, boolean asc, Comparator comparator) { + this.id = id; + this.key = key; + this.asc = asc; + this.comparator = comparator; + } + + @Override + byte id() { + return id; + } + + @Override + public Comparator comparator(Aggregator aggregator) { + return comparator; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().field(key, asc ? "asc" : "desc").endObject(); + } + + /** + * Validate a bucket ordering strategy for an {@link Aggregator}. + * + * @param order bucket ordering strategy to sort on. + * @param aggregator aggregator to sort. + * @return unmodified bucket ordering strategy. + * @throws AggregationExecutionException if validation fails + */ + public static BucketOrder validate(BucketOrder order, Aggregator aggregator) throws AggregationExecutionException { + if (order instanceof CompoundOrder) { + for (BucketOrder innerOrder : ((CompoundOrder) order).orderElements) { + validate(innerOrder, aggregator); + } + } else if (order instanceof Aggregation) { + ((Aggregation) order).path().validate(aggregator); + } + return order; + } + + /** + * {@link Bucket} ordering strategy to sort by a sub-aggregation. + */ + public static class Aggregation extends InternalOrder { + + static final byte ID = 0; + + /** + * Create a new ordering strategy to sort by a sub-aggregation. + * + * @param path path to the sub-aggregation to sort on. + * @param asc direction to sort by: {@code true} for ascending, {@code false} for descending. + * @see AggregationPath + */ + Aggregation(String path, boolean asc) { + super(ID, path, asc, new AggregationComparator(path, asc)); + } + + /** + * @return parsed path to the sub-aggregation to sort on. + */ + public AggregationPath path() { + return ((AggregationComparator) comparator).path; + } + + @Override + public Comparator comparator(Aggregator aggregator) { + if (aggregator instanceof TermsAggregator) { + // Internal Optimization for terms aggregation to avoid constructing buckets for ordering purposes + return ((TermsAggregator) aggregator).bucketComparator(path(), asc); + } + return comparator; + } + + /** + * {@link Bucket} ordering strategy to sort by a sub-aggregation. + */ + static class AggregationComparator implements Comparator { + + private final AggregationPath path; + private final boolean asc; + + /** + * Create a new {@link Bucket} ordering strategy to sort by a sub-aggregation. + * + * @param path path to the sub-aggregation to sort on. + * @param asc direction to sort by: {@code true} for ascending, {@code false} for descending. + * @see AggregationPath + */ + AggregationComparator(String path, boolean asc) { + this.asc = asc; + this.path = AggregationPath.parse(path); + } + + @Override + public int compare(Bucket b1, Bucket b2) { + double v1 = path.resolveValue(b1); + double v2 = path.resolveValue(b2); + return Comparators.compareDiscardNaN(v1, v2, asc); + } + } + } + + /** + * {@link Bucket} ordering strategy to sort by multiple criteria. + */ + public static class CompoundOrder extends BucketOrder { + + static final byte ID = -1; + + final List orderElements; + + /** + * Create a new ordering strategy to sort by multiple criteria. A tie-breaker may be added to avoid + * non-deterministic ordering. + * + * @param compoundOrder a list of {@link BucketOrder}s to sort on, in order of priority. 
+ */ + CompoundOrder(List compoundOrder) { + this(compoundOrder, true); + } + + /** + * Create a new ordering strategy to sort by multiple criteria. + * + * @param compoundOrder a list of {@link BucketOrder}s to sort on, in order of priority. + * @param absoluteOrdering {@code true} to add a tie-breaker to avoid non-deterministic ordering if needed, + * {@code false} otherwise. + */ + CompoundOrder(List compoundOrder, boolean absoluteOrdering) { + this.orderElements = new LinkedList<>(compoundOrder); + BucketOrder lastElement = null; + for (BucketOrder order : orderElements) { + if (order instanceof CompoundOrder) { + throw new IllegalArgumentException("nested compound order not supported"); + } + lastElement = order; + } + if (absoluteOrdering && isKeyOrder(lastElement) == false) { + // add key order ascending as a tie-breaker to avoid non-deterministic ordering + // if all user provided comparators return 0. + this.orderElements.add(KEY_ASC); + } + } + + @Override + byte id() { + return ID; + } + + /** + * @return unmodifiable list of {@link BucketOrder}s to sort on. + */ + public List orderElements() { + return Collections.unmodifiableList(orderElements); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray(); + for (BucketOrder order : orderElements) { + order.toXContent(builder, params); + } + return builder.endArray(); + } + + @Override + public Comparator comparator(Aggregator aggregator) { + return new CompoundOrderComparator(orderElements, aggregator); + } + + @Override + public int hashCode() { + return Objects.hash(orderElements); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + CompoundOrder other = (CompoundOrder) obj; + return Objects.equals(orderElements, other.orderElements); + } + + /** + * {@code Comparator} for sorting buckets by multiple criteria. + */ + static class CompoundOrderComparator implements Comparator { + + private List compoundOrder; + private Aggregator aggregator; + + /** + * Create a new {@code Comparator} for sorting buckets by multiple criteria. + * + * @param compoundOrder a list of {@link BucketOrder}s to sort on, in order of priority. + * @param aggregator {@link BucketOrder#comparator(Aggregator)} + */ + CompoundOrderComparator(List compoundOrder, Aggregator aggregator) { + this.compoundOrder = compoundOrder; + this.aggregator = aggregator; + } + + @Override + public int compare(Bucket b1, Bucket b2) { + int result = 0; + for (Iterator itr = compoundOrder.iterator(); itr.hasNext() && result == 0; ) { + result = itr.next().comparator(aggregator).compare(b1, b2); + } + return result; + } + } + } + + private static final byte COUNT_DESC_ID = 1; + private static final byte COUNT_ASC_ID = 2; + private static final byte KEY_DESC_ID = 3; + private static final byte KEY_ASC_ID = 4; + + /** + * Order by the (higher) count of each bucket. + */ + static final InternalOrder COUNT_DESC = new InternalOrder(COUNT_DESC_ID, "_count", false, comparingCounts().reversed()); + + /** + * Order by the (lower) count of each bucket. + */ + static final InternalOrder COUNT_ASC = new InternalOrder(COUNT_ASC_ID, "_count", true, comparingCounts()); + + /** + * Order by the key of each bucket descending. + */ + static final InternalOrder KEY_DESC = new InternalOrder(KEY_DESC_ID, "_key", false, comparingKeys().reversed()); + + /** + * Order by the key of each bucket ascending. 
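One observable consequence of the tie-breaker logic in the CompoundOrder constructor above (a sketch of behavior, not part of the patch): a compound order whose last element is not a key order gets _key ascending appended, so these two orders end up equal.

import org.elasticsearch.search.aggregations.BucketOrder;

class CompoundTieBreakerSketch {
    static boolean demo() {
        BucketOrder explicit = BucketOrder.compound(BucketOrder.count(false), BucketOrder.key(true));
        BucketOrder implicit = BucketOrder.compound(BucketOrder.count(false)); // _key asc appended
        return explicit.equals(implicit); // true: both are [_count desc, _key asc]
    }
}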
+ */ + static final InternalOrder KEY_ASC = new InternalOrder(KEY_ASC_ID, "_key", true, comparingKeys()); + + /** + * @return compare by {@link Bucket#getDocCount()}. + */ + private static Comparator comparingCounts() { + return Comparator.comparingLong(Bucket::getDocCount); + } + + /** + * @return compare by {@link Bucket#getKey()} from the appropriate implementation. + */ + @SuppressWarnings("unchecked") + private static Comparator comparingKeys() { + return (b1, b2) -> { + if (b1 instanceof KeyComparable) { + return ((KeyComparable) b1).compareKey(b2); + } + throw new IllegalStateException("Unexpected order bucket class [" + b1.getClass() + "]"); + }; + } + + /** + * Determine if the ordering strategy is sorting on bucket count descending. + * + * @param order bucket ordering strategy to check. + * @return {@code true} if the ordering strategy is sorting on bucket count descending, {@code false} otherwise. + */ + public static boolean isCountDesc(BucketOrder order) { + return isOrder(order, COUNT_DESC); + } + + /** + * Determine if the ordering strategy is sorting on bucket key (ascending or descending). + * + * @param order bucket ordering strategy to check. + * @return {@code true} if the ordering strategy is sorting on bucket key, {@code false} otherwise. + */ + public static boolean isKeyOrder(BucketOrder order) { + return isOrder(order, KEY_ASC) || isOrder(order, KEY_DESC); + } + + /** + * Determine if the ordering strategy is sorting on bucket key ascending. + * + * @param order bucket ordering strategy to check. + * @return {@code true} if the ordering strategy is sorting on bucket key ascending, {@code false} otherwise. + */ + public static boolean isKeyAsc(BucketOrder order) { + return isOrder(order, KEY_ASC); + } + + /** + * Determine if the ordering strategy is sorting on bucket key descending. + * + * @param order bucket ordering strategy to check. + * @return {@code true} if the ordering strategy is sorting on bucket key descending, {@code false} otherwise. + */ + public static boolean isKeyDesc(BucketOrder order) { + return isOrder(order, KEY_DESC); + } + + /** + * Determine if the ordering strategy matches the expected one. + * + * @param order bucket ordering strategy to check. If this is a {@link CompoundOrder} the first element will be + * check instead. + * @param expected expected bucket ordering strategy. + * @return {@code true} if the order matches, {@code false} otherwise. + */ + private static boolean isOrder(BucketOrder order, BucketOrder expected) { + if (order == expected) { + return true; + } else if (order instanceof CompoundOrder) { + // check if its a compound order with the first element that matches + List orders = ((CompoundOrder) order).orderElements; + if (orders.size() >= 1) { + return isOrder(orders.get(0), expected); + } + } + return false; + } + + /** + * Contains logic for reading/writing {@link BucketOrder} from/to streams. + */ + public static class Streams { + + /** + * Read a {@link BucketOrder} from a {@link StreamInput}. + * + * @param in stream with order data to read. + * @return order read from the stream + * @throws IOException on error reading from the stream. 
+ */ + public static BucketOrder readOrder(StreamInput in) throws IOException { + byte id = in.readByte(); + switch (id) { + case COUNT_DESC_ID: return COUNT_DESC; + case COUNT_ASC_ID: return COUNT_ASC; + case KEY_DESC_ID: return KEY_DESC; + case KEY_ASC_ID: return KEY_ASC; + case Aggregation.ID: + boolean asc = in.readBoolean(); + String key = in.readString(); + return new Aggregation(key, asc); + case CompoundOrder.ID: + int size = in.readVInt(); + List compoundOrder = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + compoundOrder.add(Streams.readOrder(in)); + } + return new CompoundOrder(compoundOrder, false); + default: + throw new RuntimeException("unknown order id [" + id + "]"); + } + } + + /** + * ONLY FOR HISTOGRAM ORDER: Backwards compatibility logic to read a {@link BucketOrder} from a {@link StreamInput}. + * + * @param in stream with order data to read. + * @param bwcOrderFlag {@code true} to check {@code in.readBoolean()} in the backwards compat logic before reading + * the order. {@code false} to skip this flag (order always present). + * @return order read from the stream + * @throws IOException on error reading from the stream. + */ + public static BucketOrder readHistogramOrder(StreamInput in, boolean bwcOrderFlag) throws IOException { + if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha2_UNRELEASED)) { + return Streams.readOrder(in); + } else { // backwards compat logic + if (bwcOrderFlag == false || in.readBoolean()) { + // translate the old histogram order IDs to the new order objects + byte id = in.readByte(); + switch (id) { + case 1: return KEY_ASC; + case 2: return KEY_DESC; + case 3: return COUNT_ASC; + case 4: return COUNT_DESC; + case 0: // aggregation order stream logic is backwards compatible + boolean asc = in.readBoolean(); + String key = in.readString(); + return new Aggregation(key, asc); + default: // not expecting compound order ID + throw new RuntimeException("unknown histogram order id [" + id + "]"); + } + } else { // default to _key asc if no order specified + return KEY_ASC; + } + } + } + + /** + * Write a {@link BucketOrder} to a {@link StreamOutput}. + * + * @param order order to write to the stream. + * @param out stream to write the order to. + * @throws IOException on error writing to the stream. + */ + public static void writeOrder(BucketOrder order, StreamOutput out) throws IOException { + out.writeByte(order.id()); + if (order instanceof Aggregation) { + Aggregation aggregationOrder = (Aggregation) order; + out.writeBoolean(aggregationOrder.asc); + out.writeString(aggregationOrder.path().toString()); + } else if (order instanceof CompoundOrder) { + CompoundOrder compoundOrder = (CompoundOrder) order; + out.writeVInt(compoundOrder.orderElements.size()); + for (BucketOrder innerOrder : compoundOrder.orderElements) { + innerOrder.writeTo(out); + } + } + } + + /** + * ONLY FOR HISTOGRAM ORDER: Backwards compatibility logic to write a {@link BucketOrder} to a stream. + * + * @param order order to write to the stream. + * @param out stream to write the order to. + * @param bwcOrderFlag {@code true} to always {@code out.writeBoolean(true)} for the backwards compat logic before + * writing the order. {@code false} to skip this flag. + * @throws IOException on error writing to the stream. 
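A rough round-trip sketch of the current-version wire format implemented by writeOrder/readOrder above (no backwards-compat path exercised); BytesStreamOutput is assumed here purely as a test harness:

import java.io.IOException;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalOrder;

class OrderWireSketch {
    static BucketOrder roundTrip(BucketOrder order) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            order.writeTo(out); // delegates to InternalOrder.Streams.writeOrder
            try (StreamInput in = out.bytes().streamInput()) {
                return InternalOrder.Streams.readOrder(in);
            }
        }
    }
}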
+ */ + public static void writeHistogramOrder(BucketOrder order, StreamOutput out, boolean bwcOrderFlag) throws IOException { + if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha2_UNRELEASED)) { + order.writeTo(out); + } else { // backwards compat logic + if(bwcOrderFlag) { // need to add flag that determines if order exists + out.writeBoolean(true); // order always exists + } + if (order instanceof CompoundOrder) { + // older versions do not support histogram compound order; the best we can do here is use the first order. + order = ((CompoundOrder) order).orderElements.get(0); + } + if (order instanceof Aggregation) { + // aggregation order stream logic is backwards compatible + order.writeTo(out); + } else { + // convert the new order IDs to the old histogram order IDs. + byte id; + switch (order.id()) { + case COUNT_DESC_ID: id = 4; break; + case COUNT_ASC_ID: id = 3; break; + case KEY_DESC_ID: id = 2; break; + case KEY_ASC_ID: id = 1; break; + default: throw new RuntimeException("unknown order id [" + order.id() + "]"); + } + out.writeByte(id); + } + } + } + } + + /** + * Contains logic for parsing a {@link BucketOrder} from a {@link XContentParser}. + */ + public static class Parser { + + private static final DeprecationLogger DEPRECATION_LOGGER = + new DeprecationLogger(Loggers.getLogger(Parser.class)); + + /** + * Parse a {@link BucketOrder} from {@link XContent}. + * + * @param parser for parsing {@link XContent} that contains the order. + * @param context parsing context. + * @return bucket ordering strategy + * @throws IOException on error a {@link XContent} parsing error. + */ + public static BucketOrder parseOrderParam(XContentParser parser, QueryParseContext context) throws IOException { + XContentParser.Token token; + String orderKey = null; + boolean orderAsc = false; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + orderKey = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING) { + String dir = parser.text(); + if ("asc".equalsIgnoreCase(dir)) { + orderAsc = true; + } else if ("desc".equalsIgnoreCase(dir)) { + orderAsc = false; + } else { + throw new ParsingException(parser.getTokenLocation(), + "Unknown order direction [" + dir + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), + "Unexpected token [" + token + "] for [order]"); + } + } + if (orderKey == null) { + throw new ParsingException(parser.getTokenLocation(), + "Must specify at least one field for [order]"); + } + // _term and _time order deprecated in 6.0; replaced by _key + if ("_term".equals(orderKey) || "_time".equals(orderKey)) { + DEPRECATION_LOGGER.deprecated("Deprecated aggregation order key [{}] used, replaced by [_key]", orderKey); + } + switch (orderKey) { + case "_term": + case "_time": + case "_key": + return orderAsc ? KEY_ASC : KEY_DESC; + case "_count": + return orderAsc ? COUNT_ASC : COUNT_DESC; + default: // assume all other orders are sorting on a sub-aggregation. Validation occurs later. 
+ return aggregation(orderKey, orderAsc); + } + } + } + + @Override + public int hashCode() { + return Objects.hash(id, key, asc); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + InternalOrder other = (InternalOrder) obj; + return Objects.equals(id, other.id) + && Objects.equals(key, other.key) + && Objects.equals(asc, other.asc); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/KeyComparable.java b/core/src/main/java/org/elasticsearch/search/aggregations/KeyComparable.java new file mode 100644 index 0000000000000..c69cf0689c608 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/aggregations/KeyComparable.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations; + +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; + +/** + * Defines behavior for comparing {@link Bucket#getKey() bucket keys} to imposes a total ordering + * of buckets of the same type. + * + * @param {@link Bucket} of the same type that also implements {@link KeyComparable}. + * @see BucketOrder#key(boolean) + */ +public interface KeyComparable> { + + /** + * Compare this {@link Bucket}s {@link Bucket#getKey() key} with another bucket. + * + * @param other the bucket that contains the key to compare to. + * @return a negative integer, zero, or a positive integer as this buckets key + * is less than, equal to, or greater than the other buckets key. 
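On the request side, the parser above consumes each criterion as a {key: direction} object, and the (date_)histogram builders declare "order" as an object array, so a compound order can be written as a JSON array; _key replaces the deprecated _term/_time keys. A sketch building that shape with XContentBuilder (assumed only as an illustration of the accepted body):

import java.io.IOException;

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

class OrderDslSketch {
    static XContentBuilder compoundOrder() throws IOException {
        // [ { "_count": "desc" }, { "_key": "asc" } ]
        return XContentFactory.jsonBuilder()
                .startArray()
                    .startObject().field("_count", "desc").endObject()
                    .startObject().field("_key", "asc").endObject()
                .endArray();
    }
}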
+ * @see Comparable#compareTo(Object) + */ + int compareKey(T other); +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java index fc223916f72c1..24b1894455a4b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java @@ -19,12 +19,10 @@ package org.elasticsearch.search.aggregations.bucket; -import org.elasticsearch.common.util.Comparators; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.HasAggregations; -import org.elasticsearch.search.aggregations.support.AggregationPath; import java.util.List; @@ -58,31 +56,6 @@ interface Bucket extends HasAggregations, ToXContent { @Override Aggregations getAggregations(); - class SubAggregationComparator implements java.util.Comparator { - - private final AggregationPath path; - private final boolean asc; - - public SubAggregationComparator(String expression, boolean asc) { - this.asc = asc; - this.path = AggregationPath.parse(expression); - } - - public boolean asc() { - return asc; - } - - public AggregationPath path() { - return path; - } - - @Override - public int compare(B b1, B b2) { - double v1 = path.resolveValue(b1); - double v2 = path.resolveValue(b2); - return Comparators.compareDiscardNaN(v1, v2, asc); - } - } } /** diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 96b08e3bada12..4bbe2c5abe165 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.bucket.histogram; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.rounding.DateTimeUnit; @@ -28,10 +27,12 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.InternalOrder.CompoundOrder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -44,6 +45,7 @@ import java.io.IOException; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Objects; @@ -113,8 +115,8 @@ public class DateHistogramAggregationBuilder 
PARSER.declareField(DateHistogramAggregationBuilder::extendedBounds, parser -> ExtendedBounds.PARSER.apply(parser, null), ExtendedBounds.EXTENDED_BOUNDS_FIELD, ObjectParser.ValueType.OBJECT); - PARSER.declareField(DateHistogramAggregationBuilder::order, DateHistogramAggregationBuilder::parseOrder, - Histogram.ORDER_FIELD, ObjectParser.ValueType.OBJECT); + PARSER.declareObjectArray(DateHistogramAggregationBuilder::order, InternalOrder.Parser::parseOrderParam, + Histogram.ORDER_FIELD); } public static DateHistogramAggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException { @@ -125,7 +127,7 @@ public static DateHistogramAggregationBuilder parse(String aggregationName, Quer private DateHistogramInterval dateHistogramInterval; private long offset = 0; private ExtendedBounds extendedBounds; - private InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC; + private BucketOrder order = BucketOrder.key(true); private boolean keyed = false; private long minDocCount = 0; @@ -137,9 +139,7 @@ public DateHistogramAggregationBuilder(String name) { /** Read from a stream, for internal use only. */ public DateHistogramAggregationBuilder(StreamInput in) throws IOException { super(in, ValuesSourceType.NUMERIC, ValueType.DATE); - if (in.readBoolean()) { - order = InternalOrder.Streams.readOrder(in); - } + order = InternalOrder.Streams.readHistogramOrder(in, true); keyed = in.readBoolean(); minDocCount = in.readVLong(); interval = in.readLong(); @@ -150,11 +150,7 @@ public DateHistogramAggregationBuilder(StreamInput in) throws IOException { @Override protected void innerWriteTo(StreamOutput out) throws IOException { - boolean hasOrder = order != null; - out.writeBoolean(hasOrder); - if (hasOrder) { - InternalOrder.Streams.writeOrder(order, out); - } + InternalOrder.Streams.writeHistogramOrder(order, out, true); out.writeBoolean(keyed); out.writeVLong(minDocCount); out.writeLong(interval); @@ -244,17 +240,34 @@ public DateHistogramAggregationBuilder extendedBounds(ExtendedBounds extendedBou } /** Return the order to use to sort buckets of this histogram. */ - public Histogram.Order order() { + public BucketOrder order() { return order; } /** Set a new order on this builder and return the builder so that calls - * can be chained. */ - public DateHistogramAggregationBuilder order(Histogram.Order order) { + * can be chained. A tie-breaker may be added to avoid non-deterministic ordering. */ + public DateHistogramAggregationBuilder order(BucketOrder order) { if (order == null) { throw new IllegalArgumentException("[order] must not be null: [" + name + "]"); } - this.order = (InternalOrder) order; + if(order instanceof CompoundOrder || InternalOrder.isKeyOrder(order)) { + this.order = order; // if order already contains a tie-breaker we are good to go + } else { // otherwise add a tie-breaker by using a compound order + this.order = BucketOrder.compound(order); + } + return this; + } + + /** + * Sets the order in which the buckets will be returned. A tie-breaker may be added to avoid non-deterministic + * ordering. + */ + public DateHistogramAggregationBuilder order(List orders) { + if (orders == null) { + throw new IllegalArgumentException("[orders] must not be null: [" + name + "]"); + } + // if the list only contains one order use that to avoid inconsistent xcontent + order(orders.size() > 1 ? 
BucketOrder.compound(orders) : orders.get(0)); return this; } @@ -370,35 +383,4 @@ protected boolean innerEquals(Object obj) { && Objects.equals(offset, other.offset) && Objects.equals(extendedBounds, other.extendedBounds); } - - // similar to the parsing oh histogram orders, but also accepts _time as an alias for _key - private static InternalOrder parseOrder(XContentParser parser, QueryParseContext context) throws IOException { - InternalOrder order = null; - Token token; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.VALUE_STRING) { - String dir = parser.text(); - boolean asc = "asc".equals(dir); - if (!asc && !"desc".equals(dir)) { - throw new ParsingException(parser.getTokenLocation(), "Unknown order direction: [" + dir - + "]. Should be either [asc] or [desc]"); - } - order = resolveOrder(currentFieldName, asc); - } - } - return order; - } - - static InternalOrder resolveOrder(String key, boolean asc) { - if ("_key".equals(key) || "_time".equals(key)) { - return (InternalOrder) (asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC); - } - if ("_count".equals(key)) { - return (InternalOrder) (asc ? InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC); - } - return new InternalOrder.Aggregation(key, asc); - } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 871cade50cd71..f5f7877572a16 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -33,6 +33,8 @@ import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -53,7 +55,7 @@ class DateHistogramAggregator extends BucketsAggregator { private final ValuesSource.Numeric valuesSource; private final DocValueFormat formatter; private final Rounding rounding; - private final InternalOrder order; + private final BucketOrder order; private final boolean keyed; private final long minDocCount; @@ -62,7 +64,7 @@ class DateHistogramAggregator extends BucketsAggregator { private final LongHash bucketOrds; private long offset; - DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, long offset, InternalOrder order, + DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, long offset, BucketOrder order, boolean keyed, long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext aggregationContext, @@ -71,7 +73,7 @@ class DateHistogramAggregator extends BucketsAggregator { super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); this.rounding = rounding; this.offset = offset; - this.order = order; + this.order = InternalOrder.validate(order, 
this);; this.keyed = keyed; this.minDocCount = minDocCount; this.extendedBounds = extendedBounds; @@ -131,7 +133,7 @@ public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOE } // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order - CollectionUtil.introSort(buckets, InternalOrder.KEY_ASC.comparator()); + CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this)); // value source will be null for unmapped fields InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index 44bb3e02afece..a64e018288879 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -24,6 +24,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -40,14 +41,14 @@ public final class DateHistogramAggregatorFactory private final DateHistogramInterval dateHistogramInterval; private final long interval; private final long offset; - private final InternalOrder order; + private final BucketOrder order; private final boolean keyed; private final long minDocCount; private final ExtendedBounds extendedBounds; private Rounding rounding; public DateHistogramAggregatorFactory(String name, ValuesSourceConfig config, long interval, - DateHistogramInterval dateHistogramInterval, long offset, InternalOrder order, boolean keyed, long minDocCount, + DateHistogramInterval dateHistogramInterval, long offset, BucketOrder order, boolean keyed, long minDocCount, Rounding rounding, ExtendedBounds extendedBounds, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java index 3ac87de81ed23..07e2eb879623c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/Histogram.java @@ -19,10 +19,8 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; -import java.util.Comparator; import java.util.List; /** @@ -50,83 +48,4 @@ interface Bucket extends MultiBucketsAggregation.Bucket { @Override List getBuckets(); - /** - * A strategy defining the order in which the buckets in this histogram are ordered. 
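Tying this back to the transport-client note in the commit message: passing a single non-key order to a (date_)histogram builder now wraps it in a compound order with a _key ascending tie-breaker. A sketch with made-up aggregation and field names:

import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;

class DateHistogramOrderSketch {
    static DateHistogramAggregationBuilder salesPerMonth() {
        return AggregationBuilders.dateHistogram("sales_per_month")    // hypothetical names
                .field("date")
                .dateHistogramInterval(DateHistogramInterval.MONTH)
                // stored internally as compound(_count desc, _key asc)
                .order(BucketOrder.count(false));
    }
}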
- */ - abstract class Order implements ToXContent { - - private static int compareKey(Histogram.Bucket b1, Histogram.Bucket b2) { - if (b1 instanceof InternalHistogram.Bucket) { - return Double.compare(((InternalHistogram.Bucket) b1).key, ((InternalHistogram.Bucket) b2).key); - } else if (b1 instanceof InternalDateHistogram.Bucket) { - return Long.compare(((InternalDateHistogram.Bucket) b1).key, ((InternalDateHistogram.Bucket) b2).key); - } else { - throw new IllegalStateException("Unexpected impl: " + b1.getClass()); - } - } - - public static final Order KEY_ASC = new InternalOrder((byte) 1, "_key", true, new Comparator() { - @Override - public int compare(Histogram.Bucket b1, Histogram.Bucket b2) { - return compareKey(b1, b2); - } - }); - - public static final Order KEY_DESC = new InternalOrder((byte) 2, "_key", false, new Comparator() { - @Override - public int compare(Histogram.Bucket b1, Histogram.Bucket b2) { - return compareKey(b2, b1); - } - }); - - public static final Order COUNT_ASC = new InternalOrder((byte) 3, "_count", true, new Comparator() { - @Override - public int compare(Histogram.Bucket b1, Histogram.Bucket b2) { - int cmp = Long.compare(b1.getDocCount(), b2.getDocCount()); - if (cmp == 0) { - cmp = compareKey(b1, b2); - } - return cmp; - } - }); - - - public static final Order COUNT_DESC = new InternalOrder((byte) 4, "_count", false, new Comparator() { - @Override - public int compare(Histogram.Bucket b1, Histogram.Bucket b2) { - int cmp = Long.compare(b2.getDocCount(), b1.getDocCount()); - if (cmp == 0) { - cmp = compareKey(b1, b2); - } - return cmp; - } - }); - - /** - * Creates a bucket ordering strategy that sorts buckets based on a single-valued calc sug-aggregation - * - * @param path the name of the aggregation - * @param asc The direction of the order (ascending or descending) - */ - public static Order aggregation(String path, boolean asc) { - return new InternalOrder.Aggregation(path, asc); - } - - /** - * Creates a bucket ordering strategy that sorts buckets based on a multi-valued calc sug-aggregation - * - * @param aggregationName the name of the aggregation - * @param valueName The name of the value of the multi-value get by which the sorting will be applied - * @param asc The direction of the order (ascending or descending) - */ - public static Order aggregation(String aggregationName, String valueName, boolean asc) { - return new InternalOrder.Aggregation(aggregationName + "." + valueName, asc); - } - - /** - * @return The bucket comparator by which the order will be applied. 
- */ - abstract Comparator comparator(); - - } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java index 87c7404c088ba..9362c0b8f779c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java @@ -20,16 +20,16 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.InternalOrder.CompoundOrder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -41,6 +41,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.List; import java.util.Objects; /** @@ -75,8 +76,8 @@ public class HistogramAggregationBuilder histogram.extendedBounds(extendedBounds[0], extendedBounds[1]); }, parser -> EXTENDED_BOUNDS_PARSER.apply(parser, null), ExtendedBounds.EXTENDED_BOUNDS_FIELD, ObjectParser.ValueType.OBJECT); - PARSER.declareField(HistogramAggregationBuilder::order, HistogramAggregationBuilder::parseOrder, - Histogram.ORDER_FIELD, ObjectParser.ValueType.OBJECT); + PARSER.declareObjectArray(HistogramAggregationBuilder::order, InternalOrder.Parser::parseOrderParam, + Histogram.ORDER_FIELD); } public static HistogramAggregationBuilder parse(String aggregationName, QueryParseContext context) throws IOException { @@ -87,7 +88,7 @@ public static HistogramAggregationBuilder parse(String aggregationName, QueryPar private double offset = 0; private double minBound = Double.POSITIVE_INFINITY; private double maxBound = Double.NEGATIVE_INFINITY; - private InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC; + private BucketOrder order = BucketOrder.key(true); private boolean keyed = false; private long minDocCount = 0; @@ -99,9 +100,7 @@ public HistogramAggregationBuilder(String name) { /** Read from a stream, for internal use only. 
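Since the Histogram.Order constants removed above are a breaking change for the Java API, their equivalents under the new BucketOrder factories are as follows (a migration sketch, not part of the patch):

import org.elasticsearch.search.aggregations.BucketOrder;

class HistogramOrderMigrationSketch {
    // Histogram.Order.KEY_ASC / KEY_DESC / COUNT_ASC / COUNT_DESC replacements:
    static final BucketOrder KEY_ASC    = BucketOrder.key(true);
    static final BucketOrder KEY_DESC   = BucketOrder.key(false);
    static final BucketOrder COUNT_ASC  = BucketOrder.count(true);
    static final BucketOrder COUNT_DESC = BucketOrder.count(false);
    // Histogram.Order.aggregation(path, asc) -> BucketOrder.aggregation(path, asc)
}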
*/ public HistogramAggregationBuilder(StreamInput in) throws IOException { super(in, ValuesSourceType.NUMERIC, ValueType.DOUBLE); - if (in.readBoolean()) { - order = InternalOrder.Streams.readOrder(in); - } + order = InternalOrder.Streams.readHistogramOrder(in, true); keyed = in.readBoolean(); minDocCount = in.readVLong(); interval = in.readDouble(); @@ -112,11 +111,7 @@ public HistogramAggregationBuilder(StreamInput in) throws IOException { @Override protected void innerWriteTo(StreamOutput out) throws IOException { - boolean hasOrder = order != null; - out.writeBoolean(hasOrder); - if (hasOrder) { - InternalOrder.Streams.writeOrder(order, out); - } + InternalOrder.Streams.writeHistogramOrder(order, out, true); out.writeBoolean(keyed); out.writeVLong(minDocCount); out.writeDouble(interval); @@ -185,17 +180,34 @@ public HistogramAggregationBuilder extendedBounds(double minBound, double maxBou } /** Return the order to use to sort buckets of this histogram. */ - public Histogram.Order order() { + public BucketOrder order() { return order; } /** Set a new order on this builder and return the builder so that calls - * can be chained. */ - public HistogramAggregationBuilder order(Histogram.Order order) { + * can be chained. A tie-breaker may be added to avoid non-deterministic ordering. */ + public HistogramAggregationBuilder order(BucketOrder order) { if (order == null) { throw new IllegalArgumentException("[order] must not be null: [" + name + "]"); } - this.order = (InternalOrder) order; + if(order instanceof CompoundOrder || InternalOrder.isKeyOrder(order)) { + this.order = order; // if order already contains a tie-breaker we are good to go + } else { // otherwise add a tie-breaker by using a compound order + this.order = BucketOrder.compound(order); + } + return this; + } + + /** + * Sets the order in which the buckets will be returned. A tie-breaker may be added to avoid non-deterministic + * ordering. + */ + public HistogramAggregationBuilder order(List orders) { + if (orders == null) { + throw new IllegalArgumentException("[orders] must not be null: [" + name + "]"); + } + // if the list only contains one order use that to avoid inconsistent xcontent + order(orders.size() > 1 ? BucketOrder.compound(orders) : orders.get(0)); return this; } @@ -286,34 +298,4 @@ protected boolean innerEquals(Object obj) { && Objects.equals(minBound, other.minBound) && Objects.equals(maxBound, other.maxBound); } - - private static InternalOrder parseOrder(XContentParser parser, QueryParseContext context) throws IOException { - InternalOrder order = null; - Token token; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.VALUE_STRING) { - String dir = parser.text(); - boolean asc = "asc".equals(dir); - if (!asc && !"desc".equals(dir)) { - throw new ParsingException(parser.getTokenLocation(), "Unknown order direction: [" + dir - + "]. Should be either [asc] or [desc]"); - } - order = resolveOrder(currentFieldName, asc); - } - } - return order; - } - - static InternalOrder resolveOrder(String key, boolean asc) { - if ("_key".equals(key)) { - return (InternalOrder) (asc ? InternalOrder.KEY_ASC : InternalOrder.KEY_DESC); - } - if ("_count".equals(key)) { - return (InternalOrder) (asc ? 
InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC); - } - return new InternalOrder.Aggregation(key, asc); - } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java index a3a038cfa3cae..0c2ba554c0b9f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java @@ -34,6 +34,8 @@ import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.EmptyBucketInfo; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -54,7 +56,7 @@ class HistogramAggregator extends BucketsAggregator { private final ValuesSource.Numeric valuesSource; private final DocValueFormat formatter; private final double interval, offset; - private final InternalOrder order; + private final BucketOrder order; private final boolean keyed; private final long minDocCount; private final double minBound, maxBound; @@ -62,7 +64,7 @@ class HistogramAggregator extends BucketsAggregator { private final LongHash bucketOrds; HistogramAggregator(String name, AggregatorFactories factories, double interval, double offset, - InternalOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, + BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext context, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { @@ -73,7 +75,7 @@ class HistogramAggregator extends BucketsAggregator { } this.interval = interval; this.offset = offset; - this.order = order; + this.order = InternalOrder.validate(order, this); this.keyed = keyed; this.minDocCount = minDocCount; this.minBound = minBound; @@ -137,7 +139,7 @@ public InternalAggregation buildAggregation(long bucket) throws IOException { } // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order - CollectionUtil.introSort(buckets, InternalOrder.KEY_ASC.comparator()); + CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this)); EmptyBucketInfo emptyBucketInfo = null; if (minDocCount == 0) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java index 939210b63a699..c478d1262eac9 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java @@ -23,6 +23,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; 
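The numeric histogram builder gains the same order(List<BucketOrder>) overload as the date histogram; a sketch with invented names showing the list form:

import java.util.Arrays;

import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;

class HistogramOrderListSketch {
    static HistogramAggregationBuilder prices() {
        return AggregationBuilders.histogram("prices")        // hypothetical names
                .field("price")
                .interval(10)
                // more than one element -> wrapped in a compound order by order(List)
                .order(Arrays.asList(BucketOrder.count(false), BucketOrder.key(false)));
    }
}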
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -36,13 +37,13 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFactory { private final double interval, offset; - private final InternalOrder order; + private final BucketOrder order; private final boolean keyed; private final long minDocCount; private final double minBound, maxBound; HistogramAggregatorFactory(String name, ValuesSourceConfig config, double interval, double offset, - InternalOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, + BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); @@ -80,4 +81,4 @@ protected Aggregator createUnmapped(Aggregator parent, List throws IOException { return createAggregator(null, parent, pipelineAggregators, metaData); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index dc05ab51e8ad7..c3eab06f28ae8 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -31,6 +31,9 @@ import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.KeyComparable; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -49,7 +52,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation implements Histogram, HistogramFactory { - public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket { + public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket, KeyComparable { final long key; final long docCount; @@ -151,6 +154,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + @Override + public int compareKey(Bucket other) { + return Long.compare(key, other.key); + } + public DocValueFormat getFormatter() { return format; } @@ -206,14 +214,14 @@ public int hashCode() { } private final List buckets; - private final InternalOrder order; + private final BucketOrder order; private final DocValueFormat format; private final boolean keyed; private final long minDocCount; private final long offset; private final EmptyBucketInfo emptyBucketInfo; - InternalDateHistogram(String name, List buckets, InternalOrder order, long minDocCount, long offset, + InternalDateHistogram(String name, List buckets, BucketOrder order, long minDocCount, long offset, EmptyBucketInfo emptyBucketInfo, DocValueFormat formatter, boolean keyed, List pipelineAggregators, Map metaData) { @@ -233,7 +241,7 @@ public int hashCode() { */ public InternalDateHistogram(StreamInput in) 
throws IOException { super(in); - order = InternalOrder.Streams.readOrder(in); + order = InternalOrder.Streams.readHistogramOrder(in, false); minDocCount = in.readVLong(); if (minDocCount == 0) { emptyBucketInfo = new EmptyBucketInfo(in); @@ -248,7 +256,7 @@ public InternalDateHistogram(StreamInput in) throws IOException { @Override protected void doWriteTo(StreamOutput out) throws IOException { - InternalOrder.Streams.writeOrder(order, out); + InternalOrder.Streams.writeHistogramOrder(order, out, false); out.writeVLong(minDocCount); if (minDocCount == 0) { emptyBucketInfo.writeTo(out); @@ -416,18 +424,18 @@ public InternalAggregation doReduce(List aggregations, Redu addEmptyBuckets(reducedBuckets, reduceContext); } - if (order == InternalOrder.KEY_ASC || reduceContext.isFinalReduce() == false) { + if (InternalOrder.isKeyAsc(order) || reduceContext.isFinalReduce() == false) { // nothing to do, data are already sorted since shards return // sorted buckets and the merge-sort performed by reduceBuckets // maintains order - } else if (order == InternalOrder.KEY_DESC) { + } else if (InternalOrder.isKeyDesc(order)) { // we just need to reverse here... List reverse = new ArrayList<>(reducedBuckets); Collections.reverse(reverse); reducedBuckets = reverse; } else { - // sorted by sub-aggregation, need to fall back to a costly n*log(n) sort - CollectionUtil.introSort(reducedBuckets, order.comparator()); + // sorted by compound order or sub-aggregation, need to fall back to a costly n*log(n) sort + CollectionUtil.introSort(reducedBuckets, order.comparator(null)); } return new InternalDateHistogram(getName(), reducedBuckets, order, minDocCount, offset, emptyBucketInfo, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index c6d2aa9eeb959..035dd9de05539 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -30,6 +30,9 @@ import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.KeyComparable; import java.io.IOException; import java.util.ArrayList; @@ -45,7 +48,7 @@ */ public final class InternalHistogram extends InternalMultiBucketAggregation implements Histogram, HistogramFactory { - public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket { + public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket, KeyComparable { final double key; final long docCount; @@ -147,6 +150,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + @Override + public int compareKey(Bucket other) { + return Double.compare(key, other.key); + } + public DocValueFormat getFormatter() { return format; } @@ -201,13 +209,13 @@ public int hashCode() { } private final List buckets; - private final InternalOrder order; + private final BucketOrder order; private final DocValueFormat format; private final boolean keyed; private final long 
minDocCount; private final EmptyBucketInfo emptyBucketInfo; - InternalHistogram(String name, List buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo, + InternalHistogram(String name, List buckets, BucketOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo, DocValueFormat formatter, boolean keyed, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); @@ -225,7 +233,7 @@ public int hashCode() { */ public InternalHistogram(StreamInput in) throws IOException { super(in); - order = InternalOrder.Streams.readOrder(in); + order = InternalOrder.Streams.readHistogramOrder(in, false); minDocCount = in.readVLong(); if (minDocCount == 0) { emptyBucketInfo = new EmptyBucketInfo(in); @@ -239,7 +247,7 @@ public InternalHistogram(StreamInput in) throws IOException { @Override protected void doWriteTo(StreamOutput out) throws IOException { - InternalOrder.Streams.writeOrder(order, out); + InternalOrder.Streams.writeHistogramOrder(order, out, false); out.writeVLong(minDocCount); if (minDocCount == 0) { emptyBucketInfo.writeTo(out); @@ -400,18 +408,18 @@ public InternalAggregation doReduce(List aggregations, Redu addEmptyBuckets(reducedBuckets, reduceContext); } - if (order == InternalOrder.KEY_ASC || reduceContext.isFinalReduce() == false) { + if (InternalOrder.isKeyAsc(order) || reduceContext.isFinalReduce() == false) { // nothing to do, data are already sorted since shards return // sorted buckets and the merge-sort performed by reduceBuckets // maintains order - } else if (order == InternalOrder.KEY_DESC) { + } else if (InternalOrder.isKeyDesc(order)) { // we just need to reverse here... List reverse = new ArrayList<>(reducedBuckets); Collections.reverse(reverse); reducedBuckets = reverse; } else { - // sorted by sub-aggregation, need to fall back to a costly n*log(n) sort - CollectionUtil.introSort(reducedBuckets, order.comparator()); + // sorted by compound order or sub-aggregation, need to fall back to a costly n*log(n) sort + CollectionUtil.introSort(reducedBuckets, order.comparator(null)); } return new InternalHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, format, keyed, pipelineAggregators(), diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java deleted file mode 100644 index 5cf2f83baa850..0000000000000 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
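The histogram buckets above now only implement compareKey(...) (Long.compare for date_histogram, Double.compare for histogram), and the sort direction is applied centrally. A simplified sketch of that contract, assuming a self-referential generic bound for the new KeyComparable interface (its exact signature is not visible in this hunk):

    import java.util.Comparator;

    // Assumed shape of the key-comparison contract introduced by this patch.
    interface KeyComparableSketch<T extends KeyComparableSketch<T>> {
        int compareKey(T other);

        // Ascending/descending key comparators can then be derived in one place
        // instead of once per bucket type.
        static <T extends KeyComparableSketch<T>> Comparator<T> keyOrder(boolean asc) {
            return asc ? (b1, b2) -> b1.compareKey(b2)
                       : (b1, b2) -> b2.compareKey(b1);
        }
    }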
- */ -package org.elasticsearch.search.aggregations.bucket.histogram; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; - -import java.io.IOException; -import java.util.Comparator; -import java.util.Objects; - -/** - * An internal {@link Histogram.Order} strategy which is identified by a unique id. - */ -class InternalOrder extends Histogram.Order { - - final byte id; - final String key; - final boolean asc; - final Comparator comparator; - - InternalOrder(byte id, String key, boolean asc, Comparator comparator) { - this.id = id; - this.key = key; - this.asc = asc; - this.comparator = comparator; - } - - byte id() { - return id; - } - - String key() { - return key; - } - - boolean asc() { - return asc; - } - - @Override - Comparator comparator() { - return comparator; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.startObject().field(key, asc ? "asc" : "desc").endObject(); - } - - @Override - public int hashCode() { - return Objects.hash(id, key, asc); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - InternalOrder other = (InternalOrder) obj; - return Objects.equals(id, other.id) - && Objects.equals(key, other.key) - && Objects.equals(asc, other.asc); - } - - static class Aggregation extends InternalOrder { - - static final byte ID = 0; - - Aggregation(String key, boolean asc) { - super(ID, key, asc, new MultiBucketsAggregation.Bucket.SubAggregationComparator(key, asc)); - } - - } - - static class Streams { - - /** - * Writes the given order to the given output (based on the id of the order). - */ - public static void writeOrder(InternalOrder order, StreamOutput out) throws IOException { - out.writeByte(order.id()); - if (order instanceof InternalOrder.Aggregation) { - out.writeBoolean(order.asc()); - out.writeString(order.key()); - } - } - - /** - * Reads an order from the given input (based on the id of the order). 
- * - * @see Streams#writeOrder(InternalOrder, org.elasticsearch.common.io.stream.StreamOutput) - */ - public static InternalOrder readOrder(StreamInput in) throws IOException { - byte id = in.readByte(); - switch (id) { - case 1: return (InternalOrder) Histogram.Order.KEY_ASC; - case 2: return (InternalOrder) Histogram.Order.KEY_DESC; - case 3: return (InternalOrder) Histogram.Order.COUNT_ASC; - case 4: return (InternalOrder) Histogram.Order.COUNT_DESC; - case 0: - boolean asc = in.readBoolean(); - String key = in.readString(); - return new InternalOrder.Aggregation(key, asc); - default: - throw new RuntimeException("unknown histogram order"); - } - } - - } - - -} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java index 9b40fcc42dcc6..edbf2aef25fec 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java @@ -24,6 +24,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -37,7 +38,7 @@ abstract class AbstractStringTermsAggregator extends TermsAggregator { protected final boolean showTermDocCountError; AbstractStringTermsAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent, - Terms.Order order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, SubAggCollectionMode subAggCollectMode, + BucketOrder order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, context, parent, bucketCountThresholds, order, format, subAggCollectMode, pipelineAggregators, metaData); this.showTermDocCountError = showTermDocCountError; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java index e4c7906f21586..e4885bc0539d5 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java @@ -25,6 +25,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import java.io.IOException; import java.util.ArrayList; @@ -76,8 +77,8 @@ public Number getKeyAsNumber() { } @Override - public int compareTerm(Terms.Bucket other) { - return Double.compare(term, ((Number) other.getKey()).doubleValue()); + public int compareKey(Bucket other) { + return Double.compare(term, other.term); } @Override @@ -105,7 +106,7 @@ public int hashCode() { } } - public DoubleTerms(String name, Terms.Order order, int requiredSize, long minDocCount, List pipelineAggregators, + public DoubleTerms(String name, 
BucketOrder order, int requiredSize, long minDocCount, List pipelineAggregators, Map metaData, DocValueFormat format, int shardSize, boolean showTermDocCountError, long otherDocCount, List buckets, long docCountError) { super(name, order, requiredSize, minDocCount, pipelineAggregators, metaData, format, shardSize, showTermDocCountError, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java index cf7478e319617..0ae42abd9a4d3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java @@ -27,6 +27,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.internal.SearchContext; @@ -39,7 +40,7 @@ public class DoubleTermsAggregator extends LongTermsAggregator { public DoubleTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, DocValueFormat format, - Terms.Order order, BucketCountThresholds bucketCountThresholds, SearchContext aggregationContext, Aggregator parent, + BucketOrder order, BucketCountThresholds bucketCountThresholds, SearchContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, IncludeExclude.LongFilter longFilter, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, valuesSource, format, order, bucketCountThresholds, aggregationContext, parent, collectionMode, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index f315d915f0dc6..33bbc370c6e76 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -43,6 +43,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -70,7 +71,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr protected SortedSetDocValues globalOrds; public GlobalOrdinalsStringTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals valuesSource, - Terms.Order order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, + BucketOrder order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, IncludeExclude.OrdinalsFilter includeExclude, SearchContext context, Aggregator parent, SubAggCollectionMode 
collectionMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) throws IOException { @@ -122,8 +123,8 @@ public void collect(int doc, long bucket) throws IOException { public void collect(int doc, long bucket) throws IOException { assert bucket == 0; if (ords.advanceExact(doc)) { - for (long globalOrd = ords.nextOrd(); - globalOrd != SortedSetDocValues.NO_MORE_ORDS; + for (long globalOrd = ords.nextOrd(); + globalOrd != SortedSetDocValues.NO_MORE_ORDS; globalOrd = ords.nextOrd()) { collectExistingBucket(sub, doc, globalOrd); } @@ -218,8 +219,8 @@ static class OrdBucket extends InternalTerms.Bucket { } @Override - public int compareTerm(Terms.Bucket other) { - return Long.compare(globalOrd, ((OrdBucket) other).globalOrd); + public int compareKey(OrdBucket other) { + return Long.compare(globalOrd, other.globalOrd); } @Override @@ -261,7 +262,7 @@ public static class WithHash extends GlobalOrdinalsStringTermsAggregator { private final LongHash bucketOrds; - public WithHash(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals valuesSource, Terms.Order order, + public WithHash(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals valuesSource, BucketOrder order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, IncludeExclude.OrdinalsFilter includeExclude, SearchContext context, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) @@ -296,8 +297,8 @@ public void collect(int doc, long bucket) throws IOException { @Override public void collect(int doc, long bucket) throws IOException { if (ords.advanceExact(doc)) { - for (long globalOrd = ords.nextOrd(); - globalOrd != SortedSetDocValues.NO_MORE_ORDS; + for (long globalOrd = ords.nextOrd(); + globalOrd != SortedSetDocValues.NO_MORE_ORDS; globalOrd = ords.nextOrd()) { long bucketOrd = bucketOrds.add(globalOrd); if (bucketOrd < 0) { @@ -337,7 +338,7 @@ public static class LowCardinality extends GlobalOrdinalsStringTermsAggregator { private SortedSetDocValues segmentOrds; public LowCardinality(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals valuesSource, - Terms.Order order, DocValueFormat format, + BucketOrder order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, SearchContext context, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) throws IOException { @@ -371,8 +372,8 @@ public void collect(int doc, long bucket) throws IOException { public void collect(int doc, long bucket) throws IOException { assert bucket == 0; if (ords.advanceExact(doc)) { - for (long segmentOrd = ords.nextOrd(); - segmentOrd != SortedSetDocValues.NO_MORE_ORDS; + for (long segmentOrd = ords.nextOrd(); + segmentOrd != SortedSetDocValues.NO_MORE_ORDS; segmentOrd = ords.nextOrd()) { segmentDocCounts.increment(segmentOrd + 1, 1); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java index 57c80c5fb40f0..26f8d809fe5fa 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; 
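Since Terms.Order is replaced by BucketOrder throughout these constructors, Java-API callers need the equivalent factories. A hedged before/after sketch (the aggregation and field names are illustrative):

    import org.elasticsearch.search.aggregations.AggregationBuilders;
    import org.elasticsearch.search.aggregations.BucketOrder;
    import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;

    class TermsOrderMigrationExample {
        // before (API removed by this patch):
        //   builder.order(Terms.Order.compound(Terms.Order.count(false), Terms.Order.term(true)));
        static TermsAggregationBuilder genresByCount() {
            return AggregationBuilders.terms("genres")
                    .field("genre")
                    .order(BucketOrder.compound(BucketOrder.count(false), BucketOrder.key(true)));
        }
    }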
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import java.io.IOException; import java.util.Iterator; @@ -46,7 +47,7 @@ public abstract class InternalMappedTerms, B exten protected long docCountError; - protected InternalMappedTerms(String name, Terms.Order order, int requiredSize, long minDocCount, + protected InternalMappedTerms(String name, BucketOrder order, int requiredSize, long minDocCount, List pipelineAggregators, Map metaData, DocValueFormat format, int shardSize, boolean showTermDocCountError, long otherDocCount, List buckets, long docCountError) { super(name, order, requiredSize, minDocCount, pipelineAggregators, metaData); @@ -83,7 +84,7 @@ protected final void writeTermTypeInfoTo(StreamOutput out) throws IOException { @Override protected void setDocCountError(long docCountError) { - this.docCountError = docCountError; + this.docCountError = docCountError; } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java deleted file mode 100644 index 513e7a1ac0ec8..0000000000000 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalOrder.java +++ /dev/null @@ -1,385 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.search.aggregations.bucket.terms; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.Comparators; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.Aggregator; -import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; -import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; -import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; -import org.elasticsearch.search.aggregations.support.AggregationPath; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Objects; - -class InternalOrder extends Terms.Order { - - private static final byte COUNT_DESC_ID = 1; - private static final byte COUNT_ASC_ID = 2; - private static final byte TERM_DESC_ID = 3; - private static final byte TERM_ASC_ID = 4; - - /** - * Order by the (higher) count of each term. 
- */ - public static final InternalOrder COUNT_DESC = new InternalOrder(COUNT_DESC_ID, "_count", false, new Comparator() { - @Override - public int compare(Terms.Bucket o1, Terms.Bucket o2) { - return Long.compare(o2.getDocCount(), o1.getDocCount()); - } - }); - - /** - * Order by the (lower) count of each term. - */ - public static final InternalOrder COUNT_ASC = new InternalOrder(COUNT_ASC_ID, "_count", true, new Comparator() { - - @Override - public int compare(Terms.Bucket o1, Terms.Bucket o2) { - return Long.compare(o1.getDocCount(), o2.getDocCount()); - } - }); - - /** - * Order by the terms. - */ - public static final InternalOrder TERM_DESC = new InternalOrder(TERM_DESC_ID, "_term", false, new Comparator() { - - @Override - public int compare(Terms.Bucket o1, Terms.Bucket o2) { - return o2.compareTerm(o1); - } - }); - - /** - * Order by the terms. - */ - public static final InternalOrder TERM_ASC = new InternalOrder(TERM_ASC_ID, "_term", true, new Comparator() { - - @Override - public int compare(Terms.Bucket o1, Terms.Bucket o2) { - return o1.compareTerm(o2); - } - }); - - public static boolean isCountDesc(Terms.Order order) { - if (order == COUNT_DESC) { - return true; - } else if (order instanceof CompoundOrder) { - // check if its a compound order with count desc and the tie breaker (term asc) - CompoundOrder compoundOrder = (CompoundOrder) order; - if (compoundOrder.orderElements.size() == 2 && compoundOrder.orderElements.get(0) == COUNT_DESC && compoundOrder.orderElements.get(1) == TERM_ASC) { - return true; - } - } - return false; - } - - public static boolean isTermOrder(Terms.Order order) { - if (order == TERM_ASC) { - return true; - } else if (order == TERM_DESC) { - return true; - } else if (order instanceof CompoundOrder) { - // check if its a compound order with only a single element ordering - // by term - CompoundOrder compoundOrder = (CompoundOrder) order; - if (compoundOrder.orderElements.size() == 1 && compoundOrder.orderElements.get(0) == TERM_ASC - || compoundOrder.orderElements.get(0) == TERM_DESC) { - return true; - } - } - return false; - } - - final byte id; - final String key; - final boolean asc; - protected final Comparator comparator; - - InternalOrder(byte id, String key, boolean asc, Comparator comparator) { - this.id = id; - this.key = key; - this.asc = asc; - this.comparator = comparator; - } - - @Override - byte id() { - return id; - } - - @Override - protected Comparator comparator(Aggregator aggregator) { - return comparator; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.startObject().field(key, asc ? 
"asc" : "desc").endObject(); - } - - public static Terms.Order validate(Terms.Order order, Aggregator termsAggregator) { - if (order instanceof CompoundOrder) { - for (Terms.Order innerOrder : ((CompoundOrder)order).orderElements) { - validate(innerOrder, termsAggregator); - } - return order; - } else if (!(order instanceof Aggregation)) { - return order; - } - AggregationPath path = ((Aggregation) order).path(); - path.validate(termsAggregator); - return order; - } - - static class Aggregation extends InternalOrder { - - static final byte ID = 0; - - Aggregation(String key, boolean asc) { - super(ID, key, asc, new MultiBucketsAggregation.Bucket.SubAggregationComparator(key, asc)); - } - - AggregationPath path() { - return ((MultiBucketsAggregation.Bucket.SubAggregationComparator) comparator).path(); - } - - @Override - protected Comparator comparator(Aggregator termsAggregator) { - if (termsAggregator == null) { - return comparator; - } - - // Internal Optimization: - // - // in this phase, if the order is based on sub-aggregations, we need to use a different comparator - // to avoid constructing buckets for ordering purposes (we can potentially have a lot of buckets and building - // them will cause loads of redundant object constructions). The "special" comparators here will fetch the - // sub aggregation values directly from the sub aggregators bypassing bucket creation. Note that the comparator - // attached to the order will still be used in the reduce phase of the Aggregation. - - AggregationPath path = path(); - final Aggregator aggregator = path.resolveAggregator(termsAggregator); - final String key = path.lastPathElement().key; - - if (aggregator instanceof SingleBucketAggregator) { - assert key == null : "this should be picked up before the aggregation is executed - on validate"; - return new Comparator() { - @Override - public int compare(Terms.Bucket o1, Terms.Bucket o2) { - int mul = asc ? 1 : -1; - int v1 = ((SingleBucketAggregator) aggregator).bucketDocCount(((InternalTerms.Bucket) o1).bucketOrd); - int v2 = ((SingleBucketAggregator) aggregator).bucketDocCount(((InternalTerms.Bucket) o2).bucketOrd); - return mul * (v1 - v2); - } - }; - } - - // with only support single-bucket aggregators - assert !(aggregator instanceof BucketsAggregator) : "this should be picked up before the aggregation is executed - on validate"; - - if (aggregator instanceof NumericMetricsAggregator.MultiValue) { - assert key != null : "this should be picked up before the aggregation is executed - on validate"; - return new Comparator() { - @Override - public int compare(Terms.Bucket o1, Terms.Bucket o2) { - double v1 = ((NumericMetricsAggregator.MultiValue) aggregator).metric(key, ((InternalTerms.Bucket) o1).bucketOrd); - double v2 = ((NumericMetricsAggregator.MultiValue) aggregator).metric(key, ((InternalTerms.Bucket) o2).bucketOrd); - // some metrics may return NaN (eg. avg, variance, etc...) in which case we'd like to push all of those to - // the bottom - return Comparators.compareDiscardNaN(v1, v2, asc); - } - }; - } - - // single-value metrics agg - return new Comparator() { - @Override - public int compare(Terms.Bucket o1, Terms.Bucket o2) { - double v1 = ((NumericMetricsAggregator.SingleValue) aggregator).metric(((InternalTerms.Bucket) o1).bucketOrd); - double v2 = ((NumericMetricsAggregator.SingleValue) aggregator).metric(((InternalTerms.Bucket) o2).bucketOrd); - // some metrics may return NaN (eg. avg, variance, etc...) 
in which case we'd like to push all of those to - // the bottom - return Comparators.compareDiscardNaN(v1, v2, asc); - } - }; - } - } - - static class CompoundOrder extends Terms.Order { - - static final byte ID = -1; - - private final List orderElements; - - CompoundOrder(List compoundOrder) { - this(compoundOrder, true); - } - - CompoundOrder(List compoundOrder, boolean absoluteOrdering) { - this.orderElements = new LinkedList<>(compoundOrder); - Terms.Order lastElement = compoundOrder.get(compoundOrder.size() - 1); - if (absoluteOrdering && !(InternalOrder.TERM_ASC == lastElement || InternalOrder.TERM_DESC == lastElement)) { - // add term order ascending as a tie-breaker to avoid non-deterministic ordering - // if all user provided comparators return 0. - this.orderElements.add(Order.term(true)); - } - } - - @Override - byte id() { - return ID; - } - - List orderElements() { - return Collections.unmodifiableList(orderElements); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startArray(); - for (Terms.Order order : orderElements) { - order.toXContent(builder, params); - } - return builder.endArray(); - } - - @Override - protected Comparator comparator(Aggregator aggregator) { - return new CompoundOrderComparator(orderElements, aggregator); - } - - @Override - public int hashCode() { - return Objects.hash(orderElements); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - CompoundOrder other = (CompoundOrder) obj; - return Objects.equals(orderElements, other.orderElements); - } - - public static class CompoundOrderComparator implements Comparator { - - private List compoundOrder; - private Aggregator aggregator; - - CompoundOrderComparator(List compoundOrder, Aggregator aggregator) { - this.compoundOrder = compoundOrder; - this.aggregator = aggregator; - } - - @Override - public int compare(Bucket o1, Bucket o2) { - int result = 0; - for (Iterator itr = compoundOrder.iterator(); itr.hasNext() && result == 0;) { - result = itr.next().comparator(aggregator).compare(o1, o2); - } - return result; - } - } - } - - public static class Streams { - - public static void writeOrder(Terms.Order order, StreamOutput out) throws IOException { - if (order instanceof Aggregation) { - out.writeByte(order.id()); - Aggregation aggregationOrder = (Aggregation) order; - out.writeBoolean(((MultiBucketsAggregation.Bucket.SubAggregationComparator) aggregationOrder.comparator).asc()); - AggregationPath path = ((Aggregation) order).path(); - out.writeString(path.toString()); - } else if (order instanceof CompoundOrder) { - CompoundOrder compoundOrder = (CompoundOrder) order; - out.writeByte(order.id()); - out.writeVInt(compoundOrder.orderElements.size()); - for (Terms.Order innerOrder : compoundOrder.orderElements) { - Streams.writeOrder(innerOrder, out); - } - } else { - out.writeByte(order.id()); - } - } - - public static Terms.Order readOrder(StreamInput in) throws IOException { - return readOrder(in, false); - } - - public static Terms.Order readOrder(StreamInput in, boolean absoluteOrder) throws IOException { - byte id = in.readByte(); - switch (id) { - case COUNT_DESC_ID: return absoluteOrder ? new CompoundOrder(Collections.singletonList((Terms.Order) InternalOrder.COUNT_DESC)) : InternalOrder.COUNT_DESC; - case COUNT_ASC_ID: return absoluteOrder ? 
new CompoundOrder(Collections.singletonList((Terms.Order) InternalOrder.COUNT_ASC)) : InternalOrder.COUNT_ASC; - case TERM_DESC_ID: return InternalOrder.TERM_DESC; - case TERM_ASC_ID: return InternalOrder.TERM_ASC; - case Aggregation.ID: - boolean asc = in.readBoolean(); - String key = in.readString(); - return new InternalOrder.Aggregation(key, asc); - case CompoundOrder.ID: - int size = in.readVInt(); - List compoundOrder = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - compoundOrder.add(Streams.readOrder(in, false)); - } - return new CompoundOrder(compoundOrder, absoluteOrder); - default: - throw new RuntimeException("unknown terms order"); - } - } - } - - @Override - public int hashCode() { - return Objects.hash(id, asc); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - InternalOrder other = (InternalOrder) obj; - return Objects.equals(id, other.id) - && Objects.equals(asc, other.asc); - } -} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index 3834f9a65be53..24d1d301623db 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -31,6 +31,9 @@ import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.KeyComparable; import java.io.IOException; import java.util.ArrayList; @@ -46,8 +49,8 @@ public abstract class InternalTerms, B extends Int protected static final ParseField DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = new ParseField("doc_count_error_upper_bound"); protected static final ParseField SUM_OF_OTHER_DOC_COUNTS = new ParseField("sum_other_doc_count"); - public abstract static class Bucket> extends InternalMultiBucketAggregation.InternalBucket implements Terms.Bucket { - + public abstract static class Bucket> extends InternalMultiBucketAggregation.InternalBucket + implements Terms.Bucket, KeyComparable { /** * Reads a bucket. Should be a constructor reference. 
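The CompoundOrderComparator deleted above defines the semantics a compound BucketOrder must preserve: apply each order in priority order and stop at the first non-zero comparison, with the trailing _key tie-breaker making the result deterministic. A simplified, self-contained sketch of that idea (not the new InternalOrder implementation):

    import java.util.Comparator;
    import java.util.List;

    class CompoundComparatorSketch {
        // Compare with each comparator in turn; the first non-zero result wins.
        static <B> Comparator<B> compound(List<Comparator<B>> comparators) {
            return (b1, b2) -> {
                for (Comparator<B> comparator : comparators) {
                    int result = comparator.compare(b1, b2);
                    if (result != 0) {
                        return result;
                    }
                }
                return 0;
            };
        }
    }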
*/ @@ -177,11 +180,11 @@ public int hashCode() { } } - protected final Terms.Order order; + protected final BucketOrder order; protected final int requiredSize; protected final long minDocCount; - protected InternalTerms(String name, Terms.Order order, int requiredSize, long minDocCount, + protected InternalTerms(String name, BucketOrder order, int requiredSize, long minDocCount, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); this.order = order; @@ -201,7 +204,7 @@ protected InternalTerms(StreamInput in) throws IOException { @Override protected final void doWriteTo(StreamOutput out) throws IOException { - InternalOrder.Streams.writeOrder(order, out); + order.writeTo(out); writeSize(requiredSize, out); out.writeVLong(minDocCount); writeTermTypeInfoTo(out); @@ -238,9 +241,9 @@ public InternalAggregation doReduce(List aggregations, Redu } otherDocCount += terms.getSumOfOtherDocCounts(); final long thisAggDocCountError; - if (terms.getBuckets().size() < getShardSize() || InternalOrder.isTermOrder(order)) { + if (terms.getBuckets().size() < getShardSize() || InternalOrder.isKeyOrder(order)) { thisAggDocCountError = 0; - } else if (InternalOrder.isCountDesc(this.order)) { + } else if (InternalOrder.isCountDesc(order)) { if (terms.getDocCountError() > 0) { // If there is an existing docCountError for this agg then // use this as the error for this aggregation diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java index 0de13a4d98f6b..025c397d3bd00 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java @@ -25,6 +25,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import java.io.IOException; import java.util.ArrayList; @@ -76,8 +77,8 @@ public Number getKeyAsNumber() { } @Override - public int compareTerm(Terms.Bucket other) { - return Long.compare(term, ((Number) other.getKey()).longValue()); + public int compareKey(Bucket other) { + return Long.compare(term, other.term); } @Override @@ -105,7 +106,7 @@ public int hashCode() { } } - public LongTerms(String name, Terms.Order order, int requiredSize, long minDocCount, List pipelineAggregators, + public LongTerms(String name, BucketOrder order, int requiredSize, long minDocCount, List pipelineAggregators, Map metaData, DocValueFormat format, int shardSize, boolean showTermDocCountError, long otherDocCount, List buckets, long docCountError) { super(name, order, requiredSize, minDocCount, pipelineAggregators, metaData, format, shardSize, showTermDocCountError, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java index 8f8e2f3079b67..752666de67872 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java @@ -32,6 +32,8 @@ import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import 
org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude.LongFilter; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -50,7 +52,7 @@ public class LongTermsAggregator extends TermsAggregator { private LongFilter longFilter; public LongTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, DocValueFormat format, - Terms.Order order, BucketCountThresholds bucketCountThresholds, SearchContext aggregationContext, Aggregator parent, + BucketOrder order, BucketCountThresholds bucketCountThresholds, SearchContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, IncludeExclude.LongFilter longFilter, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, bucketCountThresholds, order, format, subAggCollectMode, pipelineAggregators, metaData); @@ -106,7 +108,7 @@ public void collect(int doc, long owningBucketOrdinal) throws IOException { public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { assert owningBucketOrdinal == 0; - if (bucketCountThresholds.getMinDocCount() == 0 && (order != InternalOrder.COUNT_DESC || bucketOrds.size() < bucketCountThresholds.getRequiredSize())) { + if (bucketCountThresholds.getMinDocCount() == 0 && (InternalOrder.isCountDesc(order) == false || bucketOrds.size() < bucketCountThresholds.getRequiredSize())) { // we need to fill-in the blanks for (LeafReaderContext ctx : context.searcher().getTopReaderContext().leaves()) { final SortedNumericDocValues values = getValues(valuesSource, ctx); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java index 049d996c08c2e..8c7f09ebe8322 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java @@ -25,6 +25,7 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import java.io.IOException; import java.util.List; @@ -75,8 +76,8 @@ public String getKeyAsString() { } @Override - public int compareTerm(Terms.Bucket other) { - return termBytes.compareTo(((Bucket) other).termBytes); + public int compareKey(Bucket other) { + return termBytes.compareTo(other.termBytes); } @Override @@ -100,7 +101,7 @@ public int hashCode() { } } - public StringTerms(String name, Terms.Order order, int requiredSize, long minDocCount, List pipelineAggregators, + public StringTerms(String name, BucketOrder order, int requiredSize, long minDocCount, List pipelineAggregators, Map metaData, DocValueFormat format, int shardSize, boolean showTermDocCountError, long otherDocCount, List buckets, long docCountError) { super(name, order, requiredSize, minDocCount, pipelineAggregators, metaData, format, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java 
b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java index 61c46cdfd68a9..6161f7912a8ad 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java @@ -33,6 +33,8 @@ import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; @@ -51,7 +53,7 @@ public class StringTermsAggregator extends AbstractStringTermsAggregator { private final IncludeExclude.StringFilter includeExclude; public StringTermsAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource, - Terms.Order order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, + BucketOrder order, DocValueFormat format, BucketCountThresholds bucketCountThresholds, IncludeExclude.StringFilter includeExclude, SearchContext context, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) throws IOException { @@ -110,7 +112,7 @@ public void collect(int doc, long bucket) throws IOException { public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { assert owningBucketOrdinal == 0; - if (bucketCountThresholds.getMinDocCount() == 0 && (order != InternalOrder.COUNT_DESC || bucketOrds.size() < bucketCountThresholds.getRequiredSize())) { + if (bucketCountThresholds.getMinDocCount() == 0 && (InternalOrder.isCountDesc(order) == false || bucketOrds.size() < bucketCountThresholds.getRequiredSize())) { // we need to fill-in the blanks for (LeafReaderContext ctx : context.searcher().getTopReaderContext().leaves()) { final SortedBinaryDocValues values = valuesSource.bytesValues(ctx); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java index 166ece4e1122d..f14ecae7d165c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java @@ -18,12 +18,8 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; -import java.util.Arrays; -import java.util.Comparator; import java.util.List; /** @@ -39,8 +35,6 @@ interface Bucket extends MultiBucketsAggregation.Bucket { Number getKeyAsNumber(); - int compareTerm(Terms.Bucket other); - long getDocCountError(); } @@ -65,84 +59,4 @@ interface Bucket extends MultiBucketsAggregation.Bucket { * it to the top buckets. 
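The static factories removed from Terms.Order here (count, term, aggregation, compound) are assumed to have direct BucketOrder equivalents; only compound, count and key appear verbatim in this section, so the aggregation(...) call in the sketch below is an assumption, and the field and aggregation names are illustrative:

    import org.elasticsearch.search.aggregations.AggregationBuilders;
    import org.elasticsearch.search.aggregations.BucketOrder;
    import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;

    class SubAggregationOrderExample {
        // Terms buckets ordered by the std_deviation of a stats sub-aggregation, descending.
        static TermsAggregationBuilder tagsByScoreSpread() {
            return AggregationBuilders.terms("tags")
                    .field("tag")
                    .subAggregation(AggregationBuilders.extendedStats("tag_stats").field("score"))
                    .order(BucketOrder.aggregation("tag_stats", "std_deviation", false));
        }
    }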
*/ long getSumOfOtherDocCounts(); - - /** - * Determines the order by which the term buckets will be sorted - */ - abstract class Order implements ToXContent { - - /** - * @return a bucket ordering strategy that sorts buckets by their document counts (ascending or descending) - */ - public static Order count(boolean asc) { - return asc ? InternalOrder.COUNT_ASC : InternalOrder.COUNT_DESC; - } - - /** - * @return a bucket ordering strategy that sorts buckets by their terms (ascending or descending) - */ - public static Order term(boolean asc) { - return asc ? InternalOrder.TERM_ASC : InternalOrder.TERM_DESC; - } - - /** - * Creates a bucket ordering strategy which sorts buckets based on a single-valued calc get - * - * @param path the name of the get - * @param asc The direction of the order (ascending or descending) - */ - public static Order aggregation(String path, boolean asc) { - return new InternalOrder.Aggregation(path, asc); - } - - /** - * Creates a bucket ordering strategy which sorts buckets based on a multi-valued calc get - * - * @param aggregationName the name of the get - * @param metricName The name of the value of the multi-value get by which the sorting will be applied - * @param asc The direction of the order (ascending or descending) - */ - public static Order aggregation(String aggregationName, String metricName, boolean asc) { - return new InternalOrder.Aggregation(aggregationName + "." + metricName, asc); - } - - /** - * Creates a bucket ordering strategy which sorts buckets based multiple criteria - * - * @param orders a list of {@link Order} objects to sort on, in order of priority - */ - public static Order compound(List orders) { - return new InternalOrder.CompoundOrder(orders); - } - - /** - * Creates a bucket ordering strategy which sorts buckets based multiple criteria - * - * @param orders a list of {@link Order} parameters to sort on, in order of priority - */ - public static Order compound(Order... orders) { - return compound(Arrays.asList(orders)); - } - - /** - * @return A comparator for the bucket based on the given terms aggregator. The comparator is used in two phases: - * - * - aggregation phase, where each shard builds a list of term buckets to be sent to the coordinating node. - * In this phase, the passed in aggregator will be the terms aggregator that aggregates the buckets on the - * shard level. - * - * - reduce phase, where the coordinating node gathers all the buckets from all the shards and reduces them - * to a final bucket list. 
In this case, the passed in aggregator will be {@code null} - */ - protected abstract Comparator comparator(Aggregator aggregator); - - abstract byte id(); - - @Override - public abstract int hashCode(); - - @Override - public abstract boolean equals(Object obj); - - } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java index 944f9fd96a402..cb239781e3e40 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java @@ -19,20 +19,20 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.InternalOrder.CompoundOrder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; @@ -82,7 +82,7 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder SubAggCollectionMode.parse(p.text()), SubAggCollectionMode.KEY, ObjectParser.ValueType.STRING); - PARSER.declareObjectArray(TermsAggregationBuilder::order, TermsAggregationBuilder::parseOrderParam, + PARSER.declareObjectArray(TermsAggregationBuilder::order, InternalOrder.Parser::parseOrderParam, TermsAggregationBuilder.ORDER_FIELD); PARSER.declareField((b, v) -> b.includeExclude(IncludeExclude.merge(v, b.includeExclude())), @@ -96,7 +96,7 @@ public static AggregationBuilder parse(String aggregationName, QueryParseContext return PARSER.parse(context.parser(), new TermsAggregationBuilder(aggregationName, null), context); } - private Terms.Order order = Terms.Order.compound(Terms.Order.count(false), Terms.Order.term(true)); + private BucketOrder order = BucketOrder.compound(BucketOrder.count(false)); // automatically adds tie-breaker key asc order private IncludeExclude includeExclude = null; private String executionHint = null; private SubAggCollectionMode collectMode = null; @@ -132,7 +132,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(collectMode); out.writeOptionalString(executionHint); out.writeOptionalWriteable(includeExclude); - InternalOrder.Streams.writeOrder(order, out); + 
order.writeTo(out); out.writeBoolean(showTermDocCountError); } @@ -189,32 +189,37 @@ public TermsAggregationBuilder shardMinDocCount(long shardMinDocCount) { return this; } - /** - * Sets the order in which the buckets will be returned. - */ - public TermsAggregationBuilder order(Terms.Order order) { + /** Set a new order on this builder and return the builder so that calls + * can be chained. A tie-breaker may be added to avoid non-deterministic ordering. */ + public TermsAggregationBuilder order(BucketOrder order) { if (order == null) { throw new IllegalArgumentException("[order] must not be null: [" + name + "]"); } - this.order = order; + if(order instanceof CompoundOrder || InternalOrder.isKeyOrder(order)) { + this.order = order; // if order already contains a tie-breaker we are good to go + } else { // otherwise add a tie-breaker by using a compound order + this.order = BucketOrder.compound(order); + } return this; } /** - * Sets the order in which the buckets will be returned. + * Sets the order in which the buckets will be returned. A tie-breaker may be added to avoid non-deterministic + * ordering. */ - public TermsAggregationBuilder order(List orders) { + public TermsAggregationBuilder order(List orders) { if (orders == null) { throw new IllegalArgumentException("[orders] must not be null: [" + name + "]"); } - order(Terms.Order.compound(orders)); + // if the list only contains one order use that to avoid inconsistent xcontent + order(orders.size() > 1 ? BucketOrder.compound(orders) : orders.get(0)); return this; } /** * Gets the order in which the buckets will be returned. */ - public Terms.Order order() { + public BucketOrder order() { return order; } @@ -327,45 +332,4 @@ public String getType() { return NAME; } - private static Terms.Order parseOrderParam(XContentParser parser, QueryParseContext context) throws IOException { - XContentParser.Token token; - Terms.Order orderParam = null; - String orderKey = null; - boolean orderAsc = false; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - orderKey = parser.currentName(); - } else if (token == XContentParser.Token.VALUE_STRING) { - String dir = parser.text(); - if ("asc".equalsIgnoreCase(dir)) { - orderAsc = true; - } else if ("desc".equalsIgnoreCase(dir)) { - orderAsc = false; - } else { - throw new ParsingException(parser.getTokenLocation(), - "Unknown terms order direction [" + dir + "]"); - } - } else { - throw new ParsingException(parser.getTokenLocation(), - "Unexpected token " + token + " for [order]"); - } - } - if (orderKey == null) { - throw new ParsingException(parser.getTokenLocation(), - "Must specify at least one field for [order]"); - } else { - orderParam = resolveOrder(orderKey, orderAsc); - } - return orderParam; - } - - static Terms.Order resolveOrder(String key, boolean asc) { - if ("_term".equals(key)) { - return Order.term(asc); - } - if ("_count".equals(key)) { - return Order.count(asc); - } - return Order.aggregation(key, asc); - } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java index 78d6cde211cde..4cada8f70bf38 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java @@ -24,19 +24,26 @@ import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.Comparators; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; -import org.elasticsearch.search.aggregations.bucket.terms.InternalOrder.Aggregation; -import org.elasticsearch.search.aggregations.bucket.terms.InternalOrder.CompoundOrder; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; +import org.elasticsearch.search.aggregations.InternalOrder.Aggregation; +import org.elasticsearch.search.aggregations.InternalOrder.CompoundOrder; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -168,12 +175,12 @@ public boolean equals(Object obj) { protected final DocValueFormat format; protected final BucketCountThresholds bucketCountThresholds; - protected final Terms.Order order; + protected final BucketOrder order; protected final Set aggsUsedForSorting = new HashSet<>(); protected final SubAggCollectionMode collectMode; public TermsAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent, - BucketCountThresholds bucketCountThresholds, Terms.Order order, DocValueFormat format, SubAggCollectionMode collectMode, + BucketCountThresholds bucketCountThresholds, BucketOrder order, DocValueFormat format, SubAggCollectionMode collectMode, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, context, parent, pipelineAggregators, metaData); this.bucketCountThresholds = bucketCountThresholds; @@ -186,7 +193,7 @@ public TermsAggregator(String name, AggregatorFactories factories, SearchContext aggsUsedForSorting.add(path.resolveTopmostAggregator(this)); } else if (order instanceof CompoundOrder) { CompoundOrder compoundOrder = (CompoundOrder) order; - for (Terms.Order orderElement : compoundOrder.orderElements()) { + for (BucketOrder orderElement : compoundOrder.orderElements()) { if (orderElement instanceof Aggregation) { AggregationPath path = ((Aggregation) orderElement).path(); aggsUsedForSorting.add(path.resolveTopmostAggregator(this)); @@ -195,6 +202,58 @@ public TermsAggregator(String name, AggregatorFactories factories, SearchContext } } + /** + * Internal Optimization for ordering {@link InternalTerms.Bucket}s by a sub aggregation. + *
<p>
+ * In this phase, if the order is based on sub-aggregations, we need to use a different comparator + * to avoid constructing buckets for ordering purposes (we can potentially have a lot of buckets and building + * them will cause loads of redundant object constructions). The "special" comparators here will fetch the + * sub aggregation values directly from the sub aggregators, bypassing bucket creation. Note that the comparator + * attached to the order will still be used in the reduce phase of the Aggregation. + * + * @param path determines which sub aggregation to use for ordering. + * @param asc {@code true} for ascending order, {@code false} for descending. + * @return {@code Comparator} to order {@link InternalTerms.Bucket}s in the desired order. + */ + public Comparator bucketComparator(AggregationPath path, boolean asc) { + + final Aggregator aggregator = path.resolveAggregator(this); + final String key = path.lastPathElement().key; + + if (aggregator instanceof SingleBucketAggregator) { + assert key == null : "this should be picked up before the aggregation is executed - on validate"; + return (b1, b2) -> { + int mul = asc ? 1 : -1; + int v1 = ((SingleBucketAggregator) aggregator).bucketDocCount(((InternalTerms.Bucket) b1).bucketOrd); + int v2 = ((SingleBucketAggregator) aggregator).bucketDocCount(((InternalTerms.Bucket) b2).bucketOrd); + return mul * (v1 - v2); + }; + } + + // among bucket aggregators, only single-bucket aggregators are supported for ordering (handled above) + assert !(aggregator instanceof BucketsAggregator) : "this should be picked up before the aggregation is executed - on validate"; + + if (aggregator instanceof NumericMetricsAggregator.MultiValue) { + assert key != null : "this should be picked up before the aggregation is executed - on validate"; + return (b1, b2) -> { + double v1 = ((NumericMetricsAggregator.MultiValue) aggregator).metric(key, ((InternalTerms.Bucket) b1).bucketOrd); + double v2 = ((NumericMetricsAggregator.MultiValue) aggregator).metric(key, ((InternalTerms.Bucket) b2).bucketOrd); + // some metrics may return NaN (e.g. avg, variance, etc.) in which case we'd like to push all of those to + // the bottom + return Comparators.compareDiscardNaN(v1, v2, asc); + }; + } + + // single-value metrics agg + return (b1, b2) -> { + double v1 = ((NumericMetricsAggregator.SingleValue) aggregator).metric(((InternalTerms.Bucket) b1).bucketOrd); + double v2 = ((NumericMetricsAggregator.SingleValue) aggregator).metric(((InternalTerms.Bucket) b2).bucketOrd); + // some metrics may return NaN (e.g. avg, variance, etc.)
in which case we'd like to push all of those to + // the bottom + return Comparators.compareDiscardNaN(v1, v2, asc); + }; + } + @Override protected boolean shouldDefer(Aggregator aggregator) { return collectMode == SubAggCollectionMode.BREADTH_FIRST diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 10fef55455551..9a06dfe66f592 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -33,6 +33,8 @@ import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; @@ -44,14 +46,14 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory { - private final Terms.Order order; + private final BucketOrder order; private final IncludeExclude includeExclude; private final String executionHint; private final SubAggCollectionMode collectMode; private final TermsAggregator.BucketCountThresholds bucketCountThresholds; private boolean showTermDocCountError; - public TermsAggregatorFactory(String name, ValuesSourceConfig config, Terms.Order order, + public TermsAggregatorFactory(String name, ValuesSourceConfig config, BucketOrder order, IncludeExclude includeExclude, String executionHint, SubAggCollectionMode collectMode, TermsAggregator.BucketCountThresholds bucketCountThresholds, boolean showTermDocCountError, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { @@ -90,7 +92,7 @@ protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator pare return asMultiBucketAggregator(this, context, parent); } BucketCountThresholds bucketCountThresholds = new BucketCountThresholds(this.bucketCountThresholds); - if (!(order == InternalOrder.TERM_ASC || order == InternalOrder.TERM_DESC) + if (InternalOrder.isKeyOrder(order) == false && bucketCountThresholds.getShardSize() == TermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) { // The user has not made a shardSize selection. Use default // heuristic to avoid any wrong-ranking caused by distributed @@ -129,7 +131,7 @@ protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator pare // to be unbounded and most instances may only aggregate few // documents, so use hashed based // global ordinals to keep the bucket ords dense. 
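For illustration of the reworked ordering API that this patch wires through the terms aggregation, here is a minimal, hypothetical usage sketch from the Java API. The BucketOrder factory methods, BucketOrder.compound(...), and TermsAggregationBuilder.order(...) come from the diff above; the class, method, index field, and sub-aggregation names ("genres", "genre", "avg_price", "price") are invented for the example.

import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;

public class TermsOrderUsageSketch {

    // Order genre buckets by a sub-aggregation; per the order(BucketOrder) logic above,
    // a single non-key order is wrapped in a CompoundOrder so a key tie-breaker applies.
    public static TermsAggregationBuilder genresByAveragePrice() {
        return AggregationBuilders.terms("genres")
                .field("genre")
                .order(BucketOrder.aggregation("avg_price", false))
                .subAggregation(AggregationBuilders.avg("avg_price").field("price"));
    }

    // Explicit compound order: doc_count descending, then key ascending as tie-breaker.
    public static TermsAggregationBuilder genresByCountThenKey() {
        return AggregationBuilders.terms("genres")
                .field("genre")
                .order(BucketOrder.compound(BucketOrder.count(false), BucketOrder.key(true)));
    }
}

Passing a single non-key order relies on the builder's tie-breaker wrapping shown earlier in this patch; spelling the compound order out, as in the second method, simply makes that tie-breaker explicit.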
- // Additionally, if using partitioned terms the regular global + // Additionally, if using partitioned terms the regular global // ordinals would be sparse so we opt for hash if (Aggregator.descendsFromBucketAggregator(parent) || (includeExclude != null && includeExclude.isPartitionBased())) { @@ -223,7 +225,7 @@ public enum ExecutionMode { MAP(new ParseField("map")) { @Override - Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource, Terms.Order order, + Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource, BucketOrder order, DocValueFormat format, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, SearchContext context, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) @@ -242,7 +244,7 @@ boolean needsGlobalOrdinals() { GLOBAL_ORDINALS(new ParseField("global_ordinals")) { @Override - Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource, Terms.Order order, + Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource, BucketOrder order, DocValueFormat format, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, SearchContext context, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) @@ -262,7 +264,7 @@ boolean needsGlobalOrdinals() { GLOBAL_ORDINALS_HASH(new ParseField("global_ordinals_hash")) { @Override - Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource, Terms.Order order, + Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource, BucketOrder order, DocValueFormat format, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, SearchContext context, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) @@ -281,7 +283,7 @@ boolean needsGlobalOrdinals() { GLOBAL_ORDINALS_LOW_CARDINALITY(new ParseField("global_ordinals_low_cardinality")) { @Override - Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource, Terms.Order order, + Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource, BucketOrder order, DocValueFormat format, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, SearchContext context, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) @@ -319,7 +321,7 @@ public static ExecutionMode fromString(String value) { this.parseField = parseField; } - abstract Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource, Terms.Order order, + abstract Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource, BucketOrder order, DocValueFormat format, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, SearchContext context, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List pipelineAggregators, Map metaData) diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java index 
40cbacd37e698..6362f8c347b3a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java @@ -25,6 +25,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import java.io.IOException; import java.util.Collections; @@ -50,7 +51,7 @@ private Bucket(long docCount, InternalAggregations aggregations, boolean showDoc } } - public UnmappedTerms(String name, Terms.Order order, int requiredSize, long minDocCount, + public UnmappedTerms(String name, BucketOrder order, int requiredSize, long minDocCount, List pipelineAggregators, Map metaData) { super(name, order, requiredSize, minDocCount, pipelineAggregators, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java index 746e0e5e16106..995381373ab40 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java @@ -33,7 +33,7 @@ import java.util.List; /** - * A path that can be used to sort/order buckets (in some multi-bucket aggregations, eg terms & histogram) based on + * A path that can be used to sort/order buckets (in some multi-bucket aggregations, e.g. terms & histogram) based on * sub-aggregations. The path may point to either a single-bucket aggregation or a metrics aggregation. If the path * points to a single-bucket aggregation, the sort will be applied based on the {@code doc_count} of the bucket. If this * path points to a metrics aggregation, if it's a single-value metrics (eg. avg, max, min, etc..) the sort will be @@ -281,14 +281,15 @@ public Aggregator resolveTopmostAggregator(Aggregator root) { /** * Validates this path over the given aggregator as a point of reference. * - * @param root The point of reference of this path + * @param root The point of reference of this path + * @throws AggregationExecutionException on validation error */ - public void validate(Aggregator root) { + public void validate(Aggregator root) throws AggregationExecutionException { Aggregator aggregator = root; for (int i = 0; i < pathElements.size(); i++) { aggregator = aggregator.subAggregator(pathElements.get(i).name); if (aggregator == null) { - throw new AggregationExecutionException("Invalid term-aggregator order path [" + this + "]. Unknown aggregation [" + throw new AggregationExecutionException("Invalid aggregator order path [" + this + "]. Unknown aggregation [" + pathElements.get(i).name + "]"); } if (i < pathElements.size() - 1) { @@ -296,16 +297,16 @@ public void validate(Aggregator root) { // we're in the middle of the path, so the aggregator can only be a single-bucket aggregator if (!(aggregator instanceof SingleBucketAggregator)) { - throw new AggregationExecutionException("Invalid terms aggregation order path [" + this + - "]. Terms buckets can only be sorted on a sub-aggregator path " + + throw new AggregationExecutionException("Invalid aggregation order path [" + this + + "]. 
Buckets can only be sorted on a sub-aggregator path " + "that is built out of zero or more single-bucket aggregations within the path and a final " + "single-bucket or a metrics aggregation at the path end. Sub-path [" + subPath(0, i + 1) + "] points to non single-bucket aggregation"); } if (pathElements.get(i).key != null) { - throw new AggregationExecutionException("Invalid terms aggregation order path [" + this + - "]. Terms buckets can only be sorted on a sub-aggregator path " + + throw new AggregationExecutionException("Invalid aggregation order path [" + this + + "]. Buckets can only be sorted on a sub-aggregator path " + "that is built out of zero or more single-bucket aggregations within the path and a " + "final single-bucket or a metrics aggregation at the path end. Sub-path [" + subPath(0, i + 1) + "] points to non single-bucket aggregation"); @@ -314,8 +315,8 @@ public void validate(Aggregator root) { } boolean singleBucket = aggregator instanceof SingleBucketAggregator; if (!singleBucket && !(aggregator instanceof NumericMetricsAggregator)) { - throw new AggregationExecutionException("Invalid terms aggregation order path [" + this + - "]. Terms buckets can only be sorted on a sub-aggregator path " + + throw new AggregationExecutionException("Invalid aggregation order path [" + this + + "]. Buckets can only be sorted on a sub-aggregator path " + "that is built out of zero or more single-bucket aggregations within the path and a final " + "single-bucket or a metrics aggregation at the path end."); } @@ -324,7 +325,7 @@ public void validate(Aggregator root) { if (singleBucket) { if (lastToken.key != null && !"doc_count".equals(lastToken.key)) { - throw new AggregationExecutionException("Invalid terms aggregation order path [" + this + + throw new AggregationExecutionException("Invalid aggregation order path [" + this + "]. Ordering on a single-bucket aggregation can only be done on its doc_count. " + "Either drop the key (a la \"" + lastToken.name + "\") or change it to \"doc_count\" (a la \"" + lastToken.name + ".doc_count\")"); } @@ -333,7 +334,7 @@ public void validate(Aggregator root) { if (aggregator instanceof NumericMetricsAggregator.SingleValue) { if (lastToken.key != null && !"value".equals(lastToken.key)) { - throw new AggregationExecutionException("Invalid terms aggregation order path [" + this + + throw new AggregationExecutionException("Invalid aggregation order path [" + this + "]. Ordering on a single-value metrics aggregation can only be done on its value. " + "Either drop the key (a la \"" + lastToken.name + "\") or change it to \"value\" (a la \"" + lastToken.name + ".value\")"); } @@ -342,12 +343,12 @@ public void validate(Aggregator root) { // the aggregator must be of a multi-value metrics type if (lastToken.key == null) { - throw new AggregationExecutionException("Invalid terms aggregation order path [" + this + + throw new AggregationExecutionException("Invalid aggregation order path [" + this + "]. When ordering on a multi-value metrics aggregation a metric name must be specified"); } if (!((NumericMetricsAggregator.MultiValue) aggregator).hasMetric(lastToken.key)) { - throw new AggregationExecutionException("Invalid terms aggregation order path [" + this + + throw new AggregationExecutionException("Invalid aggregation order path [" + this + "]. 
Unknown metric name [" + lastToken.key + "] on multi-value metrics aggregation [" + lastToken.name + "]"); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java index f2d66409e9e42..c321ffa965a57 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.action.termvectors.TermVectorsResponse; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lucene.search.MoreLikeThisQuery; @@ -64,6 +65,8 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase { + private static final String[] SHUFFLE_PROTECTED_FIELDS = new String[]{Item.Field.DOC.getPreferredName()}; + private static String[] randomFields; private static Item[] randomLikeItems; private static Item[] randomUnlikeItems; @@ -204,6 +207,16 @@ protected MoreLikeThisQueryBuilder doCreateTestQueryBuilder() { return queryBuilder; } + /** + * we don't want to shuffle the "doc" field internally in {@link #testFromXContent()} because even though the + * documents would be functionally the same, their {@link BytesReference} representation isn't and thats what we + * compare when check for equality of the original and the shuffled builder + */ + @Override + protected String[] shuffleProtectedFields() { + return SHUFFLE_PROTECTED_FIELDS; + } + @Override protected Set getObjectsHoldingArbitraryContent() { //doc contains arbitrary content, anything can be added to it and no exception will be thrown diff --git a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java index 67ddcbd6b0e5a..90d11efb11c22 100644 --- a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java @@ -83,6 +83,9 @@ public class FunctionScoreQueryBuilderTests extends AbstractQueryTestCase { + private static final String[] SHUFFLE_PROTECTED_FIELDS = new String[] {Script.PARAMS_PARSE_FIELD.getPreferredName(), + ExponentialDecayFunctionBuilder.NAME, LinearDecayFunctionBuilder.NAME, GaussDecayFunctionBuilder.NAME}; + @Override protected Collection> getPlugins() { return Collections.singleton(TestPlugin.class); @@ -106,6 +109,12 @@ protected FunctionScoreQueryBuilder doCreateTestQueryBuilder() { return functionScoreQueryBuilder; } + @Override + protected String[] shuffleProtectedFields() { + // do not shuffle fields that may contain arbitrary content + return SHUFFLE_PROTECTED_FIELDS; + } + @Override protected Set getObjectsHoldingArbitraryContent() { //script_score.script.params can contain arbitrary parameters. 
no error is expected when adding additional objects diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/InternalOrderTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/InternalOrderTests.java new file mode 100644 index 0000000000000..43d10af99fbe9 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/aggregations/InternalOrderTests.java @@ -0,0 +1,158 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations; + +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.aggregations.InternalOrder.CompoundOrder; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class InternalOrderTests extends AbstractSerializingTestCase { + + @Override + protected BucketOrder createTestInstance() { + if (randomBoolean()) { + return getRandomOrder(); + } else { + List orders = new ArrayList<>(); + for (int i = 0; i < randomInt(3); i++) { + orders.add(getRandomOrder()); + } + return BucketOrder.compound(orders); + } + } + + private BucketOrder getRandomOrder() { + switch(randomInt(2)) { + case 0: return BucketOrder.key(randomBoolean()); + case 1: return BucketOrder.count(randomBoolean()); + default: return BucketOrder.aggregation(randomAlphaOfLength(10), randomBoolean()); + } + } + + @Override + protected Reader instanceReader() { + return InternalOrder.Streams::readOrder; + } + + @Override + protected BucketOrder doParseInstance(XContentParser parser) throws IOException { + Token token = parser.nextToken(); + if (token == Token.START_OBJECT) { + return InternalOrder.Parser.parseOrderParam(parser, null); + } + if (token == Token.START_ARRAY) { + List orders = new ArrayList<>(); + while (parser.nextToken() == Token.START_OBJECT) { + orders.add(InternalOrder.Parser.parseOrderParam(parser, null)); + } + return BucketOrder.compound(orders); + } + return null; + } + + @Override + protected BucketOrder assertSerialization(BucketOrder testInstance) throws IOException { + // identical behavior to AbstractWireSerializingTestCase, except assertNotSame is only called for + // compound and aggregation order because _key and _count orders are static instances. 
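As a sketch of the wire round-trip this test asserts: the class and method names below are hypothetical, and only order.writeTo(...) and InternalOrder.Streams.readOrder(...) are taken from the patch.

import java.io.IOException;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalOrder;

public class BucketOrderRoundTripSketch {

    // Serialize a BucketOrder and read it back. Compound and aggregation orders come back
    // as equal copies, while _key and _count orders resolve to the same static instances.
    public static BucketOrder roundTrip(BucketOrder order) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            order.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                return InternalOrder.Streams.readOrder(in);
            }
        }
    }
}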
+ BucketOrder deserializedInstance = copyInstance(testInstance); + assertEquals(testInstance, deserializedInstance); + assertEquals(testInstance.hashCode(), deserializedInstance.hashCode()); + if(testInstance instanceof CompoundOrder || testInstance instanceof InternalOrder.Aggregation) { + assertNotSame(testInstance, deserializedInstance); + } + return deserializedInstance; + } + + @Override + protected void assertParsedInstance(XContentType xContentType, BytesReference instanceAsBytes, BucketOrder expectedInstance) + throws IOException { + // identical behavior to AbstractSerializingTestCase, except assertNotSame is only called for + // compound and aggregation order because _key and _count orders are static instances. + XContentParser parser = createParser(XContentFactory.xContent(xContentType), instanceAsBytes); + BucketOrder newInstance = parseInstance(parser); + assertEquals(expectedInstance, newInstance); + assertEquals(expectedInstance.hashCode(), newInstance.hashCode()); + if(expectedInstance instanceof CompoundOrder || expectedInstance instanceof InternalOrder.Aggregation) { + assertNotSame(newInstance, expectedInstance); + } + } + + public void testHistogramOrderBwc() throws IOException { + for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { + BucketOrder order = createTestInstance(); + Version bwcVersion = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), + VersionUtils.getPreviousVersion(Version.V_6_0_0_alpha2_UNRELEASED)); + boolean bwcOrderFlag = randomBoolean(); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(bwcVersion); + InternalOrder.Streams.writeHistogramOrder(order, out, bwcOrderFlag); + try (StreamInput in = out.bytes().streamInput()) { + in.setVersion(bwcVersion); + BucketOrder actual = InternalOrder.Streams.readHistogramOrder(in, bwcOrderFlag); + BucketOrder expected = order; + if (order instanceof CompoundOrder) { + expected = ((CompoundOrder) order).orderElements.get(0); + } + assertEquals(expected, actual); + } + } + } + } + + public void testAggregationOrderEqualsAndHashCode() { + String path = randomAlphaOfLength(10); + boolean asc = randomBoolean(); + BucketOrder o1 = BucketOrder.aggregation(path, asc); + BucketOrder o2 = BucketOrder.aggregation(path + "test", asc); + BucketOrder o3 = BucketOrder.aggregation(path, !asc); + BucketOrder o4 = BucketOrder.aggregation(path, asc); + assertNotEquals(o1, o2); + assertNotEquals(o1.hashCode(), o2.hashCode()); + assertNotEquals(o1, o3); + assertNotEquals(o1.hashCode(), o3.hashCode()); + assertEquals(o1, o4); + assertEquals(o1.hashCode(), o4.hashCode()); + + o1 = InternalOrder.compound(o1); + o2 = InternalOrder.compound(o2); + o3 = InternalOrder.compound(o3); + assertNotEquals(o1, o2); + assertNotEquals(o1.hashCode(), o2.hashCode()); + assertNotEquals(o1, o2); + assertNotEquals(o1.hashCode(), o2.hashCode()); + assertNotEquals(o1, o3); + assertNotEquals(o1.hashCode(), o3.hashCode()); + assertNotEquals(o1, o4); + assertNotEquals(o1.hashCode(), o4.hashCode()); + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index cef4cb07f884f..bedd8610a401e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -18,7 +18,9 @@ */ package org.elasticsearch.search.aggregations.bucket; +import 
org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.Joda; @@ -30,13 +32,16 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.DateScriptMocks.DateScriptsMockPlugin; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.metrics.avg.Avg; import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; import org.joda.time.DateTime; @@ -57,6 +62,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.max; @@ -73,6 +79,8 @@ @ESIntegTestCase.SuiteScopeTestCase public class DateHistogramIT extends ESIntegTestCase { + static Map> expectedMultiSortBuckets; + private DateTime date(int month, int day) { return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC); } @@ -98,6 +106,7 @@ private IndexRequestBuilder indexDoc(int month, int day, int value) throws Excep return client().prepareIndex("idx", "type").setSource(jsonBuilder() .startObject() .field("value", value) + .field("constant", 1) .field("date", date(month, day)) .startArray("dates").value(date(month, day)).value(date(month + 1, day + 1)).endArray() .endObject()); @@ -115,6 +124,9 @@ public void setupSuiteScopeCluster() throws Exception { .field("value", i * 2) .endObject())); } + + getMultiSortDocs(builders); + builders.addAll(Arrays.asList( indexDoc(1, 2, 1), // date: Jan 2, dates: Jan 2, Feb 3 indexDoc(2, 2, 2), // date: Feb 2, dates: Feb 2, Mar 3 @@ -126,6 +138,50 @@ public void setupSuiteScopeCluster() throws Exception { ensureSearchable(); } + private void addExpectedBucket(DateTime key, long docCount, double avg, double sum) { + Map bucketProps = new HashMap<>(); + bucketProps.put("_count", docCount); + bucketProps.put("avg_l", avg); + bucketProps.put("sum_d", sum); + expectedMultiSortBuckets.put(key, bucketProps); + } + + private void getMultiSortDocs(List builders) throws IOException { + expectedMultiSortBuckets = new HashMap<>(); + addExpectedBucket(date(1, 1), 3, 1, 6); + addExpectedBucket(date(1, 2), 3, 2, 6); + addExpectedBucket(date(1, 3), 2, 3, 3); + addExpectedBucket(date(1, 4), 2, 3, 4); + addExpectedBucket(date(1, 5), 2, 5, 3); + addExpectedBucket(date(1, 6), 1, 5, 1); + addExpectedBucket(date(1, 7), 1, 5, 1); + + 
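Alongside these multi-sort fixtures, a hypothetical sketch of the date_histogram compound ordering that the tests below exercise; the builder calls mirror the test code, while the class and method names and the "date"/"l"/"avg_l" field names simply follow the fixture.

import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;

public class DateHistogramCompoundOrderSketch {

    // Daily buckets ordered by the avg sub-aggregation, ties broken by key descending.
    public static DateHistogramAggregationBuilder dailyByAvgThenKeyDesc() {
        return AggregationBuilders.dateHistogram("histo")
                .field("date")
                .dateHistogramInterval(DateHistogramInterval.DAY)
                .order(BucketOrder.compound(
                        BucketOrder.aggregation("avg_l", true),
                        BucketOrder.key(false)))
                .subAggregation(AggregationBuilders.avg("avg_l").field("l"));
    }
}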
assertAcked(client().admin().indices().prepareCreate("sort_idx") + .addMapping("type", "date", "type=date").get()); + for (int i = 1; i <= 3; i++) { + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 1)).field("l", 1).field("d", i).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 2)).field("l", 2).field("d", i).endObject())); + } + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 3)).field("l", 3).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 3).plusHours(1)).field("l", 3).field("d", 2).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 4)).field("l", 3).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 4).plusHours(2)).field("l", 3).field("d", 3).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 5)).field("l", 5).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 5).plusHours(12)).field("l", 5).field("d", 2).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 6)).field("l", 5).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field("date", date(1, 7)).field("l", 5).field("d", 1).endObject())); + } + @Override protected Collection> nodePlugins() { return Arrays.asList( @@ -281,7 +337,7 @@ public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { .addAggregation(dateHistogram("histo") .field("date") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.KEY_ASC)) + .order(BucketOrder.key(true))) .execute().actionGet(); assertSearchResponse(response); @@ -304,7 +360,7 @@ public void testSingleValuedFieldOrderedByKeyDesc() throws Exception { .addAggregation(dateHistogram("histo") .field("date") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.KEY_DESC)) + .order(BucketOrder.key(false))) .execute().actionGet(); assertSearchResponse(response); @@ -326,7 +382,7 @@ public void testSingleValuedFieldOrderedByCountAsc() throws Exception { .addAggregation(dateHistogram("histo") .field("date") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.COUNT_ASC)) + .order(BucketOrder.count(true))) .execute().actionGet(); assertSearchResponse(response); @@ -348,7 +404,7 @@ public void testSingleValuedFieldOrderedByCountDesc() throws Exception { .addAggregation(dateHistogram("histo") .field("date") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.COUNT_DESC)) + .order(BucketOrder.count(false))) .execute().actionGet(); assertSearchResponse(response); @@ -428,7 +484,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { .addAggregation(dateHistogram("histo") .field("date") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.aggregation("sum", true)) + .order(BucketOrder.aggregation("sum", true)) 
.subAggregation(max("sum").field("value"))) .execute().actionGet(); @@ -451,7 +507,7 @@ public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception .addAggregation(dateHistogram("histo") .field("date") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.aggregation("sum", false)) + .order(BucketOrder.aggregation("sum", false)) .subAggregation(max("sum").field("value"))) .execute().actionGet(); @@ -474,7 +530,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws .addAggregation(dateHistogram("histo") .field("date") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.aggregation("stats", "sum", false)) + .order(BucketOrder.aggregation("stats", "sum", false)) .subAggregation(stats("stats").field("value"))) .execute().actionGet(); @@ -492,6 +548,60 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws } } + public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(dateHistogram("histo") + .field("date") + .dateHistogramInterval(DateHistogramInterval.MONTH) + .order(BucketOrder.aggregation("max_constant", randomBoolean())) + .subAggregation(max("max_constant").field("constant"))) + .execute().actionGet(); + + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(3)); + + int i = 1; + for (Histogram.Bucket bucket : histo.getBuckets()) { + assertThat(bucket.getKey(), equalTo(date(i, 1))); + i++; + } + } + + public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { + boolean asc = true; + try { + client() + .prepareSearch("idx") + .addAggregation( + dateHistogram("histo").field("date") + .dateHistogramInterval(DateHistogramInterval.MONTH) + .order(BucketOrder.aggregation("inner_histo>avg", asc)) + .subAggregation(dateHistogram("inner_histo") + .dateHistogramInterval(DateHistogramInterval.MONTH) + .field("dates") + .subAggregation(avg("avg").field("value")))) + .execute().actionGet(); + fail("Expected an exception"); + } catch (SearchPhaseExecutionException e) { + ElasticsearchException[] rootCauses = e.guessRootCauses(); + if (rootCauses.length == 1) { + ElasticsearchException rootCause = rootCauses[0]; + if (rootCause instanceof AggregationExecutionException) { + AggregationExecutionException aggException = (AggregationExecutionException) rootCause; + assertThat(aggException.getMessage(), Matchers.startsWith("Invalid aggregation order path")); + } else { + throw e; + } + } else { + throw e; + } + } + } + public void testSingleValuedFieldWithValueScript() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "date"); @@ -583,12 +693,12 @@ public void testMultiValuedField() throws Exception { assertThat(bucket.getDocCount(), equalTo(3L)); } - public void testMultiValuedFieldOrderedByKeyDesc() throws Exception { + public void testMultiValuedFieldOrderedByCountDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") .field("dates") .dateHistogramInterval(DateHistogramInterval.MONTH) - .order(Histogram.Order.COUNT_DESC)) + .order(BucketOrder.count(false))) .execute().actionGet(); assertSearchResponse(response); @@ -598,23 +708,26 @@ public void testMultiValuedFieldOrderedByKeyDesc() throws Exception { 
assertThat(histo.getName(), equalTo("histo")); assertThat(histo.getBuckets().size(), equalTo(4)); - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(3, 1))); assertThat(bucket.getDocCount(), equalTo(5L)); bucket = buckets.get(1); assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(2, 1))); assertThat(bucket.getDocCount(), equalTo(3L)); bucket = buckets.get(2); assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(4, 1))); assertThat(bucket.getDocCount(), equalTo(3L)); bucket = buckets.get(3); assertThat(bucket, notNullValue()); + assertThat(bucket.getKey(), equalTo(date(1, 1))); assertThat(bucket.getDocCount(), equalTo(1L)); } @@ -1236,4 +1349,75 @@ public void testDontCacheScripts() throws Exception { assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() .getMissCount(), equalTo(1L)); } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyDesc() throws Exception { + int[] expectedDays = new int[] { 1, 2, 4, 3, 7, 6, 5 }; + assertMultiSortResponse(expectedDays, BucketOrder.aggregation("avg_l", true), BucketOrder.key(false)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyAsc() throws Exception { + int[] expectedDays = new int[] { 1, 2, 3, 4, 5, 6, 7 }; + assertMultiSortResponse(expectedDays, BucketOrder.aggregation("avg_l", true), BucketOrder.key(true)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationDescAndKeyAsc() throws Exception { + int[] expectedDays = new int[] { 5, 6, 7, 3, 4, 2, 1 }; + assertMultiSortResponse(expectedDays, BucketOrder.aggregation("avg_l", false), BucketOrder.key(true)); + } + + public void testSingleValuedFieldOrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception { + int[] expectedDays = new int[] { 6, 7, 3, 4, 5, 1, 2 }; + assertMultiSortResponse(expectedDays, BucketOrder.count(true), BucketOrder.aggregation("avg_l", true)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception { + int[] expectedDays = new int[] { 6, 7, 3, 5, 4, 1, 2 }; + assertMultiSortResponse(expectedDays, BucketOrder.aggregation("sum_d", true), BucketOrder.aggregation("avg_l", true)); + } + + public void testSingleValuedFieldOrderedByThreeCriteria() throws Exception { + int[] expectedDays = new int[] { 2, 1, 4, 5, 3, 6, 7 }; + assertMultiSortResponse(expectedDays, BucketOrder.count(false), BucketOrder.aggregation("sum_d", false), + BucketOrder.aggregation("avg_l", false)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { + int[] expectedDays = new int[] { 1, 2, 3, 4, 5, 6, 7 }; + assertMultiSortResponse(expectedDays, BucketOrder.aggregation("avg_l", true)); + } + + private void assertMultiSortResponse(int[] expectedDays, BucketOrder... 
order) { + DateTime[] expectedKeys = Arrays.stream(expectedDays).mapToObj(d -> date(1, d)).toArray(DateTime[]::new); + SearchResponse response = client() + .prepareSearch("sort_idx") + .setTypes("type") + .addAggregation( + dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")).subAggregation(sum("sum_d").field("d"))).execute().actionGet(); + + assertSearchResponse(response); + + Histogram histogram = response.getAggregations().get("histo"); + assertThat(histogram, notNullValue()); + assertThat(histogram.getName(), equalTo("histo")); + assertThat(histogram.getBuckets().size(), equalTo(expectedKeys.length)); + + int i = 0; + for (Histogram.Bucket bucket : histogram.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo(expectedKeys[i])); + assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); + Avg avg = bucket.getAggregations().get("avg_l"); + assertThat(avg, notNullValue()); + assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); + Sum sum = bucket.getAggregations().get("sum_d"); + assertThat(sum, notNullValue()); + assertThat(sum.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); + i++; + } + } + + private DateTime key(Histogram.Bucket bucket) { + return (DateTime) bucket.getKey(); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java index 76e58c715bfc8..e86b3a553e9c4 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java @@ -20,10 +20,13 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBoundsTests; +import org.elasticsearch.search.aggregations.BucketOrder; + +import java.util.ArrayList; +import java.util.List; public class DateHistogramTests extends BaseAggregationTestCase { @@ -80,29 +83,41 @@ protected DateHistogramAggregationBuilder createTestAggregatorBuilder() { factory.offset(randomIntBetween(0, 100000)); } if (randomBoolean()) { - int branch = randomInt(5); - switch (branch) { + List order = randomOrder(); + if(order.size() == 1 && randomBoolean()) { + factory.order(order.get(0)); + } else { + factory.order(order); + } + } + return factory; + } + + private List randomOrder() { + List orders = new ArrayList<>(); + switch (randomInt(4)) { case 0: - factory.order(Order.COUNT_ASC); + orders.add(BucketOrder.key(randomBoolean())); break; case 1: - factory.order(Order.COUNT_DESC); + orders.add(BucketOrder.count(randomBoolean())); break; case 2: - factory.order(Order.KEY_ASC); + orders.add(BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomBoolean())); break; case 3: - factory.order(Order.KEY_DESC); + orders.add(BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomAlphaOfLengthBetween(3, 20), randomBoolean())); break; case 
4: - factory.order(Order.aggregation("foo", true)); - break; - case 5: - factory.order(Order.aggregation("foo", false)); + int numOrders = randomIntBetween(1, 3); + for (int i = 0; i < numOrders; i++) { + orders.addAll(randomOrder()); + } break; - } + default: + fail(); } - return factory; + return orders; } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java index c8803b7e790e1..6710bcdb23168 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.max.Max; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.Collection; @@ -103,7 +104,7 @@ public void testIssue10719() throws Exception { SearchResponse response = client().prepareSearch("test").setTypes("book").setSearchType(SearchType.QUERY_THEN_FETCH) .addAggregation(terms("genres") .field("genre") - .order(Terms.Order.aggregation("sample>max_price.value", asc)) + .order(BucketOrder.aggregation("sample>max_price.value", asc)) .subAggregation(sampler("sample").shardSize(100) .subAggregation(max("max_price").field("price"))) ).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java index ca106721fcc99..2363c21c7d112 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java @@ -41,6 +41,7 @@ import org.elasticsearch.search.aggregations.metrics.stats.Stats; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; @@ -134,6 +135,7 @@ public void setupSuiteScopeCluster() throws Exception { .startObject() .field(SINGLE_VALUED_FIELD_NAME, (double) i) .field("num_tag", i < NUM_DOCS/2 + 1 ? 
1 : 0) // used to test order by single-bucket sub agg + .field("constant", 1) .startArray(MULTI_VALUED_FIELD_NAME).value((double) i).value(i + 1d).endArray() .endObject())); @@ -315,7 +317,7 @@ public void testSingleValueFieldWithMaxSize() throws Exception { .field(SINGLE_VALUED_FIELD_NAME) .size(20) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.term(true))) // we need to sort by terms cause we're checking the first 20 values + .order(BucketOrder.key(true))) // we need to sort by terms cause we're checking the first 20 values .execute().actionGet(); assertSearchResponse(response); @@ -363,15 +365,15 @@ private void testIncludeExcludeResults(double[] includes, double[] excludes, dou assertThat(bucket.getDocCount(), equalTo(1L)); } } - + public void testSingleValueFieldWithPartitionedFiltering() throws Exception { runTestFieldWithPartitionedFiltering(SINGLE_VALUED_FIELD_NAME); } - + public void testMultiValueFieldWithPartitionedFiltering() throws Exception { runTestFieldWithPartitionedFiltering(MULTI_VALUED_FIELD_NAME); } - + private void runTestFieldWithPartitionedFiltering(String field) throws Exception { // Find total number of unique terms SearchResponse allResponse = client().prepareSearch("idx") @@ -399,14 +401,14 @@ private void runTestFieldWithPartitionedFiltering(String field) throws Exception } } assertEquals(expectedCardinality, foundTerms.size()); - } + } public void testSingleValueFieldOrderedByTermAsc() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.term(true))) + .order(BucketOrder.key(true))) .execute().actionGet(); assertSearchResponse(response); @@ -432,7 +434,7 @@ public void testSingleValueFieldOrderedByTermDesc() throws Exception { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.term(false))) + .order(BucketOrder.key(false))) .execute().actionGet(); assertSearchResponse(response); @@ -453,6 +455,33 @@ public void testSingleValueFieldOrderedByTermDesc() throws Exception { } } + public void testSingleValueFieldOrderedByTieBreaker() throws Exception { + SearchResponse response = client().prepareSearch("idx").setTypes("type") + .addAggregation(terms("terms") + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("max_constant", randomBoolean())) + .subAggregation(max("max_constant").field("constant"))) + .execute().actionGet(); + + assertSearchResponse(response); + + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + int i = 0; + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("" + (double)i)); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + i++; + } + } + public void testSingleValuedFieldWithSubAggregation() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(terms("terms") @@ -759,7 +788,7 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAsc() throws .prepareSearch("idx") .addAggregation( 
terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", asc)).subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))) + .order(BucketOrder.aggregation("avg_i", asc)).subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))) .execute().actionGet(); @@ -789,7 +818,7 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscWithSubTer terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", asc)) + .order(BucketOrder.aggregation("avg_i", asc)) .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) .subAggregation( terms("subTerms").field(MULTI_VALUED_FIELD_NAME).collectMode( @@ -831,7 +860,7 @@ public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws .prepareSearch("idx") .addAggregation( terms("num_tags").field("num_tag").collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("filter", asc)) + .order(BucketOrder.aggregation("filter", asc)) .subAggregation(filter("filter", QueryBuilders.matchAllQuery()))).execute().actionGet(); @@ -869,7 +898,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevels( terms("tags") .field("num_tag") .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("filter1>filter2>max", asc)) + .order(BucketOrder.aggregation("filter1>filter2>max", asc)) .subAggregation( filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( filter("filter2", QueryBuilders.matchAllQuery()).subAggregation( @@ -923,7 +952,7 @@ public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Excepti client().prepareSearch(index) .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", true))).execute().actionGet(); + .order(BucketOrder.aggregation("avg_i", true))).execute().actionGet(); fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist"); @@ -941,7 +970,7 @@ public void testSingleValuedFieldOrderedByNonMetricsOrMultiBucketSubAggregation( terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("num_tags", true)) + .order(BucketOrder.aggregation("num_tags", true)) .subAggregation( terms("num_tags").field("num_tags").collectMode(randomFrom(SubAggCollectionMode.values())))) .execute().actionGet(); @@ -960,7 +989,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUknownMet client().prepareSearch(index) .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME + "2").collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.foo", true)) + .order(BucketOrder.aggregation("stats.foo", true)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet(); fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + @@ -978,7 +1007,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric client().prepareSearch(index) .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats", true)) + .order(BucketOrder.aggregation("stats", true)) 
.subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet(); fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + @@ -996,7 +1025,7 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationDesc() throws .prepareSearch("idx") .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", asc)).subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))) + .order(BucketOrder.aggregation("avg_i", asc)).subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))) .execute().actionGet(); @@ -1026,7 +1055,7 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws E .prepareSearch("idx") .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.avg", asc)) + .order(BucketOrder.aggregation("stats.avg", asc)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet(); assertSearchResponse(response); @@ -1054,7 +1083,7 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws .prepareSearch("idx") .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.avg", asc)) + .order(BucketOrder.aggregation("stats.avg", asc)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet(); assertSearchResponse(response); @@ -1082,7 +1111,7 @@ public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Ex .prepareSearch("idx") .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.variance", asc)) + .order(BucketOrder.aggregation("stats.variance", asc)) .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet(); assertSearchResponse(response); @@ -1139,48 +1168,48 @@ public void testScriptScore() { public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception { double[] expectedKeys = new double[] { 1, 2, 4, 3, 7, 6, 5 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(false)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(false)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsAsc() throws Exception { double[] expectedKeys = new double[] { 1, 2, 3, 4, 5, 6, 7 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationDescAndTermsAsc() throws Exception { double[] expectedKeys = new double[] { 5, 6, 7, 3, 4, 2, 1 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", false), Terms.Order.term(true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", false), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception { double[] expectedKeys = new double[] { 6, 7, 3, 4, 5, 1, 2 }; - assertMultiSortResponse(expectedKeys, Terms.Order.count(true), Terms.Order.aggregation("avg_l", 
true)); + assertMultiSortResponse(expectedKeys, BucketOrder.count(true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception { double[] expectedKeys = new double[] { 6, 7, 3, 5, 4, 1, 2 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("sum_d", true), Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("sum_d", true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedByThreeCriteria() throws Exception { double[] expectedKeys = new double[] { 2, 1, 4, 5, 3, 6, 7 }; - assertMultiSortResponse(expectedKeys, Terms.Order.count(false), - Terms.Order.aggregation("sum_d", false), - Terms.Order.aggregation("avg_l", false)); + assertMultiSortResponse(expectedKeys, BucketOrder.count(false), + BucketOrder.aggregation("sum_d", false), + BucketOrder.aggregation("avg_l", false)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { double[] expectedKeys = new double[] { 1, 2, 3, 4, 5, 6, 7 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true)); } - private void assertMultiSortResponse(double[] expectedKeys, Terms.Order... order) { + private void assertMultiSortResponse(double[] expectedKeys, BucketOrder... order) { SearchResponse response = client() .prepareSearch("sort_idx") .setTypes("multi_sort_type") .addAggregation( terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.compound(order)).subAggregation(avg("avg_l").field("l")) + .order(BucketOrder.compound(order)).subAggregation(avg("avg_l").field("l")) .subAggregation(sum("sum_d").field("d"))).execute().actionGet(); assertSearchResponse(response); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java index 683e7924419a3..d7bd069f2ba3d 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java @@ -19,7 +19,9 @@ package org.elasticsearch.search.aggregations.bucket; import com.carrotsearch.hppc.LongHashSet; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; @@ -27,16 +29,20 @@ import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.metrics.avg.Avg; import org.elasticsearch.search.aggregations.metrics.max.Max; import org.elasticsearch.search.aggregations.metrics.stats.Stats; import 
org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -48,6 +54,7 @@ import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.max; @@ -72,6 +79,7 @@ public class HistogramIT extends ESIntegTestCase { static int interval; static int numValueBuckets, numValuesBuckets; static long[] valueCounts, valuesCounts; + static Map> expectedMultiSortBuckets; @Override protected Collection> nodePlugins() { @@ -130,16 +138,18 @@ public void setupSuiteScopeCluster() throws Exception { } List builders = new ArrayList<>(); - for (int i = 0; i < numDocs; i++) { builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder() .startObject() .field(SINGLE_VALUED_FIELD_NAME, i + 1) .startArray(MULTI_VALUED_FIELD_NAME).value(i + 1).value(i + 2).endArray() .field("tag", "tag" + i) + .field("constant", 1) .endObject())); } + getMultiSortDocs(builders); + assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(jsonBuilder() @@ -151,6 +161,51 @@ public void setupSuiteScopeCluster() throws Exception { ensureSearchable(); } + private void addExpectedBucket(long key, long docCount, double avg, double sum) { + Map bucketProps = new HashMap<>(); + bucketProps.put("key", key); + bucketProps.put("_count", docCount); + bucketProps.put("avg_l", avg); + bucketProps.put("sum_d", sum); + expectedMultiSortBuckets.put(key, bucketProps); + } + + private void getMultiSortDocs(List builders) throws IOException { + expectedMultiSortBuckets = new HashMap<>(); + addExpectedBucket(1, 3, 1, 6); + addExpectedBucket(2, 3, 2, 6); + addExpectedBucket(3, 2, 3, 3); + addExpectedBucket(4, 2, 3, 4); + addExpectedBucket(5, 2, 5, 3); + addExpectedBucket(6, 1, 5, 1); + addExpectedBucket(7, 1, 5, 1); + + assertAcked(client().admin().indices().prepareCreate("sort_idx") + .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=double").get()); + for (int i = 1; i <= 3; i++) { + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject())); + } + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3.8).field("l", 3).field("d", 2).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + 
jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4.4).field("l", 3).field("d", 3).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5.1).field("l", 5).field("d", 2).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject())); + builders.add(client().prepareIndex("sort_idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject())); + } + public void testSingleValuedField() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)) @@ -241,7 +296,7 @@ public void testSingleValuedFieldWithRandomOffset() throws Exception { public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_ASC)) + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(true))) .execute().actionGet(); assertSearchResponse(response); @@ -252,7 +307,6 @@ public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { assertThat(histo.getName(), equalTo("histo")); assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -264,7 +318,7 @@ public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { public void testsingleValuedFieldOrderedByKeyDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_DESC)) + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(false))) .execute().actionGet(); assertSearchResponse(response); @@ -275,7 +329,6 @@ public void testsingleValuedFieldOrderedByKeyDesc() throws Exception { assertThat(histo.getName(), equalTo("histo")); assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(numValueBuckets - i - 1); @@ -287,7 +340,7 @@ public void testsingleValuedFieldOrderedByKeyDesc() throws Exception { public void testSingleValuedFieldOrderedByCountAsc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.COUNT_ASC)) + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.count(true))) .execute().actionGet(); assertSearchResponse(response); @@ -299,7 +352,6 @@ 
public void testSingleValuedFieldOrderedByCountAsc() throws Exception { assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); LongHashSet buckets = new LongHashSet(); - // TODO: use diamond once JI-9019884 is fixed List histoBuckets = new ArrayList<>(histo.getBuckets()); long previousCount = Long.MIN_VALUE; for (int i = 0; i < numValueBuckets; ++i) { @@ -316,7 +368,7 @@ public void testSingleValuedFieldOrderedByCountAsc() throws Exception { public void testSingleValuedFieldOrderedByCountDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.COUNT_DESC)) + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.count(false))) .execute().actionGet(); assertSearchResponse(response); @@ -328,7 +380,6 @@ public void testSingleValuedFieldOrderedByCountDesc() throws Exception { assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); LongHashSet buckets = new LongHashSet(); - // TODO: use diamond once JI-9019884 is fixed List histoBuckets = new ArrayList<>(histo.getBuckets()); long previousCount = Long.MAX_VALUE; for (int i = 0; i < numValueBuckets; ++i) { @@ -361,7 +412,6 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)histo).getProperty("_count"); Object[] propertiesCounts = (Object[]) ((InternalAggregation)histo).getProperty("sum.value"); - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -390,7 +440,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { histogram("histo") .field(SINGLE_VALUED_FIELD_NAME) .interval(interval) - .order(Histogram.Order.aggregation("sum", true)) + .order(BucketOrder.aggregation("sum", true)) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) .execute().actionGet(); @@ -404,7 +454,6 @@ public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { LongHashSet visited = new LongHashSet(); double previousSum = Double.NEGATIVE_INFINITY; - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -434,7 +483,7 @@ public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception histogram("histo") .field(SINGLE_VALUED_FIELD_NAME) .interval(interval) - .order(Histogram.Order.aggregation("sum", false)) + .order(BucketOrder.aggregation("sum", false)) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) .execute().actionGet(); @@ -448,7 +497,6 @@ public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception LongHashSet visited = new LongHashSet(); double previousSum = Double.POSITIVE_INFINITY; - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -478,7 +526,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws histogram("histo") .field(SINGLE_VALUED_FIELD_NAME) .interval(interval) - .order(Histogram.Order.aggregation("stats.sum", false)) + .order(BucketOrder.aggregation("stats.sum", false)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))) 
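/*
 * A rough mapping of the removed Histogram.Order constants onto the BucketOrder factory
 * methods exercised by the histogram hunks above and below (sketch only):
 *
 *   Histogram.Order.KEY_ASC / KEY_DESC         -> BucketOrder.key(true) / BucketOrder.key(false)
 *   Histogram.Order.COUNT_ASC / COUNT_DESC     -> BucketOrder.count(true) / BucketOrder.count(false)
 *   Histogram.Order.aggregation("sum", true)   -> BucketOrder.aggregation("sum", true)
 *
 * so an ascending count order now reads roughly as:
 *
 *   histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
 *       .order(BucketOrder.count(true))
 */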
.execute().actionGet(); @@ -492,7 +540,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws LongHashSet visited = new LongHashSet(); double previousSum = Double.POSITIVE_INFINITY; - // TODO: use diamond once JI-9019884 is fixed + List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -523,7 +571,7 @@ public void testSingleValuedFieldOrderedBySubAggregationDescDeepOrderPath() thro histogram("histo") .field(SINGLE_VALUED_FIELD_NAME) .interval(interval) - .order(Histogram.Order.aggregation("filter>max", asc)) + .order(BucketOrder.aggregation("filter>max", asc)) .subAggregation(filter("filter", matchAllQuery()) .subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME)))) .execute().actionGet(); @@ -538,7 +586,6 @@ public void testSingleValuedFieldOrderedBySubAggregationDescDeepOrderPath() thro LongHashSet visited = new LongHashSet(); double prevMax = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY; - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); @@ -558,6 +605,62 @@ public void testSingleValuedFieldOrderedBySubAggregationDescDeepOrderPath() thro } } + public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(histogram("histo") + .field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("max_constant", randomBoolean())) + .subAggregation(max("max_constant").field("constant"))) + .execute().actionGet(); + + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getBuckets().size(), equalTo(numValueBuckets)); + + List buckets = new ArrayList<>(histo.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + } + + public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { + boolean asc = true; + try { + client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("inner_histo>avg", asc)) + .subAggregation(histogram("inner_histo") + .interval(interval) + .field(MULTI_VALUED_FIELD_NAME) + .subAggregation(avg("avg").field("value")))) + .execute().actionGet(); + fail("Expected an exception"); + } catch (SearchPhaseExecutionException e) { + ElasticsearchException[] rootCauses = e.guessRootCauses(); + if (rootCauses.length == 1) { + ElasticsearchException rootCause = rootCauses[0]; + if (rootCause instanceof AggregationExecutionException) { + AggregationExecutionException aggException = (AggregationExecutionException) rootCause; + assertThat(aggException.getMessage(), Matchers.startsWith("Invalid aggregation order path")); + } else { + throw e; + } + } else { + throw e; + } + } + } + public void testSingleValuedFieldWithValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation( @@ -614,7 +717,7 @@ public void testMultiValuedField() throws Exception { public void 
testMultiValuedFieldOrderedByKeyDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_DESC)) + .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(false))) .execute().actionGet(); assertSearchResponse(response); @@ -625,7 +728,6 @@ public void testMultiValuedFieldOrderedByKeyDesc() throws Exception { assertThat(histo.getName(), equalTo("histo")); assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets)); - // TODO: use diamond once JI-9019884 is fixed List buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValuesBuckets; ++i) { Histogram.Bucket bucket = buckets.get(numValuesBuckets - i - 1); @@ -1036,4 +1138,74 @@ public void testDontCacheScripts() throws Exception { assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() .getMissCount(), equalTo(1L)); } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyDesc() throws Exception { + long[] expectedKeys = new long[] { 1, 2, 4, 3, 7, 6, 5 }; + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(false)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyAsc() throws Exception { + long[] expectedKeys = new long[] { 1, 2, 3, 4, 5, 6, 7 }; + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(true)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationDescAndKeyAsc() throws Exception { + long[] expectedKeys = new long[] { 5, 6, 7, 3, 4, 2, 1 }; + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", false), BucketOrder.key(true)); + } + + public void testSingleValuedFieldOrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception { + long[] expectedKeys = new long[] { 6, 7, 3, 4, 5, 1, 2 }; + assertMultiSortResponse(expectedKeys, BucketOrder.count(true), BucketOrder.aggregation("avg_l", true)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception { + long[] expectedKeys = new long[] { 6, 7, 3, 5, 4, 1, 2 }; + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("sum_d", true), BucketOrder.aggregation("avg_l", true)); + } + + public void testSingleValuedFieldOrderedByThreeCriteria() throws Exception { + long[] expectedKeys = new long[] { 2, 1, 4, 5, 3, 6, 7 }; + assertMultiSortResponse(expectedKeys, BucketOrder.count(false), BucketOrder.aggregation("sum_d", false), + BucketOrder.aggregation("avg_l", false)); + } + + public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { + long[] expectedKeys = new long[] { 1, 2, 3, 4, 5, 6, 7 }; + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true)); + } + + private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... 
order) { + SearchResponse response = client() + .prepareSearch("sort_idx") + .setTypes("type") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")).subAggregation(sum("sum_d").field("d"))).execute().actionGet(); + + assertSearchResponse(response); + + Histogram histogram = response.getAggregations().get("histo"); + assertThat(histogram, notNullValue()); + assertThat(histogram.getName(), equalTo("histo")); + assertThat(histogram.getBuckets().size(), equalTo(expectedKeys.length)); + + int i = 0; + for (Histogram.Bucket bucket : histogram.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo(expectedKeys[i])); + assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count"))); + Avg avg = bucket.getAggregations().get("avg_l"); + assertThat(avg, notNullValue()); + assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l"))); + Sum sum = bucket.getAggregations().get("sum_d"); + assertThat(sum, notNullValue()); + assertThat(sum.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d"))); + i++; + } + } + + private long key(Histogram.Bucket bucket) { + return ((Number) bucket.getKey()).longValue(); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java index ea61a8168adef..ee22b2291773d 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java @@ -21,7 +21,10 @@ import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; +import org.elasticsearch.search.aggregations.BucketOrder; + +import java.util.ArrayList; +import java.util.List; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.startsWith; @@ -54,26 +57,11 @@ protected HistogramAggregationBuilder createTestAggregatorBuilder() { factory.offset(randomIntBetween(0, 100000)); } if (randomBoolean()) { - int branch = randomInt(5); - switch (branch) { - case 0: - factory.order(Order.COUNT_ASC); - break; - case 1: - factory.order(Order.COUNT_DESC); - break; - case 2: - factory.order(Order.KEY_ASC); - break; - case 3: - factory.order(Order.KEY_DESC); - break; - case 4: - factory.order(Order.aggregation("foo", true)); - break; - case 5: - factory.order(Order.aggregation("foo", false)); - break; + List order = randomOrder(); + if(order.size() == 1 && randomBoolean()) { + factory.order(order.get(0)); + } else { + factory.order(order); } } return factory; @@ -102,4 +90,31 @@ public void testInvalidBounds() { assertThat(ex.getMessage(), equalTo("maxBound [0.4] must be greater than minBound [0.5]")); } + private List randomOrder() { + List orders = new ArrayList<>(); + switch (randomInt(4)) { + case 0: + orders.add(BucketOrder.key(randomBoolean())); + break; + case 1: + orders.add(BucketOrder.count(randomBoolean())); + break; + case 2: + orders.add(BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomBoolean())); + break; + case 3: + orders.add(BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomAlphaOfLengthBetween(3, 20), 
randomBoolean())); + break; + case 4: + int numOrders = randomIntBetween(1, 3); + for (int i = 0; i < numOrders; i++) { + orders.addAll(randomOrder()); + } + break; + default: + fail(); + } + return orders; + } + } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java index a54dc3e2f5edf..565cdaaa87e0a 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java @@ -40,6 +40,7 @@ import org.elasticsearch.search.aggregations.metrics.stats.Stats; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; @@ -121,6 +122,7 @@ public void setupSuiteScopeCluster() throws Exception { .field(SINGLE_VALUED_FIELD_NAME, i) .startArray(MULTI_VALUED_FIELD_NAME).value(i).value(i + 1).endArray() .field("num_tag", i < lowCardBuilders.length / 2 + 1 ? 1 : 0) // used to test order by single-bucket sub agg + .field("constant", 1) .endObject()); } indexRandom(true, lowCardBuilders); @@ -392,7 +394,7 @@ public void testSingleValueFieldWithMaxSize() throws Exception { .field(SINGLE_VALUED_FIELD_NAME) .size(20) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.term(true))) // we need to sort by terms cause we're checking the first 20 values + .order(BucketOrder.key(true))) // we need to sort by terms cause we're checking the first 20 values .execute().actionGet(); assertSearchResponse(response); @@ -417,7 +419,7 @@ public void testSingleValueFieldOrderedByTermAsc() throws Exception { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.term(true))) + .order(BucketOrder.key(true))) .execute().actionGet(); assertSearchResponse(response); @@ -441,7 +443,7 @@ public void testSingleValueFieldOrderedByTermDesc() throws Exception { .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.term(false))) + .order(BucketOrder.key(false))) .execute().actionGet(); assertSearchResponse(response); @@ -462,6 +464,31 @@ public void testSingleValueFieldOrderedByTermDesc() throws Exception { } } + public void testSingleValueFieldOrderedByTieBreaker() throws Exception { + SearchResponse response = client().prepareSearch("idx").setTypes("type") + .addAggregation(terms("terms") + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("max_constant", randomBoolean())) + .subAggregation(max("max_constant").field("constant"))) + .execute().actionGet(); + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + int i = 0; + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("" + i)); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + i++; + } + } + public void testSingleValuedFieldWithSubAggregation() throws 
Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(terms("terms") @@ -769,7 +796,7 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAsc() throws .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", asc)) + .order(BucketOrder.aggregation("avg_i", asc)) .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) ).execute().actionGet(); @@ -798,7 +825,7 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscWithTermsS .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", asc)) + .order(BucketOrder.aggregation("avg_i", asc)) .subAggregation( avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) .subAggregation( @@ -842,7 +869,7 @@ public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws .addAggregation(terms("num_tags") .field("num_tag") .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("filter", asc)) + .order(BucketOrder.aggregation("filter", asc)) .subAggregation(filter("filter", QueryBuilders.matchAllQuery())) ).get(); @@ -879,7 +906,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevels( .addAggregation(terms("tags") .field("num_tag") .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("filter1>filter2>max", asc)) + .order(BucketOrder.aggregation("filter1>filter2>max", asc)) .subAggregation(filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( filter("filter2", QueryBuilders.matchAllQuery()) .subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME)))) @@ -934,7 +961,7 @@ public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Excepti .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", true)) + .order(BucketOrder.aggregation("avg_i", true)) ).execute().actionGet(); fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist"); @@ -952,7 +979,7 @@ public void testSingleValuedFieldOrderedByNonMetricsOrMultiBucketSubAggregation( .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("num_tags", true)) + .order(BucketOrder.aggregation("num_tags", true)) .subAggregation(terms("num_tags").field("num_tags") .collectMode(randomFrom(SubAggCollectionMode.values()))) ).execute().actionGet(); @@ -972,7 +999,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUknownMet .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.foo", true)) + .order(BucketOrder.aggregation("stats.foo", true)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) ).execute().actionGet(); @@ -992,7 +1019,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats", true)) + .order(BucketOrder.aggregation("stats", true)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) ).execute().actionGet(); @@ -1011,7 
+1038,7 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationDesc() throws .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", asc)) + .order(BucketOrder.aggregation("avg_i", asc)) .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) ).execute().actionGet(); @@ -1043,7 +1070,7 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws E .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.avg", asc)) + .order(BucketOrder.aggregation("stats.avg", asc)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) ).execute().actionGet(); @@ -1073,7 +1100,7 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.avg", asc)) + .order(BucketOrder.aggregation("stats.avg", asc)) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) ).execute().actionGet(); @@ -1103,7 +1130,7 @@ public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Ex .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.variance", asc)) + .order(BucketOrder.aggregation("stats.variance", asc)) .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME)) ).execute().actionGet(); @@ -1129,47 +1156,47 @@ public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Ex public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception { long[] expectedKeys = new long[] { 1, 2, 4, 3, 7, 6, 5 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(false)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(false)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsAsc() throws Exception { long[] expectedKeys = new long[] { 1, 2, 3, 4, 5, 6, 7 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationDescAndTermsAsc() throws Exception { long[] expectedKeys = new long[] { 5, 6, 7, 3, 4, 2, 1 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", false), Terms.Order.term(true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", false), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception { long[] expectedKeys = new long[] { 6, 7, 3, 4, 5, 1, 2 }; - assertMultiSortResponse(expectedKeys, Terms.Order.count(true), Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.count(true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception { long[] expectedKeys = new long[] { 6, 7, 3, 5, 4, 1, 2 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("sum_d", true), Terms.Order.aggregation("avg_l", 
true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("sum_d", true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedByThreeCriteria() throws Exception { long[] expectedKeys = new long[] { 2, 1, 4, 5, 3, 6, 7 }; - assertMultiSortResponse(expectedKeys, Terms.Order.count(false), - Terms.Order.aggregation("sum_d", false), - Terms.Order.aggregation("avg_l", false)); + assertMultiSortResponse(expectedKeys, BucketOrder.count(false), + BucketOrder.aggregation("sum_d", false), + BucketOrder.aggregation("avg_l", false)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { long[] expectedKeys = new long[] { 1, 2, 3, 4, 5, 6, 7 }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true)); } - private void assertMultiSortResponse(long[] expectedKeys, Terms.Order... order) { + private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... order) { SearchResponse response = client().prepareSearch("sort_idx").setTypes("multi_sort_type") .addAggregation(terms("terms") .field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.compound(order)) + .order(BucketOrder.compound(order)) .subAggregation(avg("avg_l").field("l")) .subAggregation(sum("sum_d").field("d")) ).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java index e1e8f1ba660cd..038227239cc0f 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java @@ -37,6 +37,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -190,122 +191,122 @@ private void assertSubset(Histogram histo1, Histogram histo2, long minDocCount) } public void testStringTermAsc() throws Exception { - testMinDocCountOnTerms("s", Script.NO, Terms.Order.term(true)); + testMinDocCountOnTerms("s", Script.NO, BucketOrder.key(true)); } public void testStringScriptTermAsc() throws Exception { - testMinDocCountOnTerms("s", Script.YES, Terms.Order.term(true)); + testMinDocCountOnTerms("s", Script.YES, BucketOrder.key(true)); } public void testStringTermDesc() throws Exception { - testMinDocCountOnTerms("s", Script.NO, Terms.Order.term(false)); + testMinDocCountOnTerms("s", Script.NO, BucketOrder.key(false)); } public void testStringScriptTermDesc() throws Exception { - testMinDocCountOnTerms("s", Script.YES, Terms.Order.term(false)); + testMinDocCountOnTerms("s", Script.YES, BucketOrder.key(false)); } public void testStringCountAsc() throws Exception { - testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(true)); + testMinDocCountOnTerms("s", Script.NO, BucketOrder.count(true)); } public void testStringScriptCountAsc() throws Exception { - testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(true)); + testMinDocCountOnTerms("s", Script.YES, BucketOrder.count(true)); } public void 
testStringCountDesc() throws Exception { - testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(false)); + testMinDocCountOnTerms("s", Script.NO, BucketOrder.count(false)); } public void testStringScriptCountDesc() throws Exception { - testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(false)); + testMinDocCountOnTerms("s", Script.YES, BucketOrder.count(false)); } public void testStringCountAscWithInclude() throws Exception { - testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(true), ".*a.*", true); + testMinDocCountOnTerms("s", Script.NO, BucketOrder.count(true), ".*a.*", true); } public void testStringScriptCountAscWithInclude() throws Exception { - testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(true), ".*a.*", true); + testMinDocCountOnTerms("s", Script.YES, BucketOrder.count(true), ".*a.*", true); } public void testStringCountDescWithInclude() throws Exception { - testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(false), ".*a.*", true); + testMinDocCountOnTerms("s", Script.NO, BucketOrder.count(false), ".*a.*", true); } public void testStringScriptCountDescWithInclude() throws Exception { - testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(false), ".*a.*", true); + testMinDocCountOnTerms("s", Script.YES, BucketOrder.count(false), ".*a.*", true); } public void testLongTermAsc() throws Exception { - testMinDocCountOnTerms("l", Script.NO, Terms.Order.term(true)); + testMinDocCountOnTerms("l", Script.NO, BucketOrder.key(true)); } public void testLongScriptTermAsc() throws Exception { - testMinDocCountOnTerms("l", Script.YES, Terms.Order.term(true)); + testMinDocCountOnTerms("l", Script.YES, BucketOrder.key(true)); } public void testLongTermDesc() throws Exception { - testMinDocCountOnTerms("l", Script.NO, Terms.Order.term(false)); + testMinDocCountOnTerms("l", Script.NO, BucketOrder.key(false)); } public void testLongScriptTermDesc() throws Exception { - testMinDocCountOnTerms("l", Script.YES, Terms.Order.term(false)); + testMinDocCountOnTerms("l", Script.YES, BucketOrder.key(false)); } public void testLongCountAsc() throws Exception { - testMinDocCountOnTerms("l", Script.NO, Terms.Order.count(true)); + testMinDocCountOnTerms("l", Script.NO, BucketOrder.count(true)); } public void testLongScriptCountAsc() throws Exception { - testMinDocCountOnTerms("l", Script.YES, Terms.Order.count(true)); + testMinDocCountOnTerms("l", Script.YES, BucketOrder.count(true)); } public void testLongCountDesc() throws Exception { - testMinDocCountOnTerms("l", Script.NO, Terms.Order.count(false)); + testMinDocCountOnTerms("l", Script.NO, BucketOrder.count(false)); } public void testLongScriptCountDesc() throws Exception { - testMinDocCountOnTerms("l", Script.YES, Terms.Order.count(false)); + testMinDocCountOnTerms("l", Script.YES, BucketOrder.count(false)); } public void testDoubleTermAsc() throws Exception { - testMinDocCountOnTerms("d", Script.NO, Terms.Order.term(true)); + testMinDocCountOnTerms("d", Script.NO, BucketOrder.key(true)); } public void testDoubleScriptTermAsc() throws Exception { - testMinDocCountOnTerms("d", Script.YES, Terms.Order.term(true)); + testMinDocCountOnTerms("d", Script.YES, BucketOrder.key(true)); } public void testDoubleTermDesc() throws Exception { - testMinDocCountOnTerms("d", Script.NO, Terms.Order.term(false)); + testMinDocCountOnTerms("d", Script.NO, BucketOrder.key(false)); } public void testDoubleScriptTermDesc() throws Exception { - testMinDocCountOnTerms("d", Script.YES, Terms.Order.term(false)); + testMinDocCountOnTerms("d", 
Script.YES, BucketOrder.key(false)); } public void testDoubleCountAsc() throws Exception { - testMinDocCountOnTerms("d", Script.NO, Terms.Order.count(true)); + testMinDocCountOnTerms("d", Script.NO, BucketOrder.count(true)); } public void testDoubleScriptCountAsc() throws Exception { - testMinDocCountOnTerms("d", Script.YES, Terms.Order.count(true)); + testMinDocCountOnTerms("d", Script.YES, BucketOrder.count(true)); } public void testDoubleCountDesc() throws Exception { - testMinDocCountOnTerms("d", Script.NO, Terms.Order.count(false)); + testMinDocCountOnTerms("d", Script.NO, BucketOrder.count(false)); } public void testDoubleScriptCountDesc() throws Exception { - testMinDocCountOnTerms("d", Script.YES, Terms.Order.count(false)); + testMinDocCountOnTerms("d", Script.YES, BucketOrder.count(false)); } - private void testMinDocCountOnTerms(String field, Script script, Terms.Order order) throws Exception { + private void testMinDocCountOnTerms(String field, Script script, BucketOrder order) throws Exception { testMinDocCountOnTerms(field, script, order, null, true); } - private void testMinDocCountOnTerms(String field, Script script, Terms.Order order, String include, boolean retry) throws Exception { + private void testMinDocCountOnTerms(String field, Script script, BucketOrder order, String include, boolean retry) throws Exception { // all terms final SearchResponse allTermsResponse = client().prepareSearch("idx").setTypes("type") .setSize(0) @@ -342,38 +343,38 @@ private void testMinDocCountOnTerms(String field, Script script, Terms.Order ord } public void testHistogramCountAsc() throws Exception { - testMinDocCountOnHistogram(Histogram.Order.COUNT_ASC); + testMinDocCountOnHistogram(BucketOrder.count(true)); } public void testHistogramCountDesc() throws Exception { - testMinDocCountOnHistogram(Histogram.Order.COUNT_DESC); + testMinDocCountOnHistogram(BucketOrder.count(false)); } public void testHistogramKeyAsc() throws Exception { - testMinDocCountOnHistogram(Histogram.Order.KEY_ASC); + testMinDocCountOnHistogram(BucketOrder.key(true)); } public void testHistogramKeyDesc() throws Exception { - testMinDocCountOnHistogram(Histogram.Order.KEY_DESC); + testMinDocCountOnHistogram(BucketOrder.key(false)); } public void testDateHistogramCountAsc() throws Exception { - testMinDocCountOnDateHistogram(Histogram.Order.COUNT_ASC); + testMinDocCountOnDateHistogram(BucketOrder.count(true)); } public void testDateHistogramCountDesc() throws Exception { - testMinDocCountOnDateHistogram(Histogram.Order.COUNT_DESC); + testMinDocCountOnDateHistogram(BucketOrder.count(false)); } public void testDateHistogramKeyAsc() throws Exception { - testMinDocCountOnDateHistogram(Histogram.Order.KEY_ASC); + testMinDocCountOnDateHistogram(BucketOrder.key(true)); } public void testDateHistogramKeyDesc() throws Exception { - testMinDocCountOnDateHistogram(Histogram.Order.KEY_DESC); + testMinDocCountOnDateHistogram(BucketOrder.key(false)); } - private void testMinDocCountOnHistogram(Histogram.Order order) throws Exception { + private void testMinDocCountOnHistogram(BucketOrder order) throws Exception { final int interval = randomIntBetween(1, 3); final SearchResponse allResponse = client().prepareSearch("idx").setTypes("type") .setSize(0) @@ -393,7 +394,7 @@ private void testMinDocCountOnHistogram(Histogram.Order order) throws Exception } } - private void testMinDocCountOnDateHistogram(Histogram.Order order) throws Exception { + private void testMinDocCountOnDateHistogram(BucketOrder order) throws Exception { final 
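/*
 * The histogram and date_histogram orders in this test now go through the same BucketOrder
 * factories as the terms tests. Where a compound order is wanted, a sketch based on the
 * multi-sort tests added elsewhere in this patch (field and sub-aggregation names are those
 * of the sort_idx fixture) looks like:
 *
 *   histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1)
 *       .order(BucketOrder.compound(
 *           BucketOrder.aggregation("avg_l", true),   // primary sort on the sub-aggregation
 *           BucketOrder.key(false)))                  // bucket key as tie-breaker
 *       .subAggregation(avg("avg_l").field("l"))
 */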
SearchResponse allResponse = client().prepareSearch("idx").setTypes("type") .setSize(0) .setQuery(QUERY) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java index f6db12a7f6d8c..5b8c3b878c19a 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregationBuilder; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.test.ESIntegTestCase; @@ -150,7 +151,7 @@ public void testTerms(String fieldName) { final boolean asc = randomBoolean(); SubAggregation agg = randomFrom(SubAggregation.values()); SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field(fieldName).collectMode(randomFrom(SubAggCollectionMode.values())).subAggregation(agg.builder()).order(Terms.Order.aggregation(agg.sortKey(), asc))) + .addAggregation(terms("terms").field(fieldName).collectMode(randomFrom(SubAggCollectionMode.values())).subAggregation(agg.builder()).order(BucketOrder.aggregation(agg.sortKey(), asc))) .execute().actionGet(); assertSearchResponse(response); @@ -175,7 +176,7 @@ public void testLongHistogram() { SubAggregation agg = randomFrom(SubAggregation.values()); SearchResponse response = client().prepareSearch("idx") .addAggregation(histogram("histo") - .field("long_value").interval(randomIntBetween(1, 2)).subAggregation(agg.builder()).order(Histogram.Order.aggregation(agg.sortKey(), asc))) + .field("long_value").interval(randomIntBetween(1, 2)).subAggregation(agg.builder()).order(BucketOrder.aggregation(agg.sortKey(), asc))) .execute().actionGet(); assertSearchResponse(response); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java index 6118cb69ee747..aaf366c7c7b06 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.bucket.nested.ReverseNested; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -349,13 +350,13 @@ public void testSimpleReverseNestedToNested1() throws Exception { SearchResponse response = client().prepareSearch("idx2") .addAggregation(nested("nested1", "nested1.nested2") .subAggregation( - terms("field2").field("nested1.nested2.field2").order(Terms.Order.term(true)) + terms("field2").field("nested1.nested2.field2").order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values())) .size(10000) .subAggregation( reverseNested("nested1_to_field1").path("nested1") .subAggregation( - 
terms("field1").field("nested1.field1").order(Terms.Order.term(true)) + terms("field1").field("nested1.field1").order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values())) ) ) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java index ebd078de67402..328ce538feb98 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.metrics.max.Max; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.Collection; @@ -99,7 +100,7 @@ public void testIssue10719() throws Exception { SearchResponse response = client().prepareSearch("test").setTypes("book").setSearchType(SearchType.QUERY_THEN_FETCH) .addAggregation(terms("genres") .field("genre") - .order(Terms.Order.aggregation("sample>max_price.value", asc)) + .order(BucketOrder.aggregation("sample>max_price.value", asc)) .subAggregation(sampler("sample").shardSize(100) .subAggregation(max("max_price").field("price"))) ).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java index 748c5f886f669..4c03ca9a84ea6 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.HashMap; import java.util.List; @@ -39,7 +40,7 @@ public void testNoShardSizeString() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -62,7 +63,7 @@ public void testShardSizeEqualsSizeString() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3).shardSize(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -86,7 +87,7 @@ public void testWithShardSizeString() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false))) + 
.collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -110,7 +111,7 @@ public void testWithShardSizeStringSingleShard() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1) .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -133,7 +134,7 @@ public void testNoShardSizeTermOrderString() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -156,7 +157,7 @@ public void testNoShardSizeLong() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -179,7 +180,7 @@ public void testShardSizeEqualsSizeLong() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3).shardSize(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -202,7 +203,7 @@ public void testWithShardSizeLong() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -226,7 +227,7 @@ public void testWithShardSizeLongSingleShard() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1) .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -249,7 +250,7 @@ public void testNoShardSizeTermOrderLong() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - 
.collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -272,7 +273,7 @@ public void testNoShardSizeDouble() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -295,7 +296,7 @@ public void testShardSizeEqualsSizeDouble() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3).shardSize(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -318,7 +319,7 @@ public void testWithShardSizeDouble() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -341,7 +342,7 @@ public void testWithShardSizeDoubleSingleShard() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1) .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false))) + .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); @@ -364,7 +365,7 @@ public void testNoShardSizeTermOrderDouble() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("type") .setQuery(matchAllQuery()) .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true))) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))) .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java index df69cfcfa9383..0c93ff2f6bbd5 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java @@ -42,10 +42,12 @@ import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.avg.Avg; +import org.elasticsearch.search.aggregations.metrics.max.Max; import 
org.elasticsearch.search.aggregations.metrics.stats.Stats; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; @@ -72,6 +74,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats; import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.max; import static org.elasticsearch.search.aggregations.AggregationBuilders.stats; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; @@ -129,9 +132,15 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < 5; i++) { builders.add(client().prepareIndex("idx", "type").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val" + i).field("i", i) - .field("tag", i < 5 / 2 + 1 ? "more" : "less").startArray(MULTI_VALUED_FIELD_NAME).value("val" + i) - .value("val" + (i + 1)).endArray().endObject())); + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, "val" + i) + .field("i", i) + .field("constant", 1) + .field("tag", i < 5 / 2 + 1 ? "more" : "less") + .startArray(MULTI_VALUED_FIELD_NAME) + .value("val" + i) + .value("val" + (i + 1)) + .endArray().endObject())); } getMultiSortDocs(builders); @@ -456,15 +465,15 @@ public void testSingleValueFieldWithExactTermFiltering() throws Exception { } } - + public void testSingleValueFieldWithPartitionedFiltering() throws Exception { runTestFieldWithPartitionedFiltering(SINGLE_VALUED_FIELD_NAME); } - + public void testMultiValueFieldWithPartitionedFiltering() throws Exception { runTestFieldWithPartitionedFiltering(MULTI_VALUED_FIELD_NAME); } - + private void runTestFieldWithPartitionedFiltering(String field) throws Exception { // Find total number of unique terms SearchResponse allResponse = client().prepareSearch("idx").setTypes("type") @@ -492,8 +501,8 @@ private void runTestFieldWithPartitionedFiltering(String field) throws Exception } } assertEquals(expectedCardinality, foundTerms.size()); - } - + } + public void testSingleValueFieldWithMaxSize() throws Exception { SearchResponse response = client() @@ -503,7 +512,7 @@ public void testSingleValueFieldWithMaxSize() throws Exception { .executionHint(randomExecutionHint()) .field(SINGLE_VALUED_FIELD_NAME).size(20) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.term(true))) // we need to sort by terms cause we're checking the first 20 values + .order(BucketOrder.key(true))) // we need to sort by terms cause we're checking the first 20 values .execute().actionGet(); assertSearchResponse(response); @@ -527,7 +536,7 @@ public void testSingleValueFieldOrderedByTermAsc() throws Exception { .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true))).execute() + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))).execute() .actionGet(); assertSearchResponse(response); @@ 
-552,7 +561,7 @@ public void testSingleValueFieldOrderedByTermDesc() throws Exception { .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(false))).execute() + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(false))).execute() .actionGet(); assertSearchResponse(response); @@ -944,7 +953,7 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAsc() throws .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("avg_i", asc)) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("avg_i", asc)) .subAggregation(avg("avg_i").field("i"))).execute().actionGet(); assertSearchResponse(response); @@ -966,6 +975,34 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAsc() throws } } + public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .setTypes("type") + .addAggregation( + terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("max_constant", randomBoolean())) + .subAggregation(max("max_constant").field("constant"))).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(5)); + + int i = 0; + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + assertThat(key(bucket), equalTo("val" + i)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Max max = bucket.getAggregations().get("max_constant"); + assertThat(max, notNullValue()); + assertThat(max.getValue(), equalTo((double) 1)); + i++; + } + } + public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { boolean asc = true; try { @@ -975,7 +1012,7 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("inner_terms>avg", asc)) + .order(BucketOrder.aggregation("inner_terms>avg", asc)) .subAggregation(terms("inner_terms").field(MULTI_VALUED_FIELD_NAME).subAggregation(avg("avg").field("i")))) .execute().actionGet(); fail("Expected an exception"); @@ -985,7 +1022,7 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { ElasticsearchException rootCause = rootCauses[0]; if (rootCause instanceof AggregationExecutionException) { AggregationExecutionException aggException = (AggregationExecutionException) rootCause; - assertThat(aggException.getMessage(), Matchers.startsWith("Invalid terms aggregation order path")); + assertThat(aggException.getMessage(), Matchers.startsWith("Invalid aggregation order path")); } else { throw e; } @@ -1002,7 +1039,7 @@ public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws .setTypes("type") .addAggregation( terms("tags").executionHint(randomExecutionHint()).field("tag") - 
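The StringTermsIT hunks above also cover ordering buckets by the value of a single-value metric sub-aggregation, and they add a tie-breaker test on a constant-valued max. A short sketch of that ordering pattern, with a hypothetical holder class and illustrative field names, assuming only the builder calls visible in these hunks:

    import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
    import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;

    import org.elasticsearch.search.aggregations.BucketOrder;
    import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;

    public class SubAggOrderSketch {
        // Terms buckets sorted by the value produced by the "avg_i" sub-aggregation.
        static TermsAggregationBuilder orderedByAvg(boolean asc) {
            return terms("terms")
                    .field("s_value")                                // illustrative field name
                    .order(BucketOrder.aggregation("avg_i", asc))
                    .subAggregation(avg("avg_i").field("i"));
        }
    }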
.collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("filter", asc)) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("filter", asc)) .subAggregation(filter("filter", QueryBuilders.matchAllQuery()))).execute().actionGet(); assertSearchResponse(response); @@ -1041,7 +1078,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevels( .executionHint(randomExecutionHint()) .field("tag") .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("filter1>filter2>stats.max", asc)) + .order(BucketOrder.aggregation("filter1>filter2>stats.max", asc)) .subAggregation( filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( filter("filter2", QueryBuilders.matchAllQuery()).subAggregation( @@ -1104,7 +1141,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsS .executionHint(randomExecutionHint()) .field("tag") .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("filter1>" + filter2Name + ">" + statsName + ".max", asc)) + .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + ".max", asc)) .subAggregation( filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation( @@ -1167,7 +1204,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsS .executionHint(randomExecutionHint()) .field("tag") .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("filter1>" + filter2Name + ">" + statsName + "[max]", asc)) + .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + "[max]", asc)) .subAggregation( filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation( @@ -1222,7 +1259,7 @@ public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Excepti .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("avg_i", true))).execute().actionGet(); + .order(BucketOrder.aggregation("avg_i", true))).execute().actionGet(); fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist"); @@ -1240,7 +1277,7 @@ public void testSingleValuedFieldOrderedByNonMetricsOrMultiBucketSubAggregation( .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("values", true)) + .order(BucketOrder.aggregation("values", true)) .subAggregation(terms("values").field("i").collectMode(randomFrom(SubAggCollectionMode.values())))) .execute().actionGet(); @@ -1262,7 +1299,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUknownMet .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.foo", true)).subAggregation(stats("stats").field("i"))) + .order(BucketOrder.aggregation("stats.foo", true)).subAggregation(stats("stats").field("i"))) .execute().actionGet(); fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + "with an unknown specified metric to order by. 
response had " + response.getFailedShards() + " failed shards."); @@ -1281,7 +1318,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats", true)).subAggregation(stats("stats").field("i"))).execute() + .order(BucketOrder.aggregation("stats", true)).subAggregation(stats("stats").field("i"))).execute() .actionGet(); fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " @@ -1300,7 +1337,7 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationDesc() throws .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("avg_i", asc)) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("avg_i", asc)) .subAggregation(avg("avg_i").field("i"))).execute().actionGet(); assertSearchResponse(response); @@ -1331,7 +1368,7 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws E .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("stats.avg", asc)) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("stats.avg", asc)) .subAggregation(stats("stats").field("i"))).execute().actionGet(); assertSearchResponse(response); @@ -1361,7 +1398,7 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws .setTypes("type") .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("stats.avg", asc)) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("stats.avg", asc)) .subAggregation(stats("stats").field("i"))).execute().actionGet(); assertSearchResponse(response); @@ -1393,7 +1430,7 @@ public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Ex .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.sum_of_squares", asc)) + .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) .subAggregation(extendedStats("stats").field("i"))).execute().actionGet(); assertSearchResponse(response); @@ -1425,7 +1462,7 @@ public void testSingleValuedFieldOrderedByStatsAggAscWithTermsSubAgg() throws Ex .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(Terms.Order.aggregation("stats.sum_of_squares", asc)) + .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) .subAggregation(extendedStats("stats").field("i")) .subAggregation(terms("subTerms").field("s_values").collectMode(randomFrom(SubAggCollectionMode.values())))) .execute().actionGet(); @@ -1464,46 +1501,46 @@ public void testSingleValuedFieldOrderedByStatsAggAscWithTermsSubAgg() throws Ex public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception { String[] expectedKeys = new String[] { 
"val1", "val2", "val4", "val3", "val7", "val6", "val5" }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(false)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(false)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndTermsAsc() throws Exception { String[] expectedKeys = new String[] { "val1", "val2", "val3", "val4", "val5", "val6", "val7" }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationDescAndTermsAsc() throws Exception { String[] expectedKeys = new String[] { "val5", "val6", "val7", "val3", "val4", "val2", "val1" }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", false), Terms.Order.term(true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", false), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception { String[] expectedKeys = new String[] { "val6", "val7", "val3", "val4", "val5", "val1", "val2" }; - assertMultiSortResponse(expectedKeys, Terms.Order.count(true), Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.count(true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception { String[] expectedKeys = new String[] { "val6", "val7", "val3", "val5", "val4", "val1", "val2" }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("sum_d", true), Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("sum_d", true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedByThreeCriteria() throws Exception { String[] expectedKeys = new String[] { "val2", "val1", "val4", "val5", "val3", "val6", "val7" }; - assertMultiSortResponse(expectedKeys, Terms.Order.count(false), Terms.Order.aggregation("sum_d", false), - Terms.Order.aggregation("avg_l", false)); + assertMultiSortResponse(expectedKeys, BucketOrder.count(false), BucketOrder.aggregation("sum_d", false), + BucketOrder.aggregation("avg_l", false)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { String[] expectedKeys = new String[] { "val1", "val2", "val3", "val4", "val5", "val6", "val7" }; - assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true)); + assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true)); } - private void assertMultiSortResponse(String[] expectedKeys, Terms.Order... order) { + private void assertMultiSortResponse(String[] expectedKeys, BucketOrder... 
order) { SearchResponse response = client() .prepareSearch("sort_idx") .addAggregation( terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.compound(order)) + .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.compound(order)) .subAggregation(avg("avg_l").field("l")).subAggregation(sum("sum_d").field("d"))).execute().actionGet(); assertSearchResponse(response); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java index 9ed32ca2e7b34..204de33440d7b 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -27,8 +27,8 @@ import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -347,7 +347,7 @@ public void testStringValueFieldDocCountAsc() throws Exception { .field(STRING_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.count(true)) + .order(BucketOrder.count(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -360,7 +360,7 @@ public void testStringValueFieldDocCountAsc() throws Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.count(true)) + .order(BucketOrder.count(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -378,7 +378,7 @@ public void testStringValueFieldTermSortAsc() throws Exception { .field(STRING_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.term(true)) + .order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -391,7 +391,7 @@ public void testStringValueFieldTermSortAsc() throws Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.term(true)) + .order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -409,7 +409,7 @@ public void testStringValueFieldTermSortDesc() throws Exception { .field(STRING_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.term(false)) + .order(BucketOrder.key(false)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -422,7 +422,7 @@ public void testStringValueFieldTermSortDesc() throws Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.term(false)) + .order(BucketOrder.key(false)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -440,7 +440,7 @@ public void testStringValueFieldSubAggAsc() throws Exception { .field(STRING_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.aggregation("sortAgg", true)) + .order(BucketOrder.aggregation("sortAgg", true)) 
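The multi-sort tests above pass several criteria at once; BucketOrder.compound combines them so that each later criterion only breaks ties left by the earlier ones. A hedged sketch that reuses the sub-aggregation names from those tests inside an invented holder class:

    import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
    import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
    import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;

    import org.elasticsearch.search.aggregations.BucketOrder;
    import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;

    public class CompoundOrderSketch {
        // Buckets compared by descending count first, then by "sum_d", then by "avg_l".
        static TermsAggregationBuilder threeCriteria() {
            return terms("terms")
                    .field("s_value")                                // illustrative field name
                    .order(BucketOrder.compound(
                            BucketOrder.count(false),
                            BucketOrder.aggregation("sum_d", false),
                            BucketOrder.aggregation("avg_l", false)))
                    .subAggregation(avg("avg_l").field("l"))
                    .subAggregation(sum("sum_d").field("d"));
        }
    }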
.collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -454,7 +454,7 @@ public void testStringValueFieldSubAggAsc() throws Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.aggregation("sortAgg", true)) + .order(BucketOrder.aggregation("sortAgg", true)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -473,7 +473,7 @@ public void testStringValueFieldSubAggDesc() throws Exception { .field(STRING_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.aggregation("sortAgg", false)) + .order(BucketOrder.aggregation("sortAgg", false)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -487,7 +487,7 @@ public void testStringValueFieldSubAggDesc() throws Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.aggregation("sortAgg", false)) + .order(BucketOrder.aggregation("sortAgg", false)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -583,7 +583,7 @@ public void testLongValueFieldDocCountAsc() throws Exception { .field(LONG_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.count(true)) + .order(BucketOrder.count(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -596,7 +596,7 @@ public void testLongValueFieldDocCountAsc() throws Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.count(true)) + .order(BucketOrder.count(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -614,7 +614,7 @@ public void testLongValueFieldTermSortAsc() throws Exception { .field(LONG_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.term(true)) + .order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -627,7 +627,7 @@ public void testLongValueFieldTermSortAsc() throws Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.term(true)) + .order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -645,7 +645,7 @@ public void testLongValueFieldTermSortDesc() throws Exception { .field(LONG_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.term(false)) + .order(BucketOrder.key(false)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -658,7 +658,7 @@ public void testLongValueFieldTermSortDesc() throws Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.term(false)) + .order(BucketOrder.key(false)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -676,7 +676,7 @@ public void testLongValueFieldSubAggAsc() throws Exception { .field(LONG_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.aggregation("sortAgg", true)) + .order(BucketOrder.aggregation("sortAgg", true)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -690,7 +690,7 @@ public void testLongValueFieldSubAggAsc() throws 
Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.aggregation("sortAgg", true)) + .order(BucketOrder.aggregation("sortAgg", true)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -709,7 +709,7 @@ public void testLongValueFieldSubAggDesc() throws Exception { .field(LONG_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.aggregation("sortAgg", false)) + .order(BucketOrder.aggregation("sortAgg", false)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME))) .execute().actionGet(); @@ -723,7 +723,7 @@ public void testLongValueFieldSubAggDesc() throws Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.aggregation("sortAgg", false)) + .order(BucketOrder.aggregation("sortAgg", false)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME))) .execute().actionGet(); @@ -819,7 +819,7 @@ public void testDoubleValueFieldDocCountAsc() throws Exception { .field(DOUBLE_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.count(true)) + .order(BucketOrder.count(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -832,7 +832,7 @@ public void testDoubleValueFieldDocCountAsc() throws Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.count(true)) + .order(BucketOrder.count(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -850,7 +850,7 @@ public void testDoubleValueFieldTermSortAsc() throws Exception { .field(DOUBLE_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.term(true)) + .order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -863,7 +863,7 @@ public void testDoubleValueFieldTermSortAsc() throws Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.term(true)) + .order(BucketOrder.key(true)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -881,7 +881,7 @@ public void testDoubleValueFieldTermSortDesc() throws Exception { .field(DOUBLE_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.term(false)) + .order(BucketOrder.key(false)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -894,7 +894,7 @@ public void testDoubleValueFieldTermSortDesc() throws Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.term(false)) + .order(BucketOrder.key(false)) .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); @@ -912,7 +912,7 @@ public void testDoubleValueFieldSubAggAsc() throws Exception { .field(DOUBLE_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.aggregation("sortAgg", true)) + .order(BucketOrder.aggregation("sortAgg", true)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -926,7 +926,7 @@ public void testDoubleValueFieldSubAggAsc() throws Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.aggregation("sortAgg", true)) + .order(BucketOrder.aggregation("sortAgg", true)) 
.collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -945,7 +945,7 @@ public void testDoubleValueFieldSubAggDesc() throws Exception { .field(DOUBLE_FIELD_NAME) .showTermDocCountError(true) .size(10000).shardSize(10000) - .order(Order.aggregation("sortAgg", false)) + .order(BucketOrder.aggregation("sortAgg", false)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -959,7 +959,7 @@ public void testDoubleValueFieldSubAggDesc() throws Exception { .showTermDocCountError(true) .size(size) .shardSize(shardSize) - .order(Order.aggregation("sortAgg", false)) + .order(BucketOrder.aggregation("sortAgg", false)) .collectMode(randomFrom(SubAggCollectionMode.values())) .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) .execute().actionGet(); @@ -968,7 +968,7 @@ public void testDoubleValueFieldSubAggDesc() throws Exception { assertUnboundedDocCountError(size, accurateResponse, testResponse); } - + /** * Test a case where we know exactly how many of each term is on each shard * so we know the exact error value for each term. To do this we search over @@ -984,39 +984,39 @@ public void testFixedDocs() throws Exception { .collectMode(randomFrom(SubAggCollectionMode.values()))) .execute().actionGet(); assertSearchResponse(response); - + Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getDocCountError(), equalTo(46L)); List buckets = terms.getBuckets(); assertThat(buckets, notNullValue()); assertThat(buckets.size(), equalTo(5)); - + Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKey(), equalTo("A")); assertThat(bucket.getDocCount(), equalTo(100L)); assertThat(bucket.getDocCountError(), equalTo(0L)); - + bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKey(), equalTo("Z")); assertThat(bucket.getDocCount(), equalTo(52L)); assertThat(bucket.getDocCountError(), equalTo(2L)); - + bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKey(), equalTo("C")); assertThat(bucket.getDocCount(), equalTo(50L)); assertThat(bucket.getDocCountError(), equalTo(15L)); - - + + bucket = buckets.get(3); assertThat(bucket, notNullValue()); assertThat(bucket.getKey(), equalTo("G")); assertThat(bucket.getDocCount(), equalTo(45L)); assertThat(bucket.getDocCountError(), equalTo(2L)); - + bucket = buckets.get(4); assertThat(bucket, notNullValue()); assertThat(bucket.getKey(), equalTo("B")); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java index f7e3d9a61b598..c094c245dac03 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregatorFactory; import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -129,7 +130,7 @@ public void testShardMinDocCountTermsTest() throws Exception { // 
first, check that indeed when not setting the shardMinDocCount parameter 0 terms are returned SearchResponse response = client().prepareSearch(index) .addAggregation( - terms("myTerms").field("text").minDocCount(2).size(2).executionHint(randomExecutionHint()).order(Terms.Order.term(true)) + terms("myTerms").field("text").minDocCount(2).size(2).executionHint(randomExecutionHint()).order(BucketOrder.key(true)) ) .execute() .actionGet(); @@ -140,7 +141,7 @@ public void testShardMinDocCountTermsTest() throws Exception { response = client().prepareSearch(index) .addAggregation( - terms("myTerms").field("text").minDocCount(2).shardMinDocCount(2).size(2).executionHint(randomExecutionHint()).order(Terms.Order.term(true)) + terms("myTerms").field("text").minDocCount(2).shardMinDocCount(2).size(2).executionHint(randomExecutionHint()).order(BucketOrder.key(true)) ) .execute() .actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java index 42f6ef78f4b17..73c275cfd235d 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsTests.java @@ -23,10 +23,10 @@ import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.ArrayList; import java.util.List; @@ -155,8 +155,12 @@ protected TermsAggregationBuilder createTestAggregatorBuilder() { factory.includeExclude(incExc); } if (randomBoolean()) { - List order = randomOrder(); - factory.order(order); + List order = randomOrder(); + if(order.size() == 1 && randomBoolean()) { + factory.order(order.get(0)); + } else { + factory.order(order); + } } if (randomBoolean()) { factory.showTermDocCountError(randomBoolean()); @@ -164,20 +168,20 @@ protected TermsAggregationBuilder createTestAggregatorBuilder() { return factory; } - private List randomOrder() { - List orders = new ArrayList<>(); + private List randomOrder() { + List orders = new ArrayList<>(); switch (randomInt(4)) { case 0: - orders.add(Terms.Order.term(randomBoolean())); + orders.add(BucketOrder.key(randomBoolean())); break; case 1: - orders.add(Terms.Order.count(randomBoolean())); + orders.add(BucketOrder.count(randomBoolean())); break; case 2: - orders.add(Terms.Order.aggregation(randomAlphaOfLengthBetween(3, 20), randomBoolean())); + orders.add(BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomBoolean())); break; case 3: - orders.add(Terms.Order.aggregation(randomAlphaOfLengthBetween(3, 20), randomAlphaOfLengthBetween(3, 20), randomBoolean())); + orders.add(BucketOrder.aggregation(randomAlphaOfLengthBetween(3, 20), randomAlphaOfLengthBetween(3, 20), randomBoolean())); break; case 4: int numOrders = randomIntBetween(1, 3); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java 
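The TermsTests change above exercises both order overloads on the builder: a single BucketOrder, or a List<BucketOrder>. A small sketch of the two call shapes; the class and method names are invented, and whether the list form is wrapped into a compound order internally is not something these hunks show.

    import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;

    import java.util.Arrays;
    import java.util.List;

    import org.elasticsearch.search.aggregations.BucketOrder;
    import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;

    public class OrderOverloadsSketch {
        // Single criterion passed directly.
        static TermsAggregationBuilder single() {
            return terms("terms").field("key").order(BucketOrder.key(true));
        }

        // Several criteria passed as a list.
        static TermsAggregationBuilder asList() {
            List<BucketOrder> orders = Arrays.asList(BucketOrder.count(false), BucketOrder.key(true));
            return terms("terms").field("key").order(orders);
        }
    }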
b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java index aafad557bc681..17c29dc1ae609 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.InternalAggregationTestCase; import org.joda.time.DateTime; @@ -55,7 +56,7 @@ protected InternalDateHistogram createTestInstance(String name, List pipelineAggregators, Map metaData) { - Terms.Order order = Terms.Order.count(false); + BucketOrder order = BucketOrder.count(false); long minDocCount = 1; int requiredSize = 3; int shardSize = requiredSize + 2; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsTests.java index ff95984bc3206..96833091a11b2 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.ArrayList; import java.util.HashSet; @@ -37,7 +38,7 @@ public class LongTermsTests extends InternalTermsTestCase { String name, List pipelineAggregators, Map metaData) { - Terms.Order order = Terms.Order.count(false); + BucketOrder order = BucketOrder.count(false); long minDocCount = 1; int requiredSize = 3; int shardSize = requiredSize + 2; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsTests.java index 64e814bd8192a..abac0f0b03a98 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.ArrayList; import java.util.HashSet; @@ -38,7 +39,7 @@ public class StringTermsTests extends InternalTermsTestCase { String name, List pipelineAggregators, Map metaData) { - Terms.Order order = Terms.Order.count(false); + BucketOrder order = BucketOrder.count(false); long minDocCount = 1; int requiredSize = 3; int shardSize = requiredSize + 2; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 1648d8ede9fdf..7b93653fff868 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ 
b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.support.ValueType; import java.io.IOException; @@ -70,7 +71,7 @@ public void testTermsAggregator() throws Exception { TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.STRING) .executionHint(executionMode.toString()) .field("string") - .order(Terms.Order.term(true)); + .order(BucketOrder.key(true)); MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType(); fieldType.setName("string"); fieldType.setHasDocValues(true ); @@ -99,7 +100,7 @@ public void testMixLongAndDouble() throws Exception { TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.LONG) .executionHint(executionMode.toString()) .field("number") - .order(Terms.Order.term(true)); + .order(BucketOrder.key(true)); List aggs = new ArrayList<> (); int numLongs = randomIntBetween(1, 3); for (int i = 0; i < numLongs; i++) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java index c952f43eb3032..c51c0aec4bb61 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java @@ -36,8 +36,8 @@ import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.avg.Avg; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; @@ -326,7 +326,7 @@ public void testScriptMultiValuedWithParams() throws Exception { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>avg", true))) + .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>avg", true))) .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(avg("avg").field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java index 3903dd8b0bc76..7de333e8127ca 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -30,9 +30,9 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.missing.Missing; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import 
org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats.Bounds; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Collection; import java.util.Collections; @@ -595,7 +595,8 @@ public void testEmptySubAggregation() { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>extendedStats.avg", true))) + .addAggregation(terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>extendedStats.avg", true))) .subAggregation( filter("filter", termQuery("value", 100)).subAggregation(extendedStats("extendedStats").field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 5b56e6b7efbf2..586af22755cd6 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -29,11 +29,11 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Arrays; import java.util.Collection; @@ -485,7 +485,7 @@ public void testOrderBySubAggregation() { .subAggregation( percentileRanks("percentile_ranks").field("value").method(PercentilesMethod.HDR) .numberOfSignificantValueDigits(sigDigits).values(99)) - .order(Order.aggregation("percentile_ranks", "99", asc))).execute().actionGet(); + .order(BucketOrder.aggregation("percentile_ranks", "99", asc))).execute().actionGet(); assertHitCount(searchResponse, 10); @@ -506,7 +506,7 @@ public void testOrderBySubAggregation() { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>ranks.99", true))) + .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) .subAggregation(filter("filter", termQuery("value", 100)) .subAggregation(percentileRanks("ranks").method(PercentilesMethod.HDR).values(99).field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index 56fb14402ad5d..ae745e1f1ad03 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -30,11 +30,11 @@ 
import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Arrays; import java.util.Collection; @@ -474,7 +474,7 @@ public void testOrderBySubAggregation() { .method(PercentilesMethod.HDR) .numberOfSignificantValueDigits(sigDigits) .percentiles(99)) - .order(Order.aggregation("percentiles", "99", asc))).execute().actionGet(); + .order(BucketOrder.aggregation("percentiles", "99", asc))).execute().actionGet(); assertHitCount(searchResponse, 10); @@ -497,7 +497,7 @@ public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation( - terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>percentiles.99", true))) + terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) .subAggregation(filter("filter", termQuery("value", 100)) .subAggregation(percentiles("percentiles").method(PercentilesMethod.HDR).field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java index 03eb9a092372a..a192b3c4a12c1 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java @@ -29,8 +29,8 @@ import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.max.Max; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Collection; import java.util.Collections; @@ -328,7 +328,7 @@ public void testScriptMultiValuedWithParams() throws Exception { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>max", true))) + .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>max", true))) .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(max("max").field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java index cba2ba9eb97c8..7f2522c04bb50 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java @@ -29,8 +29,8 @@ import org.elasticsearch.search.aggregations.bucket.global.Global; import 
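The percentile tests above use the three-argument BucketOrder.aggregation overload, which names one metric inside a multi-value sub-aggregation (here the 99th percentile), and they apply it to a histogram rather than a terms aggregation. A minimal sketch of that shape; field and aggregation names are illustrative.

    import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
    import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles;

    import org.elasticsearch.search.aggregations.BucketOrder;
    import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;

    public class PercentileOrderSketch {
        // Histogram buckets sorted by the 99th percentile computed by the sub-aggregation.
        static HistogramAggregationBuilder orderedBy99thPercentile(boolean asc) {
            return histogram("histo")
                    .field("value")
                    .interval(2L)
                    .subAggregation(percentiles("percentiles").field("value").percentiles(99))
                    .order(BucketOrder.aggregation("percentiles", "99", asc));
        }
    }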
org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.min.Min; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Collection; import java.util.Collections; @@ -340,7 +340,7 @@ public void testScriptMultiValuedWithParams() throws Exception { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>min", true))) + .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>min", true))) .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(min("min").field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java index 9231f09396307..0fcf794ee1d83 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java @@ -32,8 +32,8 @@ import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.stats.Stats; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Collection; import java.util.Collections; @@ -447,7 +447,7 @@ public void testScriptMultiValuedWithParams() throws Exception { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>stats.avg", true))) + .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>stats.avg", true))) .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(stats("stats").field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index 227ffc7251bb3..86f59659ebc18 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -36,8 +36,8 @@ import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; @@ -325,7 +325,7 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = 
client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>sum", true))) + .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>sum", true))) .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(sum("sum").field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index f1943747ceb43..11ff1edbc537e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -30,12 +30,12 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Arrays; import java.util.Collection; @@ -435,7 +435,7 @@ public void testOrderBySubAggregation() { .addAggregation( histogram("histo").field("value").interval(2L) .subAggregation(randomCompression(percentileRanks("percentile_ranks").field("value").values(99))) - .order(Order.aggregation("percentile_ranks", "99", asc))) + .order(BucketOrder.aggregation("percentile_ranks", "99", asc))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -457,7 +457,7 @@ public void testOrderBySubAggregation() { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>ranks.99", true))) + .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) .subAggregation(filter("filter", termQuery("value", 100)) .subAggregation(percentileRanks("ranks").method(PercentilesMethod.TDIGEST).values(99).field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index 2589e9977a6c9..89c7d12c746fa 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -30,12 +30,12 @@ import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import 
org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; +import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Arrays; import java.util.Collection; @@ -419,7 +419,7 @@ public void testOrderBySubAggregation() { .addAggregation( histogram("histo").field("value").interval(2L) .subAggregation(randomCompression(percentiles("percentiles").field("value").percentiles(99))) - .order(Order.aggregation("percentiles", "99", asc))) + .order(BucketOrder.aggregation("percentiles", "99", asc))) .execute().actionGet(); assertHitCount(searchResponse, 10); @@ -442,7 +442,7 @@ public void testOrderBySubAggregation() { public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) .addAggregation( - terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>percentiles.99", true))) + terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) .subAggregation(filter("filter", termQuery("value", 100)) .subAggregation(percentiles("percentiles").method(PercentilesMethod.TDIGEST).field("value")))) .get(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index 6819fddf3e3e5..563fac1ba7df7 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -47,6 +47,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; import org.elasticsearch.search.aggregations.metrics.max.Max; import org.elasticsearch.search.aggregations.metrics.tophits.TopHits; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.rescore.RescoreBuilder; @@ -398,7 +399,7 @@ public void testBreadthFirstWithAggOrderAndScoreNeeded() throws Exception { .executionHint(randomExecutionHint()) .collectMode(SubAggCollectionMode.BREADTH_FIRST) .field(TERMS_AGGS_FIELD) - .order(Terms.Order.aggregation("max", false)) + .order(BucketOrder.aggregation("max", false)) .subAggregation(max("max").field(SORT_FIELD)) .subAggregation(topHits("hits").size(3)) ).get(); @@ -494,7 +495,7 @@ public void testSortByBucket() throws Exception { .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(TERMS_AGGS_FIELD) - .order(Terms.Order.aggregation("max_sort", false)) + .order(BucketOrder.aggregation("max_sort", false)) .subAggregation( topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)).trackScores(true) ) @@ -535,7 +536,7 @@ public void testFieldCollapsing() throws Exception { .setQuery(matchQuery("text", "term rare")) .addAggregation( terms("terms").executionHint(randomExecutionHint()).field("group") - .order(Terms.Order.aggregation("max_score", false)).subAggregation(topHits("hits").size(1)) + .order(BucketOrder.aggregation("max_score", false)).subAggregation(topHits("hits").size(1)) 
.subAggregation(max("max_score").field("value"))).get(); assertSearchResponse(response); @@ -908,7 +909,6 @@ public void testTopHitsInNested() throws Exception { histogram("dates") .field("date") .interval(5) - .order(Histogram.Order.aggregation("to-comments", true)) .subAggregation( nested("to-comments", "comments") .subAggregation(topHits("comments") diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java index 4f6ff0e32eda8..1d29518a300e6 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java @@ -24,10 +24,10 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -129,7 +129,7 @@ public void testDocCountAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -211,7 +211,7 @@ public void testMetricAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -264,7 +264,7 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -337,7 +337,7 @@ public void testNested() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index 607124ecb159d..fea143bddccfd 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -26,12 +26,12 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import 
org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats.Bounds; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucket; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -200,7 +200,7 @@ public void testDocCountAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -300,7 +300,7 @@ public void testMetricAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -362,7 +362,7 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -445,7 +445,7 @@ public void testBadSigmaAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -475,7 +475,7 @@ public void testNested() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java index 632f11f7ec73d..50b512ee1944b 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java @@ -25,11 +25,11 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -138,7 +138,7 @@ public void testDocCountAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -230,7 +230,7 @@ public void 
testMetricAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -335,7 +335,7 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -413,7 +413,7 @@ public void testNested() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java index 04fdd0c313354..33678f146dbc2 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java @@ -24,11 +24,11 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -135,7 +135,7 @@ public void testDocCountAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -227,7 +227,7 @@ public void testMetricAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -285,7 +285,7 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -363,7 +363,7 @@ public void testNested() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java index 
e23e5441431eb..62f9ad462e902 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.PercentilesBucket; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -133,7 +134,7 @@ public void testDocCountAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Terms.Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -248,7 +249,7 @@ public void testMetricAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Terms.Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -301,7 +302,7 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Terms.Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -435,7 +436,7 @@ public void testBadPercents_asSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Terms.Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -466,7 +467,7 @@ public void testNested() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Terms.Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -525,7 +526,7 @@ public void testNestedWithDecimal() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Terms.Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java index 231005f1b5b62..ff5a85f198ef4 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java @@ -24,11 +24,11 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import 
org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.StatsBucket; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -136,7 +136,7 @@ public void testDocCountAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -230,7 +230,7 @@ public void testMetricAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -289,7 +289,7 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -368,7 +368,7 @@ public void testNested() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java index 048dfac8648d4..69451435d58b5 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java @@ -24,10 +24,10 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -126,7 +126,7 @@ public void testDocCountAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) @@ -202,7 +202,7 @@ public void testMetricAsSubAgg() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -252,7 +252,7 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue) @@ -322,7 +322,7 @@ public void testNested() throws 
Exception { .addAggregation( terms("terms") .field("tag") - .order(Order.term(true)) + .order(BucketOrder.key(true)) .subAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds(minRandomValue, maxRandomValue)) diff --git a/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc index 99b871730e6d8..1fe945077fdb7 100644 --- a/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -67,3 +67,7 @@ key [2005-01-01T00:00:00.000Z], date [2005], doc_count [1] key [2007-01-01T00:00:00.000Z], date [2007], doc_count [2] key [2008-01-01T00:00:00.000Z], date [2008], doc_count [3] -------------------------------------------------- + +===== Order + +Supports the same order functionality as the <>. diff --git a/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc b/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc index 28e9cd3ecd0ff..59bb555401c5b 100644 --- a/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc +++ b/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc @@ -42,3 +42,7 @@ for (Histogram.Bucket entry : agg.getBuckets()) { logger.info("key [{}], doc_count [{}]", key, docCount); } -------------------------------------------------- + +===== Order + +Supports the same order functionality as the <>. diff --git a/docs/java-api/aggregations/bucket/terms-aggregation.asciidoc b/docs/java-api/aggregations/bucket/terms-aggregation.asciidoc index ad83faccd31c5..db584fd4cedd2 100644 --- a/docs/java-api/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/java-api/aggregations/bucket/terms-aggregation.asciidoc @@ -39,7 +39,14 @@ for (Terms.Bucket entry : genders.getBuckets()) { } -------------------------------------------------- -==== Order +===== Order + +Import bucket ordering strategy classes: + +[source,java] +-------------------------------------------------- +import org.elasticsearch.search.aggregations.BucketOrder; +-------------------------------------------------- Ordering the buckets by their `doc_count` in an ascending manner: @@ -48,7 +55,7 @@ Ordering the buckets by their `doc_count` in an ascending manner: AggregationBuilders .terms("genders") .field("gender") - .order(Terms.Order.count(true)) + .order(BucketOrder.count(true)) -------------------------------------------------- Ordering the buckets alphabetically by their terms in an ascending manner: @@ -58,7 +65,7 @@ Ordering the buckets alphabetically by their terms in an ascending manner: AggregationBuilders .terms("genders") .field("gender") - .order(Terms.Order.term(true)) + .order(BucketOrder.key(true)) -------------------------------------------------- Ordering the buckets by single value metrics sub-aggregation (identified by the aggregation name): @@ -68,7 +75,22 @@ Ordering the buckets by single value metrics sub-aggregation (identified by the AggregationBuilders .terms("genders") .field("gender") - .order(Terms.Order.aggregation("avg_height", false)) + .order(BucketOrder.aggregation("avg_height", false)) + .subAggregation( + AggregationBuilders.avg("avg_height").field("height") + ) +-------------------------------------------------- + +Ordering the buckets by multiple criteria: + +[source,java] +-------------------------------------------------- +AggregationBuilders + .terms("genders") + .field("gender") + .order(BucketOrder.compound( // in order of 
priority: + BucketOrder.aggregation("avg_height", false), // sort by sub-aggregation first + BucketOrder.count(true))) // then bucket count as a tie-breaker .subAggregation( AggregationBuilders.avg("avg_height").field("height") ) diff --git a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc index b7619b175df3d..47265a0b224cd 100644 --- a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -390,3 +390,10 @@ POST /sales/_search?size=0 // TEST[setup:sales] <1> Documents without a value in the `publish_date` field will fall into the same bucket as documents that have the value `2000-01-01`. + +==== Order + +By default the returned buckets are sorted by their `key` ascending, though the order behaviour can be controlled using +the `order` setting. Supports the same `order` functionality as the <>. + +deprecated[6.0.0, Use `_key` instead of `_time` to order buckets by their dates/keys] diff --git a/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc index de828c62aa9a7..380d06258da76 100644 --- a/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc @@ -179,120 +179,8 @@ POST /sales/_search?size=0 ==== Order -By default the returned buckets are sorted by their `key` ascending, though the order behaviour can be controlled -using the `order` setting. - -Ordering the buckets by their key - descending: - -[source,js] --------------------------------------------------- -POST /sales/_search?size=0 -{ - "aggs" : { - "prices" : { - "histogram" : { - "field" : "price", - "interval" : 50, - "order" : { "_key" : "desc" } - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[setup:sales] - -Ordering the buckets by their `doc_count` - ascending: - -[source,js] --------------------------------------------------- -POST /sales/_search?size=0 -{ - "aggs" : { - "prices" : { - "histogram" : { - "field" : "price", - "interval" : 50, - "order" : { "_count" : "asc" } - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[setup:sales] - -If the histogram aggregation has a direct metrics sub-aggregation, the latter can determine the order of the buckets: - -[source,js] --------------------------------------------------- -POST /sales/_search?size=0 -{ - "aggs" : { - "prices" : { - "histogram" : { - "field" : "price", - "interval" : 50, - "order" : { "price_stats.min" : "asc" } <1> - }, - "aggs" : { - "price_stats" : { "stats" : {"field" : "price"} } - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[setup:sales] - -<1> The `{ "price_stats.min" : asc" }` will sort the buckets based on `min` value of their `price_stats` sub-aggregation. - -It is also possible to order the buckets based on a "deeper" aggregation in the hierarchy. This is supported as long -as the aggregations path are of a single-bucket type, where the last aggregation in the path may either by a single-bucket -one or a metrics one. If it's a single-bucket type, the order will be defined by the number of docs in the bucket (i.e. 
`doc_count`), -in case it's a metrics one, the same rules as above apply (where the path must indicate the metric name to sort by in case of -a multi-value metrics aggregation, and in case of a single-value metrics aggregation the sort will be applied on that value). - -The path must be defined in the following form: - -// https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form -[source,ebnf] --------------------------------------------------- -AGG_SEPARATOR = '>' ; -METRIC_SEPARATOR = '.' ; -AGG_NAME = <the name of the aggregation> ; -METRIC = <the name of the metric (in case of multi-value metrics aggregation)> ; -PATH = <AGG_NAME> [ <AGG_SEPARATOR>, <AGG_NAME> ]* [ <METRIC_SEPARATOR>, <METRIC> ] ; --------------------------------------------------- - -[source,js] --------------------------------------------------- -POST /sales/_search?size=0 -{ - "aggs" : { - "prices" : { - "histogram" : { - "field" : "price", - "interval" : 50, - "order" : { "promoted_products>rating_stats.avg" : "desc" } <1> - }, - "aggs" : { - "promoted_products" : { - "filter" : { "term" : { "promoted" : true }}, - "aggs" : { - "rating_stats" : { "stats" : { "field" : "rating" }} - } - } - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[setup:sales] - -The above will sort the buckets based on the avg rating among the promoted products - +By default the returned buckets are sorted by their `key` ascending, though the order behaviour can be controlled using +the `order` setting. Supports the same `order` functionality as the <>. ==== Offset diff --git a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc index 0b028c1a940ce..90a5586d9e4fd 100644 --- a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc @@ -280,13 +280,14 @@ Ordering the buckets alphabetically by their terms in an ascending manner: "genres" : { "terms" : { "field" : "genre", - "order" : { "_term" : "asc" } + "order" : { "_key" : "asc" } } } } } -------------------------------------------------- +deprecated[6.0.0, Use `_key` instead of `_term` to order buckets by their term] Ordering the buckets by single value metrics sub-aggregation (identified by the aggregation name): diff --git a/docs/reference/migration/migrate_6_0/java.asciidoc b/docs/reference/migration/migrate_6_0/java.asciidoc index 5693d50852649..43feb15f84b50 100644 --- a/docs/reference/migration/migrate_6_0/java.asciidoc +++ b/docs/reference/migration/migrate_6_0/java.asciidoc @@ -26,4 +26,12 @@ When sending a request through the request builders e.g. client.prepareSearch(). be possible to call `addListener` against the returned `ListenableActionFuture`. With this change an `ActionFuture` is returned instead, which is consistent with what the `Client` methods return, hence it is not possible to associate the future with listeners. The `execute` method that accept a listener -as an argument can be used instead. \ No newline at end of file +as an argument can be used instead. + +==== `Terms.Order` and `Histogram.Order` classes replaced by `BucketOrder` + +The `terms`, `histogram`, and `date_histogram` aggregation code has been refactored to use common +code for ordering buckets. The `BucketOrder` class must be used instead of `Terms.Order` and +`Histogram.Order`. The `static` methods in the `BucketOrder` class must be called instead of directly +accessing internal order instances, e.g. `BucketOrder.count(boolean)` and `BucketOrder.aggregation(String, boolean)`. +Use `BucketOrder.key(boolean)` to order the `terms` aggregation buckets by `_term`.
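For readers updating Java Transport API code against this change, here is a minimal before/after sketch of the migration note above; the index, field, and aggregation names are illustrative and not taken from this patch:

[source,java]
--------------------------------------------------
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;

// before: .order(Terms.Order.compound(Terms.Order.aggregation("avg_height", false), Terms.Order.term(true)))
TermsAggregationBuilder genders = AggregationBuilders.terms("genders")
    .field("gender")
    .order(BucketOrder.compound(
        BucketOrder.aggregation("avg_height", false), // primary sort: sub-aggregation value, descending
        BucketOrder.key(true)))                       // tie-breaker: bucket key (the old _term), ascending
    .subAggregation(AggregationBuilders.avg("avg_height").field("height"));

// before: .order(Histogram.Order.COUNT_DESC)
HistogramAggregationBuilder prices = AggregationBuilders.histogram("prices")
    .field("price")
    .interval(50)
    .order(BucketOrder.count(false));                 // histogram buckets by doc_count, descending
--------------------------------------------------

The same `BucketOrder` values are accepted by the `terms`, `histogram`, and `date_histogram` builders, which is what allows the per-aggregation `Order` classes to be removed.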
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yaml index 63deebcd87053..4955dcfb4daa0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yaml @@ -10,6 +10,8 @@ setup: "properties": "number": "type" : "integer" + "date": + "type" : "date" - do: cluster.health: wait_for_status: green @@ -143,3 +145,64 @@ setup: - match: { aggregations.histo.buckets.3.key_as_string: "Value is 150.0" } - match: { aggregations.histo.buckets.3.doc_count: 1 } + +--- +"Deprecated _time order": + + - skip: + version: " - 5.99.99" + reason: _time order deprecated in 6.0, replaced by _key + features: "warnings" + + - do: + index: + index: test_1 + type: test + id: 1 + body: { "date" : "2016-01-01" } + + - do: + index: + index: test_1 + type: test + id: 2 + body: { "date" : "2016-01-02" } + + - do: + index: + index: test_1 + type: test + id: 3 + body: { "date" : "2016-02-01" } + + - do: + index: + index: test_1 + type: test + id: 4 + body: { "date" : "2016-03-01" } + + - do: + indices.refresh: {} + + - do: + search: + body: { "aggs" : { "histo" : { "date_histogram" : { "field" : "date", "interval" : "month", "order" : { "_time" : "desc" } } } } } + warnings: + - "Deprecated aggregation order key [_time] used, replaced by [_key]" + + - match: { hits.total: 4 } + + - length: { aggregations.histo.buckets: 3 } + + - match: { aggregations.histo.buckets.0.key_as_string: "2016-03-01T00:00:00.000Z" } + + - match: { aggregations.histo.buckets.0.doc_count: 1 } + + - match: { aggregations.histo.buckets.1.key_as_string: "2016-02-01T00:00:00.000Z" } + + - match: { aggregations.histo.buckets.1.doc_count: 1 } + + - match: { aggregations.histo.buckets.2.key_as_string: "2016-01-01T00:00:00.000Z" } + + - match: { aggregations.histo.buckets.2.doc_count: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml index c9ba94cf61521..9cc30bbcd1b45 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml @@ -747,3 +747,57 @@ setup: - match: { aggregations.number_terms.buckets.2.key: 14.6 } - match: { aggregations.number_terms.buckets.2.doc_count: 1 } + +--- +"Deprecated _term order": + + - skip: + version: " - 5.99.99" + reason: _term order deprecated in 6.0, replaced by _key + features: "warnings" + + - do: + index: + index: test_1 + type: test + id: 1 + body: { "str": "abc" } + + - do: + index: + index: test_1 + type: test + id: 2 + body: { "str": "abc" } + + - do: + index: + index: test_1 + type: test + id: 3 + body: { "str": "bcd" } + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str", "order" : { "_term" : "desc" } } } } } + warnings: + - "Deprecated aggregation order key [_term] used, replaced by [_key]" + + - match: { hits.total: 3 } + + - length: { aggregations.str_terms.buckets: 2 } + + - match: { aggregations.str_terms.buckets.0.key: "bcd" } + + - is_false: aggregations.str_terms.buckets.0.key_as_string + + - match: { aggregations.str_terms.buckets.0.doc_count: 1 } + + - match: { aggregations.str_terms.buckets.1.key: 
"abc" } + + - is_false: aggregations.str_terms.buckets.1.key_as_string + + - match: { aggregations.str_terms.buckets.1.doc_count: 2 } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java index 392dcf1542a1c..178eed2dcef5a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java @@ -51,7 +51,7 @@ public void testFromXContent() throws IOException { } } - private void assertParsedInstance(XContentType xContentType, BytesReference instanceAsBytes, T expectedInstance) + protected void assertParsedInstance(XContentType xContentType, BytesReference instanceAsBytes, T expectedInstance) throws IOException { XContentParser parser = createParser(XContentFactory.xContent(xContentType), instanceAsBytes); @@ -61,7 +61,7 @@ private void assertParsedInstance(XContentType xContentType, BytesReference inst assertEquals(expectedInstance.hashCode(), newInstance.hashCode()); } - private T parseInstance(XContentParser parser) throws IOException { + protected T parseInstance(XContentParser parser) throws IOException { T parsedInstance = doParseInstance(parser); assertNull(parser.nextToken()); return parsedInstance; diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java index bf65a7f4bdd4a..c38efca52037a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java @@ -100,7 +100,7 @@ protected T assertSerialization(T testInstance) throws IOException { return deserializedInstance; } - private T copyInstance(T instance) throws IOException { + protected T copyInstance(T instance) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { instance.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), @@ -112,9 +112,9 @@ private T copyInstance(T instance) throws IOException { /** * Get the {@link NamedWriteableRegistry} to use when de-serializing the object. - * + * * Override this method if you need to register {@link NamedWriteable}s for the test object to de-serialize. - * + * * By default this will return a {@link NamedWriteableRegistry} with no registered {@link NamedWriteable}s */ protected NamedWriteableRegistry getNamedWriteableRegistry() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 9e6bb080b1604..cdab659bdc459 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -72,6 +72,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.env.Environment; @@ -924,18 +925,39 @@ protected final XContentBuilder shuffleXContent(XContentBuilder builder, String. 
* recursive shuffling behavior can be made by passing in the names of fields which * internally should stay untouched. */ - protected static XContentBuilder shuffleXContent(XContentParser parser, boolean prettyPrint, String... exceptFieldNames) - throws IOException { - //we need a sorted map for reproducibility, as we are going to shuffle its keys and write XContent back - Map<String, Object> shuffledMap = shuffleMap((LinkedHashMap<String, Object>)parser.mapOrdered(), - new HashSet<>(Arrays.asList(exceptFieldNames))); + public XContentBuilder shuffleXContent(XContentParser parser, boolean prettyPrint, String... exceptFieldNames) throws IOException { XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType()); if (prettyPrint) { xContentBuilder.prettyPrint(); } + Token token = parser.currentToken() == null ? parser.nextToken() : parser.currentToken(); + if (token == Token.START_ARRAY) { + List<Object> shuffledList = shuffleList(parser.listOrderedMap(), new HashSet<>(Arrays.asList(exceptFieldNames))); + return xContentBuilder.value(shuffledList); + } + //we need a sorted map for reproducibility, as we are going to shuffle its keys and write XContent back + Map<String, Object> shuffledMap = shuffleMap((LinkedHashMap<String, Object>)parser.mapOrdered(), + new HashSet<>(Arrays.asList(exceptFieldNames))); return xContentBuilder.map(shuffledMap); } + // shuffle fields of objects in the list, but not the list itself + private static List<Object> shuffleList(List<Object> list, Set<String> exceptFields) { + List<Object> targetList = new ArrayList<>(); + for(Object value : list) { + if (value instanceof Map) { + @SuppressWarnings("unchecked") + LinkedHashMap<String, Object> valueMap = (LinkedHashMap<String, Object>) value; + targetList.add(shuffleMap(valueMap, exceptFields)); + } else if(value instanceof List) { + targetList.add(shuffleList((List) value, exceptFields)); + } else { + targetList.add(value); + } + } + return targetList; + } + public static LinkedHashMap<String, Object> shuffleMap(LinkedHashMap<String, Object> map, Set<String> exceptFields) { List<String> keys = new ArrayList<>(map.keySet()); LinkedHashMap<String, Object> targetMap = new LinkedHashMap<>(); @@ -946,6 +968,8 @@ public static LinkedHashMap shuffleMap(LinkedHashMap<String, Object> valueMap = (LinkedHashMap<String, Object>) value; targetMap.put(key, shuffleMap(valueMap, exceptFields)); + } else if(value instanceof List && exceptFields.contains(key) == false) { + targetMap.put(key, shuffleList((List) value, exceptFields)); } else { targetMap.put(key, value); }
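The list handling added to `shuffleXContent` above boils down to one rule: map keys are shuffled recursively, but list element order is preserved, so array-valued fields such as a compound `order` array still compare equal after shuffling. Below is a small self-contained sketch of that rule; it is illustrative only, the class and variable names are made up, and it is not the `ESTestCase` implementation:

[source,java]
--------------------------------------------------
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;

// Illustrative sketch: shuffle the keys of every map encountered, keep list element order.
public class ShuffleSketch {

    private static final Random RANDOM = new Random();

    @SuppressWarnings("unchecked")
    static Object shuffle(Object value, Set<String> exceptFields) {
        if (value instanceof Map) {
            Map<String, Object> source = (Map<String, Object>) value;
            List<String> keys = new ArrayList<>(source.keySet());
            Collections.shuffle(keys, RANDOM);                     // randomize key order
            LinkedHashMap<String, Object> target = new LinkedHashMap<>();
            for (String key : keys) {
                Object child = source.get(key);
                // excepted fields keep their value untouched (no recursive shuffling)
                target.put(key, exceptFields.contains(key) ? child : shuffle(child, exceptFields));
            }
            return target;
        } else if (value instanceof List) {
            List<Object> target = new ArrayList<>();
            for (Object element : (List<Object>) value) {
                target.add(shuffle(element, exceptFields));        // element order is preserved
            }
            return target;
        }
        return value;                                              // scalars pass through unchanged
    }

    public static void main(String[] args) {
        LinkedHashMap<String, Object> byHeight = new LinkedHashMap<>();
        byHeight.put("avg_height", "desc");
        LinkedHashMap<String, Object> byCount = new LinkedHashMap<>();
        byCount.put("_count", "asc");
        LinkedHashMap<String, Object> terms = new LinkedHashMap<>();
        terms.put("field", "gender");
        terms.put("order", Arrays.asList(byHeight, byCount));      // compound order: list position matters
        System.out.println(shuffle(terms, Collections.emptySet()));
    }
}
--------------------------------------------------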