diff --git a/docs/reference/aggregations/metrics/percentile-aggregation.asciidoc b/docs/reference/aggregations/metrics/percentile-aggregation.asciidoc index f7a594dec881d..4ca9c849b9b61 100644 --- a/docs/reference/aggregations/metrics/percentile-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/percentile-aggregation.asciidoc @@ -53,13 +53,13 @@ percentiles: `[ 1, 5, 25, 50, 75, 95, 99 ]`. The response will look like this: "aggregations": { "load_time_outlier": { "values" : { - "1.0": 9.9, - "5.0": 29.500000000000004, - "25.0": 167.5, + "1.0": 5.0, + "5.0": 25.0, + "25.0": 165.0, "50.0": 445.0, - "75.0": 722.5, - "95.0": 940.5, - "99.0": 980.1000000000001 + "75.0": 725.0, + "95.0": 945.0, + "99.0": 985.0 } } } @@ -129,15 +129,15 @@ Response: "values": [ { "key": 1.0, - "value": 9.9 + "value": 5.0 }, { "key": 5.0, - "value": 29.500000000000004 + "value": 25.0 }, { "key": 25.0, - "value": 167.5 + "value": 165.0 }, { "key": 50.0, @@ -145,15 +145,15 @@ Response: }, { "key": 75.0, - "value": 722.5 + "value": 725.0 }, { "key": 95.0, - "value": 940.5 + "value": 945.0 }, { "key": 99.0, - "value": 980.1000000000001 + "value": 985.0 } ] } diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc index 31e0bf61707e3..3dff5abc52d9a 100644 --- a/docs/reference/cat.asciidoc +++ b/docs/reference/cat.asciidoc @@ -55,7 +55,7 @@ GET /_cat/master?help -------------------------------------------------- // CONSOLE -Might respond respond with: +Might respond with: [source,txt] -------------------------------------------------- @@ -66,6 +66,11 @@ node | n | node name -------------------------------------------------- // TESTRESPONSE[s/[|]/[|]/ _cat] +NOTE: `help` is not supported if any optional url parameter is used. +For example `GET _cat/shards/twitter?help` or `GET _cat/indices/twi*?help` +results in an error. Use `GET _cat/shards?help` or `GET _cat/indices?help` +instead. + [float] [[headers]] === Headers diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc index 81c4bc306b2a0..7bdac42f869a7 100644 --- a/docs/reference/docs/get.asciidoc +++ b/docs/reference/docs/get.asciidoc @@ -3,7 +3,7 @@ The get API allows to get a typed JSON document from the index based on its id. The following example gets a JSON document from an index called -twitter, under a type called tweet, with id valued 0: +twitter, under a type called _doc, with id valued 0: [source,js] -------------------------------------------------- diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index 0c75fd011b418..e172b53f1a83c 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -98,7 +98,7 @@ which returns something similar to: "translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA", "history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ", "local_checkpoint" : "-1", - "translog_generation" : "1", + "translog_generation" : "2", "max_seq_no" : "-1", "sync_id" : "AVvFY-071siAOuFGEO9P", <1> "max_unsafe_auto_id_timestamp" : "-1" diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index 0b8db376e861a..804fb1c65080a 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -184,7 +184,7 @@ The following parameters are accepted by `nested` fields: Because nested documents are indexed as separate documents, they can only be accessed within the scope of the `nested` query, the -`nested`/`reverse_nested`, or <>. 
+`nested`/`reverse_nested` aggregations, or <>. For instance, if a string field within a nested document has <> set to `offsets` to allow use of the postings diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java index 8d1c9d61a0b42..9470424b381e6 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java @@ -38,12 +38,9 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; -import java.util.LinkedList; import java.util.List; import java.util.Locale; import java.util.Optional; -import java.util.Queue; import java.util.concurrent.atomic.AtomicBoolean; public class Netty4Utils { @@ -171,7 +168,8 @@ public static void closeChannels(final Collection<Channel> channels) throws IOEx * @param cause the throwable to test */ public static void maybeDie(final Throwable cause) { - final Optional<Error> maybeError = maybeError(cause); + final Logger logger = ESLoggerFactory.getLogger(Netty4Utils.class); + final Optional<Error> maybeError = ExceptionsHelper.maybeError(cause, logger); if (maybeError.isPresent()) { /* * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many @@ -182,7 +180,6 @@ public static void maybeDie(final Throwable cause) { try { // try to log the current stack trace final String formatted = ExceptionsHelper.formatStackTrace(Thread.currentThread().getStackTrace()); - final Logger logger = ESLoggerFactory.getLogger(Netty4Utils.class); logger.error("fatal error on the network layer\n{}", formatted); } finally { new Thread( @@ -194,40 +191,4 @@ public static void maybeDie(final Throwable cause) { } }
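`maybeDie` now delegates to the shared `ExceptionsHelper.maybeError` added later in this diff, so the private copy removed just below is pure relocation. For reference, the unwrapping technique reads like the following condensed, standalone sketch; an `ArrayDeque` stands in for the `LinkedList` the real code uses, and the logging is dropped:

[source,java]
--------------------------------------------------
import java.util.ArrayDeque;
import java.util.Optional;
import java.util.Queue;

final class ErrorUnwrapSketch {

    // cap the walk so a pathological or cyclic cause chain cannot stall a
    // process that may already be in a fatal state
    static final int MAX_ITERATIONS = 1024;

    // breadth-first search of the cause/suppressed tree, returning the first
    // Error encountered, if any
    static Optional<Error> maybeError(final Throwable cause) {
        if (cause instanceof Error) {
            return Optional.of((Error) cause); // early exit on a direct hit
        }
        final Queue<Throwable> queue = new ArrayDeque<>();
        queue.add(cause);
        int iterations = 0;
        while (queue.isEmpty() == false) {
            if (++iterations > MAX_ITERATIONS) {
                break; // the real implementation logs a warning here
            }
            final Throwable current = queue.remove();
            if (current instanceof Error) {
                return Optional.of((Error) current);
            }
            for (final Throwable suppressed : current.getSuppressed()) {
                queue.add(suppressed);
            }
            if (current.getCause() != null) {
                queue.add(current.getCause());
            }
        }
        return Optional.empty();
    }

    public static void main(final String[] args) {
        // Netty often wraps a fatal error, for example in a DecoderException
        final Exception wrapped = new Exception(new RuntimeException(new OutOfMemoryError()));
        System.out.println(maybeError(wrapped)); // Optional[java.lang.OutOfMemoryError]
    }
}
--------------------------------------------------

The iteration cap is what the removed `testMaybeError` below exercises: a chain wrapped deeper than `MAX_ITERATIONS` must come back empty instead of stalling the dying thread.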
- static final int MAX_ITERATIONS = 1024; - - /** - * Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable. - * - * @param cause the root throwable - * - * @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable - */ - static Optional<Error> maybeError(final Throwable cause) { - // early terminate if the cause is already an error - if (cause instanceof Error) { - return Optional.of((Error) cause); - } - - final Queue<Throwable> queue = new LinkedList<>(); - queue.add(cause); - int iterations = 0; - while (!queue.isEmpty()) { - iterations++; - if (iterations > MAX_ITERATIONS) { - ESLoggerFactory.getLogger(Netty4Utils.class).warn("giving up looking for fatal errors on the network layer", cause); - break; - } - final Throwable current = queue.remove(); - if (current instanceof Error) { - return Optional.of((Error) current); - } - Collections.addAll(queue, current.getSuppressed()); - if (current.getCause() != null) { - queue.add(current.getCause()); - } - } - return Optional.empty(); - } - } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4UtilsTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4UtilsTests.java index 43be6f0efdda0..8372a8540b8be 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4UtilsTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4UtilsTests.java @@ -22,7 +22,6 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.CompositeByteBuf; import io.netty.buffer.Unpooled; -import io.netty.handler.codec.DecoderException; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.AbstractBytesReferenceTestCase; import org.elasticsearch.common.bytes.BytesArray; @@ -33,9 +32,6 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.Optional; - -import static org.hamcrest.CoreMatchers.equalTo; public class Netty4UtilsTests extends ESTestCase { @@ -79,60 +75,6 @@ public void testToChannelBuffer() throws IOException { assertArrayEquals(BytesReference.toBytes(ref), BytesReference.toBytes(bytesReference)); } - public void testMaybeError() { - final Error outOfMemoryError = new OutOfMemoryError(); - assertError(outOfMemoryError, outOfMemoryError); - - final DecoderException decoderException = new DecoderException(outOfMemoryError); - assertError(decoderException, outOfMemoryError); - - final Exception e = new Exception(); - e.addSuppressed(decoderException); - assertError(e, outOfMemoryError); - - final int depth = randomIntBetween(1, 16); - Throwable cause = new Exception(); - boolean fatal = false; - Error error = null; - for (int i = 0; i < depth; i++) { - final int length = randomIntBetween(1, 4); - for (int j = 0; j < length; j++) { - if (!fatal && rarely()) { - error = new Error(); - cause.addSuppressed(error); - fatal = true; - } else { - cause.addSuppressed(new Exception()); - } - } - if (!fatal && rarely()) { - cause = error = new Error(cause); - fatal = true; - } else { - cause = new Exception(cause); - } - } - if (fatal) { - assertError(cause, error); - } else { - assertFalse(Netty4Utils.maybeError(cause).isPresent()); - } - - assertFalse(Netty4Utils.maybeError(new Exception(new DecoderException())).isPresent()); - - Throwable chain = outOfMemoryError; - for (int i = 0; i < Netty4Utils.MAX_ITERATIONS; i++) { - chain = new Exception(chain); - } - assertFalse(Netty4Utils.maybeError(chain).isPresent()); - } - - private void assertError(final Throwable cause, final Error error) { - final Optional<Error> maybeError = 
Netty4Utils.maybeError(cause); - assertTrue(maybeError.isPresent()); - assertThat(maybeError.get(), equalTo(error)); - } - private BytesReference getRandomizedBytesReference(int length) throws IOException { // we know bytes stream output always creates a paged bytes reference, we use it to create randomized content ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml index f5a9469f357fb..83521074c92ac 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml @@ -15,7 +15,7 @@ setup: - do: indices.stats: metric: [ translog ] - - set: { indices.test.primaries.translog.size_in_bytes: empty_size } + - set: { indices.test.primaries.translog.size_in_bytes: creation_size } - do: index: @@ -27,9 +27,11 @@ setup: - do: indices.stats: metric: [ translog ] - - gt: { indices.test.primaries.translog.size_in_bytes: $empty_size } + - gt: { indices.test.primaries.translog.size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.operations: 1 } - - gt: { indices.test.primaries.translog.uncommitted_size_in_bytes: $empty_size } +# we can't check this yet as creation size will contain two empty translog generations. A single +# non empty generation with one op may be smaller or larger than that. +# - gt: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.uncommitted_operations: 1 } - do: @@ -39,9 +41,10 @@ setup: - do: indices.stats: metric: [ translog ] - - gt: { indices.test.primaries.translog.size_in_bytes: $empty_size } + - gt: { indices.test.primaries.translog.size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.operations: 1 } - - match: { indices.test.primaries.translog.uncommitted_size_in_bytes: $empty_size } + ## creation translog size has some overhead due to an initial empty generation that will be trimmed later + - lt: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.uncommitted_operations: 0 } - do: @@ -59,7 +62,8 @@ setup: - do: indices.stats: metric: [ translog ] - - match: { indices.test.primaries.translog.size_in_bytes: $empty_size } + ## creation translog size has some overhead due to an initial empty generation that will be trimmed later + - lte: { indices.test.primaries.translog.size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.operations: 0 } - - match: { indices.test.primaries.translog.uncommitted_size_in_bytes: $empty_size } + - lte: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.uncommitted_operations: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml index 0a941f0eca542..4413a7a5c7db1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml @@ -65,21 +65,21 @@ setup: - match: { hits.total: 4 } - length: { hits.hits: 4 } - - match: 
{ aggregations.percentiles_int.values.1\.0: 2.5 } - - match: { aggregations.percentiles_int.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_int.values.25\.0: 38.5 } + - match: { aggregations.percentiles_int.values.1\.0: 1.0 } + - match: { aggregations.percentiles_int.values.5\.0: 1.0 } + - match: { aggregations.percentiles_int.values.25\.0: 26.0 } - match: { aggregations.percentiles_int.values.50\.0: 76.0 } - - match: { aggregations.percentiles_int.values.75\.0: 113.5 } - - match: { aggregations.percentiles_int.values.95\.0: 143.49999999999997 } - - match: { aggregations.percentiles_int.values.99\.0: 149.5 } + - match: { aggregations.percentiles_int.values.75\.0: 126.0 } + - match: { aggregations.percentiles_int.values.95\.0: 151.0 } + - match: { aggregations.percentiles_int.values.99\.0: 151.0 } - - match: { aggregations.percentiles_double.values.1\.0: 2.5 } - - match: { aggregations.percentiles_double.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_double.values.25\.0: 38.5 } + - match: { aggregations.percentiles_double.values.1\.0: 1.0 } + - match: { aggregations.percentiles_double.values.5\.0: 1.0 } + - match: { aggregations.percentiles_double.values.25\.0: 26.0 } - match: { aggregations.percentiles_double.values.50\.0: 76.0 } - - match: { aggregations.percentiles_double.values.75\.0: 113.5 } - - match: { aggregations.percentiles_double.values.95\.0: 143.49999999999997 } - - match: { aggregations.percentiles_double.values.99\.0: 149.5 } + - match: { aggregations.percentiles_double.values.75\.0: 126.0 } + - match: { aggregations.percentiles_double.values.95\.0: 151.0 } + - match: { aggregations.percentiles_double.values.99\.0: 151.0 } - do: search: @@ -100,21 +100,21 @@ setup: - match: { hits.total: 4 } - length: { hits.hits: 4 } - - match: { aggregations.percentiles_int.values.1\.0: 2.5 } - - match: { aggregations.percentiles_int.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_int.values.25\.0: 38.5 } + - match: { aggregations.percentiles_int.values.1\.0: 1.0 } + - match: { aggregations.percentiles_int.values.5\.0: 1.0 } + - match: { aggregations.percentiles_int.values.25\.0: 26.0 } - match: { aggregations.percentiles_int.values.50\.0: 76.0 } - - match: { aggregations.percentiles_int.values.75\.0: 113.5 } - - match: { aggregations.percentiles_int.values.95\.0: 143.49999999999997 } - - match: { aggregations.percentiles_int.values.99\.0: 149.5 } + - match: { aggregations.percentiles_int.values.75\.0: 126.0 } + - match: { aggregations.percentiles_int.values.95\.0: 151.0 } + - match: { aggregations.percentiles_int.values.99\.0: 151.0 } - - match: { aggregations.percentiles_double.values.1\.0: 2.5 } - - match: { aggregations.percentiles_double.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_double.values.25\.0: 38.5 } + - match: { aggregations.percentiles_double.values.1\.0: 1.0 } + - match: { aggregations.percentiles_double.values.5\.0: 1.0 } + - match: { aggregations.percentiles_double.values.25\.0: 26.0 } - match: { aggregations.percentiles_double.values.50\.0: 76.0 } - - match: { aggregations.percentiles_double.values.75\.0: 113.5 } - - match: { aggregations.percentiles_double.values.95\.0: 143.49999999999997 } - - match: { aggregations.percentiles_double.values.99\.0: 149.5 } + - match: { aggregations.percentiles_double.values.75\.0: 126.0 } + - match: { aggregations.percentiles_double.values.95\.0: 151.0 } + - match: { aggregations.percentiles_double.values.99\.0: 151.0 } --- @@ -135,21 +135,21 
@@ setup: - match: { hits.total: 4 } - length: { hits.hits: 0 } - - match: { aggregations.percentiles_int.values.1\.0: 2.5 } - - match: { aggregations.percentiles_int.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_int.values.25\.0: 38.5 } + - match: { aggregations.percentiles_int.values.1\.0: 1.0 } + - match: { aggregations.percentiles_int.values.5\.0: 1.0 } + - match: { aggregations.percentiles_int.values.25\.0: 26.0 } - match: { aggregations.percentiles_int.values.50\.0: 76.0 } - - match: { aggregations.percentiles_int.values.75\.0: 113.5 } - - match: { aggregations.percentiles_int.values.95\.0: 143.49999999999997 } - - match: { aggregations.percentiles_int.values.99\.0: 149.5 } + - match: { aggregations.percentiles_int.values.75\.0: 126.0 } + - match: { aggregations.percentiles_int.values.95\.0: 151.0 } + - match: { aggregations.percentiles_int.values.99\.0: 151.0 } - - match: { aggregations.percentiles_double.values.1\.0: 2.5 } - - match: { aggregations.percentiles_double.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_double.values.25\.0: 38.5 } + - match: { aggregations.percentiles_double.values.1\.0: 1.0 } + - match: { aggregations.percentiles_double.values.5\.0: 1.0 } + - match: { aggregations.percentiles_double.values.25\.0: 26.0 } - match: { aggregations.percentiles_double.values.50\.0: 76.0 } - - match: { aggregations.percentiles_double.values.75\.0: 113.5 } - - match: { aggregations.percentiles_double.values.95\.0: 143.49999999999997 } - - match: { aggregations.percentiles_double.values.99\.0: 149.5 } + - match: { aggregations.percentiles_double.values.75\.0: 126.0 } + - match: { aggregations.percentiles_double.values.95\.0: 151.0 } + - match: { aggregations.percentiles_double.values.99\.0: 151.0 } @@ -176,21 +176,21 @@ setup: - match: { hits.total: 3 } - length: { hits.hits: 3 } - - match: { aggregations.percentiles_int.values.1\.0: 52.0 } - - match: { aggregations.percentiles_int.values.5\.0: 56.0 } - - match: { aggregations.percentiles_int.values.25\.0: 76.0 } + - match: { aggregations.percentiles_int.values.1\.0: 51.0 } + - match: { aggregations.percentiles_int.values.5\.0: 51.0 } + - match: { aggregations.percentiles_int.values.25\.0: 63.5 } - match: { aggregations.percentiles_int.values.50\.0: 101.0 } - - match: { aggregations.percentiles_int.values.75\.0: 126.0 } - - match: { aggregations.percentiles_int.values.95\.0: 146.0 } - - match: { aggregations.percentiles_int.values.99\.0: 150.0 } + - match: { aggregations.percentiles_int.values.75\.0: 138.5 } + - match: { aggregations.percentiles_int.values.95\.0: 151.0 } + - match: { aggregations.percentiles_int.values.99\.0: 151.0 } - - match: { aggregations.percentiles_double.values.1\.0: 52.0 } - - match: { aggregations.percentiles_double.values.5\.0: 56.0 } - - match: { aggregations.percentiles_double.values.25\.0: 76.0 } + - match: { aggregations.percentiles_double.values.1\.0: 51.0 } + - match: { aggregations.percentiles_double.values.5\.0: 51.0 } + - match: { aggregations.percentiles_double.values.25\.0: 63.5 } - match: { aggregations.percentiles_double.values.50\.0: 101.0 } - - match: { aggregations.percentiles_double.values.75\.0: 126.0 } - - match: { aggregations.percentiles_double.values.95\.0: 146.0 } - - match: { aggregations.percentiles_double.values.99\.0: 150.0 } + - match: { aggregations.percentiles_double.values.75\.0: 138.5 } + - match: { aggregations.percentiles_double.values.95\.0: 151.0 } + - match: { aggregations.percentiles_double.values.99\.0: 151.0 } --- 
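The wholesale change of expected values in this file tracks the t-digest upgrade in `server/build.gradle` further down. Version 3.2 computes small-sample quantiles as exact midpoints between neighboring values and clamps at the observed min and max. A hedged sketch of the underlying library call; the four indexed values are inferred to be 1, 51, 101, and 151 from the results above, and compression 100 is the aggregation's default:

[source,java]
--------------------------------------------------
import com.tdunning.math.stats.TDigest;

public class TDigestQuantileSketch {
    public static void main(final String[] args) {
        // AVLTreeDigest is the implementation Elasticsearch builds on
        final TDigest digest = TDigest.createAvlTreeDigest(100);
        for (final double value : new double[] {1, 51, 101, 151}) {
            digest.add(value);
        }
        System.out.println(digest.quantile(0.25)); // 26.0  = (1 + 51) / 2
        System.out.println(digest.quantile(0.50)); // 76.0  = (51 + 101) / 2
        System.out.println(digest.quantile(0.99)); // 151.0, clamped to the max
    }
}
--------------------------------------------------

The printed values are taken from the updated expectations in this file, not re-derived from the library.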
"Missing field with missing param": @@ -248,13 +248,13 @@ setup: - match: { aggregations.percentiles_int.meta.foo: "bar" } - - match: { aggregations.percentiles_int.values.1\.0: 2.5 } - - match: { aggregations.percentiles_int.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_int.values.25\.0: 38.5 } + - match: { aggregations.percentiles_int.values.1\.0: 1.0 } + - match: { aggregations.percentiles_int.values.5\.0: 1.0 } + - match: { aggregations.percentiles_int.values.25\.0: 26.0 } - match: { aggregations.percentiles_int.values.50\.0: 76.0 } - - match: { aggregations.percentiles_int.values.75\.0: 113.5 } - - match: { aggregations.percentiles_int.values.95\.0: 143.49999999999997 } - - match: { aggregations.percentiles_int.values.99\.0: 149.5 } + - match: { aggregations.percentiles_int.values.75\.0: 126.0 } + - match: { aggregations.percentiles_int.values.95\.0: 151.0 } + - match: { aggregations.percentiles_int.values.99\.0: 151.0 } --- "Invalid params test": @@ -329,12 +329,12 @@ setup: - match: { hits.total: 4 } - length: { hits.hits: 4 } - - match: { aggregations.percentiles_int.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_int.values.25\.0: 38.5 } - - match: { aggregations.percentiles_int.values.50\.0: 76.0 } + - match: { aggregations.percentiles_int.values.5\.0: 1.0 } + - match: { aggregations.percentiles_int.values.25\.0: 26.0 } + - match: { aggregations.percentiles_int.values.50\.0: 76.0 } - - match: { aggregations.percentiles_double.values.5\.0: 8.500000000000002 } - - match: { aggregations.percentiles_double.values.25\.0: 38.5 } + - match: { aggregations.percentiles_double.values.5\.0: 1.0 } + - match: { aggregations.percentiles_double.values.25\.0: 26.0 } - match: { aggregations.percentiles_double.values.50\.0: 76.0 } --- @@ -355,9 +355,9 @@ setup: - length: { hits.hits: 4 } - match: { aggregations.percentiles_int.values.0.key: 5.0 } - - match: { aggregations.percentiles_int.values.0.value: 8.500000000000002 } + - match: { aggregations.percentiles_int.values.0.value: 1.0 } - match: { aggregations.percentiles_int.values.1.key: 25.0 } - - match: { aggregations.percentiles_int.values.1.value: 38.5 } + - match: { aggregations.percentiles_int.values.1.value: 26.0 } - match: { aggregations.percentiles_int.values.2.key: 50.0 } - match: { aggregations.percentiles_int.values.2.value: 76.0 } diff --git a/server/build.gradle b/server/build.gradle index 9ec3d73e3cc67..7b30f57d885e8 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -99,7 +99,7 @@ dependencies { compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" // percentiles aggregation - compile 'com.tdunning:t-digest:3.0' + compile 'com.tdunning:t-digest:3.2' // precentil ranks aggregation compile 'org.hdrhistogram:HdrHistogram:2.1.9' diff --git a/server/licenses/t-digest-3.0.jar.sha1 b/server/licenses/t-digest-3.0.jar.sha1 deleted file mode 100644 index ce2f2e2f04098..0000000000000 --- a/server/licenses/t-digest-3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -84ccf145ac2215e6bfa63baa3101c0af41017cfc \ No newline at end of file diff --git a/server/licenses/t-digest-3.2.jar.sha1 b/server/licenses/t-digest-3.2.jar.sha1 new file mode 100644 index 0000000000000..de6e848545f38 --- /dev/null +++ b/server/licenses/t-digest-3.2.jar.sha1 @@ -0,0 +1 @@ +2ab94758b0276a8a26102adf8d528cf6d0567b9a \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index 
902bcee63fb1e..05ac4d942b35e 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -34,8 +34,12 @@ import java.io.StringWriter; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; +import java.util.LinkedList; import java.util.List; +import java.util.Optional; +import java.util.Queue; import java.util.Set; import java.util.stream.Collectors; @@ -128,6 +132,42 @@ public static String formatStackTrace(final StackTraceElement[] stackTrace) { return Arrays.stream(stackTrace).skip(1).map(e -> "\tat " + e).collect(Collectors.joining("\n")); } + static final int MAX_ITERATIONS = 1024; + + /** + * Unwrap the specified throwable looking for any suppressed errors or errors as a root cause of the specified throwable. + * + * @param cause the root throwable + * + * @return an optional error if one is found suppressed or a root cause in the tree rooted at the specified throwable + */ + public static Optional<Error> maybeError(final Throwable cause, final Logger logger) { + // early terminate if the cause is already an error + if (cause instanceof Error) { + return Optional.of((Error) cause); + } + + final Queue<Throwable> queue = new LinkedList<>(); + queue.add(cause); + int iterations = 0; + while (!queue.isEmpty()) { + iterations++; + if (iterations > MAX_ITERATIONS) { + logger.warn("giving up looking for fatal errors", cause); + break; + } + final Throwable current = queue.remove(); + if (current instanceof Error) { + return Optional.of((Error) current); + } + Collections.addAll(queue, current.getSuppressed()); + if (current.getCause() != null) { + queue.add(current.getCause()); + } + } + return Optional.empty(); + } + /** * Rethrows the first exception in the list and adds all remaining to the suppressed list. * If the given list is empty no exception is thrown diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 0fad0d26c13a7..9d82d3ea69049 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -163,10 +163,6 @@ public class Version implements Comparable<Version> { + org.apache.lucene.util.Version.LATEST + "] is still set to [" + CURRENT.luceneVersion + "]"; } - private static class DeclaredVersionsHolder { - static final List<Version> DECLARED_VERSIONS = Collections.unmodifiableList(getDeclaredVersions(Version.class)); - } - public static Version readVersion(StreamInput in) throws IOException { return fromId(in.readVInt()); } @@ -406,6 +402,15 @@ public int compareTo(Version other) { return Integer.compare(this.id, other.id); } + /* + * We need the declared versions when computing the minimum compatibility version. As computing the declared versions uses reflection it + * is not cheap. Since computing the minimum compatibility version can occur often, we use this holder to compute the declared versions + * lazily once. + */ + private static class DeclaredVersionsHolder { + static final List<Version> DECLARED_VERSIONS = Collections.unmodifiableList(getDeclaredVersions(Version.class)); + } + /** * Returns the minimum compatible version based on the current * version. 
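The relocated `DeclaredVersionsHolder` above, together with its new comment, is the classic initialization-on-demand holder idiom. In isolation, with invented names, the pattern looks like this sketch:

[source,java]
--------------------------------------------------
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

final class LazyDeclaredExample {

    private static class Holder {
        // stands in for the expensive reflective scan in getDeclaredVersions
        static final List<String> DECLARED =
                Collections.unmodifiableList(Arrays.asList("5.6.0", "6.0.0", "6.2.0"));
    }

    static List<String> declared() {
        // the first call triggers Holder's class initialization; the JVM runs
        // it exactly once and safely publishes the result, with no locking here
        return Holder.DECLARED;
    }
}
--------------------------------------------------

Moving the holder below `compareTo` is purely cosmetic; the laziness comes from the JVM not initializing `Holder` until its first use.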
Ie a node needs to have at least the return version in order diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 93a0ae240635f..46c68f7151bc6 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -480,13 +480,18 @@ private void recoverFromTranslogInternal() throws IOException { private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier) throws IOException { assert openMode != null; final TranslogConfig translogConfig = engineConfig.getTranslogConfig(); - String translogUUID = null; - if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { - translogUUID = loadTranslogUUIDFromLastCommit(); - // We expect that this shard already exists, so it must already have an existing translog else something is badly wrong! - if (translogUUID == null) { - throw new IndexFormatTooOldException("translog", "translog has no generation nor a UUID - this might be an index from a previous version consider upgrading to N-1 first"); - } + final String translogUUID; + switch (openMode) { + case CREATE_INDEX_AND_TRANSLOG: + case OPEN_INDEX_CREATE_TRANSLOG: + translogUUID = + Translog.createEmptyTranslog(translogConfig.getTranslogPath(), globalCheckpointSupplier.getAsLong(), shardId); + break; + case OPEN_INDEX_AND_TRANSLOG: + translogUUID = loadTranslogUUIDFromLastCommit(); + break; + default: + throw new AssertionError("Unknown openMode " + openMode); } return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier); } diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index cd8e6ea2f7bf7..8e2cc5b210bea 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.IndexSettings; @@ -45,6 +46,7 @@ import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShardComponent; +import org.elasticsearch.index.shard.ShardId; import java.io.Closeable; import java.io.EOFException; @@ -132,23 +134,19 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC * translog file referenced by this generation. The translog creation will fail if this generation can't be opened. 
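Given the `switch` in `openTranslog` above, creating a translog is now an explicit first step instead of a `null` UUID passed to the constructor. As a sketch only, against the APIs visible in this diff, a caller bootstraps a fresh shard roughly like so; the deletion policy is taken as a parameter to avoid inventing a helper:

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.index.translog.TranslogDeletionPolicy;

class TranslogBootstrapSketch {

    // sketch of what openTranslog now does for the two CREATE_* modes
    static String bootstrap(final TranslogConfig config, final ShardId shardId,
                            final TranslogDeletionPolicy deletionPolicy) throws IOException {
        // step 1: wipe the location, write a checkpoint plus an empty
        // generation-1 file, and hand back the freshly generated UUID
        final String translogUUID = Translog.createEmptyTranslog(
                config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
        // step 2: open it; passing a null UUID is no longer supported
        try (Translog translog = new Translog(config, translogUUID, deletionPolicy,
                () -> SequenceNumbers.NO_OPS_PERFORMED)) {
            // opening rolls onto generation 2, which is why the expected
            // generations in the tests below all shift up by one
            assert translog.currentFileGeneration() == 2;
        }
        return translogUUID;
    }
}
--------------------------------------------------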
* * @param config the configuration of this translog - * @param expectedTranslogUUID the translog uuid to open, null for a new translog + * @param translogUUID the translog uuid to open, null for a new translog * @param deletionPolicy an instance of {@link TranslogDeletionPolicy} that controls when a translog file can be safely * deleted * @param globalCheckpointSupplier a supplier for the global checkpoint */ public Translog( - final TranslogConfig config, final String expectedTranslogUUID, TranslogDeletionPolicy deletionPolicy, + final TranslogConfig config, final String translogUUID, TranslogDeletionPolicy deletionPolicy, final LongSupplier globalCheckpointSupplier) throws IOException { super(config.getShardId(), config.getIndexSettings()); this.config = config; this.globalCheckpointSupplier = globalCheckpointSupplier; this.deletionPolicy = deletionPolicy; - if (expectedTranslogUUID == null) { - translogUUID = UUIDs.randomBase64UUID(); - } else { - translogUUID = expectedTranslogUUID; - } + this.translogUUID = translogUUID; bigArrays = config.getBigArrays(); ReadWriteLock rwl = new ReentrantReadWriteLock(); readLock = new ReleasableLock(rwl.readLock()); @@ -157,53 +155,38 @@ public Translog( Files.createDirectories(this.location); try { - if (expectedTranslogUUID != null) { - final Checkpoint checkpoint = readCheckpoint(location); - final Path nextTranslogFile = location.resolve(getFilename(checkpoint.generation + 1)); - final Path currentCheckpointFile = location.resolve(getCommitCheckpointFileName(checkpoint.generation)); - // this is special handling for error condition when we create a new writer but we fail to bake - // the newly written file (generation+1) into the checkpoint. This is still a valid state - // we just need to cleanup before we continue - // we hit this before and then blindly deleted the new generation even though we managed to bake it in and then hit this: - // https://discuss.elastic.co/t/cannot-recover-index-because-of-missing-tanslog-files/38336 as an example - // - // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that file exists - // if not we don't even try to clean it up and wait until we fail creating it - assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogWriter.getHeaderLength(expectedTranslogUUID) : "unexpected translog file: [" + nextTranslogFile + "]"; - if (Files.exists(currentCheckpointFile) // current checkpoint is already copied - && Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning - logger.warn("deleted previously created, but not yet committed, next generation [{}]. 
This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName()); - } - this.readers.addAll(recoverFromFiles(checkpoint)); - if (readers.isEmpty()) { - throw new IllegalStateException("at least one reader must be recovered"); - } - boolean success = false; - current = null; - try { - current = createWriter(checkpoint.generation + 1, getMinFileGeneration(), checkpoint.globalCheckpoint); - success = true; - } finally { - // we have to close all the recovered ones otherwise we leak file handles here - // for instance if we have a lot of tlog and we can't create the writer we keep on holding - // on to all the uncommitted tlog files if we don't close - if (success == false) { - IOUtils.closeWhileHandlingException(readers); - } + final Checkpoint checkpoint = readCheckpoint(location); + final Path nextTranslogFile = location.resolve(getFilename(checkpoint.generation + 1)); + final Path currentCheckpointFile = location.resolve(getCommitCheckpointFileName(checkpoint.generation)); + // this is special handling for error condition when we create a new writer but we fail to bake + // the newly written file (generation+1) into the checkpoint. This is still a valid state + // we just need to cleanup before we continue + // we hit this before and then blindly deleted the new generation even though we managed to bake it in and then hit this: + // https://discuss.elastic.co/t/cannot-recover-index-because-of-missing-tanslog-files/38336 as an example + // + // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that file exists + // if not we don't even try to clean it up and wait until we fail creating it + assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogWriter.getHeaderLength(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]"; + if (Files.exists(currentCheckpointFile) // current checkpoint is already copied + && Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning + logger.warn("deleted previously created, but not yet committed, next generation [{}]. 
This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName()); + } + this.readers.addAll(recoverFromFiles(checkpoint)); + if (readers.isEmpty()) { + throw new IllegalStateException("at least one reader must be recovered"); + } + boolean success = false; + current = null; + try { + current = createWriter(checkpoint.generation + 1, getMinFileGeneration(), checkpoint.globalCheckpoint); + success = true; + } finally { + // we have to close all the recovered ones otherwise we leak file handles here + // for instance if we have a lot of tlog and we can't create the writer we keep on holding + // on to all the uncommitted tlog files if we don't close + if (success == false) { + IOUtils.closeWhileHandlingException(readers); } - } else { - IOUtils.rm(location); - // start from whatever generation lucene points to - final long generation = deletionPolicy.getMinTranslogGenerationForRecovery(); - logger.debug("wipe translog location - creating new translog, starting generation [{}]", generation); - Files.createDirectories(location); - final long initialGlobalCheckpoint = globalCheckpointSupplier.getAsLong(); - final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, generation, initialGlobalCheckpoint, generation); - final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME); - Checkpoint.write(getChannelFactory(), checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); - IOUtils.fsync(checkpointFile, false); - current = createWriter(generation, generation, initialGlobalCheckpoint); - readers.clear(); } } catch (Exception e) { // close the opened translog files if we fail to create a new translog... @@ -409,9 +392,9 @@ private int totalOperations(long minGeneration) { try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); return Stream.concat(readers.stream(), Stream.of(current)) - .filter(r -> r.getGeneration() >= minGeneration) - .mapToInt(BaseTranslogReader::totalOperations) - .sum(); + .filter(r -> r.getGeneration() >= minGeneration) + .mapToInt(BaseTranslogReader::totalOperations) + .sum(); } } @@ -432,9 +415,9 @@ private long sizeInBytesByMinGen(long minGeneration) { try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); return Stream.concat(readers.stream(), Stream.of(current)) - .filter(r -> r.getGeneration() >= minGeneration) - .mapToLong(BaseTranslogReader::sizeInBytes) - .sum(); + .filter(r -> r.getGeneration() >= minGeneration) + .mapToLong(BaseTranslogReader::sizeInBytes) + .sum(); } } @@ -588,8 +571,8 @@ public Snapshot newSnapshotFromGen(long minGeneration) throws IOException { "Min referenced generation is [" + getMinFileGeneration() + "]"); } TranslogSnapshot[] snapshots = Stream.concat(readers.stream(), Stream.of(current)) - .filter(reader -> reader.getGeneration() >= minGeneration) - .map(BaseTranslogReader::newSnapshot).toArray(TranslogSnapshot[]::new); + .filter(reader -> reader.getGeneration() >= minGeneration) + .map(BaseTranslogReader::newSnapshot).toArray(TranslogSnapshot[]::new); return newMultiSnapshot(snapshots); } } @@ -626,8 +609,8 @@ private Snapshot newMultiSnapshot(TranslogSnapshot[] snapshots) throws IOExcepti private Stream readersAboveMinSeqNo(long minSeqNo) { assert readLock.isHeldByCurrentThread() || writeLock.isHeldByCurrentThread() : - "callers of readersAboveMinSeqNo must hold a lock: readLock [" - + readLock.isHeldByCurrentThread() + "], writeLock [" + readLock.isHeldByCurrentThread() + "]"; + "callers of readersAboveMinSeqNo must hold a lock: 
readLock [" + + readLock.isHeldByCurrentThread() + "], writeLock [" + readLock.isHeldByCurrentThread() + "]"; return Stream.concat(readers.stream(), Stream.of(current)) .filter(reader -> { final long maxSeqNo = reader.getCheckpoint().maxSeqNo; @@ -1113,14 +1096,14 @@ public boolean equals(Object o) { Index index = (Index) o; if (version != index.version || - seqNo != index.seqNo || - primaryTerm != index.primaryTerm || - id.equals(index.id) == false || - type.equals(index.type) == false || - versionType != index.versionType || - autoGeneratedIdTimestamp != index.autoGeneratedIdTimestamp || - source.equals(index.source) == false) { - return false; + seqNo != index.seqNo || + primaryTerm != index.primaryTerm || + id.equals(index.id) == false || + type.equals(index.type) == false || + versionType != index.versionType || + autoGeneratedIdTimestamp != index.autoGeneratedIdTimestamp || + source.equals(index.source) == false) { + return false; } if (routing != null ? !routing.equals(index.routing) : index.routing != null) { return false; @@ -1293,10 +1276,10 @@ public boolean equals(Object o) { Delete delete = (Delete) o; return version == delete.version && - seqNo == delete.seqNo && - primaryTerm == delete.primaryTerm && - uid.equals(delete.uid) && - versionType == delete.versionType; + seqNo == delete.seqNo && + primaryTerm == delete.primaryTerm && + uid.equals(delete.uid) && + versionType == delete.versionType; } @Override @@ -1421,7 +1404,7 @@ private static void verifyChecksum(BufferedChecksumStreamInput in) throws IOExce long readChecksum = in.readInt() & 0xFFFF_FFFFL; if (readChecksum != expectedChecksum) { throw new TranslogCorruptedException("translog stream is corrupted, expected: 0x" + - Long.toHexString(expectedChecksum) + ", got: 0x" + Long.toHexString(readChecksum)); + Long.toHexString(expectedChecksum) + ", got: 0x" + Long.toHexString(readChecksum)); } } @@ -1543,7 +1526,7 @@ public void rollGeneration() throws IOException { final Path checkpoint = location.resolve(CHECKPOINT_FILE_NAME); assert Checkpoint.read(checkpoint).generation == current.getGeneration(); final Path generationCheckpoint = - location.resolve(getCommitCheckpointFileName(current.getGeneration())); + location.resolve(getCommitCheckpointFileName(current.getGeneration())); Files.copy(checkpoint, generationCheckpoint); IOUtils.fsync(generationCheckpoint, false); IOUtils.fsync(generationCheckpoint.getParent(), true); @@ -1728,4 +1711,26 @@ TranslogWriter getCurrent() { List getReaders() { return readers; } + + public static String createEmptyTranslog(final Path location, final long initialGlobalCheckpoint, final ShardId shardId) + throws IOException { + final ChannelFactory channelFactory = FileChannel::open; + return createEmptyTranslog(location, initialGlobalCheckpoint, shardId, channelFactory); + } + + static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, ShardId shardId, ChannelFactory channelFactory) throws IOException { + IOUtils.rm(location); + Files.createDirectories(location); + final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, 1, initialGlobalCheckpoint, 1); + final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME); + Checkpoint.write(channelFactory, checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); + IOUtils.fsync(checkpointFile, false); + final String translogUUID = UUIDs.randomBase64UUID(); + TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1, location.resolve(getFilename(1)), channelFactory, + 
new ByteSizeValue(10), 1, initialGlobalCheckpoint, + () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); } + ); + writer.close(); + return translogUUID; + } } diff --git a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java new file mode 100644 index 0000000000000..011f5b380ecbd --- /dev/null +++ b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch; + +import org.apache.commons.codec.DecoderException; +import org.elasticsearch.test.ESTestCase; + +import java.util.Optional; + +import static org.elasticsearch.ExceptionsHelper.MAX_ITERATIONS; +import static org.elasticsearch.ExceptionsHelper.maybeError; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ExceptionsHelperTests extends ESTestCase { + + public void testMaybeError() { + final Error outOfMemoryError = new OutOfMemoryError(); + assertError(outOfMemoryError, outOfMemoryError); + + final DecoderException decoderException = new DecoderException(outOfMemoryError); + assertError(decoderException, outOfMemoryError); + + final Exception e = new Exception(); + e.addSuppressed(decoderException); + assertError(e, outOfMemoryError); + + final int depth = randomIntBetween(1, 16); + Throwable cause = new Exception(); + boolean fatal = false; + Error error = null; + for (int i = 0; i < depth; i++) { + final int length = randomIntBetween(1, 4); + for (int j = 0; j < length; j++) { + if (!fatal && rarely()) { + error = new Error(); + cause.addSuppressed(error); + fatal = true; + } else { + cause.addSuppressed(new Exception()); + } + } + if (!fatal && rarely()) { + cause = error = new Error(cause); + fatal = true; + } else { + cause = new Exception(cause); + } + } + if (fatal) { + assertError(cause, error); + } else { + assertFalse(maybeError(cause, logger).isPresent()); + } + + assertFalse(maybeError(new Exception(new DecoderException()), logger).isPresent()); + + Throwable chain = outOfMemoryError; + for (int i = 0; i < MAX_ITERATIONS; i++) { + chain = new Exception(chain); + } + assertFalse(maybeError(chain, logger).isPresent()); + } + + private void assertError(final Throwable cause, final Error error) { + final Optional maybeError = maybeError(cause, logger); + assertTrue(maybeError.isPresent()); + assertThat(maybeError.get(), equalTo(error)); + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 9754306025d8b..855cc693858fa 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -109,8 +109,8 @@ import org.elasticsearch.index.mapper.RootObjectMapper; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; -import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.LocalCheckpointTracker; +import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.ShardId; @@ -1021,25 +1021,25 @@ public void testCommitAdvancesMinTranslogForRecovery() throws IOException { } engine.flush(); - assertThat(engine.getTranslog().currentFileGeneration(), equalTo(2L)); - assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 2L : 1L)); - assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(2L)); + assertThat(engine.getTranslog().currentFileGeneration(), equalTo(3L)); + assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 3L : 2L)); + assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(3L)); engine.flush(); - assertThat(engine.getTranslog().currentFileGeneration(), equalTo(2L)); - assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 2L : 1L)); - assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(2L)); - - engine.flush(true, true); assertThat(engine.getTranslog().currentFileGeneration(), equalTo(3L)); - assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 3L : 1L)); + assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 3L : 2L)); assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(3L)); - globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); engine.flush(true, true); assertThat(engine.getTranslog().currentFileGeneration(), equalTo(4L)); - assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(4L)); + assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(inSync ? 
4L : 2L)); assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(4L)); + + globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); + engine.flush(true, true); + assertThat(engine.getTranslog().currentFileGeneration(), equalTo(5L)); + assertThat(engine.getTranslog().getDeletionPolicy().getMinTranslogGenerationForRecovery(), equalTo(5L)); + assertThat(engine.getTranslog().getDeletionPolicy().getTranslogGenerationOfLastCommit(), equalTo(5L)); } public void testSyncedFlush() throws IOException { @@ -2611,9 +2611,11 @@ public void testRecoverFromForeignTranslog() throws IOException { Translog.TranslogGeneration generation = engine.getTranslog().getGeneration(); engine.close(); + final Path badTranslogLog = createTempDir(); + final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId); Translog translog = new Translog( - new TranslogConfig(shardId, createTempDir(), INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), - null, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); + new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED); translog.add(new Translog.Index("test", "SomeBogusId", 0, "{}".getBytes(Charset.forName("UTF-8")))); assertEquals(generation.translogFileGeneration, translog.currentFileGeneration()); translog.close(); @@ -2835,7 +2837,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { globalCheckpoint.set(engine.getLocalCheckpointTracker().getCheckpoint()); expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog()); Map userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals("2", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); } } @@ -2846,14 +2848,14 @@ public void testCurrentTranslogIDisCommitted() throws IOException { assertTrue(engine.isRecovering()); Map userData = engine.getLastCommittedSegmentInfos().getUserData(); if (i == 0) { - assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals("2", userData.get(Translog.TRANSLOG_GENERATION_KEY)); } else { - assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals("4", userData.get(Translog.TRANSLOG_GENERATION_KEY)); } assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); engine.recoverFromTranslog(); userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals("4", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); } } @@ -2862,10 +2864,10 @@ public void testCurrentTranslogIDisCommitted() throws IOException { { try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG))) { Map userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals("2", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); expectThrows(IllegalStateException.class, 
                 () -> engine.recoverFromTranslog());
-            assertEquals(1, engine.getTranslog().currentFileGeneration());
+            assertEquals(2, engine.getTranslog().currentFileGeneration());
             assertEquals(0L, engine.getTranslog().uncommittedOperations());
         }
     }
 
@@ -2875,11 +2877,11 @@ public void testCurrentTranslogIDisCommitted() throws IOException {
         for (int i = 0; i < 2; i++) {
             try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) {
                 Map<String, String> userData = engine.getLastCommittedSegmentInfos().getUserData();
-                assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY));
+                assertEquals("2", userData.get(Translog.TRANSLOG_GENERATION_KEY));
                 assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY));
                 engine.recoverFromTranslog();
                 userData = engine.getLastCommittedSegmentInfos().getUserData();
-                assertEquals("no changes - nothing to commit", "1", userData.get(Translog.TRANSLOG_GENERATION_KEY));
+                assertEquals("no changes - nothing to commit", "2", userData.get(Translog.TRANSLOG_GENERATION_KEY));
                 assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY));
             }
         }
@@ -4421,7 +4423,7 @@ public void testOpenIndexCreateTranslogKeepOnlyLastCommit() throws Exception {
         assertThat(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY), equalTo(lastCommit.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)));
         // Translog tags should be fresh.
         assertThat(userData.get(Translog.TRANSLOG_UUID_KEY), not(equalTo(lastCommit.get(Translog.TRANSLOG_UUID_KEY))));
-        assertThat(userData.get(Translog.TRANSLOG_GENERATION_KEY), equalTo("1"));
+        assertThat(userData.get(Translog.TRANSLOG_GENERATION_KEY), equalTo("2"));
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
index 45c940b00b4a0..2ddf0751bfb17 100644
--- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
+++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
@@ -147,11 +147,19 @@ protected void afterIfSuccessful() throws Exception {
 
     }
 
-    protected Translog createTranslog(TranslogConfig config, String translogUUID) throws IOException {
+    protected Translog createTranslog(TranslogConfig config) throws IOException {
+        String translogUUID =
+            Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
         return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()),
-            () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+            () -> SequenceNumbers.NO_OPS_PERFORMED);
     }
 
+    protected Translog openTranslog(TranslogConfig config, String translogUUID) throws IOException {
+        return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()),
+            () -> SequenceNumbers.NO_OPS_PERFORMED);
+    }
+
+
     private void markCurrentGenAsCommitted(Translog translog) throws IOException {
         long genToCommit = translog.currentFileGeneration();
         long genToRetain = randomLongBetween(translog.getDeletionPolicy().getMinTranslogGenerationForRecovery(), genToCommit);
@@ -194,10 +202,11 @@ public void tearDown() throws Exception {
     }
 
     private Translog create(Path path) throws IOException {
-        globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO);
+        globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
         final TranslogConfig translogConfig = getTranslogConfig(path);
         final TranslogDeletionPolicy deletionPolicy =
             createTranslogDeletionPolicy(translogConfig.getIndexSettings());
-        return new Translog(translogConfig, null, deletionPolicy, () -> globalCheckpoint.get());
+        final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId);
+        return new Translog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get());
     }
 
     private TranslogConfig getTranslogConfig(final Path path) {
@@ -220,7 +229,7 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings settings) {
         }
 
         final IndexSettings indexSettings =
-                IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings);
+            IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings);
         return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize);
     }
 
@@ -372,39 +381,39 @@ public void testStats() throws IOException {
         {
             final TranslogStats stats = stats();
             assertThat(stats.estimatedNumberOfOperations(), equalTo(1));
-            assertThat(stats.getTranslogSizeInBytes(), equalTo(97L));
+            assertThat(stats.getTranslogSizeInBytes(), equalTo(140L));
             assertThat(stats.getUncommittedOperations(), equalTo(1));
-            assertThat(stats.getUncommittedSizeInBytes(), equalTo(97L));
+            assertThat(stats.getUncommittedSizeInBytes(), equalTo(140L));
         }
 
         translog.add(new Translog.Delete("test", "2", 1, newUid("2")));
         {
             final TranslogStats stats = stats();
             assertThat(stats.estimatedNumberOfOperations(), equalTo(2));
-            assertThat(stats.getTranslogSizeInBytes(), equalTo(146L));
+            assertThat(stats.getTranslogSizeInBytes(), equalTo(189L));
             assertThat(stats.getUncommittedOperations(), equalTo(2));
-            assertThat(stats.getUncommittedSizeInBytes(), equalTo(146L));
+            assertThat(stats.getUncommittedSizeInBytes(), equalTo(189L));
         }
 
         translog.add(new Translog.Delete("test", "3", 2, newUid("3")));
         {
             final TranslogStats stats = stats();
             assertThat(stats.estimatedNumberOfOperations(), equalTo(3));
-            assertThat(stats.getTranslogSizeInBytes(), equalTo(195L));
+            assertThat(stats.getTranslogSizeInBytes(), equalTo(238L));
             assertThat(stats.getUncommittedOperations(), equalTo(3));
-            assertThat(stats.getUncommittedSizeInBytes(), equalTo(195L));
+            assertThat(stats.getUncommittedSizeInBytes(), equalTo(238L));
         }
 
         translog.add(new Translog.NoOp(3, 1, randomAlphaOfLength(16)));
         {
             final TranslogStats stats = stats();
             assertThat(stats.estimatedNumberOfOperations(), equalTo(4));
-            assertThat(stats.getTranslogSizeInBytes(), equalTo(237L));
+            assertThat(stats.getTranslogSizeInBytes(), equalTo(280L));
             assertThat(stats.getUncommittedOperations(), equalTo(4));
-            assertThat(stats.getUncommittedSizeInBytes(), equalTo(237L));
+            assertThat(stats.getUncommittedSizeInBytes(), equalTo(280L));
         }
 
-        final long expectedSizeInBytes = 280L;
+        final long expectedSizeInBytes = 323L;
         translog.rollGeneration();
         {
             final TranslogStats stats = stats();
@@ -521,7 +530,7 @@ public void testSnapshot() throws IOException {
         }
 
         try (Translog.Snapshot snapshot = translog.newSnapshot();
-                Translog.Snapshot snapshot1 = translog.newSnapshot()) {
+             Translog.Snapshot snapshot1 = translog.newSnapshot()) {
             assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
             assertThat(snapshot.totalOperations(), equalTo(1));
 
@@ -1235,15 +1244,15 @@ public void testBasicRecovery() throws IOException {
         translog.close();
         if (translogGeneration == null) {
-            translog = createTranslog(config, null);
+            translog = createTranslog(config);
             assertEquals(0, translog.stats().estimatedNumberOfOperations());
-            assertEquals(1, translog.currentFileGeneration());
+            assertEquals(2, translog.currentFileGeneration());
             assertFalse(translog.syncNeeded());
             try(Translog.Snapshot snapshot = translog.newSnapshot()) {
                 assertNull(snapshot.next());
             }
         } else {
-            translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+            translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED);
             assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration());
             assertFalse(translog.syncNeeded());
             try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) {
@@ -1269,7 +1278,8 @@ public void testRecoveryUncommitted() throws IOException {
             if (op == prepareOp) {
                 translogGeneration = translog.getGeneration();
                 translog.rollGeneration();
-                assertEquals("expected this to be the first commit", 1L, translogGeneration.translogFileGeneration);
+                assertEquals("expected this to be the first roll (1 gen is on creation, 2 when opened)",
+                    2L, translogGeneration.translogFileGeneration);
                 assertNotNull(translogGeneration.translogUUID);
             }
         }
@@ -1281,7 +1291,7 @@ public void testRecoveryUncommitted() throws IOException {
         TranslogConfig config = translog.getConfig();
         final String translogUUID = translog.getTranslogUUID();
         final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
-        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
             assertNotNull(translogGeneration);
             assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
             assertFalse(translog.syncNeeded());
@@ -1295,9 +1305,10 @@
             }
         }
         if (randomBoolean()) { // recover twice
-            try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+            try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
                 assertNotNull(translogGeneration);
-                assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
+                assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice",
+                    translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
                 assertFalse(translog.syncNeeded());
                 try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) {
                     int upTo = sync ? translogOperations : prepareOp;
@@ -1323,7 +1334,8 @@ public void testRecoveryUncommittedFileExists() throws IOException {
             if (op == prepareOp) {
                 translogGeneration = translog.getGeneration();
                 translog.rollGeneration();
-                assertEquals("expected this to be the first commit", 1L, translogGeneration.translogFileGeneration);
+                assertEquals("expected this to be the first roll (1 gen is on creation, 2 when opened)",
+                    2L, translogGeneration.translogFileGeneration);
                 assertNotNull(translogGeneration.translogUUID);
             }
         }
@@ -1339,7 +1351,7 @@ public void testRecoveryUncommittedFileExists() throws IOException {
 
         final String translogUUID = translog.getTranslogUUID();
         final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
-        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
             assertNotNull(translogGeneration);
             assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
             assertFalse(translog.syncNeeded());
@@ -1354,9 +1366,10 @@
             }
         }
         if (randomBoolean()) { // recover twice
-            try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+            try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
                 assertNotNull(translogGeneration);
-                assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
+                assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice",
+                    translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
                 assertFalse(translog.syncNeeded());
                 try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) {
                     int upTo = sync ? translogOperations : prepareOp;
@@ -1381,7 +1394,8 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException {
             if (op == prepareOp) {
                 translogGeneration = translog.getGeneration();
                 translog.rollGeneration();
-                assertEquals("expected this to be the first commit", 1L, translogGeneration.translogFileGeneration);
+                assertEquals("expected this to be the first roll (1 gen is on creation, 2 when opened)",
+                    2L, translogGeneration.translogFileGeneration);
                 assertNotNull(translogGeneration.translogUUID);
             }
         }
@@ -1391,19 +1405,19 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException {
         TranslogConfig config = translog.getConfig();
         Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
         Checkpoint read = Checkpoint.read(ckp);
-        Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0, SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
+        Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0, SequenceNumbers.NO_OPS_PERFORMED, 0);
         Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
         final String translogUUID = translog.getTranslogUUID();
         final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
-        try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+        try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
             fail("corrupted");
         } catch (IllegalStateException ex) {
-            assertEquals("Checkpoint file translog-2.ckp already exists but has corrupted content expected: Checkpoint{offset=3123, " +
-                "numOps=55, generation=2, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-2, minTranslogGeneration=1} but got: Checkpoint{offset=0, numOps=0, " +
-                "generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-2, minTranslogGeneration=0}", ex.getMessage());
+            assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3123, " +
+                "numOps=55, generation=3, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-1, minTranslogGeneration=1} but got: Checkpoint{offset=0, numOps=0, " +
+                "generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1, minTranslogGeneration=0}", ex.getMessage());
         }
         Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
-        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
            assertNotNull(translogGeneration);
            assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
            assertFalse(translog.syncNeeded());
@@ -1480,12 +1494,12 @@ public void testOpenForeignTranslog() throws IOException {
         final String foreignTranslog = randomRealisticUnicodeOfCodepointLengthBetween(1, translogGeneration.translogUUID.length());
         try {
-            new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+            new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED);
             fail("translog doesn't belong to this UUID");
         } catch (TranslogCorruptedException ex) {
         }
-        this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+        this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
         try (Translog.Snapshot snapshot = this.translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) {
             for (int i = firstUncommitted; i < translogOperations; i++) {
                 Translog.Operation next = snapshot.next();
@@ -1671,7 +1685,7 @@ public void testFailFlush() throws IOException {
         translog.close(); // we are closed
         final String translogUUID = translog.getTranslogUUID();
         final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
-        try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+        try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
             assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration());
             assertFalse(tlog.syncNeeded());
@@ -1807,7 +1821,7 @@ protected void afterAdd() throws IOException {
             }
         }
         try (Translog tlog =
-                 new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+                 new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED);
              Translog.Snapshot snapshot = tlog.newSnapshot()) {
             if (writtenOperations.size() != snapshot.totalOperations()) {
                 for (int i = 0; i < threadCount; i++) {
@@ -1853,7 +1867,7 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException {
         final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1);
         deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE));
         deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration);
-        translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+        translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
         assertThat(translog.getMinFileGeneration(), equalTo(1L));
         // no trimming done yet, just recovered
         for (long gen = 1; gen < translog.currentFileGeneration(); gen++) {
@@ -1909,7 +1923,7 @@ public void testRecoveryFromFailureOnTrimming() throws IOException {
             final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1);
             deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE));
             deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration);
-            try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+            try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
                 // we don't know when things broke exactly
                 assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L));
                 assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(comittedGeneration));
@@ -1957,25 +1971,28 @@ public void onceFailedFailAlways() {
     private Translog getFailableTranslog(final FailSwitch fail, final TranslogConfig config, final boolean partialWrites,
                                          final boolean throwUnknownException, String translogUUID,
                                          final TranslogDeletionPolicy deletionPolicy) throws IOException {
-        return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO) {
+        final ChannelFactory channelFactory = (file, openOption) -> {
+            FileChannel channel = FileChannel.open(file, openOption);
+            boolean success = false;
+            try {
+                final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation
+                ThrowingFileChannel throwingFileChannel = new ThrowingFileChannel(fail, isCkpFile ? false : partialWrites, throwUnknownException, channel);
+                success = true;
+                return throwingFileChannel;
+            } finally {
+                if (success == false) {
+                    IOUtils.closeWhileHandlingException(channel);
+                }
+            }
+        };
+        if (translogUUID == null) {
+            translogUUID = Translog.createEmptyTranslog(
+                config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, channelFactory);
+        }
+        return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED) {
             @Override
             ChannelFactory getChannelFactory() {
-                final ChannelFactory factory = super.getChannelFactory();
-
-                return (file, openOption) -> {
-                    FileChannel channel = factory.open(file, openOption);
-                    boolean success = false;
-                    try {
-                        final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation
-                        ThrowingFileChannel throwingFileChannel = new ThrowingFileChannel(fail, isCkpFile ? false : partialWrites, throwUnknownException, channel);
-                        success = true;
-                        return throwingFileChannel;
-                    } finally {
-                        if (success == false) {
-                            IOUtils.closeWhileHandlingException(channel);
-                        }
-                    }
-                };
+                return channelFactory;
             }
 
             @Override
@@ -2079,11 +2096,11 @@ private static final class UnknownException extends RuntimeException {
     public void testFailWhileCreateWriteWithRecoveredTLogs() throws IOException {
         Path tempDir = createTempDir();
         TranslogConfig config = getTranslogConfig(tempDir);
-        Translog translog = createTranslog(config, null);
+        Translog translog = createTranslog(config);
        translog.add(new Translog.Index("test", "boom", 0, "boom".getBytes(Charset.forName("UTF-8"))));
        translog.close();
        try {
-            new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO) {
+            new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED) {
                 @Override
                 protected TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint) throws IOException {
@@ -2106,7 +2123,7 @@ public void testRecoverWithUnbackedNextGen() throws IOException {
         Checkpoint read = Checkpoint.read(ckp);
         Files.copy(ckp, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)));
         Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
-        try (Translog tlog = createTranslog(config, translog.getTranslogUUID());
+        try (Translog tlog = openTranslog(config, translog.getTranslogUUID());
              Translog.Snapshot snapshot = tlog.newSnapshot()) {
             assertFalse(tlog.syncNeeded());
@@ -2117,7 +2134,7 @@ public void testRecoverWithUnbackedNextGen() throws IOException {
             tlog.add(new Translog.Index("test", "" + 1, 1, Integer.toString(2).getBytes(Charset.forName("UTF-8"))));
         }
 
-        try (Translog tlog = createTranslog(config, translog.getTranslogUUID());
+        try (Translog tlog = openTranslog(config, translog.getTranslogUUID());
             Translog.Snapshot snapshot = tlog.newSnapshot()) {
             assertFalse(tlog.syncNeeded());
@@ -2141,7 +2158,7 @@ public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException {
         Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
         try {
-            Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+            Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED);
             fail("file already exists?");
         } catch (TranslogException ex) {
             // all is well
@@ -2163,7 +2180,7 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException {
         Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
         // we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition
         Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog"));
-        try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO)) {
+        try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
             assertFalse(tlog.syncNeeded());
             try (Translog.Snapshot snapshot = tlog.newSnapshot()) {
                 for (int i = 0; i < 1; i++) {
@@ -2176,7 +2193,7 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException {
         }
 
         try {
-            Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+            Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
             fail("file already exists?");
         } catch (TranslogException ex) {
             // all is well
@@ -2282,7 +2299,11 @@ public void testWithRandomException() throws IOException {
         TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy();
         deletionPolicy.setTranslogGenerationOfLastCommit(minGenForRecovery);
         deletionPolicy.setMinTranslogGenerationForRecovery(minGenForRecovery);
-        try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+        if (generationUUID == null) {
+            // we never managed to successfully create a translog, make it
+            generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
+        }
+        try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
              Translog.Snapshot snapshot = translog.newSnapshotFromGen(minGenForRecovery)) {
             assertEquals(syncedDocs.size(), snapshot.totalOperations());
             for (int i = 0; i < syncedDocs.size(); i++) {
@@ -2347,14 +2368,14 @@ public void testPendingDelete() throws IOException {
         final String translogUUID = translog.getTranslogUUID();
         final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings());
         translog.close();
-        translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+        translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
         translog.add(new Translog.Index("test", "2", 1, new byte[]{2}));
         translog.rollGeneration();
         Closeable lock = translog.acquireRetentionLock();
         translog.add(new Translog.Index("test", "3", 2, new byte[]{3}));
         translog.close();
         IOUtils.close(lock);
-        translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+        translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
     }
 
     public static Translog.Location randomTranslogLocation() {
@@ -2382,7 +2403,7 @@ public void testTranslogOpSerialization() throws Exception {
             null);
         Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm,
-                1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false);
+            1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false);
         Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, true);
         Translog.Index index = new Translog.Index(eIndex, eIndexResult);
 
@@ -2393,7 +2414,7 @@ public void testTranslogOpSerialization() throws Exception {
         assertEquals(index, serializedIndex);
 
         Engine.Delete eDelete = new Engine.Delete(doc.type(), doc.id(), newUid(doc), randomSeqNum, randomPrimaryTerm,
-                2, VersionType.INTERNAL, Origin.PRIMARY, 0);
+            2, VersionType.INTERNAL, Origin.PRIMARY, 0);
         Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, true);
         Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult);
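Every TranslogTests change above follows the same two substitutions: a translog can no longer be bootstrapped with a null UUID but must be created explicitly via Translog.createEmptyTranslog, and the global checkpoint supplier moves from SequenceNumbers.UNASSIGNED_SEQ_NO to SequenceNumbers.NO_OPS_PERFORMED. A condensed sketch of the create-then-open pattern, reusing only helpers visible in this diff (getTranslogConfig, createTranslogDeletionPolicy, shardId) with otherwise hypothetical scaffolding:

    // Sketch only: creation writes generation 1; opening the translog rolls to
    // generation 2, which is what the updated assertions in this patch expect.
    TranslogConfig config = getTranslogConfig(createTempDir());
    String translogUUID =
        Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
    try (Translog translog = new Translog(config, translogUUID,
             createTranslogDeletionPolicy(config.getIndexSettings()), () -> SequenceNumbers.NO_OPS_PERFORMED)) {
        assertEquals(2, translog.currentFileGeneration());
    }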
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
index 69176b03942f6..a496664c0260b 100644
--- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
@@ -32,7 +32,6 @@
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.VersionType;
@@ -44,7 +43,6 @@
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.translog.SnapshotMatchers;
 import org.elasticsearch.index.translog.Translog;
-import org.elasticsearch.index.translog.TranslogConfig;
 
 import java.util.HashMap;
 import java.util.List;
@@ -52,7 +50,6 @@
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Future;
 
-import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
@@ -186,7 +183,6 @@ public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception {
         shards.indexDocs(nonFlushedDocs);
 
         IndexShard replica = shards.getReplicas().get(0);
-        final String translogUUID = replica.getTranslog().getTranslogUUID();
         final String historyUUID = replica.getHistoryUUID();
         Translog.TranslogGeneration translogGeneration = replica.getTranslog().getGeneration();
         shards.removeReplica(replica);
@@ -204,13 +200,8 @@ public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception {
         final String historyUUIDtoUse = UUIDs.randomBase64UUID(random());
         if (randomBoolean()) {
             // create a new translog
-            final TranslogConfig translogConfig =
-                new TranslogConfig(replica.shardId(), replica.shardPath().resolveTranslog(), replica.indexSettings(),
-                    BigArrays.NON_RECYCLING_INSTANCE);
-            try (Translog translog = new Translog(translogConfig, null, createTranslogDeletionPolicy(), () -> flushedDocs)) {
-                translogUUIDtoUse = translog.getTranslogUUID();
-                translogGenToUse = translog.currentFileGeneration();
-            }
+            translogUUIDtoUse = Translog.createEmptyTranslog(replica.shardPath().resolveTranslog(), flushedDocs, replica.shardId());
+            translogGenToUse = 1;
         } else {
             translogUUIDtoUse = translogGeneration.translogUUID;
             translogGenToUse = translogGeneration.translogFileGeneration;
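The RecoveryTests hunk above hard-codes translogGenToUse = 1, which agrees with the generation arithmetic asserted throughout this patch: createEmptyTranslog writes generation 1, and the generation only advances to 2 once the translog is opened, which never happens on this branch. In sketch form, with names taken from the diff:

    // Created but never opened: the on-disk generation is still 1.
    translogUUIDtoUse = Translog.createEmptyTranslog(replica.shardPath().resolveTranslog(), flushedDocs, replica.shardId());
    translogGenToUse = 1;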
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java
index debd51a2633b4..3846168009dc6 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java
@@ -69,7 +69,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
     }
 
     private static double[] randomPercents(long minValue, long maxValue) {
-
         final int length = randomIntBetween(1, 20);
         final double[] percents = new double[length];
         for (int i = 0; i < percents.length; ++i) {
@@ -97,7 +96,7 @@ private static PercentileRanksAggregationBuilder randomCompression(PercentileRanksAggregationBuilder builder) {
         return builder;
     }
 
-    private void assertConsistent(double[] pcts, PercentileRanks values, long minValue, long maxValue) {
+    private void assertConsistent(double[] pcts, PercentileRanks values, long minValue) {
         final List<Percentile> percentileList = CollectionUtils.iterableAsArrayList(values);
         assertEquals(pcts.length, percentileList.size());
         for (int i = 0; i < pcts.length; ++i) {
@@ -109,9 +108,6 @@ private void assertConsistent(double[] pcts, PercentileRanks values, long minValue) {
             if (percentile.getPercent() == 0) {
                 assertThat(percentile.getValue(), lessThanOrEqualTo((double) minValue));
             }
-            if (percentile.getPercent() == 100) {
-                assertThat(percentile.getValue(), greaterThanOrEqualTo((double) maxValue));
-            }
         }
 
         for (int i = 1; i < percentileList.size(); ++i) {
@@ -193,7 +189,7 @@ public void testSingleValuedField() throws Exception {
         assertHitCount(searchResponse, 10);
 
         final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks");
-        assertConsistent(pcts, values, minValue, maxValue);
+        assertConsistent(pcts, values, minValue);
     }
 
     @Override
@@ -233,7 +229,7 @@ public void testSingleValuedFieldOutsideRange() throws Exception {
         assertHitCount(searchResponse, 10);
 
         final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks");
-        assertConsistent(pcts, values, minValue, maxValue);
+        assertConsistent(pcts, values, minValue);
     }
 
     @Override
@@ -248,7 +244,7 @@ public void testSingleValuedFieldPartiallyUnmapped() throws Exception {
         assertHitCount(searchResponse, 10);
 
         final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks");
-        assertConsistent(pcts, values, minValue, maxValue);
+        assertConsistent(pcts, values, minValue);
     }
 
     @Override
@@ -266,7 +262,7 @@ public void testSingleValuedFieldWithValueScript() throws Exception {
         assertHitCount(searchResponse, 10);
 
         final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks");
-        assertConsistent(pcts, values, minValue - 1, maxValue - 1);
+        assertConsistent(pcts, values, minValue - 1);
     }
 
     @Override
@@ -286,7 +282,7 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception {
         assertHitCount(searchResponse, 10);
 
         final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks");
-        assertConsistent(pcts, values, minValue - 1, maxValue - 1);
+        assertConsistent(pcts, values, minValue - 1);
     }
 
     @Override
@@ -301,7 +297,7 @@ public void testMultiValuedField() throws Exception {
         assertHitCount(searchResponse, 10);
 
         final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks");
-        assertConsistent(pcts, values, minValues, maxValues);
+        assertConsistent(pcts, values, minValues);
     }
 
     @Override
@@ -319,7 +315,7 @@ public void testMultiValuedFieldWithValueScript() throws Exception {
         assertHitCount(searchResponse, 10);
 
         final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks");
-        assertConsistent(pcts, values, minValues - 1, maxValues - 1);
+        assertConsistent(pcts, values, minValues - 1);
     }
 
     public void testMultiValuedFieldWithValueScriptReverse() throws Exception {
@@ -336,7 +332,7 @@ public void testMultiValuedFieldWithValueScriptReverse() throws Exception {
         assertHitCount(searchResponse, 10);
 
         final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks");
-        assertConsistent(pcts, values, -maxValues, -minValues);
+        assertConsistent(pcts, values, -maxValues);
     }
 
     @Override
@@ -356,7 +352,7 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception {
         assertHitCount(searchResponse, 10);
 
         final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks");
-        assertConsistent(pcts, values, minValues - 1, maxValues - 1);
+        assertConsistent(pcts, values, minValues - 1);
     }
 
     @Override
@@ -373,7 +369,7 @@ public void testScriptSingleValued() throws Exception {
         assertHitCount(searchResponse, 10);
 
         final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks");
-        assertConsistent(pcts, values, minValue, maxValue);
+        assertConsistent(pcts, values, minValue);
     }
 
     @Override
@@ -394,7 +390,7 @@ public void testScriptSingleValuedWithParams() throws Exception {
         assertHitCount(searchResponse, 10);
 
         final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks");
-        assertConsistent(pcts, values, minValue - 1, maxValue - 1);
+        assertConsistent(pcts, values, minValue - 1);
     }
 
     @Override
@@ -412,7 +408,7 @@ public void testScriptMultiValued() throws Exception {
         assertHitCount(searchResponse, 10);
 
         final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks");
-        assertConsistent(pcts, values, minValues, maxValues);
+        assertConsistent(pcts, values, minValues);
     }
 
     @Override
@@ -431,7 +427,7 @@ public void testScriptMultiValuedWithParams() throws Exception {
         assertHitCount(searchResponse, 10);
 
         final PercentileRanks values = searchResponse.getAggregations().get("percentile_ranks");
-        assertConsistent(pcts, values, minValues - 1, maxValues - 1);
+        assertConsistent(pcts, values, minValues - 1);
     }
 
     public void testOrderBySubAggregation() {
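The expected values in the t-digest tests below shift (for example, the 75th percentile over the seven indexed values moves from 4.0 to 4.5), consistent with percentile estimation that interpolates between neighboring data points rather than snapping to one of them. As a rough, hand-checkable analogue (plain linear interpolation over a fully sorted sample, not the t-digest algorithm itself):

    // Hypothetical helper, for intuition only: interpolate between the two
    // sorted samples that bracket the requested percentile.
    static double interpolatedPercentile(double[] sorted, double percent) {
        double rank = percent / 100.0 * (sorted.length - 1); // fractional index
        int lo = (int) Math.floor(rank);
        int hi = (int) Math.ceil(rank);
        return sorted[lo] + (rank - lo) * (sorted[hi] - sorted[lo]);
    }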
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java
index 1605e3710e25b..85ab361a8b337 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java
@@ -74,12 +74,12 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException {
         }, tdigest -> {
             assertEquals(7L, tdigest.state.size());
             assertEquals(7L, tdigest.state.centroidCount());
-            assertEquals(4.0d, tdigest.percentile(75), 0.0d);
-            assertEquals("4.0", tdigest.percentileAsString(75));
+            assertEquals(4.5d, tdigest.percentile(75), 0.0d);
+            assertEquals("4.5", tdigest.percentileAsString(75));
             assertEquals(2.0d, tdigest.percentile(50), 0.0d);
             assertEquals("2.0", tdigest.percentileAsString(50));
-            assertEquals(1.0d, tdigest.percentile(20), 0.0d);
-            assertEquals("1.0", tdigest.percentileAsString(20));
+            assertEquals(1.0d, tdigest.percentile(22), 0.0d);
+            assertEquals("1.0", tdigest.percentileAsString(22));
         });
     }
 
@@ -97,14 +97,14 @@ public void testSomeMatchesNumericDocValues() throws IOException {
             assertEquals(tdigest.state.centroidCount(), 7L);
             assertEquals(8.0d, tdigest.percentile(100), 0.0d);
             assertEquals("8.0", tdigest.percentileAsString(100));
-            assertEquals(5.48d, tdigest.percentile(86), 0.0d);
-            assertEquals("5.48", tdigest.percentileAsString(86));
+            assertEquals(6.98d, tdigest.percentile(88), 0.0d);
+            assertEquals("6.98", tdigest.percentileAsString(88));
             assertEquals(1.0d, tdigest.percentile(33), 0.0d);
             assertEquals("1.0", tdigest.percentileAsString(33));
             assertEquals(1.0d, tdigest.percentile(25), 0.0d);
             assertEquals("1.0", tdigest.percentileAsString(25));
-            assertEquals(0.06d, tdigest.percentile(1), 0.0d);
-            assertEquals("0.06", tdigest.percentileAsString(1));
+            assertEquals(0.0d, tdigest.percentile(1), 0.0d);
+            assertEquals("0.0", tdigest.percentileAsString(1));
         });
     }
 
@@ -124,7 +124,7 @@ public void testQueryFiltering() throws IOException {
             assertEquals(4L, tdigest.state.centroidCount());
             assertEquals(2.0d, tdigest.percentile(100), 0.0d);
             assertEquals(1.0d, tdigest.percentile(50), 0.0d);
-            assertEquals(0.75d, tdigest.percentile(25), 0.0d);
+            assertEquals(0.5d, tdigest.percentile(25), 0.0d);
         });
 
         testCase(LongPoint.newRangeQuery("row", 100, 110), docs, tdigest -> {
diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
index 5481a486185a0..62be0d48bf31c 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java
@@ -60,8 +60,8 @@
 import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.index.mapper.SourceFieldMapper;
 import org.elasticsearch.index.mapper.Uid;
-import org.elasticsearch.index.seqno.ReplicationTracker;
 import org.elasticsearch.index.seqno.LocalCheckpointTracker;
+import org.elasticsearch.index.seqno.ReplicationTracker;
 import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.store.DirectoryService;
@@ -253,7 +253,9 @@ protected Translog createTranslog() throws IOException {
 
     protected Translog createTranslog(Path translogPath) throws IOException {
         TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE);
-        return new Translog(translogConfig, null, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+        final String translogUUID = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId);
+        return new Translog(translogConfig, translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS),
+            () -> SequenceNumbers.NO_OPS_PERFORMED);
     }
 
     protected InternalEngine createEngine(Store store, Path translogPath) throws IOException {
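EngineTestCase now performs the same bootstrap, so every engine test gets a translog whose UUID came from createEmptyTranslog. A hedged usage sketch of the updated helper: Translog.Index, add, syncNeeded, and createTempDir appear elsewhere in this patch, while sync() and the assertions are assumed from the standard Translog and test vocabulary rather than shown in this diff.

    // Sketch only: the helper from the hunk above, exercised end to end.
    try (Translog translog = createTranslog(createTempDir())) {
        translog.add(new Translog.Index("test", "1", 0, new byte[]{1}));
        assertTrue(translog.syncNeeded());  // the write is buffered, not yet fsynced
        translog.sync();                    // assumed API: forces the fsync
        assertFalse(translog.syncNeeded());
    }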