From fbb840b5c870b238163b3e4726d490e2b6711c23 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sat, 13 Jan 2018 21:59:26 -0500 Subject: [PATCH 01/94] TEST: Tightens file-based condition in peer-recovery A replica always keeps a safe commit and starts peer-recovery with that commit, so file-based recovery only happens if new operations are added to the primary and the required translog is not fully retained. In the test, we tried to produce this condition by flushing a new commit in order to trim the entire translog. However, if the new global checkpoint is not persisted yet, we will keep two commits and not trim the translog. This commit tightens the file-based condition in the test by waiting for the global checkpoint to be properly persisted on the new primary before flushing. Closes #28209 Relates #28181 --- .../index/replication/RecoveryDuringReplicationTests.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 77576426252d9..aa97c2049915f 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -215,7 +215,6 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception { } @TestLogging("org.elasticsearch.index.shard:TRACE,org.elasticsearch.indices.recovery:TRACE") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/28209") public void testRecoveryAfterPrimaryPromotion() throws Exception { try (ReplicationGroup shards = createGroup(2)) { shards.startAll(); @@ -268,7 +267,12 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { ); newPrimary.indexSettings().updateIndexMetaData(builder.build()); newPrimary.onSettingsChanged(); - shards.syncGlobalCheckpoint(); + // Make sure the global checkpoint on the new primary is persisted properly, + // otherwise the deletion policy won't trim translog + assertBusy(() -> { + shards.syncGlobalCheckpoint(); + assertThat(newPrimary.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(newPrimary.seqNoStats().getMaxSeqNo())); + }); newPrimary.flush(new FlushRequest()); uncommittedOpsOnPrimary = shards.indexDocs(randomIntBetween(0, 10)); totalDocs += uncommittedOpsOnPrimary; From aec0c0f9b6b9e511e56c725f68f9010a8298ed02 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Sun, 14 Jan 2018 19:20:32 -0500 Subject: [PATCH 02/94] Update version of TaskInfo header serialization after backport Update the serialization version after backporting #27764 to 6.x.
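For context, the wire-compatibility pattern at work here: once #27764 is backported to 6.2, the read and write sides must gate the headers map on the same version constant, or a mixed-version stream desynchronizes. A minimal sketch of the symmetric gate, using only names that appear in the diff below:

    // Reader: only consume the headers map when the sending node is on 6.2.0+.
    if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
        headers = in.readMap(StreamInput::readString, StreamInput::readString);
    } else {
        headers = Collections.emptyMap(); // older peers never send headers
    }

    // Writer: mirror the exact same gate when serializing to the peer.
    if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
        out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString);
    }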
--- server/src/main/java/org/elasticsearch/tasks/TaskInfo.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java index 2bd16a9addf6a..19e9baedd753b 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java @@ -97,7 +97,7 @@ public TaskInfo(StreamInput in) throws IOException { runningTimeNanos = in.readLong(); cancellable = in.readBoolean(); parentTaskId = TaskId.readFromStream(in); - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_2_0)) { headers = in.readMap(StreamInput::readString, StreamInput::readString); } else { headers = Collections.emptyMap(); } @@ -115,7 +115,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(runningTimeNanos); out.writeBoolean(cancellable); parentTaskId.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_2_0)) { out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); } } From 023d08ee919b6508ec19b0eb10f15001b1e8a0b1 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 15 Jan 2018 08:44:49 +0000 Subject: [PATCH 03/94] Adds metadata to rewritten aggregations (#28185) * Adds metadata to rewritten aggregations Prior to this change, if any filters in the filters aggregation were rewritten, the rewritten version of the FiltersAggregationBuilder would not contain the metadata from the original. This is because `AbstractAggregationBuilder.getMetadata()` returns an empty map when no metadata is set. Closes #28170 * Always set metadata when rewritten --- .../search/aggregations/AggregationBuilder.java | 4 +--- .../search/aggregations/FiltersAggsRewriteIT.java | 6 ++++++ .../search/aggregations/bucket/FiltersTests.java | 2 ++ 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java index 99bf9be683ee3..80d8277f4cab2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java @@ -101,9 +101,7 @@ public final AggregationBuilder rewrite(QueryRewriteContext context) throws IOEx if (rewritten == this) { return rewritten; } - if (getMetaData() != null && rewritten.getMetaData() == null) { - rewritten.setMetaData(getMetaData()); - } + rewritten.setMetaData(getMetaData()); AggregatorFactories.Builder rewrittenSubAggs = factoriesBuilder.rewrite(context); rewritten.subAggregations(rewrittenSubAggs); return rewritten; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java index bb4c3a2a5eb0f..ce5e4a694f279 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java @@ -32,6 +32,8 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; public class FiltersAggsRewriteIT extends ESSingleNodeTestCase { @@ -58,10 +60,14 @@ public void testWrapperQueryIsRewritten() throws
IOException { } FiltersAggregationBuilder builder = new FiltersAggregationBuilder("titles", new FiltersAggregator.KeyedFilter("titleterms", new WrapperQueryBuilder(bytesReference))); + Map metadata = new HashMap<>(); + metadata.put(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + builder.setMetaData(metadata); SearchResponse searchResponse = client().prepareSearch("test").setSize(0).addAggregation(builder).get(); assertEquals(3, searchResponse.getHits().getTotalHits()); InternalFilters filters = searchResponse.getAggregations().get("titles"); assertEquals(1, filters.getBuckets().size()); assertEquals(2, filters.getBuckets().get(0).getDocCount()); + assertEquals(metadata, filters.getMetaData()); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java index 7e63bbb6f3855..e0cd490134f14 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregator.KeyedFilter; import java.io.IOException; +import java.util.Collections; import static org.hamcrest.Matchers.instanceOf; @@ -123,6 +124,7 @@ public void testOtherBucket() throws IOException { public void testRewrite() throws IOException { // test non-keyed filter that doesn't rewrite AggregationBuilder original = new FiltersAggregationBuilder("my-agg", new MatchAllQueryBuilder()); + original.setMetaData(Collections.singletonMap(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20))); AggregationBuilder rewritten = original.rewrite(new QueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); assertSame(original, rewritten); From 784eba86b220452da6d356296ecb15366c388307 Mon Sep 17 00:00:00 2001 From: hanbj Date: Mon, 15 Jan 2018 22:09:27 +0800 Subject: [PATCH 04/94] [Docs] Fix an error in painless-types.asciidoc (#28221) --- docs/painless/painless-types.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/painless/painless-types.asciidoc b/docs/painless/painless-types.asciidoc index 36cf78312ea26..9e5077503b4a8 100644 --- a/docs/painless/painless-types.asciidoc +++ b/docs/painless/painless-types.asciidoc @@ -311,7 +311,7 @@ to floating point types. | int | explicit | explicit | explicit | | implicit | implicit | implicit | long | explicit | explicit | explicit | explicit | | implicit | implicit | float | explicit | explicit | explicit | explicit | explicit | | implicit -| float | explicit | explicit | explicit | explicit | explicit | explicit | +| double | explicit | explicit | explicit | explicit | explicit | explicit | |==== @@ -376,7 +376,7 @@ cast would normally be required between the non-def types. 
def x; // Declare def variable x and set it to null x = 3; // Set the def variable x to the literal 3 with an implicit // cast from int to def -double a = x; // Declare double variable y and set it to def variable x, +double a = x; // Declare double variable a and set it to def variable x, // which contains a double int b = x; // ERROR: Results in a run-time error because an explicit cast is // required to cast from a double to an int From be012b132605ab0052d4ab1d7eb736d64b84a2e5 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 16:47:46 +0100 Subject: [PATCH 05/94] upgrade to lucene 7.2.1 (#28218) --- buildSrc/version.properties | 2 +- docs/Versions.asciidoc | 4 ++-- .../licenses/lucene-expressions-7.2.0.jar.sha1 | 1 - .../licenses/lucene-expressions-7.2.1.jar.sha1 | 1 + .../analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 | 1 - .../analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 | 1 + .../licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 | 1 - .../licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 | 1 + server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 | 1 - server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 | 1 + server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 | 1 + server/licenses/lucene-core-7.2.0.jar.sha1 | 1 - server/licenses/lucene-core-7.2.1.jar.sha1 | 1 + server/licenses/lucene-grouping-7.2.0.jar.sha1 | 1 - server/licenses/lucene-grouping-7.2.1.jar.sha1 | 1 + server/licenses/lucene-highlighter-7.2.0.jar.sha1 | 1 - server/licenses/lucene-highlighter-7.2.1.jar.sha1 | 1 + server/licenses/lucene-join-7.2.0.jar.sha1 | 1 - server/licenses/lucene-join-7.2.1.jar.sha1 | 1 + server/licenses/lucene-memory-7.2.0.jar.sha1 | 1 - server/licenses/lucene-memory-7.2.1.jar.sha1 | 1 + server/licenses/lucene-misc-7.2.0.jar.sha1 | 1 - server/licenses/lucene-misc-7.2.1.jar.sha1 | 1 + server/licenses/lucene-queries-7.2.0.jar.sha1 | 1 - server/licenses/lucene-queries-7.2.1.jar.sha1 | 1 + server/licenses/lucene-queryparser-7.2.0.jar.sha1 | 1 - server/licenses/lucene-queryparser-7.2.1.jar.sha1 | 1 + server/licenses/lucene-sandbox-7.2.0.jar.sha1 | 1 - server/licenses/lucene-sandbox-7.2.1.jar.sha1 | 1 + server/licenses/lucene-spatial-7.2.0.jar.sha1 | 1 - server/licenses/lucene-spatial-7.2.1.jar.sha1 | 1 + server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 | 1 + server/licenses/lucene-spatial3d-7.2.0.jar.sha1 | 1 - server/licenses/lucene-spatial3d-7.2.1.jar.sha1 | 1 + server/licenses/lucene-suggest-7.2.0.jar.sha1 | 1 - server/licenses/lucene-suggest-7.2.1.jar.sha1 | 1 + server/src/main/java/org/elasticsearch/Version.java | 2 +- 47 files changed, 26 insertions(+), 26 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-7.2.0.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 create mode 100644 
plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-core-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-core-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-grouping-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-join-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-join-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-memory-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-memory-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-misc-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-misc-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-queries-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-queries-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-7.2.1.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-7.2.0.jar.sha1 create mode 100644 server/licenses/lucene-suggest-7.2.1.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 3c06aecb0fa09..fabcadabd9f96 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0-alpha1 -lucene = 7.2.0 +lucene = 7.2.1 # optional dependencies spatial4j = 0.6 diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index ae588350b9c8a..3008b1bb3e09a 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,7 +1,7 @@ :version: 7.0.0-alpha1 :major-version: 7.x -:lucene_version: 
7.2.0 -:lucene_version_path: 7_2_0 +:lucene_version: 7.2.1 +:lucene_version_path: 7_2_1 :branch: master :jdk: 1.8.0_131 :jdk_major: 8 diff --git a/modules/lang-expression/licenses/lucene-expressions-7.2.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.2.0.jar.sha1 deleted file mode 100644 index 0e903acab596e..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -848eda48b43c30a7c7e38fa50182a7e866460e95 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..a57efa8c26aa6 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 @@ -0,0 +1 @@ +51fbb33cdb17bb36a0e86485685bba18eb1c2ccf \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 deleted file mode 100644 index 8c744b138d9b4..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -726e5cf3515ba765f5f326cdced8abaaa64da875 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..fb8e4b0167bf5 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 @@ -0,0 +1 @@ +cfdfcd54c052cdd08140c7cd4daa7929b9657da0 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 deleted file mode 100644 index 72de0db978a26..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -879c63f60c20d9f0f2a106062ad2512158007108 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..f8c67b9480380 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 @@ -0,0 +1 @@ +21418892a16434ecb4f8efdbf4e62838f58a6a59 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 deleted file mode 100644 index fe98e5ed6ba59..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bdf0ae30f09641d2c0b098c3b7a340d59a7ab4b1 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..2443de6a49b0a --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 @@ -0,0 +1 @@ +970e860a6e252e7c1dc117c45176a847ce961ffc \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 deleted file mode 100644 index e019470764969..0000000000000 --- 
a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -575096198d49aad52d2e12eb4d43dd547747dd7d \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..1c301d32445ec --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 @@ -0,0 +1 @@ +ec08375a8392720cc378995d8234cd6138a735f6 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 deleted file mode 100644 index 83c0a09eed763..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b0f748e15d3b6b8abbe654ba48ca7cbbebcfb98a \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..4833879967b8e --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 @@ -0,0 +1 @@ +58305876f7fb0fbfad288910378cf4770da43892 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 deleted file mode 100644 index b7453ece71681..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -547938ebce6a7ea4308c4753e28c39d09e4c7423 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..dc33291c7a3cb --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 @@ -0,0 +1 @@ +51cf40e2606863840e52d7e8981314a5a0323e06 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 b/server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 deleted file mode 100644 index 2ca17a5b5c1ab..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4e1b4638fb8b07befc8175880641f821af3e655a \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 b/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..5ffdd6b7ba4cf --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 @@ -0,0 +1 @@ +324c3a090a04136720f4ef612db03b5c14866efa \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 b/server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 deleted file mode 100644 index f53f41fd9f865..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -35f5a26abb7fd466749fea7edfedae7897192e95 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 b/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..b166b97dd7c4d --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 @@ -0,0 +1 @@ +bc8dc9cc1555543532953d1dff33b67f849e19f9 \ No newline at end of 
file diff --git a/server/licenses/lucene-core-7.2.0.jar.sha1 b/server/licenses/lucene-core-7.2.0.jar.sha1 deleted file mode 100644 index 41e1103ca2570..0000000000000 --- a/server/licenses/lucene-core-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f88107aa577ce8edc0a5cee036b485943107a552 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.2.1.jar.sha1 b/server/licenses/lucene-core-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..e2fd2d7533737 --- /dev/null +++ b/server/licenses/lucene-core-7.2.1.jar.sha1 @@ -0,0 +1 @@ +91897dbbbbada95ccddbd90505f0a0ba6bf7c199 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.2.0.jar.sha1 b/server/licenses/lucene-grouping-7.2.0.jar.sha1 deleted file mode 100644 index 034534ffef35a..0000000000000 --- a/server/licenses/lucene-grouping-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1536a1a0fd24d0a8c03cfd45d00a52a88f9f52d1 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.2.1.jar.sha1 b/server/licenses/lucene-grouping-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..7537cd21bf326 --- /dev/null +++ b/server/licenses/lucene-grouping-7.2.1.jar.sha1 @@ -0,0 +1 @@ +5dbae570b1a4e54cd978fe5c3ed2d6b2f87be968 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.2.0.jar.sha1 b/server/licenses/lucene-highlighter-7.2.0.jar.sha1 deleted file mode 100644 index f13d7cc8489bf..0000000000000 --- a/server/licenses/lucene-highlighter-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -afd4093723520b0cdb59852018b545efeefd544a \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.2.1.jar.sha1 b/server/licenses/lucene-highlighter-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..38837afb0a623 --- /dev/null +++ b/server/licenses/lucene-highlighter-7.2.1.jar.sha1 @@ -0,0 +1 @@ +2f4b8c93563409cfebb36d910c4dab4910678689 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.2.0.jar.sha1 b/server/licenses/lucene-join-7.2.0.jar.sha1 deleted file mode 100644 index 8cc521e31a007..0000000000000 --- a/server/licenses/lucene-join-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16029d54fa9c99b3187b68791b182a1ea4f78e89 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.2.1.jar.sha1 b/server/licenses/lucene-join-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..c2944aa323e2f --- /dev/null +++ b/server/licenses/lucene-join-7.2.1.jar.sha1 @@ -0,0 +1 @@ +3121a038d472f51087500dd6da9146a9b0031ae4 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.2.0.jar.sha1 b/server/licenses/lucene-memory-7.2.0.jar.sha1 deleted file mode 100644 index a267d12bd71ba..0000000000000 --- a/server/licenses/lucene-memory-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -32f26371224c595f625f061d67fc2edd9c8c836b \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.2.1.jar.sha1 b/server/licenses/lucene-memory-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..543e123b2a733 --- /dev/null +++ b/server/licenses/lucene-memory-7.2.1.jar.sha1 @@ -0,0 +1 @@ +21233b2baeed2aaa5acf8359bf8c4a90cc6bf553 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.2.0.jar.sha1 b/server/licenses/lucene-misc-7.2.0.jar.sha1 deleted file mode 100644 index d378ea1ae2cc2..0000000000000 --- a/server/licenses/lucene-misc-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1067351bfca1fc72ece5cb4a4f219762b097de36 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.2.1.jar.sha1 b/server/licenses/lucene-misc-7.2.1.jar.sha1 
new file mode 100644 index 0000000000000..2a9f649d7d527 --- /dev/null +++ b/server/licenses/lucene-misc-7.2.1.jar.sha1 @@ -0,0 +1 @@ +0478fed6c474c95f6c0c678c04297a3df0c1687e \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.2.0.jar.sha1 b/server/licenses/lucene-queries-7.2.0.jar.sha1 deleted file mode 100644 index 04b1048ee15dc..0000000000000 --- a/server/licenses/lucene-queries-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e0b41af59bc2baed0315abb04621d62e500d094a \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.2.1.jar.sha1 b/server/licenses/lucene-queries-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..e0f2d575e8a2a --- /dev/null +++ b/server/licenses/lucene-queries-7.2.1.jar.sha1 @@ -0,0 +1 @@ +02135cf5047409ed1ca6cd098e802b30f9dbd1ff \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.2.0.jar.sha1 b/server/licenses/lucene-queryparser-7.2.0.jar.sha1 deleted file mode 100644 index bedb4fbd1448b..0000000000000 --- a/server/licenses/lucene-queryparser-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a17128e35e5e924cf28c283415d83c7a8935e58 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.2.1.jar.sha1 b/server/licenses/lucene-queryparser-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..56c5dbfa18678 --- /dev/null +++ b/server/licenses/lucene-queryparser-7.2.1.jar.sha1 @@ -0,0 +1 @@ +a87d8b14d1c8045f61cb704955706f6681170be3 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.2.0.jar.sha1 b/server/licenses/lucene-sandbox-7.2.0.jar.sha1 deleted file mode 100644 index 62704a0258e92..0000000000000 --- a/server/licenses/lucene-sandbox-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fa77169831ec17636357b55bd2c8ca5a97ec7a2 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.2.1.jar.sha1 b/server/licenses/lucene-sandbox-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..9445acbdd87d8 --- /dev/null +++ b/server/licenses/lucene-sandbox-7.2.1.jar.sha1 @@ -0,0 +1 @@ +dc8dd132fd183791dc27591a69974f55b685d0d7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.2.0.jar.sha1 b/server/licenses/lucene-spatial-7.2.0.jar.sha1 deleted file mode 100644 index adcb3b8de7603..0000000000000 --- a/server/licenses/lucene-spatial-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -575f7507d526b2692ae461a4df349e90f048ec77 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.2.1.jar.sha1 b/server/licenses/lucene-spatial-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..8c1b3d01c2339 --- /dev/null +++ b/server/licenses/lucene-spatial-7.2.1.jar.sha1 @@ -0,0 +1 @@ +09c4d96e6ea34292f7cd20c4ff1d16ff31eb7869 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 b/server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 deleted file mode 100644 index b9c4e84c78eb0..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8f6e31d08dc86bb3edeb6ef132f0920941735e15 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 b/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..50422956651d3 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 @@ -0,0 +1 @@ +8aff7e8a5547c03d0c4e7e1b58cb30773bb1d7d5 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.2.0.jar.sha1 b/server/licenses/lucene-spatial3d-7.2.0.jar.sha1 deleted file mode 100644 
index 225d318bcda9d..0000000000000 --- a/server/licenses/lucene-spatial3d-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f857630bfafde418e6e3cf748fe8d18f7b771a70 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 b/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..85aae1cfdd053 --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 @@ -0,0 +1 @@ +8b0db8ff795b31994ebe93779c450d17c612590d \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.2.0.jar.sha1 b/server/licenses/lucene-suggest-7.2.0.jar.sha1 deleted file mode 100644 index f99189e7b9aae..0000000000000 --- a/server/licenses/lucene-suggest-7.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0409ce8d0d7e1203143b5be41aa6dd31d4c1bcf9 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.2.1.jar.sha1 b/server/licenses/lucene-suggest-7.2.1.jar.sha1 new file mode 100644 index 0000000000000..e46240d1c6287 --- /dev/null +++ b/server/licenses/lucene-suggest-7.2.1.jar.sha1 @@ -0,0 +1 @@ +1c3804602e35589c21b0391fa7088ef012751a22 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 09d98b75fe9d2..cfd8485f785f4 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -146,7 +146,7 @@ public class Version implements Comparable { public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_0); public static final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = - new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_0); + new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final Version CURRENT = V_7_0_0_alpha1; static { From 5973c2bf31e3e71ad684fc626fc0cd0c2442c546 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 17:27:51 +0100 Subject: [PATCH 06/94] #28218: Update the Lucene version for 6.2.0 after backport --- server/src/main/java/org/elasticsearch/Version.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index cfd8485f785f4..8a4bc0752be3f 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -143,7 +143,7 @@ public class Version implements Comparable { public static final int V_6_1_2_ID = 6010299; public static final Version V_6_1_2 = new Version(V_6_1_2_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_2_0_ID = 6020099; - public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_0); + public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); From 3895add2ca11ccb045e1557363682b48331ad8a6 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 15 Jan 2018 09:59:01 -0700 Subject: [PATCH 07/94] Introduce elasticsearch-core jar (#28191) This is related to #27933. It introduces a jar named elasticsearch-core in the lib directory. This commit moves the JarHell class from server to elasticsearch-core. 
Additionally, PathUtils and some of Loggers are moved as JarHell depends on them. --- build.gradle | 1 + client/rest/build.gradle | 1 + client/sniffer/build.gradle | 1 + client/test/build.gradle | 1 + libs/elasticsearch-core/build.gradle | 81 +++++++ .../licenses/log4j-api-2.9.1.jar.sha1 | 1 + .../licenses/log4j-api-LICENSE.txt | 202 ++++++++++++++++++ .../licenses/log4j-api-NOTICE.txt | 5 + .../org/elasticsearch/bootstrap/JarHell.java | 19 +- .../elasticsearch/bootstrap/JavaVersion.java | 1 + .../common/SuppressForbidden.java | 0 .../elasticsearch/common/io/PathUtils.java | 0 .../common/logging/ESLoggerFactory.java | 9 - .../elasticsearch/common/logging/Loggers.java | 69 ++++++ .../common/logging/PrefixLogger.java | 2 +- .../elasticsearch/bootstrap/JarHellTests.java | 31 +-- .../bootstrap/duplicate-classes.jar | Bin .../bootstrap/duplicate-xmlbeans-classes.jar | Bin .../transport/netty4/ESLoggingHandlerIT.java | 5 +- .../logging/EvilLoggerConfigurationTests.java | 2 +- .../common/logging/EvilLoggerTests.java | 6 +- server/build.gradle | 2 + .../org/elasticsearch/action/bulk/Retry.java | 4 +- .../elasticsearch/bootstrap/Bootstrap.java | 11 +- .../common/component/AbstractComponent.java | 4 +- .../common/logging/LogConfigurator.java | 12 +- .../{Loggers.java => ServerLoggers.java} | 61 +----- .../common/settings/ClusterSettings.java | 18 +- .../common/settings/SettingsModule.java | 5 +- .../discovery/DiscoveryModule.java | 4 +- .../elasticsearch/env/NodeEnvironment.java | 8 +- .../index/AbstractIndexComponent.java | 4 +- .../index/CompositeIndexEventListener.java | 4 +- .../elasticsearch/index/IndexSettings.java | 4 +- .../elasticsearch/index/IndexingSlowLog.java | 6 +- .../elasticsearch/index/SearchSlowLog.java | 10 +- ...ElasticsearchConcurrentMergeScheduler.java | 4 +- .../elasticsearch/index/engine/Engine.java | 4 +- .../plain/DocValuesIndexFieldData.java | 2 - .../RandomScoreFunctionBuilder.java | 2 +- .../shard/AbstractIndexShardComponent.java | 4 +- .../index/similarity/SimilarityService.java | 1 - .../org/elasticsearch/index/store/Store.java | 4 +- .../recovery/RecoverySourceHandler.java | 4 +- .../indices/recovery/RecoveryTarget.java | 4 +- .../java/org/elasticsearch/node/Node.java | 4 +- .../bucket/terms/TermsAggregatorFactory.java | 4 +- .../bootstrap/MaxMapCountCheckTests.java | 10 +- .../cluster/allocation/ClusterRerouteIT.java | 9 +- .../metadata/TemplateUpgradeServiceIT.java | 4 +- .../ExpectedShardSizeAllocationTests.java | 1 - .../allocation/FailedNodeRoutingTests.java | 6 - .../allocation/RebalanceAfterActiveTests.java | 1 - .../service/ClusterApplierServiceTests.java | 10 +- .../cluster/service/MasterServiceTests.java | 9 +- .../common/settings/ScopedSettingsTests.java | 8 +- .../gateway/GatewayIndexStateIT.java | 3 - .../index/MergeSchedulerSettingsTests.java | 17 +- .../index/engine/InternalEngineTests.java | 21 +- settings.gradle | 1 + .../index/store/EsBaseDirectoryTestCase.java | 1 - .../org/elasticsearch/test/TestCluster.java | 1 - .../test/engine/MockEngineSupport.java | 1 - .../test/junit/listeners/LoggingListener.java | 5 +- .../elasticsearch/test/rest/yaml/Stash.java | 1 - .../test/store/MockFSIndexStore.java | 4 +- 66 files changed, 507 insertions(+), 237 deletions(-) create mode 100644 libs/elasticsearch-core/build.gradle create mode 100644 libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 create mode 100644 libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt create mode 100644 libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt rename {server => 
libs/elasticsearch-core}/src/main/java/org/elasticsearch/bootstrap/JarHell.java (94%) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java (99%) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/SuppressForbidden.java (100%) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/io/PathUtils.java (100%) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java (80%) create mode 100644 libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java (98%) rename {server => libs/elasticsearch-core}/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java (88%) rename {server => libs/elasticsearch-core}/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar (100%) rename {server => libs/elasticsearch-core}/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar (100%) rename server/src/main/java/org/elasticsearch/common/logging/{Loggers.java => ServerLoggers.java} (76%) diff --git a/build.gradle b/build.gradle index f7936d5efed71..bb789b7c18b5c 100644 --- a/build.gradle +++ b/build.gradle @@ -183,6 +183,7 @@ subprojects { "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec', "org.elasticsearch:elasticsearch:${version}": ':server', "org.elasticsearch:elasticsearch-cli:${version}": ':server:cli', + "org.elasticsearch:elasticsearch-core:${version}": ':libs:elasticsearch-core', "org.elasticsearch:elasticsearch-nio:${version}": ':libs:elasticsearch-nio', "org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest', "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}": ':client:sniffer', diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 1c7e86f799f61..8e0f179634a27 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -72,6 +72,7 @@ forbiddenApisTest { } // JarHell is part of es server, which we don't want to pull in +// TODO: Not anymore. Now in elasticsearch-core jarHell.enabled=false namingConventions { diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index bcde806f4df16..03e4a082d274c 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -75,6 +75,7 @@ dependencyLicenses { } // JarHell is part of es server, which we don't want to pull in +// TODO: Not anymore. Now in elasticsearch-core jarHell.enabled=false namingConventions { diff --git a/client/test/build.gradle b/client/test/build.gradle index ccc7be81466a4..fd5777cc8df3f 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -49,6 +49,7 @@ forbiddenApisTest { } // JarHell is part of es server, which we don't want to pull in +// TODO: Not anymore. Now in elasticsearch-core jarHell.enabled=false // TODO: should we have licenses for our test deps? diff --git a/libs/elasticsearch-core/build.gradle b/libs/elasticsearch-core/build.gradle new file mode 100644 index 0000000000000..4cbee03649bb7 --- /dev/null +++ b/libs/elasticsearch-core/build.gradle @@ -0,0 +1,81 @@ +import org.elasticsearch.gradle.precommit.PrecommitTasks + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +apply plugin: 'elasticsearch.build' +apply plugin: 'nebula.optional-base' +apply plugin: 'nebula.maven-base-publish' +apply plugin: 'nebula.maven-scm' + +archivesBaseName = 'elasticsearch-core' + +publishing { + publications { + nebula { + artifactId = archivesBaseName + } + } +} + +dependencies { + compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" + + testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testCompile "junit:junit:${versions.junit}" + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" + + if (isEclipse == false || project.path == ":libs:elasticsearch-core-tests") { + testCompile("org.elasticsearch.test:framework:${version}") { + exclude group: 'org.elasticsearch', module: 'elasticsearch-core' + } + } +} + +forbiddenApisMain { + // elasticsearch-core does not depend on server + // TODO: Need to decide how we want to handle for forbidden signatures with the changes to core + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +if (isEclipse) { + // in eclipse the project is under a fake root, we need to change around the source sets + sourceSets { + if (project.path == ":libs:elasticsearch-core") { + main.java.srcDirs = ['java'] + main.resources.srcDirs = ['resources'] + } else { + test.java.srcDirs = ['java'] + test.resources.srcDirs = ['resources'] + } + } +} + +thirdPartyAudit.excludes = [ + // from log4j + 'org/osgi/framework/AdaptPermission', + 'org/osgi/framework/AdminPermission', + 'org/osgi/framework/Bundle', + 'org/osgi/framework/BundleActivator', + 'org/osgi/framework/BundleContext', + 'org/osgi/framework/BundleEvent', + 'org/osgi/framework/SynchronousBundleListener', + 'org/osgi/framework/wiring/BundleWire', + 'org/osgi/framework/wiring/BundleWiring' +] \ No newline at end of file diff --git a/libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 b/libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 new file mode 100644 index 0000000000000..e1a89fadfed95 --- /dev/null +++ b/libs/elasticsearch-core/licenses/log4j-api-2.9.1.jar.sha1 @@ -0,0 +1 @@ +7a2999229464e7a324aa503c0a52ec0f05efe7bd \ No newline at end of file diff --git a/libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt b/libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/libs/elasticsearch-core/licenses/log4j-api-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt b/libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/libs/elasticsearch-core/licenses/log4j-api-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java similarity index 94% rename from server/src/main/java/org/elasticsearch/bootstrap/JarHell.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index 1959e5e81394b..0e5c9597b7ec8 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -20,7 +20,6 @@ package org.elasticsearch.bootstrap; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.Loggers; @@ -120,7 +119,8 @@ static Set parseClassPath(String classPath) { // } // Instead we just throw an exception, and keep it clean. if (element.isEmpty()) { - throw new IllegalStateException("Classpath should not contain empty elements! (outdated shell script from a previous version?) classpath='" + classPath + "'"); + throw new IllegalStateException("Classpath should not contain empty elements! (outdated shell script from a previous" + + " version?) classpath='" + classPath + "'"); } // we should be able to just Paths.get() each element, but unfortunately this is not the // whole story on how classpath parsing works: if you want to know, start at sun.misc.Launcher, @@ -215,21 +215,13 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO } /** inspect manifest for sure incompatibilities */ - static void checkManifest(Manifest manifest, Path jar) { + private static void checkManifest(Manifest manifest, Path jar) { // give a nice error if jar requires a newer java version String targetVersion = manifest.getMainAttributes().getValue("X-Compile-Target-JDK"); if (targetVersion != null) { checkVersionFormat(targetVersion); checkJavaVersion(jar.toString(), targetVersion); } - - // give a nice error if jar is compiled against different es version - String systemESVersion = Version.CURRENT.toString(); - String targetESVersion = manifest.getMainAttributes().getValue("X-Compile-Elasticsearch-Version"); - if (targetESVersion != null && targetESVersion.equals(systemESVersion) == false) { - throw new IllegalStateException(jar + " requires Elasticsearch " + targetESVersion - + ", your system: " + systemESVersion); - } } public static void checkVersionFormat(String targetVersion) { @@ -237,7 +229,8 @@ public static void checkVersionFormat(String targetVersion) { throw new IllegalStateException( String.format( Locale.ROOT, - "version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have leading zeros but was %s", + "version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have " + + "leading zeros but was %s", targetVersion ) ); @@ -263,7 +256,7 @@ public static void checkJavaVersion(String resource, String targetVersion) { } } - static void checkClass(Map 
diff --git a/server/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java similarity index 99% rename from server/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java index 03722e03060a7..f22087c6e7d8d 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java @@ -26,6 +26,7 @@ import java.util.stream.Collectors; public class JavaVersion implements Comparable<JavaVersion> { + private final List<Integer> version; public List<Integer> getVersion() { diff --git a/server/src/main/java/org/elasticsearch/common/SuppressForbidden.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/SuppressForbidden.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/SuppressForbidden.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/SuppressForbidden.java diff --git a/server/src/main/java/org/elasticsearch/common/io/PathUtils.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/io/PathUtils.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/io/PathUtils.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/io/PathUtils.java diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java similarity index 80% rename from server/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java index d8f2ebe9be843..44d7d17b59325 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java @@ -19,12 +19,9 @@ package org.elasticsearch.common.logging; -import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.spi.ExtendedLogger; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; /** * Factory to get {@link Logger}s */ @@ -35,12 +32,6 @@ private ESLoggerFactory() { } - public static final Setting<Level> LOG_DEFAULT_LEVEL_SETTING = - new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Property.NodeScope); - public static final Setting.AffixSetting<Level> LOG_LEVEL_SETTING = - Setting.prefixKeySetting("logger.", (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Property.Dynamic, - Property.NodeScope)); - public static Logger getLogger(String prefix, String name) { return getLogger(prefix, LogManager.getLogger(name)); } diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java new file mode 100644 index 0000000000000..89073bdce54c4 --- /dev/null +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/Loggers.java @@ -0,0 +1,69 @@ +/* + * Licensed to
Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.Logger; + +public class Loggers { + + public static final String SPACE = " "; + + public static Logger getLogger(Logger parentLogger, String s) { + assert parentLogger instanceof PrefixLogger; + return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s); + } + + public static Logger getLogger(String s) { + return ESLoggerFactory.getLogger(s); + } + + public static Logger getLogger(Class<?> clazz) { + return ESLoggerFactory.getLogger(clazz); + } + + public static Logger getLogger(Class<?> clazz, String... prefixes) { + return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz); + } + + public static Logger getLogger(String name, String... prefixes) { + return ESLoggerFactory.getLogger(formatPrefix(prefixes), name); + } + + private static String formatPrefix(String... prefixes) { + String prefix = null; + if (prefixes != null && prefixes.length > 0) { + StringBuilder sb = new StringBuilder(); + for (String prefixX : prefixes) { + if (prefixX != null) { + if (prefixX.equals(SPACE)) { + sb.append(" "); + } else { + sb.append("[").append(prefixX).append("]"); + } + } + } + if (sb.length() > 0) { + sb.append(" "); + prefix = sb.toString(); + } + } + return prefix; + } +}
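Aside: a minimal sketch, not part of the patch, of how the new core Loggers renders prefixes. It assumes a log4j2 configuration whose layout includes the marker, as the PrefixLogger javadoc below requires; the class name is illustrative.

import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;

public class PrefixSketch {
    public static void main(String[] args) {
        // formatPrefix wraps each non-null prefix in brackets and appends a trailing space,
        // so this logger's messages carry the marker "[my-index][0] ".
        Logger logger = Loggers.getLogger(PrefixSketch.class, "my-index", "0");
        logger.info("starting recovery");
    }
}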
diff --git a/server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java similarity index 98% rename from server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java index a78330c3e8564..b24e839690366 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/logging/PrefixLogger.java @@ -32,7 +32,7 @@ * A logger that prefixes all messages with a fixed prefix specified during construction. The prefix mechanism uses the marker construct, so * for the prefixes to appear, the logging layout pattern must include the marker in its pattern. */ -class PrefixLogger extends ExtendedLoggerWrapper { +public class PrefixLogger extends ExtendedLoggerWrapper { /* * We can not use the built-in Marker tracking (MarkerManager) because the MarkerManager holds a permanent reference to the marker; diff --git a/server/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java similarity index 88% rename from server/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java rename to libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java index 7003ef3d81efe..b3dee0b004584 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java +++ b/libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.bootstrap; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.test.ESTestCase; @@ -164,7 +163,8 @@ public void testBadJDKVersionInJar() throws Exception { JarHell.checkJarHell(jars); fail("did not get expected exception"); } catch (IllegalStateException e) { - assertTrue(e.getMessage().equals("version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have leading zeros but was bogus")); + assertTrue(e.getMessage().equals("version string must be a sequence of nonnegative decimal integers separated " + + "by \".\"'s and may have leading zeros but was bogus")); } } @@ -178,33 +178,6 @@ public void testRequiredJDKVersionIsOK() throws Exception { JarHell.checkJarHell(jars); } - /** make sure if a plugin is compiled against the same ES version, it works */ - public void testGoodESVersionInJar() throws Exception { - Path dir = createTempDir(); - Manifest manifest = new Manifest(); - Attributes attributes = manifest.getMainAttributes(); - attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); - attributes.put(new Attributes.Name("X-Compile-Elasticsearch-Version"), Version.CURRENT.toString()); - Set<URL> jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class")); - JarHell.checkJarHell(jars); - } - - /** make sure if a plugin is compiled against a different ES version, it fails */ - public void testBadESVersionInJar() throws Exception { - Path dir = createTempDir(); - Manifest manifest = new Manifest(); - Attributes attributes = manifest.getMainAttributes(); - attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); - attributes.put(new Attributes.Name("X-Compile-Elasticsearch-Version"), "1.0-bogus"); - Set<URL> jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class")); - try { - JarHell.checkJarHell(jars); - fail("did not get expected exception"); - } catch (IllegalStateException e) { - assertTrue(e.getMessage().contains("requires Elasticsearch 1.0-bogus")); - } - } - public void testValidVersions() { String[] versions = new String[]{"1.7", "1.7.0", "0.1.7", "1.7.0.80"}; for (String version : versions) { diff --git a/server/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar b/libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar similarity index 100% rename from server/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar rename to libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar diff --git a/server/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar 
b/libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar similarity index 100% rename from server/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar rename to libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java index acd71749e2333..67368cb577a81 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.Level; import org.elasticsearch.ESNetty4IntegTestCase; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.MockLogAppender; @@ -36,12 +37,12 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase { public void setUp() throws Exception { super.setUp(); appender = new MockLogAppender(); - Loggers.addAppender(Loggers.getLogger(ESLoggingHandler.class), appender); + ServerLoggers.addAppender(Loggers.getLogger(ESLoggingHandler.class), appender); appender.start(); } public void tearDown() throws Exception { - Loggers.removeAppender(Loggers.getLogger(ESLoggingHandler.class), appender); + ServerLoggers.removeAppender(Loggers.getLogger(ESLoggingHandler.class), appender); appender.stop(); super.tearDown(); } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java index f53c9d3b1f5e7..8dab47bd1ceee 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java @@ -138,7 +138,7 @@ public void testHierarchy() throws Exception { assertThat(ESLoggerFactory.getLogger("x.y").getLevel(), equalTo(Level.DEBUG)); final Level level = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR); - Loggers.setLevel(ESLoggerFactory.getLogger("x"), level); + ServerLoggers.setLevel(ESLoggerFactory.getLogger("x"), level); assertThat(ESLoggerFactory.getLogger("x").getLevel(), equalTo(level)); assertThat(ESLoggerFactory.getLogger("x.y").getLevel(), equalTo(level)); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java index d4bc754689e68..55e359697eb15 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java @@ -285,12 +285,12 @@ public void testFindAppender() throws IOException, UserException { final Logger hasConsoleAppender = ESLoggerFactory.getLogger("has_console_appender"); - final Appender testLoggerConsoleAppender = Loggers.findAppender(hasConsoleAppender, ConsoleAppender.class); + final Appender testLoggerConsoleAppender = ServerLoggers.findAppender(hasConsoleAppender, ConsoleAppender.class); 
assertNotNull(testLoggerConsoleAppender); assertThat(testLoggerConsoleAppender.getName(), equalTo("console")); final Logger hasCountingNoOpAppender = ESLoggerFactory.getLogger("has_counting_no_op_appender"); - assertNull(Loggers.findAppender(hasCountingNoOpAppender, ConsoleAppender.class)); - final Appender countingNoOpAppender = Loggers.findAppender(hasCountingNoOpAppender, CountingNoOpAppender.class); + assertNull(ServerLoggers.findAppender(hasCountingNoOpAppender, ConsoleAppender.class)); + final Appender countingNoOpAppender = ServerLoggers.findAppender(hasCountingNoOpAppender, CountingNoOpAppender.class); assertThat(countingNoOpAppender.getName(), equalTo("counting_no_op")); } diff --git a/server/build.gradle b/server/build.gradle index 20693a30c0cec..4f69c2ee159b5 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -38,6 +38,8 @@ archivesBaseName = 'elasticsearch' dependencies { + compile "org.elasticsearch:elasticsearch-core:${version}" + compileOnly project(':libs:plugin-classloader') testRuntime project(':libs:plugin-classloader') diff --git a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java index 9985d23b9badb..b173fc074bd82 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -22,7 +22,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -102,7 +102,7 @@ static class RetryHandler implements ActionListener { this.backoff = backoffPolicy.iterator(); this.consumer = consumer; this.listener = listener; - this.logger = Loggers.getLogger(getClass(), settings); + this.logger = ServerLoggers.getLogger(getClass(), settings); this.scheduler = scheduler; // in contrast to System.currentTimeMillis(), nanoTime() uses a monotonic clock under the hood this.startTimestampNanos = System.nanoTime(); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 0a3d7f675c234..2f86489bce39f 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.LogConfigurator; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.IfConfig; import org.elasticsearch.common.settings.KeyStoreWrapper; @@ -300,9 +301,9 @@ static void init( try { if (closeStandardStreams) { final Logger rootLogger = ESLoggerFactory.getRootLogger(); - final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class); + final Appender maybeConsoleAppender = ServerLoggers.findAppender(rootLogger, ConsoleAppender.class); if (maybeConsoleAppender != null) { - Loggers.removeAppender(rootLogger, maybeConsoleAppender); + ServerLoggers.removeAppender(rootLogger, maybeConsoleAppender); } closeSystOut(); } @@ -333,9 +334,9 @@ static void init( } 
catch (NodeValidationException | RuntimeException e) { // disable console logging, so user does not see the exception twice (jvm will show it already) final Logger rootLogger = ESLoggerFactory.getRootLogger(); - final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class); + final Appender maybeConsoleAppender = ServerLoggers.findAppender(rootLogger, ConsoleAppender.class); if (foreground && maybeConsoleAppender != null) { - Loggers.removeAppender(rootLogger, maybeConsoleAppender); + ServerLoggers.removeAppender(rootLogger, maybeConsoleAppender); } Logger logger = Loggers.getLogger(Bootstrap.class); if (INSTANCE.node != null) { @@ -368,7 +369,7 @@ static void init( } // re-enable it if appropriate, so they can see any logging during the shutdown process if (foreground && maybeConsoleAppender != null) { - Loggers.addAppender(rootLogger, maybeConsoleAppender); + ServerLoggers.addAppender(rootLogger, maybeConsoleAppender); } throw e; diff --git a/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java b/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java index 8cb51f2b06b0e..f335a754f3771 100644 --- a/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java +++ b/server/src/main/java/org/elasticsearch/common/component/AbstractComponent.java @@ -23,7 +23,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.Node; @@ -34,7 +34,7 @@ public abstract class AbstractComponent { protected final Settings settings; public AbstractComponent(Settings settings) { - this.logger = Loggers.getLogger(getClass(), settings); + this.logger = ServerLoggers.getLogger(getClass(), settings); this.deprecationLogger = new DeprecationLogger(logger); this.settings = settings; } diff --git a/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java index b97fc13e73038..b38c3d3bdd78e 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java +++ b/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java @@ -177,15 +177,15 @@ private static void configureStatusLogger() { * @param settings the settings from which logger levels will be extracted */ private static void configureLoggerLevels(final Settings settings) { - if (ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) { - final Level level = ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings); - Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); + if (ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) { + final Level level = ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.get(settings); + ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level); } - ESLoggerFactory.LOG_LEVEL_SETTING.getAllConcreteSettings(settings) + ServerLoggers.LOG_LEVEL_SETTING.getAllConcreteSettings(settings) // do not set a log level for a logger named level (from the default log setting) - .filter(s -> s.getKey().equals(ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.getKey()) == false).forEach(s -> { + .filter(s -> s.getKey().equals(ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.getKey()) == false).forEach(s -> { final Level level = s.get(settings); - 
Loggers.setLevel(ESLoggerFactory.getLogger(s.getKey().substring("logger.".length())), level); + ServerLoggers.setLevel(ESLoggerFactory.getLogger(s.getKey().substring("logger.".length())), level); }); } diff --git a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java b/server/src/main/java/org/elasticsearch/common/logging/ServerLoggers.java similarity index 76% rename from server/src/main/java/org/elasticsearch/common/logging/Loggers.java rename to server/src/main/java/org/elasticsearch/common/logging/ServerLoggers.java index 812a0b70f2877..99049c53d1637 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java +++ b/server/src/main/java/org/elasticsearch/common/logging/ServerLoggers.java @@ -27,28 +27,29 @@ import org.apache.logging.log4j.core.config.Configuration; import org.apache.logging.log4j.core.config.Configurator; import org.apache.logging.log4j.core.config.LoggerConfig; -import org.apache.logging.log4j.message.MessageFactory; -import org.elasticsearch.common.Classes; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.node.Node; import java.util.ArrayList; -import java.util.Collection; import java.util.List; import java.util.Map; import static java.util.Arrays.asList; -import static javax.security.auth.login.Configuration.getConfiguration; import static org.elasticsearch.common.util.CollectionUtils.asArrayList; /** * A set of utilities around Logging. */ -public class Loggers { +public class ServerLoggers { - public static final String SPACE = " "; + public static final Setting<Level> LOG_DEFAULT_LEVEL_SETTING = + new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Setting.Property.NodeScope); + public static final Setting.AffixSetting<Level> LOG_LEVEL_SETTING = + Setting.prefixKeySetting("logger.", (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Setting.Property.Dynamic, + Setting.Property.NodeScope)); public static Logger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) { return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0])); @@ -64,17 +65,17 @@ public static Logger getLogger(String loggerName, Settings settings, ShardId shardId, String... prefixes) { } public static Logger getLogger(Class<?> clazz, Settings settings, Index index, String... prefixes) { - return getLogger(clazz, settings, asArrayList(SPACE, index.getName(), prefixes).toArray(new String[0])); + return getLogger(clazz, settings, asArrayList(Loggers.SPACE, index.getName(), prefixes).toArray(new String[0])); } public static Logger getLogger(Class<?> clazz, Settings settings, String... prefixes) { final List<String> prefixesList = prefixesList(settings, prefixes); - return getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()])); + return Loggers.getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()])); } public static Logger getLogger(String loggerName, Settings settings, String... prefixes) { final List<String> prefixesList = prefixesList(settings, prefixes); - return getLogger(loggerName, prefixesList.toArray(new String[prefixesList.size()])); + return Loggers.getLogger(loggerName, prefixesList.toArray(new String[prefixesList.size()])); } private static List<String> prefixesList(Settings settings, String... prefixes) return prefixesList; } - public static Logger getLogger(Logger parentLogger, String s) { - assert parentLogger instanceof PrefixLogger; - return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s); - } - - public static Logger getLogger(String s) { - return ESLoggerFactory.getLogger(s); - } - - public static Logger getLogger(Class<?> clazz) { - return ESLoggerFactory.getLogger(clazz); - } - - public static Logger getLogger(Class<?> clazz, String... prefixes) { - return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz); - } - - public static Logger getLogger(String name, String... prefixes) { - return ESLoggerFactory.getLogger(formatPrefix(prefixes), name); - } - - private static String formatPrefix(String... prefixes) { - String prefix = null; - if (prefixes != null && prefixes.length > 0) { - StringBuilder sb = new StringBuilder(); - for (String prefixX : prefixes) { - if (prefixX != null) { - if (prefixX.equals(SPACE)) { - sb.append(" "); - } else { - sb.append("[").append(prefixX).append("]"); - } - } - } - if (sb.length() > 0) { - sb.append(" "); - prefix = sb.toString(); - } - } - return prefix; - } - /** * Set the level of the logger. If the new level is null, the logger will inherit its level from its nearest ancestor with a non-null * level.
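Aside: a minimal sketch, not part of the patch, of the split this rename makes: the Settings-aware overloads stay in server as ServerLoggers and delegate to the core Loggers, deriving the prefix from the settings. It assumes prefixesList picks the node name out of the settings, which is what the callers in this patch rely on.

import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.ServerLoggers;
import org.elasticsearch.common.settings.Settings;

public class ServerLoggersSketch {
    public static void main(String[] args) {
        Settings settings = Settings.builder().put("node.name", "node-1").build();
        // Delegates to Loggers.getLogger(Class<?>, String...), so messages render
        // with a "[node-1] " prefix under a marker-aware layout.
        Logger logger = ServerLoggers.getLogger(ServerLoggersSketch.class, settings);
        logger.info("initialized");
    }
}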
ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings)); + ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.get(settings)); } else { - Loggers.setLevel(ESLoggerFactory.getRootLogger(), rootLevel); + ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), rootLevel); } } else { - Loggers.setLevel(ESLoggerFactory.getLogger(component), value.get(key)); + ServerLoggers.setLevel(ESLoggerFactory.getLogger(component), value.get(key)); } } } @@ -379,8 +379,8 @@ public void apply(Settings value, Settings current, Settings previous) { ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING, EsExecutors.PROCESSORS_SETTING, ThreadContext.DEFAULT_HEADERS_SETTING, - ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING, - ESLoggerFactory.LOG_LEVEL_SETTING, + ServerLoggers.LOG_DEFAULT_LEVEL_SETTING, + ServerLoggers.LOG_LEVEL_SETTING, NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING, NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING, OsService.REFRESH_INTERVAL_SETTING,
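Aside: a minimal sketch, not part of the patch, of how the relocated affix setting resolves a dynamic log level, using the same getConcreteSetting/get calls as the LoggingSettingUpdater above; the logger key is an example.

import org.apache.logging.log4j.Level;
import org.elasticsearch.common.logging.ServerLoggers;
import org.elasticsearch.common.settings.Settings;

public class LogLevelSketch {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
                .put("logger.org.elasticsearch.indices.recovery", "DEBUG")
                .build();
        // getConcreteSetting narrows the "logger." affix to one key; get parses the level.
        Level level = ServerLoggers.LOG_LEVEL_SETTING
                .getConcreteSetting("logger.org.elasticsearch.indices.recovery")
                .get(settings);
        System.out.println(level); // DEBUG
    }
}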
diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 0304b20e992e5..20253f7876880 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -22,7 +22,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.inject.Binder; import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; @@ -35,7 +35,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -58,7 +57,7 @@ public SettingsModule(Settings settings, Setting<?>... additionalSettings) { } public SettingsModule(Settings settings, List<Setting<?>> additionalSettings, List<String> settingsFilter) { - logger = Loggers.getLogger(getClass(), settings); + logger = ServerLoggers.getLogger(getClass(), settings); this.settings = settings; for (Setting<?> setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) { registerSetting(setting); } diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index 179692cd516c8..b2602e8f2c596 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -25,7 +25,7 @@ import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -109,7 +109,7 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic if (discoverySupplier == null) { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); } - Loggers.getLogger(getClass(), settings).info("using discovery type [{}]", discoveryType); + ServerLoggers.getLogger(getClass(), settings).info("using discovery type [{}]", discoveryType); discovery = Objects.requireNonNull(discoverySupplier.get()); } diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 172e3687e3931..ecf0b31934c26 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -38,7 +38,7 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -182,7 +182,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce locks = null; nodeLockId = -1; nodeMetaData = new NodeMetaData(generateNodeId(settings)); - logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId())); + logger = ServerLoggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId())); return; } final NodePath[] nodePaths = new NodePath[environment.dataWithClusterFiles().length]; @@ -190,7 +190,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce boolean success = false; // trace logger to debug issues before the default node name is derived from the node id - Logger startupTraceLogger = Loggers.getLogger(getClass(), settings); + Logger startupTraceLogger = ServerLoggers.getLogger(getClass(), settings); try { sharedDataPath = environment.sharedDataFile(); @@ -244,7 +244,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce throw new IllegalStateException(message, lastException); } this.nodeMetaData = loadOrCreateNodeMetaData(settings, startupTraceLogger, nodePaths); 
- this.logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId())); + this.logger = ServerLoggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId())); this.nodeLockId = nodeLockId; this.locks = locks; diff --git a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java index 25acdd06b44a6..ce13c12c8496f 100644 --- a/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java +++ b/server/src/main/java/org/elasticsearch/index/AbstractIndexComponent.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; public abstract class AbstractIndexComponent implements IndexComponent { @@ -33,7 +33,7 @@ public abstract class AbstractIndexComponent implements IndexComponent { * Constructs a new index component, with the index name and its settings. */ protected AbstractIndexComponent(IndexSettings indexSettings) { - this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex()); + this.logger = ServerLoggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex()); this.deprecationLogger = new DeprecationLogger(logger); this.indexSettings = indexSettings; } diff --git a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index 90d8a205e8b57..e50ddd8e3966c 100644 --- a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -24,7 +24,7 @@ import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; @@ -52,7 +52,7 @@ final class CompositeIndexEventListener implements IndexEventListener { } } this.listeners = Collections.unmodifiableList(new ArrayList<>(listeners)); - this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex()); + this.logger = ServerLoggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex()); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 8fc23e79d0557..5baca022a216a 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -22,7 +22,7 @@ import org.apache.lucene.index.MergePolicy; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -374,7 +374,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti this.settings = 
Settings.builder().put(nodeSettings).put(indexMetaData.getSettings()).build(); this.index = indexMetaData.getIndex(); version = Version.indexCreated(settings); - logger = Loggers.getLogger(getClass(), settings, index); + logger = ServerLoggers.getLogger(getClass(), settings, index); nodeName = Node.NODE_NAME_SETTING.get(settings); this.indexMetaData = indexMetaData; numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null); diff --git a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 94c3892ef361e..53d63bf64bb6b 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -22,7 +22,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; @@ -87,7 +87,7 @@ public final class IndexingSlowLog implements IndexingOperationListener { }, Property.Dynamic, Property.IndexScope); IndexingSlowLog(IndexSettings indexSettings) { - this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings()); + this.indexLogger = ServerLoggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings()); this.index = indexSettings.getIndex(); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat); @@ -117,7 +117,7 @@ private void setMaxSourceCharsToLog(int maxSourceCharsToLog) { private void setLevel(SlowLogLevel level) { this.level = level; - Loggers.setLevel(this.indexLogger, level.name()); + ServerLoggers.setLevel(this.indexLogger, level.name()); } private void setWarnThreshold(TimeValue warnThreshold) { diff --git a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java index a48e3d7bd72c5..d02d4820fd402 100644 --- a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; @@ -81,8 +81,8 @@ public final class SearchSlowLog implements SearchOperationListener { public SearchSlowLog(IndexSettings indexSettings) { - this.queryLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query", indexSettings.getSettings()); - this.fetchLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch", indexSettings.getSettings()); + this.queryLogger = ServerLoggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query", indexSettings.getSettings()); + this.fetchLogger = ServerLoggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch", indexSettings.getSettings()); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING, this::setQueryWarnThreshold); this.queryWarnThreshold = 
indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING).nanos(); @@ -108,8 +108,8 @@ public SearchSlowLog(IndexSettings indexSettings) { private void setLevel(SlowLogLevel level) { this.level = level; - Loggers.setLevel(queryLogger, level.name()); - Loggers.setLevel(fetchLogger, level.name()); + ServerLoggers.setLevel(queryLogger, level.name()); + ServerLoggers.setLevel(fetchLogger, level.name()); } @Override public void onQueryPhase(SearchContext context, long tookInNanos) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java index f4876149cac13..871f1f62f41be 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java @@ -25,7 +25,7 @@ import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.MergeScheduler; import org.apache.lucene.index.OneMergeHelper; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.Settings; @@ -71,7 +71,7 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler { this.config = indexSettings.getMergeSchedulerConfig(); this.shardId = shardId; this.indexSettings = indexSettings.getSettings(); - this.logger = Loggers.getLogger(getClass(), this.indexSettings, shardId); + this.logger = ServerLoggers.getLogger(getClass(), this.indexSettings, shardId); refreshConfig(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index b0e2654e7f2fb..b73bfb78f3cb9 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -51,7 +51,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver; @@ -130,7 +130,7 @@ protected Engine(EngineConfig engineConfig) { this.shardId = engineConfig.getShardId(); this.allocationId = engineConfig.getAllocationId(); this.store = engineConfig.getStore(); - this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name + this.logger = ServerLoggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name engineConfig.getIndexSettings().getSettings(), engineConfig.getShardId()); this.eventListener = engineConfig.getEventListener(); } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java index 698b289d758be..2384e34732040 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java +++ 
b/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java @@ -19,10 +19,8 @@ package org.elasticsearch.index.fielddata.plain; -import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.SortedSetDocValues; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.IndexFieldData; diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java index d3d9ffa481871..d7ce32d9b7628 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java @@ -120,7 +120,7 @@ public Integer getSeed() { /** * Set the field to be used for random number generation. This parameter is compulsory * when a {@link #seed(int) seed} is set and ignored otherwise. Note that documents that - * have the same value for a field will get the same score. + * have the same value for a field will get the same score. */ public RandomScoreFunctionBuilder setField(String field) { this.field = field; diff --git a/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java b/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java index 0e46a562488d3..1d02c33dd3e1b 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java +++ b/server/src/main/java/org/elasticsearch/index/shard/AbstractIndexShardComponent.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.index.IndexSettings; public abstract class AbstractIndexShardComponent implements IndexShardComponent { @@ -34,7 +34,7 @@ public abstract class AbstractIndexShardComponent implements IndexShardComponent protected AbstractIndexShardComponent(ShardId shardId, IndexSettings indexSettings) { this.shardId = shardId; this.indexSettings = indexSettings; - this.logger = Loggers.getLogger(getClass(), this.indexSettings.getSettings(), shardId); + this.logger = ServerLoggers.getLogger(getClass(), this.indexSettings.getSettings(), shardId); this.deprecationLogger = new DeprecationLogger(logger); } diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index e1080f2c2ccae..16afb55599d49 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -21,7 +21,6 @@ import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.Version; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index dab39c26a3c5b..74be98b813238 100644 --- 
a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -58,7 +58,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; @@ -159,7 +159,7 @@ public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService dire public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, OnClose onClose) throws IOException { super(shardId, indexSettings); final Settings settings = indexSettings.getSettings(); - this.directory = new StoreDirectory(directoryService.newDirectory(), Loggers.getLogger("index.store.deletes", settings, shardId)); + this.directory = new StoreDirectory(directoryService.newDirectory(), ServerLoggers.getLogger("index.store.deletes", settings, shardId)); this.shardLock = shardLock; this.onClose = onClose; final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 3ee9b953757c3..5a0ee1cf44d07 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -40,7 +40,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -120,7 +120,7 @@ public RecoverySourceHandler(final IndexShard shard, RecoveryTargetHandler recov this.recoveryTarget = recoveryTarget; this.request = request; this.shardId = this.request.shardId().id(); - this.logger = Loggers.getLogger(getClass(), nodeSettings, request.shardId(), "recover to " + request.targetNode().getName()); + this.logger = ServerLoggers.getLogger(getClass(), nodeSettings, request.shardId(), "recover to " + request.targetNode().getName()); this.chunkSizeInBytes = fileChunkSizeInBytes; this.response = new RecoveryResponse(); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 1bbcb9efa9644..f4c823c0e96a7 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -34,7 +34,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.CancellableThreads; import 
org.elasticsearch.common.util.concurrent.AbstractRefCounted; @@ -117,7 +117,7 @@ public RecoveryTarget(final IndexShard indexShard, this.cancellableThreads = new CancellableThreads(); this.recoveryId = idGenerator.incrementAndGet(); this.listener = listener; - this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); + this.logger = ServerLoggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); this.indexShard = indexShard; this.sourceNode = sourceNode; this.shardId = indexShard.shardId(); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 62fc271f99084..630afe4579bd1 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -67,6 +67,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; @@ -143,7 +144,6 @@ import java.io.Closeable; import java.io.IOException; import java.io.UncheckedIOException; -import java.net.Inet6Address; import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.charset.Charset; @@ -267,7 +267,7 @@ protected Node(final Environment environment, Collection throw new IllegalStateException("Failed to create node environment", ex); } final boolean hadPredefinedNodeName = NODE_NAME_SETTING.exists(tmpSettings); - Logger logger = Loggers.getLogger(Node.class, tmpSettings); + Logger logger = ServerLoggers.getLogger(Node.class, tmpSettings); final String nodeId = nodeEnvironment.nodeId(); tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId); // this must be captured after the node name is possibly added to the settings diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 1027785c57711..a6481b58ca499 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -259,8 +259,8 @@ Aggregator create(String name, final long maxOrd = getMaxOrd(valuesSource, context.searcher()); assert maxOrd != -1; - final double ratio = maxOrd / ((double) context.searcher().getIndexReader().numDocs()); - + final double ratio = maxOrd / ((double) context.searcher().getIndexReader().numDocs()); + if (factories == AggregatorFactories.EMPTY && includeExclude == null && Aggregator.descendsFromBucketAggregator(parent) == false && diff --git a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java index c5b99a91ffa3b..2c51c210b1edc 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/MaxMapCountCheckTests.java @@ -26,7 +26,7 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.ESLoggerFactory; -import 
org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; @@ -83,11 +83,11 @@ BufferedReader getBufferedReader(Path path) throws IOException { "I/O exception while trying to read [{}]", new Object[] { procSysVmMaxMapCountPath }, e -> ioException == e)); - Loggers.addAppender(logger, appender); + ServerLoggers.addAppender(logger, appender); assertThat(check.getMaxMapCount(logger), equalTo(-1L)); appender.assertAllExpectationsMatched(); verify(reader).close(); - Loggers.removeAppender(logger, appender); + ServerLoggers.removeAppender(logger, appender); appender.stop(); } @@ -105,11 +105,11 @@ BufferedReader getBufferedReader(Path path) throws IOException { "unable to parse vm.max_map_count [{}]", new Object[] { "eof" }, e -> e instanceof NumberFormatException && e.getMessage().equals("For input string: \"eof\""))); - Loggers.addAppender(logger, appender); + ServerLoggers.addAppender(logger, appender); assertThat(check.getMaxMapCount(logger), equalTo(-1L)); appender.assertAllExpectationsMatched(); verify(reader).close(); - Loggers.removeAppender(logger, appender); + ServerLoggers.removeAppender(logger, appender); appender.stop(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 0522f3f15f817..b8050d728a6b3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -44,6 +44,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -342,7 +343,7 @@ public void testMessageLogging() throws Exception{ new MockLogAppender.UnseenEventExpectation("no completed message logged on dry run", TransportClusterRerouteAction.class.getName(), Level.INFO, "allocated an empty primary*") ); - Loggers.addAppender(actionLogger, dryRunMockLog); + ServerLoggers.addAppender(actionLogger, dryRunMockLog); AllocationCommand dryRunAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true); ClusterRerouteResponse dryRunResponse = client().admin().cluster().prepareReroute() @@ -357,7 +358,7 @@ public void testMessageLogging() throws Exception{ dryRunMockLog.assertAllExpectationsMatched(); dryRunMockLog.stop(); - Loggers.removeAppender(actionLogger, dryRunMockLog); + ServerLoggers.removeAppender(actionLogger, dryRunMockLog); MockLogAppender allocateMockLog = new MockLogAppender(); allocateMockLog.start(); @@ -369,7 +370,7 @@ public void testMessageLogging() throws Exception{ new MockLogAppender.UnseenEventExpectation("no message for second allocate empty primary", TransportClusterRerouteAction.class.getName(), Level.INFO, "allocated an empty primary*" + nodeName2 + "*") ); - Loggers.addAppender(actionLogger, allocateMockLog); + ServerLoggers.addAppender(actionLogger, allocateMockLog); AllocationCommand yesDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true); AllocationCommand noDecisionAllocation = new 
AllocateEmptyPrimaryAllocationCommand("noexist", 1, nodeName2, true); @@ -385,7 +386,7 @@ public void testMessageLogging() throws Exception{ allocateMockLog.assertAllExpectationsMatched(); allocateMockLog.stop(); - Loggers.removeAppender(actionLogger, allocateMockLog); + ServerLoggers.removeAppender(actionLogger, allocateMockLog); } public void testClusterRerouteWithBlocks() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java index c8d5cdc6c86db..be03fbe1cd640 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java @@ -23,7 +23,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -63,7 +63,7 @@ public static class TestPlugin extends Plugin { protected final Settings settings; public TestPlugin(Settings settings) { - this.logger = Loggers.getLogger(getClass(), settings); + this.logger = ServerLoggers.getLogger(getClass(), settings); this.settings = settings; } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java index 1ed5a3ac7ed90..8ebe627751ce4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; -import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.metadata.IndexMetaData; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 3b551e912947a..4b941a6ce4a7f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; @@ -41,24 +40,19 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.indices.cluster.AbstractIndicesClusterStateServiceTestCase; import org.elasticsearch.indices.cluster.ClusterStateChanges; -import org.elasticsearch.indices.cluster.IndicesClusterStateService; import 
org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; -import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java index f6ab967a10b46..1406e4d6d6121 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; -import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.metadata.IndexMetaData; diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java index 34750180ff185..c104df913b205 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -130,7 +130,7 @@ public void testClusterStateUpdateLogging() throws Exception { "*failed to execute cluster state applier in [2s]*")); Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service"); - Loggers.addAppender(clusterLogger, mockAppender); + ServerLoggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(3); clusterApplierService.currentTimeOverride = System.nanoTime(); @@ -180,7 +180,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - Loggers.removeAppender(clusterLogger, mockAppender); + ServerLoggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); @@ -210,7 +210,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { "*cluster state applier task [test3] took [34s] above the warn threshold of *")); Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service"); - Loggers.addAppender(clusterLogger, 
mockAppender); + ServerLoggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(4); final CountDownLatch processedFirstTask = new CountDownLatch(1); @@ -276,7 +276,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - Loggers.removeAppender(clusterLogger, mockAppender); + ServerLoggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index 1b747f2268747..3b999b5f7733a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -231,7 +232,7 @@ public void testClusterStateUpdateLogging() throws Exception { "*processing [test3]: took [3s] done publishing updated cluster state (version: *, uuid: *)")); Logger clusterLogger = Loggers.getLogger(masterService.getClass().getPackage().getName()); - Loggers.addAppender(clusterLogger, mockAppender); + ServerLoggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(4); masterService.currentTimeOverride = System.nanoTime(); @@ -306,7 +307,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - Loggers.removeAppender(clusterLogger, mockAppender); + ServerLoggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); @@ -578,7 +579,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { "*cluster state update task [test4] took [34s] above the warn threshold of *")); Logger clusterLogger = Loggers.getLogger(masterService.getClass().getPackage().getName()); - Loggers.addAppender(clusterLogger, mockAppender); + ServerLoggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(5); final CountDownLatch processedFirstTask = new CountDownLatch(1); @@ -674,7 +675,7 @@ public void onFailure(String source, Exception e) { }); latch.await(); } finally { - Loggers.removeAppender(clusterLogger, mockAppender); + ServerLoggers.removeAppender(clusterLogger, mockAppender); mockAppender.stop(); } mockAppender.assertAllExpectationsMatched(); diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 2015a6b42d16f..29c7a2b161403 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import 
org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.index.IndexModule; import org.elasticsearch.test.ESTestCase; @@ -751,8 +751,8 @@ public void testLoggingUpdates() { settings.applySettings(Settings.builder().build()); assertEquals(property, ESLoggerFactory.getLogger("test").getLevel()); } finally { - Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); - Loggers.setLevel(ESLoggerFactory.getLogger("test"), testLevel); + ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level); + ServerLoggers.setLevel(ESLoggerFactory.getLogger("test"), testLevel); } } @@ -767,7 +767,7 @@ public void testFallbackToLoggerLevel() { settings.applySettings(Settings.builder().build()); // here we fall back to 'logger.level' which is our default. assertEquals(Level.ERROR, ESLoggerFactory.getRootLogger().getLevel()); } finally { - Loggers.setLevel(ESLoggerFactory.getRootLogger(), level); + ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 880f8dcba5de5..aeadcf30e3678 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -41,7 +41,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.zen.ElectMasterService; -import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.indices.IndexClosedException; @@ -52,8 +51,6 @@ import org.elasticsearch.test.InternalTestCluster.RestartCallback; import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; import java.util.List; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; diff --git a/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java b/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java index e9eb5d8b83d2e..301d4e3cfa360 100644 --- a/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java @@ -26,6 +26,7 @@ import org.apache.logging.log4j.core.filter.RegexFilter; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; @@ -71,8 +72,8 @@ public void testUpdateAutoThrottleSettings() throws Exception { MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings"); mockAppender.start(); final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings"); - Loggers.addAppender(settingsLogger, mockAppender); - Loggers.setLevel(settingsLogger, Level.TRACE); + ServerLoggers.addAppender(settingsLogger, mockAppender); + ServerLoggers.setLevel(settingsLogger, Level.TRACE); try { Settings.Builder builder = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) @@ -91,9 +92,9 @@ public void testUpdateAutoThrottleSettings() throws Exception { assertTrue(mockAppender.sawUpdateAutoThrottle); 
assertEquals(settings.getMergeSchedulerConfig().isAutoThrottle(), false); } finally { - Loggers.removeAppender(settingsLogger, mockAppender); + ServerLoggers.removeAppender(settingsLogger, mockAppender); mockAppender.stop(); - Loggers.setLevel(settingsLogger, (Level) null); + ServerLoggers.setLevel(settingsLogger, (Level) null); } } @@ -102,8 +103,8 @@ public void testUpdateMergeMaxThreadCount() throws Exception { MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings"); mockAppender.start(); final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings"); - Loggers.addAppender(settingsLogger, mockAppender); - Loggers.setLevel(settingsLogger, Level.TRACE); + ServerLoggers.addAppender(settingsLogger, mockAppender); + ServerLoggers.setLevel(settingsLogger, Level.TRACE); try { Settings.Builder builder = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) @@ -123,9 +124,9 @@ public void testUpdateMergeMaxThreadCount() throws Exception { // Make sure we log the change: assertTrue(mockAppender.sawUpdateMaxThreadCount); } finally { - Loggers.removeAppender(settingsLogger, mockAppender); + ServerLoggers.removeAppender(settingsLogger, mockAppender); mockAppender.stop(); - Loggers.setLevel(settingsLogger, (Level) null); + ServerLoggers.setLevel(settingsLogger, (Level) null); } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index a508d691ed3a6..518411e59e8cd 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -78,6 +78,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; @@ -1924,8 +1925,8 @@ public void testIndexWriterInfoStream() throws IllegalAccessException, IOExcepti Logger rootLogger = LogManager.getRootLogger(); Level savedLevel = rootLogger.getLevel(); - Loggers.addAppender(rootLogger, mockAppender); - Loggers.setLevel(rootLogger, Level.DEBUG); + ServerLoggers.addAppender(rootLogger, mockAppender); + ServerLoggers.setLevel(rootLogger, Level.DEBUG); rootLogger = LogManager.getRootLogger(); try { @@ -1936,15 +1937,15 @@ public void testIndexWriterInfoStream() throws IllegalAccessException, IOExcepti assertFalse(mockAppender.sawIndexWriterMessage); // Again, with TRACE, which should log IndexWriter output: - Loggers.setLevel(rootLogger, Level.TRACE); + ServerLoggers.setLevel(rootLogger, Level.TRACE); engine.index(indexForDoc(doc)); engine.flush(); assertTrue(mockAppender.sawIndexWriterMessage); } finally { - Loggers.removeAppender(rootLogger, mockAppender); + ServerLoggers.removeAppender(rootLogger, mockAppender); mockAppender.stop(); - Loggers.setLevel(rootLogger, savedLevel); + ServerLoggers.setLevel(rootLogger, savedLevel); } } @@ -2214,8 +2215,8 @@ public void testIndexWriterIFDInfoStream() throws IllegalAccessException, IOExce final Logger iwIFDLogger = Loggers.getLogger("org.elasticsearch.index.engine.Engine.IFD"); - Loggers.addAppender(iwIFDLogger, mockAppender); - Loggers.setLevel(iwIFDLogger, Level.DEBUG); + 
ServerLoggers.addAppender(iwIFDLogger, mockAppender); + ServerLoggers.setLevel(iwIFDLogger, Level.DEBUG); try { // First, with DEBUG, which should NOT log IndexWriter output: @@ -2226,16 +2227,16 @@ public void testIndexWriterIFDInfoStream() throws IllegalAccessException, IOExce assertFalse(mockAppender.sawIndexWriterIFDMessage); // Again, with TRACE, which should only log IndexWriter IFD output: - Loggers.setLevel(iwIFDLogger, Level.TRACE); + ServerLoggers.setLevel(iwIFDLogger, Level.TRACE); engine.index(indexForDoc(doc)); engine.flush(); assertFalse(mockAppender.sawIndexWriterMessage); assertTrue(mockAppender.sawIndexWriterIFDMessage); } finally { - Loggers.removeAppender(iwIFDLogger, mockAppender); + ServerLoggers.removeAppender(iwIFDLogger, mockAppender); mockAppender.stop(); - Loggers.setLevel(iwIFDLogger, (Level) null); + ServerLoggers.setLevel(iwIFDLogger, (Level) null); } } diff --git a/settings.gradle b/settings.gradle index 40034a8542448..b844af52df76b 100644 --- a/settings.gradle +++ b/settings.gradle @@ -28,6 +28,7 @@ List projects = [ 'test:fixtures:krb5kdc-fixture', 'test:fixtures:old-elasticsearch', 'test:logger-usage', + 'libs:elasticsearch-core', 'libs:elasticsearch-nio', 'modules:aggs-matrix-stats', 'modules:analysis-common', diff --git a/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java index 69dfae2c6788c..c078e88da20ee 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/store/EsBaseDirectoryTestCase.java @@ -27,7 +27,6 @@ import org.apache.lucene.util.TimeUnits; import org.elasticsearch.bootstrap.BootstrapForTesting; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index 470847e65f25f..a11b70bfa104e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -35,7 +35,6 @@ import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.List; import java.util.Random; import java.util.Set; diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java index 144b2be1b0235..f30c498b21020 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.ReferenceManager; -import org.apache.lucene.search.SearcherManager; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.logging.Loggers; diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java index e021df52c60fe..60cc6ceeccfa7 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java @@ -21,6 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.runner.Description; @@ -106,7 +107,7 @@ private Map processTestLogging(final TestLogging testLogging) { } for (final Map.Entry entry : map.entrySet()) { final Logger logger = resolveLogger(entry.getKey()); - Loggers.setLevel(logger, entry.getValue()); + ServerLoggers.setLevel(logger, entry.getValue()); } return existing; } @@ -145,7 +146,7 @@ private static Map getLoggersAndLevelsFromAnnotation(final TestL private Map reset(final Map map) { for (final Map.Entry previousLogger : map.entrySet()) { final Logger logger = resolveLogger(previousLogger.getKey()); - Loggers.setLevel(logger, previousLogger.getValue()); + ServerLoggers.setLevel(logger, previousLogger.getValue()); } return Collections.emptyMap(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java index e2eefc6376ad1..c7b8e0fef2f9b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Stash.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java index 1efd210b110c8..858a8ebd5ed0b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -21,7 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -95,7 +95,7 @@ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSha if (indexShard != null) { Boolean remove = shardSet.remove(indexShard); if (remove == Boolean.TRUE) { - Logger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); + Logger logger = ServerLoggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId()); } } From 190f1e1fb317a9f9e1e1d11e9df60c0aeb7e267c Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 18:00:20 +0100 Subject: [PATCH 08/94] Fix synonym phrase query expansion for cross_fields parsing (#28045) * Fix synonym phrase query expansion for cross_fields parsing The `cross_fields` mode of the query parser ignores phrase queries generated by multi-word synonyms.
In such cases only the first field of each analyzer group is kept. This change fixes the issue by expanding the phrase query for each analyzer group to **all** fields using a disjunction max query. --- .../index/search/MatchQuery.java | 17 ++++++- .../index/search/MultiMatchQuery.java | 47 +++++++++++++++++- .../index/search/MultiMatchQueryTests.java | 49 +++++++++++++++++++ 3 files changed, 110 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java index f37b1d6f47012..d6a0bf5f73802 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -29,6 +29,7 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.MultiTermQuery; @@ -350,7 +351,12 @@ protected Query analyzePhrase(String field, TokenStream stream, int slop) throws throw exc; } } - return super.analyzePhrase(field, stream, slop); + Query query = super.analyzePhrase(field, stream, slop); + if (query instanceof PhraseQuery) { + // synonyms that expand to multiple terms can return a phrase query. + return blendPhraseQuery((PhraseQuery) query, mapper); + } + return query; } /** @@ -472,6 +478,14 @@ private Query boolToExtendedCommonTermsQuery(BooleanQuery bq, Occur highFreqOccu } } + /** + * Called when a phrase query is built with {@link QueryBuilder#analyzePhrase(String, TokenStream, int)}. + * Subclasses can override this function to blend this query across multiple fields.
+ */ + protected Query blendPhraseQuery(PhraseQuery query, MappedFieldType fieldType) { + return query; + } + protected Query blendTermsQuery(Term[] terms, MappedFieldType fieldType) { return new SynonymQuery(terms); } @@ -494,5 +508,4 @@ protected Query blendTermQuery(Term term, MappedFieldType fieldType) { } return termQuery(fieldType, term.bytes(), lenient); } - } diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 61029f70e8f19..8a85c67b6815f 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -25,10 +25,10 @@ import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -143,6 +143,10 @@ public Query blendTerms(Term[] terms, MappedFieldType fieldType) { public Query termQuery(MappedFieldType fieldType, BytesRef value) { return MultiMatchQuery.this.termQuery(fieldType, value, lenient); } + + public Query blendPhrase(PhraseQuery query, MappedFieldType type) { + return MultiMatchQuery.super.blendPhraseQuery(query, type); + } } final class CrossFieldsQueryBuilder extends QueryBuilder { @@ -226,6 +230,17 @@ public Query termQuery(MappedFieldType fieldType, BytesRef value) { */ return blendTerm(new Term(fieldType.name(), value.utf8ToString()), fieldType); } + + @Override + public Query blendPhrase(PhraseQuery query, MappedFieldType type) { + if (blendedFields == null) { + return super.blendPhrase(query, type); + } + /** + * We build phrase queries for multi-word synonyms when {@link QueryBuilder#autoGenerateSynonymsPhraseQuery} is true. + */ + return MultiMatchQuery.blendPhrase(query, blendedFields); + } } static Query blendTerm(QueryShardContext context, BytesRef value, Float commonTermsCutoff, float tieBreaker, @@ -288,6 +303,28 @@ static Query blendTerms(QueryShardContext context, BytesRef[] values, Float comm } } + /** + * Expand a {@link PhraseQuery} to multiple fields that share the same analyzer. + * Returns a {@link DisjunctionMaxQuery} with a disjunction for each expanded field. + */ + static Query blendPhrase(PhraseQuery query, FieldAndFieldType... 
fields) { + List disjunctions = new ArrayList<>(); + for (FieldAndFieldType field : fields) { + int[] positions = query.getPositions(); + Term[] terms = query.getTerms(); + PhraseQuery.Builder builder = new PhraseQuery.Builder(); + for (int i = 0; i < terms.length; i++) { + builder.add(new Term(field.fieldType.name(), terms[i].bytes()), positions[i]); + } + Query q = builder.build(); + if (field.boost != AbstractQueryBuilder.DEFAULT_BOOST) { + q = new BoostQuery(q, field.boost); + } + disjunctions.add(q); + } + return new DisjunctionMaxQuery(disjunctions, 0.0f); + } + @Override protected Query blendTermQuery(Term term, MappedFieldType fieldType) { if (queryBuilder == null) { @@ -304,6 +341,14 @@ protected Query blendTermsQuery(Term[] terms, MappedFieldType fieldType) { return queryBuilder.blendTerms(terms, fieldType); } + @Override + protected Query blendPhraseQuery(PhraseQuery query, MappedFieldType fieldType) { + if (queryBuilder == null) { + return super.blendPhraseQuery(query, fieldType); + } + return queryBuilder.blendPhrase(query, fieldType); + } + static final class FieldAndFieldType { final MappedFieldType fieldType; final float boost; diff --git a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java index 5695094553de9..1f033b5fb4187 100644 --- a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java @@ -19,12 +19,16 @@ package org.elasticsearch.index.search; +import org.apache.lucene.analysis.MockSynonymAnalyzer; import org.apache.lucene.index.Term; import org.apache.lucene.queries.BlendedTermQuery; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; @@ -43,7 +47,11 @@ import org.junit.Before; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static org.hamcrest.Matchers.equalTo; @@ -220,4 +228,45 @@ public void testMultiMatchCrossFieldsWithSynonyms() throws IOException { assertThat(parsedQuery, equalTo(expectedQuery)); } + + public void testMultiMatchCrossFieldsWithSynonymsPhrase() throws IOException { + QueryShardContext queryShardContext = indexService.newQueryShardContext( + randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null); + MultiMatchQuery parser = new MultiMatchQuery(queryShardContext); + parser.setAnalyzer(new MockSynonymAnalyzer()); + Map fieldNames = new HashMap<>(); + fieldNames.put("name.first", 1.0f); + fieldNames.put("name.last", 1.0f); + Query query = parser.parse(MultiMatchQueryBuilder.Type.CROSS_FIELDS, fieldNames, "guinea pig", null); + + Term[] terms = new Term[2]; + terms[0] = new Term("name.first", "cavy"); + terms[1] = new Term("name.last", "cavy"); + float[] boosts = new float[2]; + Arrays.fill(boosts, 1.0f); + + List phraseDisjuncts = new ArrayList<>(); + phraseDisjuncts.add( + new PhraseQuery.Builder() + .add(new 
Term("name.first", "guinea")) + .add(new Term("name.first", "pig")) + .build() + ); + phraseDisjuncts.add( + new PhraseQuery.Builder() + .add(new Term("name.last", "guinea")) + .add(new Term("name.last", "pig")) + .build() + ); + BooleanQuery expected = new BooleanQuery.Builder() + .add( + new BooleanQuery.Builder() + .add(new DisjunctionMaxQuery(phraseDisjuncts, 0.0f), BooleanClause.Occur.SHOULD) + .add(BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f), BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.SHOULD + ) + .build(); + assertEquals(expected, query); + } } From ee7eac8dc19f2a5f77318040a9eb96c3d0a3d257 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 15 Jan 2018 10:20:30 -0700 Subject: [PATCH 09/94] `MockTcpTransport` to connect asynchronously (#28203) The method `initiateChannel` on `TcpTransport` is explicit in that channels can be connected asynchronously. All production implementations do connect asynchronously. Only the blocking `MockTcpTransport` connects in a synchronous manner. This avoids testing some of the blocking code in `TcpTransport` that waits on connections to complete. Additionally, it requires a more extensive method signature than the other transports require. This commit modifies the `MockTcpTransport` to make these connections asynchronously on a different thread. It also simplifies the `initiateChannel` method signature. --- .../transport/netty4/Netty4Transport.java | 8 +-- .../transport/nio/NioTransport.java | 7 +-- .../elasticsearch/transport/TcpTransport.java | 13 ++-- .../transport/TcpTransportTests.java | 6 +- .../transport/MockTcpTransport.java | 61 +++++++++++-------- .../transport/nio/MockNioTransport.java | 7 +-- 6 files changed, 46 insertions(+), 56 deletions(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 67b1607aa8a3a..f4818a2e56752 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -40,7 +40,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -51,12 +50,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TransportRequestOptions; @@ -239,9 +236,8 @@ protected final void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) } @Override - protected NettyTcpChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener listener) - throws IOException { - ChannelFuture channelFuture = bootstrap.connect(node.getAddress().address()); + protected NettyTcpChannel 
initiateChannel(InetSocketAddress address, ActionListener listener) throws IOException { + ChannelFuture channelFuture = bootstrap.connect(address); Channel channel = channelFuture.channel(); if (channel == null) { Netty4Utils.maybeDie(channelFuture.cause()); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index 42063878b4b2f..9917bf79f593b 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -21,14 +21,12 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -93,9 +91,8 @@ protected TcpNioServerSocketChannel bind(String name, InetSocketAddress address) } @Override - protected TcpNioSocketChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener connectListener) - throws IOException { - TcpNioSocketChannel channel = nioGroup.openChannel(node.getAddress().address(), clientChannelFactory); + protected TcpNioSocketChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + TcpNioSocketChannel channel = nioGroup.openChannel(address, clientChannelFactory); channel.addConnectListener(ActionListener.toBiConsumer(connectListener)); return channel; } diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 54bfcaa6027d7..727ce2f157b31 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -604,7 +604,7 @@ public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile c try { PlainActionFuture connectFuture = PlainActionFuture.newFuture(); connectionFutures.add(connectFuture); - TcpChannel channel = initiateChannel(node, connectionProfile.getConnectTimeout(), connectFuture); + TcpChannel channel = initiateChannel(node.getAddress().address(), connectFuture); logger.trace(() -> new ParameterizedMessage("Tcp transport client channel opened: {}", channel)); channels.add(channel); } catch (Exception e) { @@ -1057,17 +1057,14 @@ protected void serverAcceptedChannel(TcpChannel channel) { protected abstract TcpChannel bind(String name, InetSocketAddress address) throws IOException; /** - * Initiate a single tcp socket channel to a node. Implementations do not have to observe the connectTimeout. - * It is provided for synchronous connection implementations. + * Initiate a single tcp socket channel. 
* - * @param node the node - * @param connectTimeout the connection timeout - * @param connectListener listener to be called when connection complete + * @param address address for the initiated connection + * @param connectListener listener to be called when connection complete * @return the pending connection * @throws IOException if an I/O exception occurs while opening the channel */ - protected abstract TcpChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener connectListener) - throws IOException; + protected abstract TcpChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException; /** * Called to tear down internal resources diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index f63cd1c7a3e93..2cedb5419e08e 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -41,7 +40,6 @@ import java.io.IOException; import java.io.StreamCorruptedException; import java.net.InetSocketAddress; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -49,7 +47,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsInstanceOf.instanceOf; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; /** Unit tests for {@link TcpTransport} */ public class TcpTransportTests extends ESTestCase { @@ -193,8 +190,7 @@ protected FakeChannel bind(String name, InetSocketAddress address) throws IOExce } @Override - protected FakeChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener connectListener) - throws IOException { + protected FakeChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { return new FakeChannel(messageCaptor); } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index 91b2a2f79e310..570827bd54ee4 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -21,7 +21,6 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.InputStreamStreamInput; @@ -30,7 +29,6 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.CancellableThreads; import 
org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -49,7 +47,6 @@ import java.net.ServerSocket; import java.net.Socket; import java.net.SocketException; -import java.net.SocketTimeoutException; import java.util.Collections; import java.util.HashSet; import java.util.Set; @@ -61,7 +58,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; /** * This is a socket based blocking TcpTransport implementation that is used for tests @@ -164,28 +160,32 @@ private void readMessage(MockChannel mockChannel, StreamInput input) throws IOEx } @Override - protected MockChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener connectListener) - throws IOException { - InetSocketAddress address = node.getAddress().address(); + protected MockChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { final MockSocket socket = new MockSocket(); + final MockChannel channel = new MockChannel(socket, address, "none"); + boolean success = false; try { configureSocket(socket); - try { - socket.connect(address, Math.toIntExact(connectTimeout.millis())); - } catch (SocketTimeoutException ex) { - throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", ex); - } - MockChannel channel = new MockChannel(socket, address, "none", (c) -> {}); - channel.loopRead(executor); success = true; - connectListener.onResponse(null); - return channel; } finally { if (success == false) { IOUtils.close(socket); } + } + + executor.submit(() -> { + try { + socket.connect(address); + channel.loopRead(executor); + connectListener.onResponse(null); + } catch (Exception ex) { + connectListener.onFailure(ex); + } + }); + + return channel; } @Override @@ -218,7 +218,6 @@ public final class MockChannel implements Closeable, TcpChannel { private final Socket activeChannel; private final String profile; private final CancellableThreads cancellableThreads = new CancellableThreads(); - private final Closeable onClose; private final CompletableFuture closeFuture = new CompletableFuture<>(); /** @@ -227,14 +226,12 @@ public final class MockChannel implements Closeable, TcpChannel { * @param socket The client socket. Mut not be null. * @param localAddress Address associated with the corresponding local server socket. Must not be null. * @param profile The associated profile name. - * @param onClose Callback to execute when this channel is closed. */ - public MockChannel(Socket socket, InetSocketAddress localAddress, String profile, Consumer onClose) { + public MockChannel(Socket socket, InetSocketAddress localAddress, String profile) { this.localAddress = localAddress; this.activeChannel = socket; this.serverSocket = null; this.profile = profile; - this.onClose = () -> onClose.accept(this); synchronized (openChannels) { openChannels.add(this); } @@ -246,12 +243,11 @@ public MockChannel(Socket socket, InetSocketAddress localAddress, String profile * @param serverSocket The associated server socket. Must not be null. * @param profile The associated profile name. 
*/ - public MockChannel(ServerSocket serverSocket, String profile) { + MockChannel(ServerSocket serverSocket, String profile) { this.localAddress = (InetSocketAddress) serverSocket.getLocalSocketAddress(); this.serverSocket = serverSocket; this.profile = profile; this.activeChannel = null; - this.onClose = null; synchronized (openChannels) { openChannels.add(this); } @@ -266,8 +262,19 @@ public void accept(Executor executor) throws IOException { synchronized (this) { if (isOpen.get()) { incomingChannel = new MockChannel(incomingSocket, - new InetSocketAddress(incomingSocket.getLocalAddress(), incomingSocket.getPort()), profile, - workerChannels::remove); + new InetSocketAddress(incomingSocket.getLocalAddress(), incomingSocket.getPort()), profile); + MockChannel finalIncomingChannel = incomingChannel; + incomingChannel.addCloseListener(new ActionListener() { + @Override + public void onResponse(Void aVoid) { + workerChannels.remove(finalIncomingChannel); + } + + @Override + public void onFailure(Exception e) { + workerChannels.remove(finalIncomingChannel); + } + }); serverAcceptedChannel(incomingChannel); //establish a happens-before edge between closing and accepting a new connection workerChannels.add(incomingChannel); @@ -287,7 +294,7 @@ public void accept(Executor executor) throws IOException { } } - public void loopRead(Executor executor) { + void loopRead(Executor executor) { executor.execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { @@ -312,7 +319,7 @@ protected void doRun() throws Exception { }); } - public synchronized void close0() throws IOException { + synchronized void close0() throws IOException { // establish a happens-before edge between closing and accepting a new connection // we have to sync this entire block to ensure that our openChannels checks work correctly. 
// The close block below will close all worker channels but if one of the worker channels runs into an exception @@ -325,7 +332,7 @@ public synchronized void close0() throws IOException { removedChannel = openChannels.remove(this); } IOUtils.close(serverSocket, activeChannel, () -> IOUtils.close(workerChannels), - () -> cancellableThreads.cancel("channel closed"), onClose); + () -> cancellableThreads.cancel("channel closed")); assert removedChannel: "Channel was not removed or removed twice?"; } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index 5911d10fa2973..a8876453b5b2f 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -21,13 +21,11 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -83,9 +81,8 @@ protected MockServerChannel bind(String name, InetSocketAddress address) throws } @Override - protected MockSocketChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener connectListener) - throws IOException { - MockSocketChannel channel = nioGroup.openChannel(node.getAddress().address(), clientChannelFactory); + protected MockSocketChannel initiateChannel(InetSocketAddress address, ActionListener connectListener) throws IOException { + MockSocketChannel channel = nioGroup.openChannel(address, clientChannelFactory); channel.addConnectListener(ActionListener.toBiConsumer(connectListener)); return channel; } From bd11e6c44193260d1219c6537e19ee4f236f8e1e Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 18:30:38 +0100 Subject: [PATCH 10/94] Fix NPE on composite aggregation with sub-aggregations that need scores (#28129) The composite aggregation defers the collection of sub-aggregations to a second pass that visits documents only if they appear in the top buckets. However, the scorer for sub-aggregations is not set on this second pass, which generates an NPE if any sub-aggregation tries to access the score. This change creates a scorer for the second pass and makes sure that sub-aggs can use it safely to check the score of the collected documents.
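To make the replay mechanism concrete, the following is a minimal, self-contained sketch of the pattern this commit introduces: build a Weight once for the query, obtain a Scorer per segment, advance its iterator to each replayed doc id, and read the score before handing the doc to a consumer. This is not the Elasticsearch code itself; the `ReplayWithScores` class and `DocConsumer` callback are hypothetical names, and it assumes the Lucene 7.x `IndexSearcher#createNormalizedWeight` API that was current when this patch was written.

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

final class ReplayWithScores {

    /** Callback receiving each replayed doc id together with its score. */
    interface DocConsumer {
        void accept(int doc, float score) throws IOException;
    }

    /**
     * Replays the given matching doc ids (sorted ascending) of one segment,
     * positioning a scorer on each doc so its score can be read safely.
     */
    static void replay(IndexSearcher searcher, Query query, LeafReaderContext ctx,
                       int[] replayDocs, DocConsumer consumer) throws IOException {
        // Build the weight once per search; Lucene 7.x API. Newer Lucene
        // versions build the weight via Query#createWeight with a ScoreMode.
        Weight weight = searcher.createNormalizedWeight(query, true);
        Scorer scorer = weight.scorer(ctx);
        if (scorer == null) {
            return; // the query matches nothing in this segment
        }
        DocIdSetIterator it = scorer.iterator();
        for (int doc : replayDocs) {
            if (it.docID() < doc) {
                // advance() requires a target beyond the current position,
                // which holds because replayDocs is sorted ascending
                it.advance(doc);
            }
            assert it.docID() == doc : "replayed doc must match the query";
            consumer.accept(doc, scorer.score());
        }
    }
}

The key invariant, also asserted in the patch below, is that `DocIdSetIterator#advance` is only called with strictly increasing targets; this holds because the replayed doc ids were collected in document order, so the scorer lands exactly on each replayed document before its score is read.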
--- .../bucket/composite/CompositeAggregator.java | 23 ++++ .../composite/CompositeAggregatorTests.java | 73 ++++++++++++++++++- .../aggregations/AggregatorTestCase.java | 39 +++++++--- 3 files changed, 123 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index 9612ba2f895bc..3467aaf318baf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -23,6 +23,9 @@ import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.RoaringDocIdSet; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -87,6 +90,12 @@ public InternalAggregation buildAggregation(long zeroBucket) throws IOException // Replay all documents that contain at least one top bucket (collected during the first pass). grow(keys.size()+1); + final boolean needsScores = needsScores(); + Weight weight = null; + if (needsScores) { + Query query = context.query(); + weight = context.searcher().createNormalizedWeight(query, true); + } for (LeafContext context : contexts) { DocIdSetIterator docIdSetIterator = context.docIdSet.iterator(); if (docIdSetIterator == null) { @@ -95,7 +104,21 @@ public InternalAggregation buildAggregation(long zeroBucket) throws IOException final CompositeValuesSource.Collector collector = array.getLeafCollector(context.ctx, getSecondPassCollector(context.subCollector)); int docID; + DocIdSetIterator scorerIt = null; + if (needsScores) { + Scorer scorer = weight.scorer(context.ctx); + // We don't need to check if the scorer is null + // since we are sure that there are documents to replay (docIdSetIterator is not empty). 
+ scorerIt = scorer.iterator(); + context.subCollector.setScorer(scorer); + } while ((docID = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + if (needsScores) { + assert scorerIt.docID() < docID; + scorerIt.advance(docID); + // aggregations should only be replayed on matching documents + assert scorerIt.docID() == docID; + } collector.collect(docID); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index 339f9bda65a0a..172aebbc0e5dc 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -50,6 +50,8 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.metrics.tophits.TopHits; +import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.IndexSettingsModule; import org.joda.time.DateTimeZone; @@ -1065,8 +1067,73 @@ public void testWithKeywordAndDateHistogram() throws IOException { ); } - private void testSearchCase(Query query, - Sort sort, + public void testWithKeywordAndTopHits() throws Exception { + final List>> dataset = new ArrayList<>(); + dataset.addAll( + Arrays.asList( + createDocument("keyword", "a"), + createDocument("keyword", "c"), + createDocument("keyword", "a"), + createDocument("keyword", "d"), + createDocument("keyword", "c") + ) + ); + final Sort sort = new Sort(new SortedSetSortField("keyword", false)); + testSearchCase(new MatchAllDocsQuery(), sort, dataset, + () -> { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") + .field("keyword"); + return new CompositeAggregationBuilder("name", Collections.singletonList(terms)) + .subAggregation(new TopHitsAggregationBuilder("top_hits").storedField("_none_")); + }, (result) -> { + assertEquals(3, result.getBuckets().size()); + assertEquals("{keyword=a}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + TopHits topHits = result.getBuckets().get(0).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 2); + assertEquals(topHits.getHits().getTotalHits(), 2L); + assertEquals("{keyword=c}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + topHits = result.getBuckets().get(1).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 2); + assertEquals(topHits.getHits().getTotalHits(), 2L); + assertEquals("{keyword=d}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(2).getDocCount()); + topHits = result.getBuckets().get(2).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 1); + assertEquals(topHits.getHits().getTotalHits(), 1L); + } + ); + + testSearchCase(new MatchAllDocsQuery(), sort, dataset, + () -> { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword") + .field("keyword"); 
return new CompositeAggregationBuilder("name", Collections.singletonList(terms)) + .aggregateAfter(Collections.singletonMap("keyword", "a")) + .subAggregation(new TopHitsAggregationBuilder("top_hits").storedField("_none_")); + }, (result) -> { + assertEquals(2, result.getBuckets().size()); + assertEquals("{keyword=c}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + TopHits topHits = result.getBuckets().get(0).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 2); + assertEquals(topHits.getHits().getTotalHits(), 2L); + assertEquals("{keyword=d}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(1).getDocCount()); + topHits = result.getBuckets().get(1).getAggregations().get("top_hits"); + assertNotNull(topHits); + assertEquals(topHits.getHits().getHits().length, 1); + assertEquals(topHits.getHits().getTotalHits(), 1L); + } + ); + } + + private void testSearchCase(Query query, Sort sort, List>> dataset, Supplier create, Consumer verify) throws IOException { @@ -1107,7 +1174,7 @@ private void executeTestCase(boolean reduced, IndexSearcher indexSearcher = newSearcher(indexReader, sort == null, sort == null); CompositeAggregationBuilder aggregationBuilder = create.get(); if (sort != null) { - CompositeAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, indexSettings, FIELD_TYPES); + CompositeAggregator aggregator = createAggregator(query, aggregationBuilder, indexSearcher, indexSettings, FIELD_TYPES); assertTrue(aggregator.canEarlyTerminate()); } final InternalComposite composite; diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index f34b1c6e79f69..720d701e64ced 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -103,16 +103,27 @@ protected AggregatorFactory createAggregatorFactory(AggregationBuilder aggreg new MultiBucketConsumer(DEFAULT_MAX_BUCKETS), fieldTypes); } - /** Create a factory for the given aggregation builder. */ + protected AggregatorFactory createAggregatorFactory(AggregationBuilder aggregationBuilder, IndexSearcher indexSearcher, IndexSettings indexSettings, MultiBucketConsumer bucketConsumer, MappedFieldType... fieldTypes) throws IOException { + return createAggregatorFactory(null, aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes); + } + + /** Create a factory for the given aggregation builder. */ + protected AggregatorFactory createAggregatorFactory(Query query, + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + IndexSettings indexSettings, + MultiBucketConsumer bucketConsumer, + MappedFieldType... 
fieldTypes) throws IOException { SearchContext searchContext = createSearchContext(indexSearcher, indexSettings); CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); when(searchContext.aggregations()) .thenReturn(new SearchContextAggregations(AggregatorFactories.EMPTY, bucketConsumer)); + when(searchContext.query()).thenReturn(query); when(searchContext.bigArrays()).thenReturn(new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), circuitBreakerService)); // TODO: now just needed for top_hits, this will need to be revised for other agg unit tests: MapperService mapperService = mapperServiceMock(); @@ -146,19 +157,20 @@ protected A createAggregator(AggregationBuilder aggregati new MultiBucketConsumer(DEFAULT_MAX_BUCKETS), fieldTypes); } - protected A createAggregator(AggregationBuilder aggregationBuilder, + protected A createAggregator(Query query, + AggregationBuilder aggregationBuilder, IndexSearcher indexSearcher, IndexSettings indexSettings, MappedFieldType... fieldTypes) throws IOException { - return createAggregator(aggregationBuilder, indexSearcher, indexSettings, + return createAggregator(query, aggregationBuilder, indexSearcher, indexSettings, new MultiBucketConsumer(DEFAULT_MAX_BUCKETS), fieldTypes); } - protected A createAggregator(AggregationBuilder aggregationBuilder, + protected A createAggregator(Query query, AggregationBuilder aggregationBuilder, IndexSearcher indexSearcher, MultiBucketConsumer bucketConsumer, MappedFieldType... fieldTypes) throws IOException { - return createAggregator(aggregationBuilder, indexSearcher, createIndexSettings(), bucketConsumer, fieldTypes); + return createAggregator(query, aggregationBuilder, indexSearcher, createIndexSettings(), bucketConsumer, fieldTypes); } protected A createAggregator(AggregationBuilder aggregationBuilder, @@ -166,8 +178,17 @@ protected A createAggregator(AggregationBuilder aggregati IndexSettings indexSettings, MultiBucketConsumer bucketConsumer, MappedFieldType... fieldTypes) throws IOException { + return createAggregator(null, aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes); + } + + protected A createAggregator(Query query, + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + IndexSettings indexSettings, + MultiBucketConsumer bucketConsumer, + MappedFieldType... fieldTypes) throws IOException { @SuppressWarnings("unchecked") - A aggregator = (A) createAggregatorFactory(aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes) + A aggregator = (A) createAggregatorFactory(query, aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes) .create(null, true); return aggregator; } @@ -262,7 +283,7 @@ protected A search(IndexSe int maxBucket, MappedFieldType... 
fieldTypes) throws IOException { MultiBucketConsumer bucketConsumer = new MultiBucketConsumer(maxBucket); - C a = createAggregator(builder, searcher, bucketConsumer, fieldTypes); + C a = createAggregator(query, builder, searcher, bucketConsumer, fieldTypes); a.preCollection(); searcher.search(query, a); a.postCollection(); @@ -310,11 +331,11 @@ protected A searchAndReduc Query rewritten = searcher.rewrite(query); Weight weight = searcher.createWeight(rewritten, true, 1f); MultiBucketConsumer bucketConsumer = new MultiBucketConsumer(maxBucket); - C root = createAggregator(builder, searcher, bucketConsumer, fieldTypes); + C root = createAggregator(query, builder, searcher, bucketConsumer, fieldTypes); for (ShardSearcher subSearcher : subSearchers) { MultiBucketConsumer shardBucketConsumer = new MultiBucketConsumer(maxBucket); - C a = createAggregator(builder, subSearcher, shardBucketConsumer, fieldTypes); + C a = createAggregator(query, builder, subSearcher, shardBucketConsumer, fieldTypes); a.preCollection(); subSearcher.search(weight, a); a.postCollection(); From 77a7e2480b6745855d0bbfc6020ce70378b6267e Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 15 Jan 2018 18:34:10 +0100 Subject: [PATCH 11/94] Allow update of `eager_global_ordinals` on `_parent`. (#28014) A bug introduced in #24407 currently prevents `eager_global_ordinals` from being updated. This new approach should fix the issue while still allowing mapping updates to not specify the `_parent` field if it doesn't need updating, which was the goal of #24407. --- .../index/mapper/ParentFieldMapper.java | 11 +++++----- .../index/mapper/ParentFieldMapperTests.java | 20 +++++++++++++++++++ 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java index 73109a3ecd8f9..34eaf569ca949 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java @@ -303,15 +303,16 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith; - ParentFieldType currentFieldType = (ParentFieldType) fieldType.clone(); - super.doMerge(mergeWith, updateAllTypes); if (fieldMergeWith.parentType != null && Objects.equals(parentType, fieldMergeWith.parentType) == false) { throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]"); } - - if (active()) { - fieldType = currentFieldType; + // If fieldMergeWith is not active it means the user provided a mapping + // update that does not explicitly configure the _parent field, so we + // ignore it. 
+ if (fieldMergeWith.active()) { + super.doMerge(mergeWith, updateAllTypes); } + } /** diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java index d0e17b808c596..d21827ee18cea 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; +import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -138,4 +139,23 @@ private static int getNumberOfFieldWithParentPrefix(ParseContext.Document doc) { return numFieldWithParentPrefix; } + public void testUpdateEagerGlobalOrds() throws IOException { + String parentMapping = XContentFactory.jsonBuilder().startObject().startObject("parent_type") + .endObject().endObject().string(); + String childMapping = XContentFactory.jsonBuilder().startObject().startObject("child_type") + .startObject("_parent").field("type", "parent_type").endObject() + .endObject().endObject().string(); + IndexService indexService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build()); + indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE, false); + + assertTrue(indexService.mapperService().documentMapper("child_type").parentFieldMapper().fieldType().eagerGlobalOrdinals()); + + String childMappingUpdate = XContentFactory.jsonBuilder().startObject().startObject("child_type") + .startObject("_parent").field("type", "parent_type").field("eager_global_ordinals", false).endObject() + .endObject().endObject().string(); + indexService.mapperService().merge("child_type", new CompressedXContent(childMappingUpdate), MergeReason.MAPPING_UPDATE, false); + + assertFalse(indexService.mapperService().documentMapper("child_type").parentFieldMapper().fieldType().eagerGlobalOrdinals()); + } } From 05e851f0b0fb6e28a8c4e0191ef6f54626d16465 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 15 Jan 2018 18:34:38 +0100 Subject: [PATCH 12/94] Ignore the `-snapshot` suffix when comparing the Lucene version in the build and the docs. (#27927) Currently if the Lucene version is `X.Y.Z-snapshot-{gitrev}`, then we will expect the docs to have `X.Y.Z-snapshot` as a Lucene version. I would like to change it to `X.Y.Z` so that this doesn't need changing when we move from a snapshot to a final release. 
--- qa/verify-version-constants/build.gradle | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index 111c4ccf20e50..1d31db6898b7b 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -76,10 +76,8 @@ task verifyDocsLuceneVersion { throw new GradleException('Could not find lucene version in docs version file') } String expectedLuceneVersion = VersionProperties.lucene - if (expectedLuceneVersion.contains('-snapshot-')) { - expectedLuceneVersion = expectedLuceneVersion.substring(0, expectedLuceneVersion.lastIndexOf('-')) - expectedLuceneVersion = expectedLuceneVersion.toUpperCase(Locale.ROOT) - } + // remove potential -snapshot-{gitrev} suffix + expectedLuceneVersion -= ~/-snapshot-[0-9a-f]+$/ if (docsLuceneVersion != expectedLuceneVersion) { throw new GradleException("Lucene version in docs [${docsLuceneVersion}] does not match version.properties [${expectedLuceneVersion}]") } From a16f80a8321776aea6a105315ffc503014e45732 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 15 Jan 2018 18:35:27 +0100 Subject: [PATCH 13/94] Fix casts in HotThreads. (#27578) Even though an overflow would be very unlikely, it's better to use the longs directly in the comparator. --- .../elasticsearch/monitor/jvm/HotThreads.java | 36 ++++++++----------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java index 1714d00abb206..3b6415437f97c 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java @@ -35,6 +35,7 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.function.ToLongFunction; public class HotThreads { @@ -187,19 +188,19 @@ private String innerDetect() throws Exception { List hotties = new ArrayList<>(threadInfos.values()); final int busiestThreads = Math.min(this.busiestThreads, hotties.size()); // skip that for now - CollectionUtil.introSort(hotties, new Comparator() { - @Override - public int compare(MyThreadInfo o1, MyThreadInfo o2) { - if ("cpu".equals(type)) { - return (int) (o2.cpuTime - o1.cpuTime); - } else if ("wait".equals(type)) { - return (int) (o2.waitedTime - o1.waitedTime); - } else if ("block".equals(type)) { - return (int) (o2.blockedTime - o1.blockedTime); - } - throw new IllegalArgumentException("expected thread type to be either 'cpu', 'wait', or 'block', but was " + type); - } - }); + final ToLongFunction getter; + if ("cpu".equals(type)) { + getter = o -> o.cpuTime; + } else if ("wait".equals(type)) { + getter = o -> o.waitedTime; + } else if ("block".equals(type)) { + getter = o -> o.blockedTime; + } else { + throw new IllegalArgumentException("expected thread type to be either 'cpu', 'wait', or 'block', but was " + type); + } + + CollectionUtil.introSort(hotties, Comparator.comparingLong(getter).reversed()); + // analyse N stack traces for M busiest threads long[] ids = new long[busiestThreads]; for (int i = 0; i < busiestThreads; i++) { @@ -215,14 +216,7 @@ public int compare(MyThreadInfo o1, MyThreadInfo o2) { Thread.sleep(threadElementsSnapshotDelay.millis()); } for (int t = 0; t < busiestThreads; t++) { - long time = 0; - if ("cpu".equals(type)) { - time = hotties.get(t).cpuTime; - } else if ("wait".equals(type)) { - time = 
hotties.get(t).waitedTime; - } else if ("block".equals(type)) { - time = hotties.get(t).blockedTime; - } + long time = getter.applyAsLong(hotties.get(t)); String threadName = null; for (ThreadInfo[] info : allInfos) { if (info != null && info[t] != null) { From 0a92e43f6252911a0a0fab44af6e0075f57d7bad Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 15 Jan 2018 18:36:32 +0100 Subject: [PATCH 14/94] Avoid doing redundant work when checking for self references. (#26927) Currently we test all maps, arrays or iterables. However, in the case that maps contain sub maps for instance, we will test the sub maps again even though the work has already been done for the top-level map. Relates #26907 --- .../common/xcontent/XContentBuilder.java | 49 ++++++++++--------- .../common/xcontent/BaseXContentTestCase.java | 1 - 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index f0427ce246669..070510e13ff69 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -773,32 +773,23 @@ public XContentBuilder field(String name, Object value) throws IOException { } public XContentBuilder array(String name, Object... values) throws IOException { - return field(name).values(values); + return field(name).values(values, true); } - XContentBuilder values(Object[] values) throws IOException { + private XContentBuilder values(Object[] values, boolean ensureNoSelfReferences) throws IOException { if (values == null) { return nullValue(); } - // checks that the array of object does not contain references to itself because - // iterating over entries will cause a stackoverflow error - ensureNoSelfReferences(values); - - startArray(); - for (Object o : values) { - value(o); - } - endArray(); - return this; + return value(Arrays.asList(values), ensureNoSelfReferences); } public XContentBuilder value(Object value) throws IOException { - unknownValue(value); + unknownValue(value, true); return this; } - private void unknownValue(Object value) throws IOException { + private void unknownValue(Object value, boolean ensureNoSelfReferences) throws IOException { if (value == null) { nullValue(); return; @@ -810,11 +801,11 @@ private void unknownValue(Object value) throws IOException { //Path implements Iterable and causes endless recursion and a StackOverFlow if treated as an Iterable here value((Path) value); } else if (value instanceof Map) { - map((Map) value); + map((Map) value, ensureNoSelfReferences); } else if (value instanceof Iterable) { - value((Iterable) value); + value((Iterable) value, ensureNoSelfReferences); } else if (value instanceof Object[]) { - values((Object[]) value); + values((Object[]) value, ensureNoSelfReferences); } else if (value instanceof Calendar) { value((Calendar) value); } else if (value instanceof ReadableInstant) { @@ -863,18 +854,25 @@ public XContentBuilder field(String name, Map values) throws IOE } public XContentBuilder map(Map values) throws IOException { + return map(values, true); + } + + private XContentBuilder map(Map values, boolean ensureNoSelfReferences) throws IOException { if (values == null) { return nullValue(); } // checks that the map does not contain references to itself because // iterating over map entries will cause a stackoverflow error - ensureNoSelfReferences(values); + if 
(ensureNoSelfReferences) { + ensureNoSelfReferences(values); + } startObject(); for (Map.Entry value : values.entrySet()) { field(value.getKey()); - unknownValue(value.getValue()); + // pass ensureNoSelfReferences=false as we already performed the check at a higher level + unknownValue(value.getValue(), false); } endObject(); return this; @@ -884,7 +882,7 @@ public XContentBuilder field(String name, Iterable values) throws IOException return field(name).value(values); } - private XContentBuilder value(Iterable values) throws IOException { + private XContentBuilder value(Iterable values, boolean ensureNoSelfReferences) throws IOException { if (values == null) { return nullValue(); } @@ -895,11 +893,14 @@ private XContentBuilder value(Iterable values) throws IOException { } else { // checks that the iterable does not contain references to itself because // iterating over entries will cause a stackoverflow error - ensureNoSelfReferences(values); + if (ensureNoSelfReferences) { + ensureNoSelfReferences(values); + } startArray(); for (Object value : values) { - unknownValue(value); + // pass ensureNoSelfReferences=false as we already performed the check at a higher level + unknownValue(value, false); } endArray(); } @@ -1076,9 +1077,9 @@ private static void ensureNoSelfReferences(final Object value, final Set Iterable it; if (value instanceof Map) { - it = ((Map) value).values(); + it = ((Map) value).values(); } else if ((value instanceof Iterable) && (value instanceof Path == false)) { - it = (Iterable) value; + it = (Iterable) value; } else if (value instanceof Object[]) { it = Arrays.asList((Object[]) value); } else { diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index e468751cf4aba..e368163a4e95c 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -534,7 +534,6 @@ public void testObjects() throws Exception { final String expected = o.getKey(); assertResult(expected, () -> builder().startObject().field("objects", o.getValue()).endObject()); assertResult(expected, () -> builder().startObject().field("objects").value(o.getValue()).endObject()); - assertResult(expected, () -> builder().startObject().field("objects").values(o.getValue()).endObject()); assertResult(expected, () -> builder().startObject().array("objects", o.getValue()).endObject()); } } From b82017cbfeb8eff0af7bf2c6f8e93db91d172e73 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 15 Jan 2018 19:35:54 +0100 Subject: [PATCH 15/94] Fix daitch_mokotoff phonetic filter to use the dedicated Lucene filter (#28225) This commit changes the phonetic filter factory to use a DaitchMokotoffSoundexFilter instead of a PhoneticFilter with a daitch_mokotoff encoder when daitch_mokotoff is selected. The latter does not handle branching when computing the soundex and fails to encode multiple variations when possible. 
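As a standalone illustration of that branching behaviour, the following sketch (mirroring the new unit test in the diff below, not shipped code) feeds a single token through the dedicated Lucene filter and prints one code per branch:

import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.phonetic.DaitchMokotoffSoundexFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class DaitchMokotoffBranchingDemo {
    public static void main(String[] args) throws Exception {
        WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("chauptman"));
        // inject=false replaces the input token with its phonetic codes,
        // matching the factory's default replace=true behaviour below.
        TokenStream stream = new DaitchMokotoffSoundexFilter(tokenizer, false);
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            // "ch" may be coded as 4 or 5, so both "473660" and "573660" are emitted
            System.out.println(term.toString());
        }
        stream.end();
        stream.close();
    }
}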
Closes #28211 --- .../index/analysis/PhoneticTokenFilterFactory.java | 9 ++++++++- .../index/analysis/SimplePhoneticAnalysisTests.java | 11 +++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java index d02ac2ae2be70..b63ad561a5add 100644 --- a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java +++ b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java @@ -33,6 +33,7 @@ import org.apache.commons.codec.language.bm.RuleType; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.phonetic.BeiderMorseFilter; +import org.apache.lucene.analysis.phonetic.DaitchMokotoffSoundexFilter; import org.apache.lucene.analysis.phonetic.DoubleMetaphoneFilter; import org.apache.lucene.analysis.phonetic.PhoneticFilter; import org.elasticsearch.common.settings.Settings; @@ -53,6 +54,7 @@ public class PhoneticTokenFilterFactory extends AbstractTokenFilterFactory { private List languageset; private NameType nametype; private RuleType ruletype; + private boolean isDaitchMokotoff; public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); @@ -60,6 +62,7 @@ public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment envir this.nametype = null; this.ruletype = null; this.maxcodelength = 0; + this.isDaitchMokotoff = false; this.replace = settings.getAsBoolean("replace", true); // weird, encoder is null at last step in SimplePhoneticAnalysisTests, so we set it to metaphone as default String encodername = settings.get("encoder", "metaphone"); @@ -106,7 +109,8 @@ public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment envir } else if ("nysiis".equalsIgnoreCase(encodername)) { this.encoder = new Nysiis(); } else if ("daitch_mokotoff".equalsIgnoreCase(encodername)) { - this.encoder = new DaitchMokotoffSoundex(); + this.encoder = null; + this.isDaitchMokotoff = true; } else { throw new IllegalArgumentException("unknown encoder [" + encodername + "] for phonetic token filter"); } @@ -115,6 +119,9 @@ public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment envir @Override public TokenStream create(TokenStream tokenStream) { if (encoder == null) { + if (isDaitchMokotoff) { + return new DaitchMokotoffSoundexFilter(tokenStream, !replace); + } if (ruletype != null && nametype != null) { LanguageSet langset = null; if (languageset != null && languageset.size() > 0) { diff --git a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java index e3877faee3146..7fad525b33c3e 100644 --- a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java +++ b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java @@ -22,6 +22,7 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; +import org.apache.lucene.analysis.phonetic.DaitchMokotoffSoundexFilter; import 
org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; @@ -72,4 +73,14 @@ public void testPhoneticTokenFilterBeiderMorseWithLanguage() throws IOException "rmba", "rmbalt", "rmbo", "rmbolt", "rmbu", "rmbult" }; BaseTokenStreamTestCase.assertTokenStreamContents(filterFactory.create(tokenizer), expected); } + + public void testPhoneticTokenFilterDaitchMotokoff() throws IOException { + TokenFilterFactory filterFactory = analysis.tokenFilter.get("daitch_mokotoff"); + Tokenizer tokenizer = new WhitespaceTokenizer(); + tokenizer.setReader(new StringReader("chauptman")); + String[] expected = new String[] { "473660", "573660" }; + assertThat(filterFactory.create(tokenizer), instanceOf(DaitchMokotoffSoundexFilter.class)); + BaseTokenStreamTestCase.assertTokenStreamContents(filterFactory.create(tokenizer), expected); + } + } From 18463e7e9f5243648eae1f4ee99a15c4c1b52b36 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 15 Jan 2018 11:28:31 -0800 Subject: [PATCH 16/94] Painless: Add whitelist extensions (#28161) This commit adds a PainlessExtension which may be plugged in via SPI to add additional classes, methods and members to the painless whitelist on a per context basis. An example plugin adding and using a whitelist is also added. --- .../org/elasticsearch/painless/Compiler.java | 1 + .../elasticsearch/painless/Definition.java | 24 +------- .../painless/PainlessPlugin.java | 30 +++++++++- .../painless/PainlessScriptEngine.java | 19 +++---- .../painless/spi/PainlessExtension.java | 30 ++++++++++ .../painless/{ => spi}/Whitelist.java | 22 ++++++- .../painless/{ => spi}/WhitelistLoader.java | 7 ++- .../plugin-metadata/plugin-security.policy | 3 + .../painless/{ => spi}/java.lang.txt | 0 .../painless/{ => spi}/java.math.txt | 0 .../painless/{ => spi}/java.text.txt | 0 .../painless/{ => spi}/java.time.chrono.txt | 0 .../painless/{ => spi}/java.time.format.txt | 0 .../painless/{ => spi}/java.time.temporal.txt | 0 .../painless/{ => spi}/java.time.txt | 0 .../painless/{ => spi}/java.time.zone.txt | 0 .../painless/{ => spi}/java.util.function.txt | 0 .../painless/{ => spi}/java.util.regex.txt | 0 .../painless/{ => spi}/java.util.stream.txt | 0 .../painless/{ => spi}/java.util.txt | 0 .../painless/{ => spi}/joda.time.txt | 0 .../painless/{ => spi}/org.elasticsearch.txt | 0 .../painless/AnalyzerCasterTests.java | 8 +-- .../painless/BaseClassTests.java | 8 +-- .../elasticsearch/painless/DebugTests.java | 5 +- .../org/elasticsearch/painless/Debugger.java | 5 +- .../painless/DefBootstrapTests.java | 4 +- .../elasticsearch/painless/FactoryTests.java | 16 +++--- .../painless/NeedsScoreTests.java | 13 +++-- .../painless/PainlessDocGenerator.java | 6 +- .../painless/ScriptTestCase.java | 17 +++--- .../painless/SimilarityScriptTests.java | 13 +++-- .../painless/node/NodeToStringTests.java | 6 +- .../examples/painless-whitelist/build.gradle | 4 ++ .../ExampleWhitelistExtension.java | 42 ++++++++++++++ .../ExampleWhitelistedClass.java | 57 +++++++++++++++++++ .../painlesswhitelist/MyWhitelistPlugin.java | 1 + ...asticsearch.painless.spi.PainlessExtension | 1 + .../painlesswhitelist/example_whitelist.txt | 42 ++++++++++++++ .../test/painless_whitelist/20_whitelist.yml | 26 +++++++++ 40 files changed, 319 insertions(+), 91 deletions(-) create mode 100644 modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java rename modules/lang-painless/src/main/java/org/elasticsearch/painless/{ => spi}/Whitelist.java 
(93%) rename modules/lang-painless/src/main/java/org/elasticsearch/painless/{ => spi}/WhitelistLoader.java (98%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.lang.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.math.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.text.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.chrono.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.format.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.temporal.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.time.zone.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.util.function.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.util.regex.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.util.stream.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/java.util.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/joda.time.txt (100%) rename modules/lang-painless/src/main/resources/org/elasticsearch/painless/{ => spi}/org.elasticsearch.txt (100%) create mode 100644 plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java create mode 100644 plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedClass.java create mode 100644 plugins/examples/painless-whitelist/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension create mode 100644 plugins/examples/painless-whitelist/src/main/resources/org/elasticsearch/example/painlesswhitelist/example_whitelist.txt create mode 100644 plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java index ad5e80ba16edd..8102016828c30 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java @@ -22,6 +22,7 @@ import org.elasticsearch.bootstrap.BootstrapInfo; import org.elasticsearch.painless.antlr.Walker; import org.elasticsearch.painless.node.SSource; +import org.elasticsearch.painless.spi.Whitelist; import org.objectweb.asm.util.Printer; import java.lang.reflect.Constructor; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java index 7d8b4ff4e614e..7729c5319ea81 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java @@ -20,6 +20,7 @@ package org.elasticsearch.painless; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.painless.spi.Whitelist; import 
java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; @@ -46,29 +47,6 @@ public final class Definition { private static final Pattern TYPE_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$"); - public static final String[] DEFINITION_FILES = new String[] { - "org.elasticsearch.txt", - "java.lang.txt", - "java.math.txt", - "java.text.txt", - "java.time.txt", - "java.time.chrono.txt", - "java.time.format.txt", - "java.time.temporal.txt", - "java.time.zone.txt", - "java.util.txt", - "java.util.function.txt", - "java.util.regex.txt", - "java.util.stream.txt", - "joda.time.txt" - }; - - /** - * Whitelist that is "built in" to Painless and required by all scripts. - */ - public static final Definition DEFINITION = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, DEFINITION_FILES))); - /** Some native types as constants: */ public final Type voidType; public final Type booleanType; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index 842af8717a34b..795d81bb6e058 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -22,28 +22,56 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.painless.spi.PainlessExtension; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.plugins.ExtensiblePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.ServiceLoader; /** * Registers Painless as a plugin. 
*/ public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin { + private final Map, List> extendedWhitelists = new HashMap<>(); + @Override public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { - return new PainlessScriptEngine(settings, contexts); + Map, List> contextsWithWhitelists = new HashMap<>(); + for (ScriptContext context : contexts) { + // we might have a context that only uses the base whitelists, so would not have been filled in by reloadSPI + List whitelists = extendedWhitelists.get(context); + if (whitelists == null) { + whitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS); + } + contextsWithWhitelists.put(context, whitelists); + } + return new PainlessScriptEngine(settings, contextsWithWhitelists); } @Override public List> getSettings() { return Arrays.asList(CompilerSettings.REGEX_ENABLED); } + + @Override + public void reloadSPI(ClassLoader loader) { + for (PainlessExtension extension : ServiceLoader.load(PainlessExtension.class, loader)) { + for (Map.Entry, List> entry : extension.getContextWhitelists().entrySet()) { + List existing = extendedWhitelists.computeIfAbsent(entry.getKey(), + c -> new ArrayList<>(Whitelist.BASE_WHITELISTS)); + existing.addAll(entry.getValue()); + } + } + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index ac01f45a7fdd6..95a38bf22c653 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -19,12 +19,12 @@ package org.elasticsearch.painless; -import org.apache.logging.log4j.core.tools.Generate; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.painless.Compiler.Loader; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; @@ -45,7 +45,6 @@ import java.security.ProtectionDomain; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -82,7 +81,7 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr /** * Default compiler settings to be used. Note that {@link CompilerSettings} is mutable but this instance shouldn't be mutated outside - * of {@link PainlessScriptEngine#PainlessScriptEngine(Settings, Collection)}. + * of {@link PainlessScriptEngine#PainlessScriptEngine(Settings, Map)}. */ private final CompilerSettings defaultCompilerSettings = new CompilerSettings(); @@ -92,23 +91,19 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr * Constructor. * @param settings The settings to initialize the engine with. */ - public PainlessScriptEngine(Settings settings, Collection> contexts) { + public PainlessScriptEngine(Settings settings, Map, List> contexts) { super(settings); defaultCompilerSettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(settings)); Map, Compiler> contextsToCompilers = new HashMap<>(); - // Placeholder definition used for all contexts until SPI is fully integrated. 
Reduces memory foot print - // by re-using the same definition since caching isn't implemented at this time. - Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); - - for (ScriptContext context : contexts) { + for (Map.Entry, List> entry : contexts.entrySet()) { + ScriptContext context = entry.getKey(); if (context.instanceClazz.equals(SearchScript.class) || context.instanceClazz.equals(ExecutableScript.class)) { - contextsToCompilers.put(context, new Compiler(GenericElasticsearchScript.class, definition)); + contextsToCompilers.put(context, new Compiler(GenericElasticsearchScript.class, new Definition(entry.getValue()))); } else { - contextsToCompilers.put(context, new Compiler(context.instanceClazz, definition)); + contextsToCompilers.put(context, new Compiler(context.instanceClazz, new Definition(entry.getValue()))); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java new file mode 100644 index 0000000000000..9434e6986c0a3 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless.spi; + +import java.util.List; +import java.util.Map; + +import org.elasticsearch.script.ScriptContext; + +public interface PainlessExtension { + + Map, List> getContextWhitelists(); +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Whitelist.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java similarity index 93% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/Whitelist.java rename to modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java index 678b8a4c1ae38..e715eb0090c7f 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Whitelist.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.painless; +package org.elasticsearch.painless.spi; import java.util.Collections; import java.util.List; @@ -34,6 +34,26 @@ */ public final class Whitelist { + private static final String[] BASE_WHITELIST_FILES = new String[] { + "org.elasticsearch.txt", + "java.lang.txt", + "java.math.txt", + "java.text.txt", + "java.time.txt", + "java.time.chrono.txt", + "java.time.format.txt", + "java.time.temporal.txt", + "java.time.zone.txt", + "java.util.txt", + "java.util.function.txt", + "java.util.regex.txt", + "java.util.stream.txt", + "joda.time.txt" + }; + + public static final List BASE_WHITELISTS = + Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Whitelist.class, BASE_WHITELIST_FILES)); + /** * Struct represents the equivalent of a Java class in Painless complete with super classes, * constructors, methods, and fields. In Painless a class is known as a struct primarily to avoid diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WhitelistLoader.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java similarity index 98% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/WhitelistLoader.java rename to modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java index 93ea951f453aa..8817bfa274c60 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WhitelistLoader.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.painless; +package org.elasticsearch.painless.spi; import java.io.InputStreamReader; import java.io.LineNumberReader; @@ -25,6 +25,8 @@ import java.lang.reflect.Field; import java.lang.reflect.Method; import java.nio.charset.StandardCharsets; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -296,8 +298,9 @@ public static Whitelist loadFromResourceFiles(Class resource, String... 
filep throw new RuntimeException("error in [" + filepath + "] at line [" + number + "]", exception); } } + ClassLoader loader = AccessController.doPrivileged((PrivilegedAction)resource::getClassLoader); - return new Whitelist(resource.getClassLoader(), whitelistStructs); + return new Whitelist(loader, whitelistStructs); } private WhitelistLoader() {} diff --git a/modules/lang-painless/src/main/plugin-metadata/plugin-security.policy b/modules/lang-painless/src/main/plugin-metadata/plugin-security.policy index e45c1b86ceb2c..b383c6da3f12c 100644 --- a/modules/lang-painless/src/main/plugin-metadata/plugin-security.policy +++ b/modules/lang-painless/src/main/plugin-metadata/plugin-security.policy @@ -20,4 +20,7 @@ grant { // needed to generate runtime classes permission java.lang.RuntimePermission "createClassLoader"; + + // needed to find the classloader to load whitelisted classes from + permission java.lang.RuntimePermission "getClassLoader"; }; diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.lang.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.math.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.math.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.math.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.math.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.text.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.text.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.text.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.text.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.chrono.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.chrono.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.format.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.format.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.format.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.format.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.temporal.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.temporal.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.temporal.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.temporal.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt 
b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.zone.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.zone.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.zone.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.time.zone.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.function.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.function.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.function.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.function.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.regex.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.regex.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.stream.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.stream.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.stream.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.stream.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/java.util.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/joda.time.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/joda.time.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/joda.time.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/joda.time.txt diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt similarity index 100% rename from modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt rename to modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java index 58ae31a45c93a..919b0881c0794 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java +++ 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java @@ -21,16 +21,12 @@ import org.elasticsearch.painless.Definition.Cast; import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.test.ESTestCase; -import java.util.Collections; - -import static org.elasticsearch.painless.Definition.DEFINITION_FILES; - public class AnalyzerCasterTests extends ESTestCase { - private static final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, DEFINITION_FILES))); + private static final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); private static void assertCast(Type actual, Type expected, boolean mustBeExplicit) { Location location = new Location("dummy", 0); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java index 2ba8692b8af59..59cafa96ddcb9 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java @@ -19,13 +19,12 @@ package org.elasticsearch.painless; -import org.elasticsearch.script.ScriptContext; - -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import org.elasticsearch.painless.spi.Whitelist; + import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.containsString; @@ -37,8 +36,7 @@ */ public class BaseClassTests extends ScriptTestCase { - private final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + private final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); public abstract static class Gets { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java index a55b48f0189b3..279438e74a7c3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java @@ -22,10 +22,10 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ScriptException; import java.io.IOException; -import java.util.Collections; import java.util.Map; import static java.util.Collections.singletonList; @@ -35,8 +35,7 @@ import static org.hamcrest.Matchers.not; public class DebugTests extends ScriptTestCase { - private final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + private final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); public void testExplain() { // Debug.explain can explain an object diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java index 52ec783db4ef4..e29986a3c87de 100644 --- 
a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java @@ -20,11 +20,11 @@ package org.elasticsearch.painless; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.painless.spi.Whitelist; import org.objectweb.asm.util.Textifier; import java.io.PrintWriter; import java.io.StringWriter; -import java.util.Collections; /** quick and dirty tools for debugging */ final class Debugger { @@ -40,8 +40,7 @@ static String toString(Class iface, String source, CompilerSettings settings) PrintWriter outputWriter = new PrintWriter(output); Textifier textifier = new Textifier(); try { - new Compiler(iface, new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES)))) + new Compiler(iface, new Definition(Whitelist.BASE_WHITELISTS)) .compile("", source, settings, textifier); } catch (Exception e) { textifier.print(outputWriter); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java index dccc9c0aeb505..8fd96d67d5b53 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java @@ -27,11 +27,11 @@ import java.util.Collections; import java.util.HashMap; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.test.ESTestCase; public class DefBootstrapTests extends ESTestCase { - private final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + private final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); /** calls toString() on integers, twice */ public void testOneType() throws Throwable { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java index b15a2747bd088..556ef8dd3c6d3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java @@ -19,21 +19,23 @@ package org.elasticsearch.painless; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.TemplateScript; -import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Map; public class FactoryTests extends ScriptTestCase { - protected Collection> scriptContexts() { - Collection> contexts = super.scriptContexts(); - contexts.add(StatefulFactoryTestScript.CONTEXT); - contexts.add(FactoryTestScript.CONTEXT); - contexts.add(EmptyTestScript.CONTEXT); - contexts.add(TemplateScript.CONTEXT); + @Override + protected Map, List> scriptContexts() { + Map, List> contexts = super.scriptContexts(); + contexts.put(StatefulFactoryTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(FactoryTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(EmptyTestScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(TemplateScript.CONTEXT, Whitelist.BASE_WHITELISTS); return contexts; } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java index db254b734a81a..50a377b881878 100--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java @@ -22,14 +22,17 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.ESSingleNodeTestCase; -import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; /** * Test that needsScores() is reported correctly depending on whether _score is used */ @@ -40,8 +43,10 @@ public class NeedsScoreTests extends ESSingleNodeTestCase { public void testNeedsScores() { IndexService index = createIndex("test", Settings.EMPTY, "type", "d", "type=double"); - PainlessScriptEngine service = new PainlessScriptEngine(Settings.EMPTY, - Arrays.asList(SearchScript.CONTEXT, ExecutableScript.CONTEXT)); + Map<ScriptContext<?>, List<Whitelist>> contexts = new HashMap<>(); + contexts.put(SearchScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(ExecutableScript.CONTEXT, Whitelist.BASE_WHITELISTS); + PainlessScriptEngine service = new PainlessScriptEngine(Settings.EMPTY, contexts); QueryShardContext shardContext = index.newQueryShardContext(0, null, () -> 0, null); SearchLookup lookup = new SearchLookup(index.mapperService(), shardContext::getForField, null); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java index edd600c5664f2..87b1677102635 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java @@ -27,7 +27,7 @@ import org.elasticsearch.painless.Definition.Method; import org.elasticsearch.painless.Definition.Struct; import org.elasticsearch.painless.Definition.Type; -import org.elasticsearch.painless.api.Augmentation; +import org.elasticsearch.painless.spi.Whitelist; import java.io.IOException; import java.io.PrintStream; @@ -36,7 +36,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Map; @@ -68,8 +67,7 @@ public static void main(String[] args) throws IOException { Files.newOutputStream(indexPath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), false, StandardCharsets.UTF_8.name())) { emitGeneratedWarning(indexStream); - List<Type> types = new Definition(Collections.singletonList( - WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))). + List<Type> types = new Definition(Whitelist.BASE_WHITELISTS). 
allSimpleTypes().stream().sorted(comparing(t -> t.name)).collect(toList()); for (Type type : types) { if (type.clazz.isPrimitive()) { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index 730dd298f8a54..ea1d2275b3e8d 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.painless.antlr.Walker; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptException; @@ -31,10 +32,8 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.elasticsearch.painless.node.SSource.MainMethodReserved; @@ -63,11 +62,10 @@ protected Settings scriptEngineSettings() { /** * Script contexts used to build the script engine. Override to customize which script contexts are available. */ - protected Collection<ScriptContext<?>> scriptContexts() { - Collection<ScriptContext<?>> contexts = new ArrayList<>(); - contexts.add(SearchScript.CONTEXT); - contexts.add(ExecutableScript.CONTEXT); - + protected Map<ScriptContext<?>, List<Whitelist>> scriptContexts() { + Map<ScriptContext<?>, List<Whitelist>> contexts = new HashMap<>(); + contexts.put(SearchScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(ExecutableScript.CONTEXT, Whitelist.BASE_WHITELISTS); return contexts; } @@ -92,8 +90,7 @@ public Object exec(String script, Map<String, Object> vars, boolean picky) { public Object exec(String script, Map<String, Object> vars, Map<String, String> compileParams, Scorer scorer, boolean picky) { // test for ambiguity errors before running the actual script if picky is true if (picky) { - Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + Definition definition = new Definition(Whitelist.BASE_WHITELISTS); ScriptClassInfo scriptClassInfo = new ScriptClassInfo(definition, GenericElasticsearchScript.class); CompilerSettings pickySettings = new CompilerSettings(); pickySettings.setPicky(true); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java index d8f43fb066867..0795ab7777526 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java @@ -37,20 +37,25 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.elasticsearch.index.similarity.ScriptedSimilarity; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.SimilarityScript; import org.elasticsearch.script.SimilarityWeightScript; import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; public class SimilarityScriptTests extends ScriptTestCase { @Override - 
protected Collection<ScriptContext<?>> scriptContexts() { - return Arrays.asList(SimilarityScript.CONTEXT, SimilarityWeightScript.CONTEXT); + protected Map<ScriptContext<?>, List<Whitelist>> scriptContexts() { + Map<ScriptContext<?>, List<Whitelist>> contexts = new HashMap<>(); + contexts.put(SimilarityScript.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(SimilarityWeightScript.CONTEXT, Whitelist.BASE_WHITELISTS); + return contexts; } public void testBasics() throws IOException { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java index 9e3477b1cfe02..424b0c286ecff 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java @@ -33,12 +33,11 @@ import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Operation; import org.elasticsearch.painless.ScriptClassInfo; -import org.elasticsearch.painless.WhitelistLoader; +import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.painless.antlr.Walker; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -50,8 +49,7 @@ * Tests {@link Object#toString} implementations on all extensions of {@link ANode}. */ public class NodeToStringTests extends ESTestCase { - private final Definition definition = new Definition( - Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + private final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); public void testEAssignment() { assertToString( diff --git a/plugins/examples/painless-whitelist/build.gradle b/plugins/examples/painless-whitelist/build.gradle index 2213aea16f6cd..12bbff8b0419e 100644 --- a/plugins/examples/painless-whitelist/build.gradle +++ b/plugins/examples/painless-whitelist/build.gradle @@ -26,6 +26,10 @@ esplugin { extendedPlugins = ['lang-painless'] } +dependencies { + compileOnly project(':modules:lang-painless') +} + integTestCluster { distribution = 'zip' } diff --git a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java new file mode 100644 index 0000000000000..9e3bc66e7d58d --- /dev/null +++ b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistExtension.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.example.painlesswhitelist; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import org.elasticsearch.painless.spi.PainlessExtension; +import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.painless.spi.WhitelistLoader; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.SearchScript; + +/** An extension of painless which adds a whitelist. */ +public class ExampleWhitelistExtension implements PainlessExtension { + + private static final Whitelist WHITELIST = + WhitelistLoader.loadFromResourceFiles(ExampleWhitelistExtension.class, "example_whitelist.txt"); + + @Override + public Map<ScriptContext<?>, List<Whitelist>> getContextWhitelists() { + return Collections.singletonMap(SearchScript.CONTEXT, Collections.singletonList(WHITELIST)); + } +} diff --git a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedClass.java b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedClass.java new file mode 100644 index 0000000000000..14f15b383d0c8 --- /dev/null +++ b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/ExampleWhitelistedClass.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +public class ExampleWhitelistedClass { + + public static final int CONSTANT = 42; + + public int publicMember; + + private int privateMember; + + public ExampleWhitelistedClass(int publicMember, int privateMember) { + this.publicMember = publicMember; + this.privateMember = privateMember; + } + + public int getPrivateMemberAccessor() { + return this.privateMember; + } + + public void setPrivateMemberAccessor(int privateMember) { + this.privateMember = privateMember; + } + + public static void staticMethod() { + // electricity + } + + // example augmentation method + public static int toInt(String x) { + return Integer.parseInt(x); + } +} diff --git a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/MyWhitelistPlugin.java b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/MyWhitelistPlugin.java index 877a05391ac77..a4ef5f6f000e1 100644 --- a/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/MyWhitelistPlugin.java +++ b/plugins/examples/painless-whitelist/src/main/java/org/elasticsearch/example/painlesswhitelist/MyWhitelistPlugin.java @@ -22,4 +22,5 @@ import org.elasticsearch.plugins.Plugin; public class MyWhitelistPlugin extends Plugin { + // we don't actually need anything here, since whitelists are extended through SPI } diff --git a/plugins/examples/painless-whitelist/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension b/plugins/examples/painless-whitelist/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension new file mode 100644 index 0000000000000..9babd702c8083 --- /dev/null +++ b/plugins/examples/painless-whitelist/src/main/resources/META-INF/services/org.elasticsearch.painless.spi.PainlessExtension @@ -0,0 +1 @@ +org.elasticsearch.example.painlesswhitelist.ExampleWhitelistExtension \ No newline at end of file diff --git a/plugins/examples/painless-whitelist/src/main/resources/org/elasticsearch/example/painlesswhitelist/example_whitelist.txt b/plugins/examples/painless-whitelist/src/main/resources/org/elasticsearch/example/painlesswhitelist/example_whitelist.txt new file mode 100644 index 0000000000000..7908d35417511 --- /dev/null +++ b/plugins/examples/painless-whitelist/src/main/resources/org/elasticsearch/example/painlesswhitelist/example_whitelist.txt @@ -0,0 +1,42 @@ +# +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +# This file contains a whitelist for an example class which may be accessed from painless + +class org.elasticsearch.example.painlesswhitelist.ExampleWhitelistedClass { + # constructor + (int, int) + + # static constants and methods look the same as instance members and methods + int CONSTANT + void staticMethod() + + # members lack the parentheses that methods have + int publicMember + + # getter and setter for private member + int getPrivateMemberAccessor() + void setPrivateMemberAccessor(int) +} + +class java.lang.String { + # existing classes can be "augmented" to have additional methods, which take the object + # to operate on as the first argument to a static method + int org.elasticsearch.example.painlesswhitelist.ExampleWhitelistedClass toInt() +} \ No newline at end of file diff --git a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml new file mode 100644 index 0000000000000..bbb0b44ef1d45 --- /dev/null +++ b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml @@ -0,0 +1,26 @@ +# Example test using whitelisted members and methods + +"Whitelisted custom class": + - do: + index: + index: test + type: test + id: 1 + body: { "num1": 1.0 } + - do: + indices.refresh: {} + + - do: + index: test + search: + body: + query: + match_all: {} + script_fields: + sNum1: + script: + source: "def e = new ExampleWhitelistedClass(6, 42); ExampleWhitelistedClass.staticMethod(); return e.publicMember + e.privateMemberAccessor + ExampleWhitelistedClass.CONSTANT + '2'.toInt()" + lang: painless + + - match: { hits.total: 1 } + - match: { hits.hits.0.fields.sNum1.0: 92 } From 5ed25f1e12b4b3e249da107745a99941773b389d Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Wed, 6 Dec 2017 11:58:20 -0600 Subject: [PATCH 17/94] [GEO] Add WKT Support to GeoBoundingBoxQueryBuilder Add WKT BBOX parsing support to GeoBoundingBoxQueryBuilder. 
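For context, the new syntax this patch parses looks as follows — a minimal sketch assuming an index with a `pin.location` geo_point field; the coordinate order `BBOX (minLon, maxLon, maxLat, minLat)` matches the docs and tests added below:

[source,js]
--------------------------------------------------
GET /_search
{
  "query": {
    "geo_bounding_box": {
      "pin.location": {
        "wkt": "BBOX (-74.1, -71.12, 40.73, 40.01)"
      }
    }
  }
}
--------------------------------------------------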
--- .../query-dsl/geo-bounding-box-query.asciidoc | 25 ++++ .../common/geo/parsers/GeoWKTParser.java | 21 ++- .../query/GeoBoundingBoxQueryBuilder.java | 135 +++++++++++------- .../common/geo/GeoWKTShapeParserTests.java | 12 ++ .../GeoBoundingBoxQueryBuilderTests.java | 44 ++++++ 5 files changed, 181 insertions(+), 56 deletions(-) diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index e8db949bbc6b8..a1b427acf2718 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -180,6 +180,31 @@ GET /_search -------------------------------------------------- // CONSOLE +[float] +===== Bounding Box as Well-Known Text (WKT) + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_bounding_box" : { + "pin.location" : { + "wkt" : "BBOX (-74.1, -71.12, 40.73, 40.01)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + [float] ===== Geohash diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java index 005caed53a7e9..38643df017943 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java @@ -63,6 +63,12 @@ private GeoWKTParser() {} public static ShapeBuilder parse(XContentParser parser) throws IOException, ElasticsearchParseException { + return parseExpectedType(parser, null); + } + + /** throws an exception if the parsed geometry type does not match the expected shape type */ + public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType) + throws IOException, ElasticsearchParseException { FastStringReader reader = new FastStringReader(parser.text()); try { // setup the tokenizer; configured to read words w/o numbers @@ -77,7 +83,7 @@ public static ShapeBuilder parse(XContentParser parser) tokenizer.wordChars('.', '.'); tokenizer.whitespaceChars(0, ' '); tokenizer.commentChar('#'); - ShapeBuilder builder = parseGeometry(tokenizer); + ShapeBuilder builder = parseGeometry(tokenizer, shapeType); checkEOF(tokenizer); return builder; } finally { @@ -86,8 +92,14 @@ public static ShapeBuilder parse(XContentParser parser) } /** parse geometry from the stream tokenizer */ - private static ShapeBuilder parseGeometry(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType) + throws IOException, ElasticsearchParseException { final GeoShapeType type = GeoShapeType.forName(nextWord(stream)); + if (shapeType != null && shapeType != GeoShapeType.GEOMETRYCOLLECTION) { + if (type.wktName().equals(shapeType.wktName()) == false) { + throw new ElasticsearchParseException("Expected geometry type [{}] but found [{}]", shapeType, type); + } + } switch (type) { case POINT: return parsePoint(stream); @@ -228,9 +240,10 @@ private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } - GeometryCollectionBuilder builder = new GeometryCollectionBuilder().shape(parseGeometry(stream)); + GeometryCollectionBuilder builder = new GeometryCollectionBuilder().shape( + parseGeometry(stream, 
GeoShapeType.GEOMETRYCOLLECTION)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.shape(parseGeometry(stream)); + builder.shape(parseGeometry(stream, null)); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index c0e57cc45afd9..47dcbaa351454 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -31,7 +31,10 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.geo.builders.EnvelopeBuilder; +import org.elasticsearch.common.geo.parsers.GeoWKTParser; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -62,7 +65,6 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder GeoWKTParser.parseExpectedType(parser, GeoShapeType.POLYGON)); + assertThat(e, hasToString(containsString("Expected geometry type [polygon] but found [point]"))); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java index 133057fb8d026..aeaca328ceb7b 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java @@ -406,6 +406,50 @@ public void testFromJson() throws IOException { assertEquals(json, GeoExecType.MEMORY, parsed.type()); } + public void testFromWKT() throws IOException { + String wkt = + "{\n" + + " \"geo_bounding_box\" : {\n" + + " \"pin.location\" : {\n" + + " \"wkt\" : \"BBOX (-74.1, -71.12, 40.73, 40.01)\"\n" + + " },\n" + + " \"validation_method\" : \"STRICT\",\n" + + " \"type\" : \"MEMORY\",\n" + + " \"ignore_unmapped\" : false,\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}"; + + // toXContent generates the query in geojson only; for now we need to test against the expected + // geojson generated content + String expectedJson = + "{\n" + + " \"geo_bounding_box\" : {\n" + + " \"pin.location\" : {\n" + + " \"top_left\" : [ -74.1, 40.73 ],\n" + + " \"bottom_right\" : [ -71.12, 40.01 ]\n" + + " },\n" + + " \"validation_method\" : \"STRICT\",\n" + + " \"type\" : \"MEMORY\",\n" + + " \"ignore_unmapped\" : false,\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}"; + + // parse with wkt + GeoBoundingBoxQueryBuilder parsed = (GeoBoundingBoxQueryBuilder) parseQuery(wkt); + // check the builder's generated geojson content against the expected json output + checkGeneratedJson(expectedJson, parsed); + double delta = 0d; + assertEquals(expectedJson, "pin.location", parsed.fieldName()); + assertEquals(expectedJson, -74.1, parsed.topLeft().getLon(), delta); + assertEquals(expectedJson, 40.73, parsed.topLeft().getLat(), delta); + assertEquals(expectedJson, -71.12, parsed.bottomRight().getLon(), delta); + assertEquals(expectedJson, 40.01, parsed.bottomRight().getLat(), delta); + assertEquals(expectedJson, 1.0, parsed.boost(), delta); + assertEquals(expectedJson, GeoExecType.MEMORY, parsed.type()); + } + @Override 
public void testMustRewrite() throws IOException { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); From 6c297ad7c8cc85a0d30b7a38f1eecd835a650c70 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 15 Jan 2018 18:13:47 -0500 Subject: [PATCH 18/94] TEST: Update logging for testAckedIndexing - Log the response of indexing requests - Correct logging setting for discovery package --- .../org/elasticsearch/discovery/ClusterDisruptionIT.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 8d21c6306382b..55f5b70e70299 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -81,7 +81,8 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase { *

* This test is a superset of tests run in the Jepsen test suite, with the exception of versioned updates */ - @TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE,discovery:TRACE," + + @TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE," + + "org.elasticsearch.discovery:TRACE,org.elasticsearch.action.support.replication:TRACE," + "org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE," + "org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE") public void testAckedIndexing() throws Exception { @@ -137,7 +138,7 @@ public void testAckedIndexing() throws Exception { .get(timeout); assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); ackedDocs.put(id, node); - logger.trace("[{}] indexed id [{}] through node [{}]", name, id, node); + logger.trace("[{}] indexed id [{}] through node [{}], response [{}]", name, id, node, response); } catch (ElasticsearchException e) { exceptedExceptions.add(e); final String docId = id; From 71ba314c733fe5f2a175e2b8e8d871d61e3e3202 Mon Sep 17 00:00:00 2001 From: fbsolo Date: Tue, 16 Jan 2018 00:35:35 -0800 Subject: [PATCH 19/94] [Docs] Changes to ingest.asciidoc (#28212) --- docs/reference/ingest.asciidoc | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc index da1164930bc1e..18349beab6ab1 100644 --- a/docs/reference/ingest.asciidoc +++ b/docs/reference/ingest.asciidoc @@ -3,26 +3,27 @@ [partintro] -- -You can use ingest node to pre-process documents before the actual indexing takes place. -This pre-processing happens by an ingest node that intercepts bulk and index requests, applies the -transformations, and then passes the documents back to the index or bulk APIs. +Use an ingest node to pre-process documents before the actual document indexing happens. +The ingest node intercepts bulk and index requests, applies the transformations, and then +passes the documents back to the index or bulk APIs. -You can enable ingest on any node or even have dedicated ingest nodes. Ingest is enabled by default -on all nodes. To disable ingest on a node, configure the following setting in the `elasticsearch.yml` file: +All nodes enable ingest by default, so any node can handle ingest tasks. You can also create +dedicated ingest nodes. To disable ingest for a node, configure the following setting in the +`elasticsearch.yml` file: [source,yaml] -------------------------------------------------- node.ingest: false -------------------------------------------------- -To pre-process documents before indexing, you <<pipeline,define a pipeline>> that specifies -a series of <<ingest-processors,processors>>. Each processor transforms the document in some way. -For example, you may have a pipeline that consists of one processor that removes a field from -the document followed by another processor that renames a field. Configured pipelines are then stored -in the <<cluster-state,cluster state>>. +To pre-process documents before indexing, <<pipeline,define a pipeline>> that specifies a series of +<<ingest-processors,processors>>. Each processor transforms the document in some specific way. For example, a +pipeline might have one processor that removes a field from the document, followed by +another processor that renames a field. The <<cluster-state,cluster state>> then stores +the configured pipelines. -To use a pipeline, you simply specify the `pipeline` parameter on an index or bulk request to -tell the ingest node which pipeline to use. 
For example: +To use a pipeline, simply specify the `pipeline` parameter on an index or bulk request. This +way, the ingest node knows which pipeline to use. For example: [source,js] -------------------------------------------------- From 0c4e2cbc19a9dad671b135d8f473943119677409 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 16 Jan 2018 09:50:06 +0100 Subject: [PATCH 20/94] Fallback to TransportMasterNodeAction for cluster health retries (#28195) ClusterHealthAction does not use the regular retry logic, possibly causing StackOverflowErrors. Relates #28169 --- .../admin/cluster/health/TransportClusterHealthAction.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index f4c7748d43924..541738d6be7cc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -125,7 +126,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void onNoLongerMaster(String source) { logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", request.waitForEvents()); - doExecute(task, request, listener); + // TransportMasterNodeAction implements the retry logic, which is triggered by passing a NotMasterException + listener.onFailure(new NotMasterException("no longer master. source: [" + source + "]")); } @Override From 196c7b80dc2e8bebd9d9023be13639a2078f3d15 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 16 Jan 2018 09:58:58 +0100 Subject: [PATCH 21/94] Never return null from Strings.tokenizeToStringArray (#28224) This method has a different contract than all the other methods in this class, returning null instead of an empty array when receiving a null input. Switching some methods over from delimitedListToStringArray to tokenizeToStringArray therefore resulted in unexpected nulls in some places of our code. 
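A minimal sketch of the contract after this change (the two method names are real members of org.elasticsearch.common.Strings; the assertions are illustrative only):

[source,java]
--------------------------------------------------
assert Strings.tokenizeToStringArray(null, ",").length == 0;      // returned null before this patch
assert Strings.delimitedListToStringArray(null, ",").length == 0; // already returned an empty array
--------------------------------------------------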
Relates #28213 --- .../src/main/java/org/elasticsearch/common/Strings.java | 5 ++++- .../allocation/decider/FilterAllocationDeciderTests.java | 8 ++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/common/Strings.java b/server/src/main/java/org/elasticsearch/common/Strings.java index 6c2fc4e1ec153..02a0852b0a03a 100644 --- a/server/src/main/java/org/elasticsearch/common/Strings.java +++ b/server/src/main/java/org/elasticsearch/common/Strings.java @@ -474,6 +474,9 @@ public static String[] split(String toSplit, String delimiter) { * @see #delimitedListToStringArray */ public static String[] tokenizeToStringArray(final String s, final String delimiters) { + if (s == null) { + return EMPTY_ARRAY; + } return toStringArray(tokenizeToCollection(s, delimiters, ArrayList::new)); } @@ -536,7 +539,7 @@ public static String[] delimitedListToStringArray(String str, String delimiter) */ public static String[] delimitedListToStringArray(String str, String delimiter, String charsToDelete) { if (str == null) { - return new String[0]; + return EMPTY_ARRAY; } if (delimiter == null) { return new String[]{str}; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index c4105771229bc..8381f2f960b75 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -194,6 +194,14 @@ public void testInvalidIPFilter() { assertEquals("invalid IP address [" + invalidIP + "] for [" + filterSetting.getKey() + ipKey + "]", e.getMessage()); } + public void testNull() { + Setting<Settings> filterSetting = randomFrom(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING, + IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING, IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING); + + IndexMetaData.builder("test") + .settings(settings(Version.CURRENT).putNull(filterSetting.getKey() + "name")).numberOfShards(2).numberOfReplicas(0).build(); + } + public void testWildcardIPFilter() { String ipKey = randomFrom("_ip", "_host_ip", "_publish_ip"); Setting<Settings> filterSetting = randomFrom(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING, From efe2e521180f989218898867c5509d860fc46312 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 16 Jan 2018 10:50:07 +0100 Subject: [PATCH 22/94] Fix eclipse build. 
(#28236) Relates #28191 --- libs/elasticsearch-core/src/main/eclipse-build.gradle | 2 ++ libs/elasticsearch-core/src/test/eclipse-build.gradle | 6 ++++++ settings.gradle | 5 +++++ 3 files changed, 13 insertions(+) create mode 100644 libs/elasticsearch-core/src/main/eclipse-build.gradle create mode 100644 libs/elasticsearch-core/src/test/eclipse-build.gradle diff --git a/libs/elasticsearch-core/src/main/eclipse-build.gradle b/libs/elasticsearch-core/src/main/eclipse-build.gradle new file mode 100644 index 0000000000000..9c84a4d6bd84b --- /dev/null +++ b/libs/elasticsearch-core/src/main/eclipse-build.gradle @@ -0,0 +1,2 @@ +// this is just a shell gradle file for eclipse to have separate projects for elasticsearch-core src and tests +apply from: '../../build.gradle' diff --git a/libs/elasticsearch-core/src/test/eclipse-build.gradle b/libs/elasticsearch-core/src/test/eclipse-build.gradle new file mode 100644 index 0000000000000..f43f019941bb2 --- /dev/null +++ b/libs/elasticsearch-core/src/test/eclipse-build.gradle @@ -0,0 +1,6 @@ +// this is just a shell gradle file for eclipse to have separate projects for elasticsearch-core src and tests +apply from: '../../build.gradle' + +dependencies { + testCompile project(':libs:elasticsearch-core') +} diff --git a/settings.gradle b/settings.gradle index b844af52df76b..46ecb3dad1c97 100644 --- a/settings.gradle +++ b/settings.gradle @@ -110,6 +110,7 @@ if (isEclipse) { // eclipse cannot handle an intermediate dependency between main and test, so we must create separate projects // for server-src and server-tests projects << 'server-tests' + projects << 'libs:elasticsearch-core-tests' projects << 'libs:elasticsearch-nio-tests' } @@ -128,6 +129,10 @@ if (isEclipse) { project(":server").buildFileName = 'eclipse-build.gradle' project(":server-tests").projectDir = new File(rootProject.projectDir, 'server/src/test') project(":server-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-core").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-core/src/main') + project(":libs:elasticsearch-core").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-core-tests").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-core/src/test') + project(":libs:elasticsearch-core-tests").buildFileName = 'eclipse-build.gradle' project(":libs:elasticsearch-nio").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-nio/src/main') project(":libs:elasticsearch-nio").buildFileName = 'eclipse-build.gradle' project(":libs:elasticsearch-nio-tests").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-nio/src/test') From 67c1f1c856cad9624087931e7ca1285e16cd55f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 16 Jan 2018 12:05:03 +0100 Subject: [PATCH 23/94] [Docs] Fix Java Api index administration usage (#28133) The Java API documentation for index administration is currently wrong because the PutMappingRequestBuilder#setSource(Object... source) and CreateIndexRequestBuilder#addMapping(String type, Object... source) methods delegate to methods that check that the input arguments are valid key/value pairs: https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-admin-indices.html This changes the docs so that the Java API code examples are included from documentation integration tests, which lets us detect compile and runtime issues earlier. 
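Sketch of the mechanism (the tag name below is one actually added in this patch; `{base-dir}` is the attribute defined in the updated asciidoc): the docs no longer embed Java snippets directly, but pull the region between `// tag::name` and `// end::name` comments out of the test source, e.g.

[source,asciidoc]
--------------------------------------------------
include-tagged::{base-dir}/CreateIndexIT.java[addMapping-create-index-request]
--------------------------------------------------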
Closes #28131 --- .../admin/indices/put-mapping.asciidoc | 57 +++------------- .../admin/indices/create/CreateIndexIT.java | 68 +++++++++++++++++++ 2 files changed, 77 insertions(+), 48 deletions(-) diff --git a/docs/java-api/admin/indices/put-mapping.asciidoc b/docs/java-api/admin/indices/put-mapping.asciidoc index e52c66d96c3bb..97cfcf589b9d8 100644 --- a/docs/java-api/admin/indices/put-mapping.asciidoc +++ b/docs/java-api/admin/indices/put-mapping.asciidoc @@ -1,21 +1,13 @@ [[java-admin-indices-put-mapping]] +:base-dir: {docdir}/../../core/src/test/java/org/elasticsearch/action/admin/indices/create + ==== Put Mapping The PUT mapping API allows you to add a new type while creating an index: -[source,java] +["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -client.admin().indices().prepareCreate("twitter") <1> - .addMapping("tweet", "{\n" + <2> - " \"tweet\": {\n" + - " \"properties\": {\n" + - " \"message\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + - " }\n" + - " }\n" + - " }") - .get(); +include-tagged::{base-dir}/CreateIndexIT.java[addMapping-create-index-request] -------------------------------------------------- <1> <> called `twitter` <2> It also adds a `tweet` mapping type. @@ -23,32 +15,9 @@ client.admin().indices().prepareCreate("twitter") <1> The PUT mapping API also allows to add a new type to an existing index: -[source,java] +["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -client.admin().indices().preparePutMapping("twitter") <1> - .setType("user") <2> - .setSource("{\n" + <3> - " \"properties\": {\n" + - " \"name\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + - " }\n" + - "}") - .get(); - -// You can also provide the type in the source document -client.admin().indices().preparePutMapping("twitter") - .setType("user") - .setSource("{\n" + - " \"user\":{\n" + <4> - " \"properties\": {\n" + - " \"name\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}") - .get(); +include-tagged::{base-dir}/CreateIndexIT.java[putMapping-request-source] -------------------------------------------------- <1> Puts a mapping on existing index called `twitter` <2> Adds a `user` mapping type. @@ -57,20 +26,12 @@ client.admin().indices().preparePutMapping("twitter") You can use the same API to update an existing mapping: -[source,java] +["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -client.admin().indices().preparePutMapping("twitter") <1> - .setType("user") <2> - .setSource("{\n" + <3> - " \"properties\": {\n" + - " \"user_name\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + - " }\n" + - "}") - .get(); +include-tagged::{base-dir}/CreateIndexIT.java[putMapping-request-source-append] -------------------------------------------------- <1> Puts a mapping on existing index called `twitter` <2> Updates the `user` mapping type. 
<3> This `user` now has a new field `user_name` +:base-dir!: \ No newline at end of file diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 14d6647071453..2ebb84ef92a72 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.create; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.UnavailableShardsException; @@ -28,6 +29,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -35,6 +37,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.RangeQueryBuilder; @@ -400,4 +403,69 @@ public Settings onNodeStopped(String nodeName) throws Exception { assertThat(e, hasToString(containsString("unknown setting [index.foo]"))); } + /** + * This test method is used to generate the Put Mapping Java Indices API documentation + * at "docs/java-api/admin/indices/put-mapping.asciidoc", so that the documentation gets tested + * and compiles and runs without throwing errors at runtime. 
+ */ + public void testPutMappingDocumentation() throws Exception { + Client client = client(); + // tag::addMapping-create-index-request + client.admin().indices().prepareCreate("twitter") // <1> + .addMapping("tweet", "{\n" + // <2> + " \"tweet\": {\n" + + " \"properties\": {\n" + + " \"message\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + " }\n" + + " }", XContentType.JSON) + .get(); + // end::addMapping-create-index-request + + // we need to delete in order to create a fresh new index with another type + client.admin().indices().prepareDelete("twitter").get(); + client.admin().indices().prepareCreate("twitter").get(); + + // tag::putMapping-request-source + client.admin().indices().preparePutMapping("twitter") // <1> + .setType("user") // <2> + .setSource("{\n" + // <3> + " \"properties\": {\n" + + " \"name\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + "}", XContentType.JSON) + .get(); + + // You can also provide the type in the source document + client.admin().indices().preparePutMapping("twitter") + .setType("user") + .setSource("{\n" + + " \"user\":{\n" + // <4> + " \"properties\": {\n" + + " \"name\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + " }\n" + + "}", XContentType.JSON) + .get(); + // end::putMapping-request-source + + // tag::putMapping-request-source-append + client.admin().indices().preparePutMapping("twitter") // <1> + .setType("user") // <2> + .setSource("{\n" + // <3> + " \"properties\": {\n" + + " \"user_name\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + "}", XContentType.JSON) + .get(); + // end::putMapping-request-source-append + } } From 65e90079adb638f33be5ffb3f387169f15eb7f2b Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 16 Jan 2018 08:37:42 -0500 Subject: [PATCH 24/94] Open engine should keep only starting commit (#28228) Keeping unsafe commits when opening an engine can be problematic because these commits are not safe at recovery time but they can suddenly become safe in the future. The following issues can happen if unsafe commits are kept on init. 1. Replica can use unsafe commit in peer-recovery. This happens when a replica with a safe commit c1 (max_seqno=1) and an unsafe commit c2 (max_seqno=2) recovers from a primary with c1(max_seqno=1). If a new document (seqno=2) is added without flushing and the global checkpoint is advanced to 2, then when the replica recovers again, it will use the unsafe commit c2 (max_seqno=2 <= gcp=2) as the starting commit for sequence-based recovery even though the commit c2 contains a stale operation, and the document (with seqno=2) will not be replicated to the replica. 2. Min translog gen for recovery can go backwards in peer-recovery. This happens when a replica has a safe commit c1 (local_checkpoint=1, recovery_translog_gen=1) and an unsafe commit c2 (local_checkpoint=2, recovery_translog_gen=2). The replica recovers from a primary, and keeps c2 as the last commit, then sets last_translog_gen to 2. Flushing a new commit on the replica will cause an exception as the new last commit c3 will have recovery_translog_gen=1. The recovery translog generation of a commit is calculated based on the current local checkpoint. The local checkpoint of c3 is 1 while the local checkpoint of c2 is 2. 3. A commit without translog can be used for recovery. An old index, which was created before multiple commits were introduced (v6.2), may not have a safe commit. 
If that index has a snapshotted commit without translog and an unsafe commit, the policy can consider the snapshotted commit as a safe commit for recovery even though the commit does not have a translog. These issues can be avoided if the combined deletion policy keeps only the starting commit in onInit. Relates #27804 Relates #28181 --- .../index/engine/CombinedDeletionPolicy.java | 49 ++++++++++++--- .../index/engine/InternalEngine.java | 60 +++++++++++------- .../elasticsearch/index/shard/IndexShard.java | 15 +++-- .../org/elasticsearch/index/store/Store.java | 10 +-- .../engine/CombinedDeletionPolicyTests.java | 43 +++++++++++-- .../index/engine/InternalEngineTests.java | 63 ++++++++++++++++++- .../RecoveryDuringReplicationTests.java | 46 +++++++++++++- 7 files changed, 234 insertions(+), 52 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index e5d8cacf73657..ca0d93fa7c5aa 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -45,37 +45,72 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy { private final TranslogDeletionPolicy translogDeletionPolicy; private final EngineConfig.OpenMode openMode; private final LongSupplier globalCheckpointSupplier; + private final IndexCommit startingCommit; private final ObjectIntHashMap<IndexCommit> snapshottedCommits; // Number of snapshots held against each commit point. private IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint. private IndexCommit lastCommit; // the most recent commit point CombinedDeletionPolicy(EngineConfig.OpenMode openMode, TranslogDeletionPolicy translogDeletionPolicy, - LongSupplier globalCheckpointSupplier) { + LongSupplier globalCheckpointSupplier, IndexCommit startingCommit) { this.openMode = openMode; this.translogDeletionPolicy = translogDeletionPolicy; this.globalCheckpointSupplier = globalCheckpointSupplier; + this.startingCommit = startingCommit; this.snapshottedCommits = new ObjectIntHashMap<>(); } @Override - public void onInit(List<? extends IndexCommit> commits) throws IOException { + public synchronized void onInit(List<? extends IndexCommit> commits) throws IOException { switch (openMode) { case CREATE_INDEX_AND_TRANSLOG: + assert startingCommit == null : "CREATE_INDEX_AND_TRANSLOG must not have starting commit; commit [" + startingCommit + "]"; break; case OPEN_INDEX_CREATE_TRANSLOG: - assert commits.isEmpty() == false : "index is opened, but we have no commits"; - // When an engine starts with OPEN_INDEX_CREATE_TRANSLOG, a new fresh index commit will be created immediately. - // We therefore can simply skip processing here as `onCommit` will be called right after with a new commit. - break; case OPEN_INDEX_AND_TRANSLOG: assert commits.isEmpty() == false : "index is opened, but we have no commits"; - onCommit(commits); + assert startingCommit != null && commits.contains(startingCommit) : "Starting commit not in the existing commit list; " + + "startingCommit [" + startingCommit + "], commit list [" + commits + "]"; + keepOnlyStartingCommitOnInit(commits); + // OPEN_INDEX_CREATE_TRANSLOG can open an index commit from another shard with a different translog history, + // so we should not use that index commit to update the translog deletion policy. 
+ if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { + updateTranslogDeletionPolicy(); + } break; default: throw new IllegalArgumentException("unknown openMode [" + openMode + "]"); } } + /** + * Keeping existing unsafe commits when opening an engine can be problematic because these commits are not safe + * at recovery time but they can suddenly become safe in the future. + * The following issues can happen if unsafe commits are kept on init. + *

+ * 1. Replica can use unsafe commit in peer-recovery. This happens when a replica with a safe commit c1(max_seqno=1) + * and an unsafe commit c2(max_seqno=2) recovers from a primary with c1(max_seqno=1). If a new document(seqno=2) + * is added without flushing, the global checkpoint is advanced to 2; if the replica then recovers again, it will use + * the unsafe commit c2(max_seqno=2 at most gcp=2) as the starting commit for sequence-based recovery even though the + * commit c2 contains a stale operation and the document(with seqno=2) will not be replicated to the replica. + *

+ * 2. Min translog gen for recovery can go backwards in peer-recovery. This happens when a replica has a safe commit + * c1(local_checkpoint=1, recovery_translog_gen=1) and an unsafe commit c2(local_checkpoint=2, recovery_translog_gen=2). + * The replica recovers from a primary, and keeps c2 as the last commit, then sets last_translog_gen to 2. Flushing a new + * commit on the replica will cause an exception as the new last commit c3 will have recovery_translog_gen=1. The recovery + * translog generation of a commit is calculated based on the current local checkpoint. The local checkpoint of c3 is 1 + * while the local checkpoint of c2 is 2. + *

+ * 3. A commit without translog can be used in recovery. An old index, which was created before multiple commits were introduced + * (v6.2), may not have a safe commit. If that index has a snapshotted commit without translog and an unsafe commit, + * the policy can consider the snapshotted commit as a safe commit for recovery even though the commit does not have a translog. + */ + private void keepOnlyStartingCommitOnInit(List<? extends IndexCommit> commits) { + commits.stream().filter(commit -> startingCommit.equals(commit) == false).forEach(IndexCommit::delete); + assert startingCommit.isDeleted() == false : "Starting commit must not be deleted"; + lastCommit = startingCommit; + safeCommit = startingCommit; + } + @Override public synchronized void onCommit(List<? extends IndexCommit> commits) throws IOException { final int keptPosition = indexOfKeptCommits(commits, globalCheckpointSupplier.getAsLong()); diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 1b7b891efd6ff..1efbd0706d156 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -185,7 +185,7 @@ public InternalEngine(EngineConfig engineConfig) { "Starting commit should be non-null; mode [" + openMode + "]; startingCommit [" + startingCommit + "]"; this.localCheckpointTracker = createLocalCheckpointTracker(localCheckpointTrackerSupplier, startingCommit); this.combinedDeletionPolicy = new CombinedDeletionPolicy(openMode, translogDeletionPolicy, - translog::getLastSyncedGlobalCheckpoint); + translog::getLastSyncedGlobalCheckpoint, startingCommit); writer = createWriter(openMode == EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, startingCommit); updateMaxUnsafeAutoIdTimestampFromWriter(writer); assert engineConfig.getForceNewHistoryUUID() == false @@ -411,28 +411,44 @@ public void skipTranslogRecovery() { } private IndexCommit getStartingCommitPoint() throws IOException { - if (openMode == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { - final long lastSyncedGlobalCheckpoint = translog.getLastSyncedGlobalCheckpoint(); - final long minRetainedTranslogGen = translog.getMinFileGeneration(); - final List<IndexCommit> existingCommits = DirectoryReader.listCommits(store.directory()); - // We may not have a safe commit if an index was create before v6.2; and if there is a snapshotted commit whose full translog - // files are not retained but max_seqno is at most the global checkpoint, we may mistakenly select it as a starting commit. - // To avoid this issue, we only select index commits whose translog files are fully retained. 
- if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_2_0)) { - final List<IndexCommit> recoverableCommits = new ArrayList<>(); - for (IndexCommit commit : existingCommits) { - if (minRetainedTranslogGen <= Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))) { - recoverableCommits.add(commit); + final IndexCommit startingIndexCommit; + final List<IndexCommit> existingCommits; + switch (openMode) { + case CREATE_INDEX_AND_TRANSLOG: + startingIndexCommit = null; + break; + case OPEN_INDEX_CREATE_TRANSLOG: + // Use the last commit + existingCommits = DirectoryReader.listCommits(store.directory()); + startingIndexCommit = existingCommits.get(existingCommits.size() - 1); + break; + case OPEN_INDEX_AND_TRANSLOG: + // Use the safe commit + final long lastSyncedGlobalCheckpoint = translog.getLastSyncedGlobalCheckpoint(); + final long minRetainedTranslogGen = translog.getMinFileGeneration(); + existingCommits = DirectoryReader.listCommits(store.directory()); + // We may not have a safe commit if an index was created before v6.2; and if there is a snapshotted commit whose translog + // is not retained but max_seqno is at most the global checkpoint, we may mistakenly select it as a starting commit. + // To avoid this issue, we only select index commits whose translog is fully retained. + if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_2_0)) { + final List<IndexCommit> recoverableCommits = new ArrayList<>(); + for (IndexCommit commit : existingCommits) { + if (minRetainedTranslogGen <= Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))) { + recoverableCommits.add(commit); } + } + assert recoverableCommits.isEmpty() == false : "No commit point with translog found; " + + "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]"; + startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint); + } else { + // TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint. + startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); } - assert recoverableCommits.isEmpty() == false : "No commit point with full translog found; " + - "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]"; - return CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint); - } else { - return CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); - } + break; + default: + throw new IllegalArgumentException("unknown mode: " + openMode); } - return null; + return startingIndexCommit; } private void recoverFromTranslogInternal() throws IOException { @@ -557,9 +573,7 @@ private ExternalSearcherManager createSearcherManager(SearchFactory externalSear final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId); internalSearcherManager = new SearcherManager(directoryReader, new RamAccountingSearcherFactory(engineConfig.getCircuitBreakerService())); - // The index commit from IndexWriterConfig is null if the engine is open with other modes - // rather than CREATE_INDEX_AND_TRANSLOG. In those cases lastCommittedSegmentInfos will be retrieved from the last commit. 
- lastCommittedSegmentInfos = store.readCommittedSegmentsInfo(indexWriter.getConfig().getIndexCommit()); + lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); ExternalSearcherManager externalSearcherManager = new ExternalSearcherManager(internalSearcherManager, externalSearcherFactory); success = true; diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 3832cd0ae2055..b5d28b3a9ecce 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -23,6 +23,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CheckIndex; +import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; @@ -1290,12 +1291,16 @@ public void createIndexAndTranslog() throws IOException { /** opens the engine on top of the existing lucene engine but creates an empty translog **/ public void openIndexAndCreateTranslog(boolean forceNewHistoryUUID, long globalCheckpoint) throws IOException { - assert recoveryState.getRecoverySource().getType() != RecoverySource.Type.EMPTY_STORE && - recoveryState.getRecoverySource().getType() != RecoverySource.Type.EXISTING_STORE; - SequenceNumbers.CommitInfo commitInfo = store.loadSeqNoInfo(null); - assert commitInfo.localCheckpoint >= globalCheckpoint : - "trying to create a shard whose local checkpoint [" + commitInfo.localCheckpoint + "] is < global checkpoint [" + if (Assertions.ENABLED) { + assert recoveryState.getRecoverySource().getType() != RecoverySource.Type.EMPTY_STORE && + recoveryState.getRecoverySource().getType() != RecoverySource.Type.EXISTING_STORE; + SequenceNumbers.CommitInfo commitInfo = store.loadSeqNoInfo(null); + assert commitInfo.localCheckpoint >= globalCheckpoint : + "trying to create a shard whose local checkpoint [" + commitInfo.localCheckpoint + "] is < global checkpoint [" + globalCheckpoint + "]"; + final List existingCommits = DirectoryReader.listCommits(store.directory()); + assert existingCommits.size() == 1 : "Open index create translog should have one commit, commits[" + existingCommits + "]"; + } globalCheckpointTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "opening index with a new translog"); innerOpenEngineAndTranslog(EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG, forceNewHistoryUUID); } diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 74be98b813238..7aab2c750d139 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -182,17 +182,9 @@ public Directory directory() { * @throws IOException if the index is corrupted or the segments file is not present */ public SegmentInfos readLastCommittedSegmentsInfo() throws IOException { - return readCommittedSegmentsInfo(null); - } - - /** - * Returns the committed segments info for the given commit point. - * If the commit point is not provided, this method will return the segments info of the last commit in the store. 
- */ - public SegmentInfos readCommittedSegmentsInfo(final IndexCommit commit) throws IOException { failIfCorrupted(); try { - return readSegmentsInfo(commit, directory()); + return readSegmentsInfo(null, directory()); } catch (CorruptIndexException ex) { markStoreCorrupted(ex); throw ex; diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index e74cde52aa418..ca6059dae0067 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -54,7 +54,8 @@ public class CombinedDeletionPolicyTests extends ESTestCase { public void testKeepCommitsAfterGlobalCheckpoint() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get, null); final LongArrayList maxSeqNoList = new LongArrayList(); final LongArrayList translogGenList = new LongArrayList(); @@ -93,7 +94,8 @@ public void testAcquireIndexCommit() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get, null); long lastMaxSeqNo = between(1, 1000); long lastTranslogGen = between(1, 20); int safeIndex = 0; @@ -156,11 +158,12 @@ public void testLegacyIndex() throws Exception { final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get, null); long legacyTranslogGen = randomNonNegativeLong(); IndexCommit legacyCommit = mockLegacyIndexCommit(translogUUID, legacyTranslogGen); - indexPolicy.onInit(singletonList(legacyCommit)); + indexPolicy.onCommit(singletonList(legacyCommit)); verify(legacyCommit, never()).delete(); assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(legacyTranslogGen)); assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(legacyTranslogGen)); @@ -188,7 +191,8 @@ public void testLegacyIndex() throws Exception { public void testDeleteInvalidCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(randomNonNegativeLong()); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(OPEN_INDEX_CREATE_TRANSLOG, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + OPEN_INDEX_CREATE_TRANSLOG, translogPolicy, globalCheckpoint::get, null); final int invalidCommits = between(1, 10); final List commitList = new ArrayList<>(); @@ -211,6 +215,35 @@ 
public void testDeleteInvalidCommits() throws Exception { } } + /** + * Keeping existing unsafe commits can be problematic because these commits are not safe at recovery time + * but they can suddenly become safe in the future. See {@link CombinedDeletionPolicy#keepOnlyStartingCommitOnInit(List)} + */ + public void testKeepOnlyStartingCommitOnInit() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(randomNonNegativeLong()); + TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); + final UUID translogUUID = UUID.randomUUID(); + final List<IndexCommit> commitList = new ArrayList<>(); + int totalCommits = between(2, 20); + for (int i = 0; i < totalCommits; i++) { + commitList.add(mockIndexCommit(randomNonNegativeLong(), translogUUID, randomNonNegativeLong())); + } + final IndexCommit startingCommit = randomFrom(commitList); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get, startingCommit); + indexPolicy.onInit(commitList); + for (IndexCommit commit : commitList) { + if (commit.equals(startingCommit) == false) { + verify(commit, times(1)).delete(); + } + } + verify(startingCommit, never()).delete(); + assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), + equalTo(Long.parseLong(startingCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); + assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), + equalTo(Long.parseLong(startingCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); + } + IndexCommit mockIndexCommit(long maxSeqNo, UUID translogUUID, long translogGen) throws IOException { final Map<String, String> userData = new HashMap<>(); userData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 518411e59e8cd..db62db7e01b46 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -163,6 +163,7 @@ import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThan; @@ -4010,13 +4011,15 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { boolean flushed = false; + AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); Engine recoveringEngine = null; try { assertEquals(docs - 1, engine.getLocalCheckpointTracker().getMaxSeqNo()); assertEquals(docs - 1, engine.getLocalCheckpointTracker().getCheckpoint()); assertEquals(maxSeqIDOnReplica, replicaEngine.getLocalCheckpointTracker().getMaxSeqNo()); assertEquals(checkpointOnReplica, replicaEngine.getLocalCheckpointTracker().getCheckpoint()); - recoveringEngine = new InternalEngine(copy(replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)); + recoveringEngine = new InternalEngine(copy( + replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, globalCheckpoint::get)); assertEquals(numDocsOnReplica, recoveringEngine.getTranslog().uncommittedOperations()); recoveringEngine.recoverFromTranslog();
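+ // recoverFromTranslog has replayed the uncommitted translog operations, so the recovering engine's + // max_seq_no and local checkpoint should now line up with the replica it was opened from.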
assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); @@ -4038,6 +4041,8 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getCheckpoint()); if ((flushed = randomBoolean())) { + globalCheckpoint.set(recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); + recoveringEngine.getTranslog().sync(); recoveringEngine.flush(true, true); } } @@ -4047,7 +4052,8 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { // now do it again to make sure we preserve values etc. try { - recoveringEngine = new InternalEngine(copy(replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)); + recoveringEngine = new InternalEngine( + copy(replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, globalCheckpoint::get)); if (flushed) { assertEquals(0, recoveringEngine.getTranslog().uncommittedOperations()); } @@ -4355,4 +4361,57 @@ public void testAcquireIndexCommit() throws Exception { assertThat(DirectoryReader.listCommits(engine.store.directory()), hasSize(1)); } } + + public void testOpenIndexAndTranslogKeepOnlySafeCommit() throws Exception { + IOUtils.close(engine); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + final EngineConfig config = copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, globalCheckpoint::get); + final IndexCommit safeCommit; + try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG))) { + final int numDocs = between(5, 50); + for (int i = 0; i < numDocs; i++) { + index(engine, i); + if (randomBoolean()) { + engine.flush(); + } + } + // Selects a starting commit and advances and persists the global checkpoint to that commit. 
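+ // A commit is treated as safe only when its max_seq_no is at most the persisted global checkpoint, + // so the translog sync below is what actually makes the randomly chosen commit the safe commit.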
+ final List<IndexCommit> commits = DirectoryReader.listCommits(engine.store.directory()); + safeCommit = randomFrom(commits); + globalCheckpoint.set(Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO))); + engine.getTranslog().sync(); + } + try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) { + final List<IndexCommit> existingCommits = DirectoryReader.listCommits(engine.store.directory()); + assertThat("OPEN_INDEX_AND_TRANSLOG should keep only safe commit", existingCommits, contains(safeCommit)); + } + } + + public void testOpenIndexCreateTranslogKeepOnlyLastCommit() throws Exception { + IOUtils.close(engine); + final EngineConfig config = copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG); + final Map<String, String> lastCommit; + try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) { + engine.skipTranslogRecovery(); + final int numDocs = between(5, 50); + for (int i = 0; i < numDocs; i++) { + index(engine, i); + if (randomBoolean()) { + engine.flush(); + } + } + final List<IndexCommit> commits = DirectoryReader.listCommits(engine.store.directory()); + lastCommit = commits.get(commits.size() - 1).getUserData(); + } + try (InternalEngine engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG))) { + final List<IndexCommit> existingCommits = DirectoryReader.listCommits(engine.store.directory()); + assertThat("OPEN_INDEX_CREATE_TRANSLOG should keep only last commit", existingCommits, hasSize(1)); + final Map<String, String> userData = existingCommits.get(0).getUserData(); + assertThat(userData.get(SequenceNumbers.MAX_SEQ_NO), equalTo(lastCommit.get(SequenceNumbers.MAX_SEQ_NO))); + assertThat(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY), equalTo(lastCommit.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY))); + // The translog UUID and generation should be fresh. + assertThat(userData.get(Translog.TRANSLOG_UUID_KEY), not(equalTo(lastCommit.get(Translog.TRANSLOG_UUID_KEY)))); + assertThat(userData.get(Translog.TRANSLOG_GENERATION_KEY), equalTo("1")); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index aa97c2049915f..cd948ed9f9036 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -304,8 +304,52 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { replica.store().close(); newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); shards.recoverReplica(newReplica); - shards.assertAllEqual(totalDocs); + // Make sure that flushing on a recovering shard is ok.
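+ // The flush fans out to every active copy, including the shard that has just finished recovery, + // and the assertAllEqual that follows verifies the extra commit neither loses nor resurrects operations.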
+ shards.flush(); + shards.assertAllEqual(totalDocs); + } + } + + public void testReplicaRollbackStaleDocumentsInPeerRecovery() throws Exception { + try (ReplicationGroup shards = createGroup(2)) { + shards.startAll(); + IndexShard oldPrimary = shards.getPrimary(); + IndexShard newPrimary = shards.getReplicas().get(0); + IndexShard replica = shards.getReplicas().get(1); + int goodDocs = shards.indexDocs(scaledRandomIntBetween(1, 20)); + shards.flush(); + // simulate docs that were in flight when the primary failed; these will be rolled back + int staleDocs = scaledRandomIntBetween(1, 10); + logger.info("--> indexing {} stale docs", staleDocs); + for (int i = 0; i < staleDocs; i++) { + final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "stale_" + i) + .source("{}", XContentType.JSON); + final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary); + indexOnReplica(bulkShardRequest, replica); + } + shards.flush(); + shards.promoteReplicaToPrimary(newPrimary).get(); + // Recovering the replica should roll back the stale documents + shards.removeReplica(replica); + replica.close("recover replica - first time", false); + replica.store().close(); + replica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); + shards.recoverReplica(replica); + shards.assertAllEqual(goodDocs); + // Index more docs - move the global checkpoint >= seqno of the stale operations. + goodDocs += shards.indexDocs(scaledRandomIntBetween(staleDocs, staleDocs * 5)); + shards.syncGlobalCheckpoint(); + assertThat(replica.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(replica.seqNoStats().getMaxSeqNo())); + // Recovering the replica again should also roll back the stale documents. + shards.removeReplica(replica); + replica.close("recover replica - second time", false); + replica.store().close(); + IndexShard anotherReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); + shards.recoverReplica(anotherReplica); + shards.assertAllEqual(goodDocs); + shards.flush(); + shards.assertAllEqual(goodDocs); } } From 4f5be7db3ce9f1ea7f864cc1fd38ee09363aa64d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 16 Jan 2018 15:19:47 +0100 Subject: [PATCH 25/94] [Docs] Fix base directory to include for put_mapping.asciidoc --- docs/java-api/admin/indices/put-mapping.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/java-api/admin/indices/put-mapping.asciidoc b/docs/java-api/admin/indices/put-mapping.asciidoc index 97cfcf589b9d8..887f6cb76e7c6 100644 --- a/docs/java-api/admin/indices/put-mapping.asciidoc +++ b/docs/java-api/admin/indices/put-mapping.asciidoc @@ -1,5 +1,5 @@ [[java-admin-indices-put-mapping]] -:base-dir: {docdir}/../../core/src/test/java/org/elasticsearch/action/admin/indices/create +:base-dir: {docdir}/../../server/src/test/java/org/elasticsearch/action/admin/indices/create ==== Put Mapping @@ -34,4 +34,4 @@ include-tagged::{base-dir}/CreateIndexIT.java[putMapping-request-source-append] <2> Updates the `user` mapping type. <3> This `user` has now a new field `user_name` -:base-dir!: \ No newline at end of file +:base-dir!: From d4ac0026fc7dacbf66c184327b8e39b15d0a2d56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 16 Jan 2018 15:53:28 +0100 Subject: [PATCH 26/94] [Docs] Clarify numeric datatype ranges (#28240) Since #25826 we reject infinite values for float, double and half_float datatypes.
This change adds this restriction to the documentation for the supported datatypes. Closes #27653 --- docs/reference/mapping/types/numeric.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/reference/mapping/types/numeric.asciidoc b/docs/reference/mapping/types/numeric.asciidoc index 5e9de317bac38..af58eac659e48 100644 --- a/docs/reference/mapping/types/numeric.asciidoc +++ b/docs/reference/mapping/types/numeric.asciidoc @@ -8,9 +8,9 @@ The following numeric types are supported: `integer`:: A signed 32-bit integer with a minimum value of +-2^31^+ and a maximum value of +2^31^-1+. `short`:: A signed 16-bit integer with a minimum value of +-32,768+ and a maximum value of +32,767+. `byte`:: A signed 8-bit integer with a minimum value of +-128+ and a maximum value of +127+. -`double`:: A double-precision 64-bit IEEE 754 floating point number. -`float`:: A single-precision 32-bit IEEE 754 floating point number. -`half_float`:: A half-precision 16-bit IEEE 754 floating point number. +`double`:: A double-precision 64-bit IEEE 754 floating point number, restricted to finite values. +`float`:: A single-precision 32-bit IEEE 754 floating point number, restricted to finite values. +`half_float`:: A half-precision 16-bit IEEE 754 floating point number, restricted to finite values. `scaled_float`:: A floating point number that is backed by a `long`, scaled by a fixed `double` scaling factor. Below is an example of configuring a mapping with numeric fields: From 853f7e878031d43267b7365f3b2b4beec513aa10 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Fri, 10 Nov 2017 07:19:01 +0100 Subject: [PATCH 27/94] Added multi get api to the high level rest client. Relates to #27205 --- .../org/elasticsearch/client/Request.java | 10 ++ .../client/RestHighLevelClient.java | 21 ++++ .../java/org/elasticsearch/client/CrudIT.java | 60 ++++++++++ .../elasticsearch/client/RequestTests.java | 54 +++++++++ .../action/get/MultiGetResponseTests.java | 83 +++++++++++++ .../action/get/MultiGetRequest.java | 98 ++++++++++----- .../action/get/MultiGetResponse.java | 113 +++++++++++++++--- .../org/elasticsearch/index/VersionType.java | 5 + .../elasticsearch/index/get/GetResult.java | 9 +- .../action/get/MultiGetRequestTests.java | 61 ++++++++++ 10 files changed, 467 insertions(+), 47 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index dd08179cf6297..d35db1c637d4c 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -35,6 +35,7 @@ import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.MultiSearchRequest; @@ -312,6 +313,15 @@ static Request get(GetRequest getRequest) { return new Request(HttpGet.METHOD_NAME, endpoint, parameters.getParams(), null); } + static Request multiGet(MultiGetRequest multiGetRequest) throws IOException { + Params parameters = Params.builder(); + parameters.withPreference(multiGetRequest.preference()); + 
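// preference, realtime and refresh are the only multi-get options carried as query-string parameters; + // the per-item options (routing, stored fields, fetch source, version, ...) are serialized into the body entity below. +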
parameters.withRealtime(multiGetRequest.realtime()); + parameters.withRefresh(multiGetRequest.refresh()); + HttpEntity entity = createEntity(multiGetRequest, REQUEST_BODY_CONTENT_TYPE); + return new Request(HttpGet.METHOD_NAME, "/_mget", parameters.getParams(), entity); + } + static Request index(IndexRequest indexRequest) { String method = Strings.hasLength(indexRequest.id()) ? HttpPut.METHOD_NAME : HttpPost.METHOD_NAME; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index ca244eee88c62..cad7449c689ca 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -34,6 +34,8 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.main.MainRequest; @@ -289,6 +291,25 @@ public final void getAsync(GetRequest getRequest, ActionListener<GetResponse> li performRequestAsyncAndParseEntity(getRequest, Request::get, GetResponse::fromXContent, listener, singleton(404), headers); } + /** + * Retrieves multiple documents by id using the Multi Get API + * + * See Multi Get API on elastic.co + */ + public final MultiGetResponse multiGet(MultiGetRequest multiGetRequest, Header... headers) throws IOException { + return performRequestAndParseEntity(multiGetRequest, Request::multiGet, MultiGetResponse::fromXContent, singleton(404), headers); + } + + /** + * Asynchronously retrieves multiple documents by id using the Multi Get API + * + * See Multi Get API on elastic.co + */ + public void multiGetAsync(MultiGetRequest multiGetRequest, ActionListener<MultiGetResponse> listener, Header... headers) { + performRequestAsyncAndParseEntity(multiGetRequest, Request::multiGet, MultiGetResponse::fromXContent, listener, + singleton(404), headers); + } + /** * Checks for the existence of a document.
Returns true if it exists, false otherwise * diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index e36c445082ed6..14d29ddd9eb67 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -33,6 +33,8 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.update.UpdateRequest; @@ -238,6 +240,64 @@ public void testGet() throws IOException { } } + public void testMultiGet() throws IOException { + { + MultiGetRequest multiGetRequest = new MultiGetRequest(); + multiGetRequest.add("index", "type", "id1"); + multiGetRequest.add("index", "type", "id2"); + MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync); + assertEquals(2, response.getResponses().length); + + assertTrue(response.getResponses()[0].isFailed()); + assertNull(response.getResponses()[0].getResponse()); + assertEquals("id1", response.getResponses()[0].getFailure().getId()); + assertEquals("type", response.getResponses()[0].getFailure().getType()); + assertEquals("index", response.getResponses()[0].getFailure().getIndex()); + assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", + response.getResponses()[0].getFailure().getFailure().getMessage()); + + assertTrue(response.getResponses()[1].isFailed()); + assertNull(response.getResponses()[1].getResponse()); + assertEquals("id2", response.getResponses()[1].getId()); + assertEquals("type", response.getResponses()[1].getType()); + assertEquals("index", response.getResponses()[1].getIndex()); + assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", + response.getResponses()[1].getFailure().getFailure().getMessage()); + } + + String document = "{\"field\":\"value1\"}"; + StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); + Response r = client().performRequest("PUT", "/index/type/id1", Collections.singletonMap("refresh", "true"), stringEntity); + assertEquals(201, r.getStatusLine().getStatusCode()); + + document = "{\"field\":\"value2\"}"; + stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); + r = client().performRequest("PUT", "/index/type/id2", Collections.singletonMap("refresh", "true"), stringEntity); + assertEquals(201, r.getStatusLine().getStatusCode()); + + { + MultiGetRequest multiGetRequest = new MultiGetRequest(); + multiGetRequest.add("index", "type", "id1"); + multiGetRequest.add("index", "type", "id2"); + MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync); + assertEquals(2, response.getResponses().length); + + assertFalse(response.getResponses()[0].isFailed()); + assertNull(response.getResponses()[0].getFailure()); + assertEquals("id1", response.getResponses()[0].getId()); + assertEquals("type", response.getResponses()[0].getType()); + assertEquals("index", response.getResponses()[0].getIndex()); + assertEquals(Collections.singletonMap("field", "value1"), 
response.getResponses()[0].getResponse().getSource()); + + assertFalse(response.getResponses()[1].isFailed()); + assertNull(response.getResponses()[1].getFailure()); + assertEquals("id2", response.getResponses()[1].getId()); + assertEquals("type", response.getResponses()[1].getType()); + assertEquals("index", response.getResponses()[1].getIndex()); + assertEquals(Collections.singletonMap("field", "value2"), response.getResponses()[1].getResponse().getSource()); + } + } + public void testIndex() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index 019162bae37a7..acb27fff7e2ef 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.MultiSearchRequest; @@ -147,6 +148,59 @@ public void testGet() { getAndExistsTest(Request::get, "GET"); } + public void testMultiGet() throws IOException { + Map expectedParams = new HashMap<>(); + MultiGetRequest multiGetRequest = new MultiGetRequest(); + if (randomBoolean()) { + String preference = randomAlphaOfLength(4); + multiGetRequest.preference(preference); + expectedParams.put("preference", preference); + } + if (randomBoolean()) { + multiGetRequest.realtime(randomBoolean()); + if (multiGetRequest.realtime() == false) { + expectedParams.put("realtime", "false"); + } + } + if (randomBoolean()) { + multiGetRequest.refresh(randomBoolean()); + if (multiGetRequest.refresh()) { + expectedParams.put("refresh", "true"); + } + } + + int numberOfRequests = randomIntBetween(0, 32); + for (int i = 0; i < numberOfRequests; i++) { + MultiGetRequest.Item item = + new MultiGetRequest.Item(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4)); + if (randomBoolean()) { + item.routing(randomAlphaOfLength(4)); + } + if (randomBoolean()) { + item.parent(randomAlphaOfLength(4)); + } + if (randomBoolean()) { + item.storedFields(generateRandomStringArray(16, 8, false)); + } + if (randomBoolean()) { + item.version(randomNonNegativeLong()); + } + if (randomBoolean()) { + item.versionType(randomFrom(VersionType.values())); + } + if (randomBoolean()) { + randomizeFetchSourceContextParams(item::fetchSourceContext, new HashMap<>()); + } + multiGetRequest.add(item); + } + + Request request = Request.multiGet(multiGetRequest); + assertEquals("GET", request.getMethod()); + assertEquals("/_mget", request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(multiGetRequest, request.getEntity()); + } + public void testDelete() { String index = randomAlphaOfLengthBetween(3, 10); String type = randomAlphaOfLengthBetween(3, 10); diff --git a/core/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java b/core/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java new file mode 100644 index 0000000000000..82638870eef58 --- /dev/null +++ 
b/core/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java @@ -0,0 +1,83 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.get; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class MultiGetResponseTests extends ESTestCase { + + public void testFromXContent() throws IOException { + for (int runs = 0; runs < 20; runs++) { + MultiGetResponse expected = createTestInstance(); + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference shuffled = toShuffledXContent(expected, xContentType, ToXContent.EMPTY_PARAMS, false); + + XContentParser parser = createParser(XContentFactory.xContent(xContentType), shuffled); + MultiGetResponse parsed = MultiGetResponse.fromXContent(parser); + assertNull(parser.nextToken()); + assertNotSame(expected, parsed); + + assertThat(parsed.getResponses().length, equalTo(expected.getResponses().length)); + for (int i = 0; i < expected.getResponses().length; i++) { + MultiGetItemResponse expectedItem = expected.getResponses()[i]; + MultiGetItemResponse actualItem = parsed.getResponses()[i]; + assertThat(actualItem.getIndex(), equalTo(expectedItem.getIndex())); + assertThat(actualItem.getType(), equalTo(expectedItem.getType())); + assertThat(actualItem.getId(), equalTo(expectedItem.getId())); + if (expectedItem.isFailed()) { + assertThat(actualItem.isFailed(), is(true)); + assertThat(actualItem.getFailure().getMessage(), containsString(expectedItem.getFailure().getMessage())); + } else { + assertThat(actualItem.isFailed(), is(false)); + assertThat(actualItem.getResponse(), equalTo(expectedItem.getResponse())); + } + } + } + } + + private static MultiGetResponse createTestInstance() { + MultiGetItemResponse[] items = new MultiGetItemResponse[randomIntBetween(0, 128)]; + for (int i = 0; i < items.length; i++) { + if (randomBoolean()) { + items[i] = new MultiGetItemResponse(new GetResponse(new GetResult( + randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), randomNonNegativeLong(), + true, null, null + )), null); + } else { + items[i] = new MultiGetItemResponse(null, new MultiGetResponse.Failure(randomAlphaOfLength(4), + randomAlphaOfLength(4), 
randomAlphaOfLength(4), new RuntimeException(randomAlphaOfLength(4)))); + } + } + return new MultiGetResponse(items); + } + +} diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index 48e3f5e81bf6f..a7b63da8974fd 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -35,7 +35,10 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.VersionType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; @@ -47,8 +50,10 @@ import java.util.List; import java.util.Locale; -public class MultiGetRequest extends ActionRequest implements Iterable, CompositeIndicesRequest, RealtimeRequest { +public class MultiGetRequest extends ActionRequest + implements Iterable, CompositeIndicesRequest, RealtimeRequest, ToXContentObject { + private static final ParseField DOCS = new ParseField("docs"); private static final ParseField INDEX = new ParseField("_index"); private static final ParseField TYPE = new ParseField("_type"); private static final ParseField ID = new ParseField("_id"); @@ -63,7 +68,8 @@ public class MultiGetRequest extends ActionRequest implements Iterable items, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, boolean allowExplicitIndex) throws IOException { + private static void parseDocuments(XContentParser parser, List items, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, boolean allowExplicitIndex) throws IOException { String currentFieldName = null; - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token != XContentParser.Token.START_OBJECT) { + Token token; + while ((token = parser.nextToken()) != Token.END_ARRAY) { + if (token != Token.START_OBJECT) { throw new IllegalArgumentException("docs array element should include an object"); } String index = defaultIndex; @@ -387,8 +414,8 @@ public static void parseDocuments(XContentParser parser, List items, @Null FetchSourceContext fetchSourceContext = FetchSourceContext.FETCH_SOURCE; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { if (INDEX.match(currentFieldName)) { @@ -419,7 +446,7 @@ public static void parseDocuments(XContentParser parser, List items, @Null if (parser.isBooleanValueLenient()) { fetchSourceContext = new FetchSourceContext(parser.booleanValue(), fetchSourceContext.includes(), fetchSourceContext.excludes()); - } else if (token == XContentParser.Token.VALUE_STRING) { + } else if (token == Token.VALUE_STRING) { fetchSourceContext = new 
FetchSourceContext(fetchSourceContext.fetchSource(), new String[]{parser.text()}, fetchSourceContext.excludes()); } else { @@ -428,30 +455,30 @@ public static void parseDocuments(XContentParser parser, List items, @Null } else { throw new ElasticsearchParseException("failed to parse multi get request. unknown field [{}]", currentFieldName); } - } else if (token == XContentParser.Token.START_ARRAY) { + } else if (token == Token.START_ARRAY) { if (FIELDS.match(currentFieldName)) { throw new ParsingException(parser.getTokenLocation(), "Unsupported field [fields] used, expected [stored_fields] instead"); } else if (STORED_FIELDS.match(currentFieldName)) { storedFields = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + while ((token = parser.nextToken()) != Token.END_ARRAY) { storedFields.add(parser.text()); } } else if (SOURCE.match(currentFieldName)) { ArrayList includes = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + while ((token = parser.nextToken()) != Token.END_ARRAY) { includes.add(parser.text()); } fetchSourceContext = new FetchSourceContext(fetchSourceContext.fetchSource(), includes.toArray(Strings.EMPTY_ARRAY) , fetchSourceContext.excludes()); } - } else if (token == XContentParser.Token.START_OBJECT) { + } else if (token == Token.START_OBJECT) { if (SOURCE.match(currentFieldName)) { List currentList = null, includes = null, excludes = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == Token.FIELD_NAME) { currentFieldName = parser.currentName(); if ("includes".equals(currentFieldName) || "include".equals(currentFieldName)) { currentList = includes != null ? 
includes : (includes = new ArrayList<>(2)); @@ -460,8 +487,8 @@ public static void parseDocuments(XContentParser parser, List items, @Null } else { throw new ElasticsearchParseException("source definition may not contain [{}]", parser.text()); } - } else if (token == XContentParser.Token.START_ARRAY) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + } else if (token == Token.START_ARRAY) { + while ((token = parser.nextToken()) != Token.END_ARRAY) { currentList.add(parser.text()); } } else if (token.isValue()) { @@ -488,13 +515,9 @@ public static void parseDocuments(XContentParser parser, List items, @Null } } - public static void parseDocuments(XContentParser parser, List items) throws IOException { - parseDocuments(parser, items, null, null, null, null, null, true); - } - public static void parseIds(XContentParser parser, List items, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting) throws IOException { - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + Token token; + while ((token = parser.nextToken()) != Token.END_ARRAY) { if (!token.isValue()) { throw new IllegalArgumentException("ids array element should only contain ids"); } @@ -537,4 +560,17 @@ public void writeTo(StreamOutput out) throws IOException { item.writeTo(out); } } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray(DOCS.getPreferredName()); + for (Item item : items) { + builder.value(item); + } + builder.endArray(); + builder.endObject(); + return builder; + } + } diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java index 93e4272bd956c..9cd9f71a6c53a 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java @@ -21,29 +21,41 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.index.get.GetResult; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; +import java.util.List; public class MultiGetResponse extends ActionResponse implements Iterable, ToXContentObject { + private static final ParseField INDEX = new ParseField("_index"); + private static final ParseField TYPE = new ParseField("_type"); + private static final ParseField ID = new ParseField("_id"); + private static final ParseField ERROR = new ParseField("error"); + private static final ParseField DOCS = new ParseField("docs"); + /** * Represents a failure. 
*/ - public static class Failure implements Streamable { + public static class Failure implements Streamable, ToXContentObject { + private String index; private String type; private String id; private Exception exception; Failure() { - } public Failure(String index, String type, String id, Exception exception) { @@ -103,6 +115,17 @@ public void writeTo(StreamOutput out) throws IOException { out.writeException(exception); } + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INDEX.getPreferredName(), index); + builder.field(TYPE.getPreferredName(), type); + builder.field(ID.getPreferredName(), id); + ElasticsearchException.generateFailureXContent(builder, params, exception, true); + builder.endObject(); + return builder; + } + public Exception getFailure() { return exception; } @@ -129,16 +152,11 @@ public Iterator<MultiGetItemResponse> iterator() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.startArray(Fields.DOCS); + builder.startArray(DOCS.getPreferredName()); for (MultiGetItemResponse response : responses) { if (response.isFailed()) { - builder.startObject(); Failure failure = response.getFailure(); - builder.field(Fields._INDEX, failure.getIndex()); - builder.field(Fields._TYPE, failure.getType()); - builder.field(Fields._ID, failure.getId()); - ElasticsearchException.generateFailureXContent(builder, params, failure.getFailure(), true); - builder.endObject(); + failure.toXContent(builder, params); } else { GetResponse getResponse = response.getResponse(); getResponse.toXContent(builder, params); @@ -149,11 +167,78 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - static final class Fields { - static final String DOCS = "docs"; - static final String _INDEX = "_index"; - static final String _TYPE = "_type"; - static final String _ID = "_id"; + public static MultiGetResponse fromXContent(XContentParser parser) throws IOException { + String currentFieldName = null; + List<MultiGetItemResponse> items = new ArrayList<>(); + for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) { + switch (token) { + case FIELD_NAME: + currentFieldName = parser.currentName(); + break; + case START_ARRAY: + if (DOCS.getPreferredName().equals(currentFieldName)) { + for (token = parser.nextToken(); token != Token.END_ARRAY; token = parser.nextToken()) { + if (token == Token.START_OBJECT) { + items.add(parseItem(parser)); + } + } + } + break; + default: + // If unknown tokens are encountered then these should be ignored, because + // this is parsing logic on the client side.
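+ // For example, a newer server may add top-level fields to the _mget response that this + // client version does not yet know about.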
+ break; + } + } + return new MultiGetResponse(items.toArray(new MultiGetItemResponse[0])); + } + + private static MultiGetItemResponse parseItem(XContentParser parser) throws IOException { + String currentFieldName = null; + String index = null; + String type = null; + String id = null; + ElasticsearchException exception = null; + GetResult getResult = null; + for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) { + switch (token) { + case FIELD_NAME: + currentFieldName = parser.currentName(); + if (INDEX.match(currentFieldName) == false && TYPE.match(currentFieldName) == false && + ID.match(currentFieldName) == false && ERROR.match(currentFieldName) == false) { + getResult = GetResult.fromXContentEmbedded(parser, index, type, id); + } + break; + case VALUE_STRING: + if (INDEX.match(currentFieldName)) { + index = parser.text(); + } else if (TYPE.match(currentFieldName)) { + type = parser.text(); + } else if (ID.match(currentFieldName)) { + id = parser.text(); + } + break; + case START_OBJECT: + if (ERROR.match(currentFieldName)) { + exception = ElasticsearchException.fromXContent(parser); + } + break; + default: + // If unknown tokens are encountered then these should be ignored, because + // this is parsing logic on the client side. + break; + } + if (getResult != null) { + break; + } + } + + if (exception != null) { + return new MultiGetItemResponse(null, new Failure(index, type, id, exception)); + } else { + GetResponse getResponse = new GetResponse(getResult); + return new MultiGetItemResponse(getResponse, null); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/index/VersionType.java b/server/src/main/java/org/elasticsearch/index/VersionType.java index c5094ea185db1..6a8214cb0b8ec 100644 --- a/server/src/main/java/org/elasticsearch/index/VersionType.java +++ b/server/src/main/java/org/elasticsearch/index/VersionType.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.lucene.uid.Versions; import java.io.IOException; +import java.util.Locale; public enum VersionType implements Writeable { INTERNAL((byte) 0) { @@ -350,6 +351,10 @@ public static VersionType fromString(String versionType, VersionType defaultVers return fromString(versionType); } + public static String toString(VersionType versionType) { + return versionType.name().toLowerCase(Locale.ROOT); + } + public static VersionType fromValue(byte value) { if (value == 0) { return INTERNAL; diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java index 75e283b4191b1..4cdf2a4892690 100644 --- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java +++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -269,14 +269,19 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static GetResult fromXContentEmbedded(XContentParser parser) throws IOException { XContentParser.Token token = parser.nextToken(); ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); + return fromXContentEmbedded(parser, null, null, null); + } + + public static GetResult fromXContentEmbedded(XContentParser parser, String index, String type, String id) throws IOException { + XContentParser.Token token = parser.currentToken(); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); String currentFieldName = parser.currentName(); - String index = null, type = null, id = null; long version = -1;
Boolean found = null; BytesReference source = null; Map fields = new HashMap<>(); - while((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { diff --git a/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java index 73c77d0629295..8834ee203fba0 100644 --- a/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java @@ -20,15 +20,21 @@ package org.elasticsearch.action.get; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.VersionType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.test.ESTestCase; import java.io.IOException; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; public class MultiGetRequestTests extends ESTestCase { @@ -129,4 +135,59 @@ public void testAddWithValidSourceValueIsAccepted() throws Exception { assertEquals(2, multiGetRequest.getItems().size()); } + + public void testXContentSerialization() throws IOException { + for (int runs = 0; runs < 20; runs++) { + MultiGetRequest expected = createTestInstance(); + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference shuffled = toShuffledXContent(expected, xContentType, ToXContent.EMPTY_PARAMS, false); + XContentParser parser = createParser(XContentFactory.xContent(xContentType), shuffled); + MultiGetRequest actual = new MultiGetRequest(); + actual.add(null, null, null, null, null, parser, true); + assertThat(parser.nextToken(), nullValue()); + + assertThat(actual.items.size(), equalTo(expected.items.size())); + for (int i = 0; i < expected.items.size(); i++) { + MultiGetRequest.Item expectedItem = expected.items.get(i); + MultiGetRequest.Item actualItem = actual.items.get(i); + assertThat(actualItem, equalTo(expectedItem)); + } + } + } + + private MultiGetRequest createTestInstance() { + int numItems = randomIntBetween(0, 128); + MultiGetRequest request = new MultiGetRequest(); + for (int i = 0; i < numItems; i++) { + MultiGetRequest.Item item = new MultiGetRequest.Item(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4)); + if (randomBoolean()) { + item.version(randomNonNegativeLong()); + } + if (randomBoolean()) { + item.versionType(randomFrom(VersionType.values())); + } + if (randomBoolean()) { + FetchSourceContext fetchSourceContext; + if (randomBoolean()) { + fetchSourceContext = new FetchSourceContext(true, generateRandomStringArray(16, 8, false), + generateRandomStringArray(5, 4, false)); + } else { + fetchSourceContext = new FetchSourceContext(false); + } + item.fetchSourceContext(fetchSourceContext); + } + if (randomBoolean()) { + item.storedFields(generateRandomStringArray(16, 8, false)); + } + if (randomBoolean()) { + item.routing(randomAlphaOfLength(4)); + } + if 
(randomBoolean()) { + item.parent(randomAlphaOfLength(4)); + } + request.add(item); + } + return request; + } + } From 409b3d2ebd3be72568b2cca0e1f604f08f6fcfd8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 16 Jan 2018 17:30:55 +0100 Subject: [PATCH 28/94] Revert "[Docs] Fix base directory to include for put_mapping.asciidoc" This reverts commit 4f5be7db3ce9f1ea7f864cc1fd38ee09363aa64d. --- docs/java-api/admin/indices/put-mapping.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/java-api/admin/indices/put-mapping.asciidoc b/docs/java-api/admin/indices/put-mapping.asciidoc index 887f6cb76e7c6..97cfcf589b9d8 100644 --- a/docs/java-api/admin/indices/put-mapping.asciidoc +++ b/docs/java-api/admin/indices/put-mapping.asciidoc @@ -1,5 +1,5 @@ [[java-admin-indices-put-mapping]] -:base-dir: {docdir}/../../server/src/test/java/org/elasticsearch/action/admin/indices/create +:base-dir: {docdir}/../../core/src/test/java/org/elasticsearch/action/admin/indices/create ==== Put Mapping @@ -34,4 +34,4 @@ include-tagged::{base-dir}/CreateIndexIT.java[putMapping-request-source-append] <2> Updates the `user` mapping type. <3> This `user` has now a new field `user_name` -:base-dir!: +:base-dir!: \ No newline at end of file From 8a58df46f31640a4575157227110523f4edb001b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 16 Jan 2018 17:31:11 +0100 Subject: [PATCH 29/94] Revert "[Docs] Fix Java Api index administration usage (#28133)" This reverts commit 67c1f1c856cad9624087931e7ca1285e16cd55f7. --- .../admin/indices/put-mapping.asciidoc | 57 +++++++++++--- .../admin/indices/create/CreateIndexIT.java | 68 ------------------- 2 files changed, 48 insertions(+), 77 deletions(-) diff --git a/docs/java-api/admin/indices/put-mapping.asciidoc b/docs/java-api/admin/indices/put-mapping.asciidoc index 97cfcf589b9d8..e52c66d96c3bb 100644 --- a/docs/java-api/admin/indices/put-mapping.asciidoc +++ b/docs/java-api/admin/indices/put-mapping.asciidoc @@ -1,13 +1,21 @@ [[java-admin-indices-put-mapping]] -:base-dir: {docdir}/../../core/src/test/java/org/elasticsearch/action/admin/indices/create - ==== Put Mapping The PUT mapping API allows you to add a new type while creating an index: -["source","java",subs="attributes,callouts,macros"] +[source,java] -------------------------------------------------- -include-tagged::{base-dir}/CreateIndexIT.java[addMapping-create-index-request] +client.admin().indices().prepareCreate("twitter") <1> + .addMapping("tweet", "{\n" + <2> + " \"tweet\": {\n" + + " \"properties\": {\n" + + " \"message\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + " }\n" + + " }") + .get(); -------------------------------------------------- <1> <<java-admin-indices-create-index,Creates an index>> called `twitter` <2> It also adds a `tweet` mapping type.
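(Editorial sketch, not part of the reverted patch: the mapping above is embedded as an escaped JSON string; the same request can be expressed with the XContentBuilder API, which is easier to keep well-formed. This assumes the `addMapping(String, XContentBuilder)` overload of `CreateIndexRequestBuilder` and an already-built `client`.)

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    // Build the same "tweet" mapping programmatically instead of as an escaped JSON string.
    XContentBuilder mapping = XContentFactory.jsonBuilder()
        .startObject()
            .startObject("tweet")
                .startObject("properties")
                    .startObject("message")
                        .field("type", "text")
                    .endObject()
                .endObject()
            .endObject()
        .endObject();
    client.admin().indices().prepareCreate("twitter")
        .addMapping("tweet", mapping) // assumed builder overload; the diff above uses the String variant
        .get();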
@@ -15,9 +23,32 @@ include-tagged::{base-dir}/CreateIndexIT.java[addMapping-create-index-request] The PUT mapping API also allows to add a new type to an existing index: -["source","java",subs="attributes,callouts,macros"] +[source,java] -------------------------------------------------- -include-tagged::{base-dir}/CreateIndexIT.java[putMapping-request-source] +client.admin().indices().preparePutMapping("twitter") <1> + .setType("user") <2> + .setSource("{\n" + <3> + " \"properties\": {\n" + + " \"name\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + "}") + .get(); + +// You can also provide the type in the source document +client.admin().indices().preparePutMapping("twitter") + .setType("user") + .setSource("{\n" + + " \"user\":{\n" + <4> + " \"properties\": {\n" + + " \"name\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + " }\n" + + "}") + .get(); -------------------------------------------------- <1> Puts a mapping on existing index called `twitter` <2> Adds a `user` mapping type. @@ -26,12 +57,20 @@ include-tagged::{base-dir}/CreateIndexIT.java[putMapping-request-source] You can use the same API to update an existing mapping: -["source","java",subs="attributes,callouts,macros"] +[source,java] -------------------------------------------------- -include-tagged::{base-dir}/CreateIndexIT.java[putMapping-request-source-append] +client.admin().indices().preparePutMapping("twitter") <1> + .setType("user") <2> + .setSource("{\n" + <3> + " \"properties\": {\n" + + " \"user_name\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + "}") + .get(); -------------------------------------------------- <1> Puts a mapping on existing index called `twitter` <2> Updates the `user` mapping type. <3> This `user` has now a new field `user_name` -:base-dir!: \ No newline at end of file diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 2ebb84ef92a72..14d6647071453 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.create; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.UnavailableShardsException; @@ -29,7 +28,6 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -37,7 +35,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.RangeQueryBuilder; @@ -403,69 +400,4 @@ public Settings onNodeStopped(String nodeName) throws Exception { assertThat(e, hasToString(containsString("unknown setting [index.foo]"))); } - /** - * This test method is used to generate the Put Mapping Java Indices API 
documentation - * at "docs/java-api/admin/indices/put-mapping.asciidoc" so the documentation gets tested - * so that it compiles and runs without throwing errors at runtime. - */ - public void testPutMappingDocumentation() throws Exception { - Client client = client(); - // tag::addMapping-create-index-request - client.admin().indices().prepareCreate("twitter") // <1> - .addMapping("tweet", "{\n" + // <2> - " \"tweet\": {\n" + - " \"properties\": {\n" + - " \"message\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + - " }\n" + - " }\n" + - " }", XContentType.JSON) - .get(); - // end::addMapping-create-index-request - - // we need to delete in order to create a fresh new index with another type - client.admin().indices().prepareDelete("twitter").get(); - client.admin().indices().prepareCreate("twitter").get(); - - // tag::putMapping-request-source - client.admin().indices().preparePutMapping("twitter") // <1> - .setType("user") // <2> - .setSource("{\n" + // <3> - " \"properties\": {\n" + - " \"name\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + - " }\n" + - "}", XContentType.JSON) - .get(); - - // You can also provide the type in the source document - client.admin().indices().preparePutMapping("twitter") - .setType("user") - .setSource("{\n" + - " \"user\":{\n" + // <4> - " \"properties\": {\n" + - " \"name\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}", XContentType.JSON) - .get(); - // end::putMapping-request-source - - // tag::putMapping-request-source-append - client.admin().indices().preparePutMapping("twitter") // <1> - .setType("user") // <2> - .setSource("{\n" + // <3> - " \"properties\": {\n" + - " \"user_name\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + - " }\n" + - "}", XContentType.JSON) - .get(); - // end::putMapping-request-source-append - } } From 0a79555a122f0a2b7361c12760f32337e20fba9a Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 16 Jan 2018 13:45:13 -0500 Subject: [PATCH 30/94] Require JDK 9 for compilation (#28071) This commit modifies the build to require JDK 9 for compilation. Henceforth, we will compile with a JDK 9 compiler targeting JDK 8 as the class file format. Optionally, RUNTIME_JAVA_HOME can be set as the runtime JDK used for running tests. To enable this change, we separate the meaning of the compiler Java home versus the runtime Java home. If the runtime Java home is not set (via RUNTIME_JAVA_HOME) then we fallback to using JAVA_HOME as the runtime Java home. 
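As a rough sketch of the resolution order this describes (names here are illustrative only; the real logic is the Gradle build code in BuildPlugin.groovy in the diff below):

    // Minimal sketch of the compiler/runtime JDK resolution described above.
    // Illustrative names; the actual implementation is the build plugin below.
    final class JavaHomeResolution {

        // the compiler JDK is mandatory and comes from JAVA_HOME
        static String compilerJavaHome() {
            final String javaHome = System.getenv("JAVA_HOME");
            if (javaHome == null) {
                throw new IllegalStateException("JAVA_HOME must be set to build Elasticsearch");
            }
            return javaHome;
        }

        // the runtime JDK is optional and falls back to the compiler JDK
        static String runtimeJavaHome(final String compilerJavaHome) {
            final String runtime = System.getenv("RUNTIME_JAVA_HOME");
            return runtime != null ? runtime : compilerJavaHome;
        }
    }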
This enables: - developers only have to set one Java home (JAVA_HOME) - developers can set an optional Java home (RUNTIME_JAVA_HOME) to test on the minimum supported runtime - we can test compiling with JDK 9 running on JDK 8 and compiling with JDK 9 running on JDK 9 in CI --- CONTRIBUTING.md | 8 ++ .../elasticsearch/gradle/BuildPlugin.groovy | 108 +++++++++--------- .../gradle/plugin/PluginBuildPlugin.groovy | 2 +- .../gradle/precommit/JarHellTask.groovy | 2 +- .../gradle/precommit/LoggerUsageTask.groovy | 2 +- .../precommit/NamingConventionsTask.groovy | 2 +- .../gradle/test/ClusterFormationTasks.groovy | 4 +- .../elasticsearch/gradle/test/NodeInfo.groovy | 2 +- distribution/bwc/build.gradle | 23 ++-- plugins/discovery-azure-classic/build.gradle | 2 +- plugins/discovery-ec2/build.gradle | 6 +- plugins/discovery-gce/build.gradle | 2 +- plugins/ingest-attachment/build.gradle | 29 ++--- plugins/jvm-example/build.gradle | 2 +- plugins/repository-hdfs/build.gradle | 13 +-- qa/reindex-from-old/build.gradle | 5 +- 16 files changed, 105 insertions(+), 107 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 92ffa75de6c23..30e8261c87427 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -92,6 +92,14 @@ Contributing to the Elasticsearch codebase **Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch) +JDK 9 is required to build Elasticsearch. You must have a JDK 9 installation +with the environment variable `JAVA_HOME` referencing the path to Java home for +your JDK 9 installation. By default, tests use the same runtime as `JAVA_HOME`. +However, since Elasticsearch supports JDK 8, the build supports compiling with +JDK 9 and testing on a JDK 8 runtime; to do this, set `RUNTIME_JAVA_HOME` +pointing to the Java home of a JDK 8 installation. Note that this mechanism can +be used to test against other JDKs as well; it is not limited to JDK 8. + Elasticsearch uses the Gradle wrapper for its build. You can execute Gradle using the wrapper via the `gradlew` script in the root of the repository. diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 63012a2d99d13..269f4de196a91 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -56,7 +56,8 @@ import java.time.ZonedDateTime */ class BuildPlugin implements Plugin { - static final JavaVersion minimumJava = JavaVersion.VERSION_1_8 + static final JavaVersion minimumRuntimeVersion = JavaVersion.VERSION_1_8 + static final JavaVersion minimumCompilerVersion = JavaVersion.VERSION_1_9 @Override void apply(Project project) { @@ -93,20 +94,26 @@ class BuildPlugin implements Plugin { /** Performs checks on the build environment and prints information about the build environment.
*/ static void globalBuildInfo(Project project) { if (project.rootProject.ext.has('buildChecksDone') == false) { - String javaHome = findJavaHome() + String compilerJavaHome = findCompilerJavaHome() + String runtimeJavaHome = findRuntimeJavaHome(compilerJavaHome) File gradleJavaHome = Jvm.current().javaHome String javaVendor = System.getProperty('java.vendor') String javaVersion = System.getProperty('java.version') String gradleJavaVersionDetails = "${javaVendor} ${javaVersion}" + " [${System.getProperty('java.vm.name')} ${System.getProperty('java.vm.version')}]" - String javaVersionDetails = gradleJavaVersionDetails - JavaVersion javaVersionEnum = JavaVersion.current() - if (new File(javaHome).canonicalPath != gradleJavaHome.canonicalPath) { - javaVersionDetails = findJavaVersionDetails(project, javaHome) - javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome)) - javaVendor = findJavaVendor(project, javaHome) - javaVersion = findJavaVersion(project, javaHome) + String compilerJavaVersionDetails = gradleJavaVersionDetails + JavaVersion compilerJavaVersionEnum = JavaVersion.current() + if (new File(compilerJavaHome).canonicalPath != gradleJavaHome.canonicalPath) { + compilerJavaVersionDetails = findJavaVersionDetails(project, compilerJavaHome) + compilerJavaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, compilerJavaHome)) + } + + String runtimeJavaVersionDetails = gradleJavaVersionDetails + JavaVersion runtimeJavaVersionEnum = JavaVersion.current() + if (new File(runtimeJavaHome).canonicalPath != gradleJavaHome.canonicalPath) { + runtimeJavaVersionDetails = findJavaVersionDetails(project, runtimeJavaHome) + runtimeJavaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, runtimeJavaHome)) } // Build debugging info @@ -115,11 +122,13 @@ class BuildPlugin implements Plugin { println '=======================================' println " Gradle Version : ${project.gradle.gradleVersion}" println " OS Info : ${System.getProperty('os.name')} ${System.getProperty('os.version')} (${System.getProperty('os.arch')})" - if (gradleJavaVersionDetails != javaVersionDetails) { + if (gradleJavaVersionDetails != compilerJavaVersionDetails || gradleJavaVersionDetails != runtimeJavaVersionDetails) { println " JDK Version (gradle) : ${gradleJavaVersionDetails}" println " JAVA_HOME (gradle) : ${gradleJavaHome}" - println " JDK Version (compile) : ${javaVersionDetails}" - println " JAVA_HOME (compile) : ${javaHome}" + println " JDK Version (compile) : ${compilerJavaVersionDetails}" + println " JAVA_HOME (compile) : ${compilerJavaHome}" + println " JDK Version (runtime) : ${runtimeJavaVersionDetails}" + println " JAVA_HOME (runtime) : ${runtimeJavaHome}" } else { println " JDK Version : ${gradleJavaVersionDetails}" println " JAVA_HOME : ${gradleJavaHome}" @@ -135,54 +144,47 @@ class BuildPlugin implements Plugin { } // enforce Java version - if (javaVersionEnum < minimumJava) { - throw new GradleException("Java ${minimumJava} or above is required to build Elasticsearch") + if (compilerJavaVersionEnum < minimumCompilerVersion) { + throw new GradleException("Java ${minimumCompilerVersion} or above is required to build Elasticsearch") } - // this block of code detecting buggy JDK 8 compiler versions can be removed when minimum Java version is incremented - assert minimumJava == JavaVersion.VERSION_1_8 : "Remove JDK compiler bug detection only applicable to JDK 8" - if (javaVersionEnum == JavaVersion.VERSION_1_8) { - if 
(Objects.equals("Oracle Corporation", javaVendor)) { - def matcher = javaVersion =~ /1\.8\.0(?:_(\d+))?/ - if (matcher.matches()) { - int update; - if (matcher.group(1) == null) { - update = 0 - } else { - update = matcher.group(1).toInteger() - } - if (update < 40) { - throw new GradleException("JDK ${javaVendor} ${javaVersion} has compiler bug JDK-8052388, update your JDK to at least 8u40") - } - } - } + if (runtimeJavaVersionEnum < minimumRuntimeVersion) { + throw new GradleException("Java ${minimumRuntimeVersion} or above is required to run Elasticsearch") } - project.rootProject.ext.javaHome = javaHome - project.rootProject.ext.javaVersion = javaVersionEnum + project.rootProject.ext.compilerJavaHome = compilerJavaHome + project.rootProject.ext.runtimeJavaHome = runtimeJavaHome + project.rootProject.ext.compilerJavaVersion = compilerJavaVersionEnum + project.rootProject.ext.runtimeJavaVersion = runtimeJavaVersionEnum project.rootProject.ext.buildChecksDone = true } - project.targetCompatibility = minimumJava - project.sourceCompatibility = minimumJava + project.targetCompatibility = minimumRuntimeVersion + project.sourceCompatibility = minimumRuntimeVersion // set java home for each project, so they dont have to find it in the root project - project.ext.javaHome = project.rootProject.ext.javaHome - project.ext.javaVersion = project.rootProject.ext.javaVersion + project.ext.compilerJavaHome = project.rootProject.ext.compilerJavaHome + project.ext.runtimeJavaHome = project.rootProject.ext.runtimeJavaHome + project.ext.compilerJavaVersion = project.rootProject.ext.compilerJavaVersion + project.ext.runtimeJavaVersion = project.rootProject.ext.runtimeJavaVersion } - /** Finds and enforces JAVA_HOME is set */ - private static String findJavaHome() { - String javaHome = System.getenv('JAVA_HOME') + private static String findCompilerJavaHome() { + final String javaHome = System.getenv('JAVA_HOME') if (javaHome == null) { if (System.getProperty("idea.active") != null || System.getProperty("eclipse.launcher") != null) { - // intellij doesn't set JAVA_HOME, so we use the jdk gradle was run with - javaHome = Jvm.current().javaHome + // IntelliJ does not set JAVA_HOME, so we use the JDK that Gradle was run with + return Jvm.current().javaHome } else { - throw new GradleException('JAVA_HOME must be set to build Elasticsearch') + throw new GradleException("JAVA_HOME must be set to build Elasticsearch") } } return javaHome } + private static String findRuntimeJavaHome(final String compilerJavaHome) { + assert compilerJavaHome != null + return System.getenv('RUNTIME_JAVA_HOME') ?: compilerJavaHome + } + /** Finds printable java version of the given JAVA_HOME */ private static String findJavaVersionDetails(Project project, String javaHome) { String versionInfoScript = 'print(' + @@ -412,7 +414,7 @@ class BuildPlugin implements Plugin { /** Adds compiler settings to the project */ static void configureCompile(Project project) { - if (project.javaVersion < JavaVersion.VERSION_1_10) { + if (project.compilerJavaVersion < JavaVersion.VERSION_1_10) { project.ext.compactProfile = 'compact3' } else { project.ext.compactProfile = 'full' @@ -422,7 +424,7 @@ class BuildPlugin implements Plugin { File gradleJavaHome = Jvm.current().javaHome // we fork because compiling lots of different classes in a shared jvm can eventually trigger GC overhead limitations options.fork = true - options.forkOptions.executable = new File(project.javaHome, 'bin/javac') + options.forkOptions.javaHome = new 
File(project.compilerJavaHome) options.forkOptions.memoryMaximumSize = "1g" if (project.targetCompatibility >= JavaVersion.VERSION_1_8) { // compile with compact 3 profile by default @@ -447,22 +449,18 @@ class BuildPlugin implements Plugin { options.encoding = 'UTF-8' options.incremental = true - - if (project.javaVersion == JavaVersion.VERSION_1_9) { - // hack until gradle supports java 9's new "--release" arg - assert minimumJava == JavaVersion.VERSION_1_8 - options.compilerArgs << '--release' << '8' - } + // TODO: use native Gradle support for --release when available (cf. https://github.com/gradle/gradle/issues/2510) + options.compilerArgs << '--release' << project.targetCompatibility.majorVersion } } } static void configureJavadoc(Project project) { project.tasks.withType(Javadoc) { - executable = new File(project.javaHome, 'bin/javadoc') + executable = new File(project.compilerJavaHome, 'bin/javadoc') } configureJavadocJar(project) - if (project.javaVersion == JavaVersion.VERSION_1_10) { + if (project.compilerJavaVersion == JavaVersion.VERSION_1_10) { project.tasks.withType(Javadoc) { it.enabled = false } project.tasks.getByName('javadocJar').each { it.enabled = false } } @@ -508,7 +506,7 @@ class BuildPlugin implements Plugin { 'X-Compile-Lucene-Version': VersionProperties.lucene, 'X-Compile-Elasticsearch-Snapshot': isSnapshot, 'Build-Date': ZonedDateTime.now(ZoneOffset.UTC), - 'Build-Java-Version': project.javaVersion) + 'Build-Java-Version': project.compilerJavaVersion) if (jarTask.manifest.attributes.containsKey('Change') == false) { logger.warn('Building without git revision id.') jarTask.manifest.attributes('Change': 'Unknown') @@ -545,7 +543,7 @@ class BuildPlugin implements Plugin { /** Returns a closure of common configuration shared by unit and integration tests. 
*/ static Closure commonTestConfig(Project project) { return { - jvm "${project.javaHome}/bin/java" + jvm "${project.runtimeJavaHome}/bin/java" parallelism System.getProperty('tests.jvms', 'auto') ifNoTests 'fail' onNonEmptyWorkDirectory 'wipe' diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index c3be764269358..f342a68707ed6 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -169,7 +169,7 @@ public class PluginBuildPlugin extends BuildPlugin { Files.copy(jarFile.resolveSibling(sourcesFileName), jarFile.resolveSibling(clientSourcesFileName), StandardCopyOption.REPLACE_EXISTING) - if (project.javaVersion < JavaVersion.VERSION_1_10) { + if (project.compilerJavaVersion < JavaVersion.VERSION_1_10) { String javadocFileName = jarFile.fileName.toString().replace('.jar', '-javadoc.jar') String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar') Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName), diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy index f8eb0a63c96d7..656d5e0d35a9e 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy @@ -42,7 +42,7 @@ public class JarHellTask extends LoggedExec { inputs.files(classpath) dependsOn(classpath) description = "Runs CheckJarHell on ${classpath}" - executable = new File(project.javaHome, 'bin/java') + executable = new File(project.runtimeJavaHome, 'bin/java') doFirst({ /* JarHell doesn't like getting directories that don't exist but gradle isn't especially careful about that. 
So we have to do it diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy index 01ec6f7f5d3e2..87b73795604ab 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy @@ -44,7 +44,7 @@ public class LoggerUsageTask extends LoggedExec { project.afterEvaluate { dependsOn(classpath) description = "Runs LoggerUsageCheck on ${classDirectories}" - executable = new File(project.javaHome, 'bin/java') + executable = new File(project.runtimeJavaHome, 'bin/java') if (classDirectories == null) { // Default to main and test class files List files = [] diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy index 2711a0e38f23b..0feed8ccc4e04 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy @@ -80,7 +80,7 @@ public class NamingConventionsTask extends LoggedExec { FileCollection classpath = project.sourceSets.test.runtimeClasspath inputs.files(classpath) description = "Tests that test classes aren't misnamed or misplaced" - executable = new File(project.javaHome, 'bin/java') + executable = new File(project.runtimeJavaHome, 'bin/java') if (false == checkForTestsInMain) { /* This task is created by default for all subprojects with this * setting and there is no point in running it if the files don't diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 2b3b5abd82c9c..a64c39171a204 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -655,7 +655,7 @@ class ClusterFormationTasks { String pid = node.pidFile.getText('UTF-8') ByteArrayOutputStream output = new ByteArrayOutputStream() project.exec { - commandLine = ["${project.javaHome}/bin/jstack", pid] + commandLine = ["${project.runtimeJavaHome}/bin/jstack", pid] standardOutput = output } output.toString('UTF-8').eachLine { line -> logger.error("| ${line}") } @@ -699,7 +699,7 @@ class ClusterFormationTasks { } private static File getJpsExecutableByName(Project project, String jpsExecutableName) { - return Paths.get(project.javaHome.toString(), "bin/" + jpsExecutableName).toFile() + return Paths.get(project.runtimeJavaHome.toString(), "bin/" + jpsExecutableName).toFile() } /** Adds a task to kill an elasticsearch node with the given pidfile */ diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index 77da1c8ed7824..40a8ec230ac4e 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -162,7 +162,7 @@ class NodeInfo { args.add("${esScript}") } - env = ['JAVA_HOME': project.javaHome] + env = ['JAVA_HOME': project.runtimeJavaHome] args.addAll("-E", "node.portsfile=true") String collectedSystemProperties = config.systemProperties.collect { key, value -> 
"-D${key}=${value}" }.join(" ") String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 93f6ffe2c9b77..a9a7bd1e0a247 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -20,6 +20,7 @@ import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version + import java.util.regex.Matcher /** @@ -118,29 +119,31 @@ if (project.hasProperty('bwcVersion')) { task buildBwcVersion(type: Exec) { dependsOn checkoutBwcBranch, writeBuildMetadata workingDir = checkoutDir + if (project.rootProject.ext.runtimeJavaVersion == JavaVersion.VERSION_1_8 && ["5.6", "6.0", "6.1"].contains(bwcBranch)) { + /* + * If runtime Java home is set to JDK 8 and we are building branches that are officially built with JDK 8, push this to JAVA_HOME for + * these builds. + */ + environment('JAVA_HOME', System.getenv('RUNTIME_JAVA_HOME')) + } if (Os.isFamily(Os.FAMILY_WINDOWS)) { executable 'cmd' args '/C', 'call', new File(checkoutDir, 'gradlew').toString() } else { - executable = new File(checkoutDir, 'gradlew').toString() + executable new File(checkoutDir, 'gradlew').toString() } - final ArrayList commandLineArgs = [ - ":distribution:deb:assemble", - ":distribution:rpm:assemble", - ":distribution:zip:assemble", - "-Dbuild.snapshot=${System.getProperty('build.snapshot') ?: 'true'}"] + args ":distribution:deb:assemble", ":distribution:rpm:assemble", ":distribution:zip:assemble", "-Dbuild.snapshot=${System.getProperty('build.snapshot') ?: 'true'}" final LogLevel logLevel = gradle.startParameter.logLevel if ([LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG].contains(logLevel)) { - commandLineArgs << "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" + args "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" } final String showStacktraceName = gradle.startParameter.showStacktrace.name() assert ["INTERNAL_EXCEPTIONS", "ALWAYS", "ALWAYS_FULL"].contains(showStacktraceName) if (showStacktraceName.equals("ALWAYS")) { - commandLineArgs << "--stacktrace" + args "--stacktrace" } else if (showStacktraceName.equals("ALWAYS_FULL")) { - commandLineArgs << "--full-stacktrace" + args "--full-stacktrace" } - args commandLineArgs doLast { List missing = [bwcDeb, bwcRpm, bwcZip].grep { file -> false == file.exists() diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index e5ba37d9cb0d6..6f177f7b7f5b2 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -67,7 +67,7 @@ task createKey(type: LoggedExec) { project.delete(keystore.parentFile) keystore.parentFile.mkdirs() } - executable = new File(project.javaHome, 'bin/keytool') + executable = new File(project.runtimeJavaHome, 'bin/keytool') standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8')) args '-genkey', '-alias', 'test-node', diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 88f25f72e72f9..05dc07ba31ad6 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -75,13 +75,11 @@ thirdPartyAudit.excludes = [ 'software.amazon.ion.system.IonSystemBuilder', 'software.amazon.ion.system.IonTextWriterBuilder', 'software.amazon.ion.system.IonWriterBuilder', + 'javax.xml.bind.DatatypeConverter', + 
'javax.xml.bind.JAXBContext', 'javax.servlet.ServletContextEvent', 'javax.servlet.ServletContextListener', 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', 'org.apache.log.Logger', ] - -if (JavaVersion.current() > JavaVersion.VERSION_1_8) { - thirdPartyAudit.excludes += ['javax.xml.bind.DatatypeConverter'] -} diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index fa6f33a633cd4..82de9ba031b25 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -35,7 +35,7 @@ task createKey(type: LoggedExec) { project.delete(keystore.parentFile) keystore.parentFile.mkdirs() } - executable = new File(project.javaHome, 'bin/keytool') + executable = new File(project.runtimeJavaHome, 'bin/keytool') standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8')) args '-genkey', '-alias', 'test-node', diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index a57d8f880bcfc..3cd0311025044 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -495,6 +495,17 @@ thirdPartyAudit.excludes = [ 'de.l3s.boilerpipe.document.TextDocument', 'de.l3s.boilerpipe.extractors.DefaultExtractor', 'de.l3s.boilerpipe.sax.BoilerpipeHTMLContentHandler', + 'javax.activation.ActivationDataFlavor', + 'javax.activation.CommandMap', + 'javax.activation.DataContentHandler', + 'javax.activation.DataHandler', + 'javax.activation.DataSource', + 'javax.activation.FileDataSource', + 'javax.activation.MailcapCommandMap', + 'javax.xml.bind.DatatypeConverter', + 'javax.xml.bind.JAXBContext', + 'javax.xml.bind.JAXBElement', + 'javax.xml.bind.Unmarshaller', 'javax.mail.BodyPart', 'javax.mail.Header', 'javax.mail.Message$RecipientType', @@ -2091,21 +2102,3 @@ thirdPartyAudit.excludes = [ 'ucar.nc2.Variable', 'ucar.nc2.dataset.NetcdfDataset' ] - -if (JavaVersion.current() > JavaVersion.VERSION_1_8) { - thirdPartyAudit.excludes += [ - 'javax.activation.ActivationDataFlavor', - 'javax.activation.CommandMap', - 'javax.activation.DataContentHandler', - 'javax.activation.DataHandler', - 'javax.activation.DataSource', - 'javax.activation.FileDataSource', - 'javax.activation.MailcapCommandMap', - 'javax.xml.bind.DatatypeConverter', - 'javax.xml.bind.JAXBContext', - 'javax.xml.bind.JAXBElement', - 'javax.xml.bind.Marshaller', - 'javax.xml.bind.Unmarshaller', - 'javax.xml.bind.helpers.DefaultValidationEventHandler', - ] -} diff --git a/plugins/jvm-example/build.gradle b/plugins/jvm-example/build.gradle index 78e54d8bc817e..7a229a396f7d2 100644 --- a/plugins/jvm-example/build.gradle +++ b/plugins/jvm-example/build.gradle @@ -38,7 +38,7 @@ dependencies { task exampleFixture(type: org.elasticsearch.gradle.test.AntFixture) { dependsOn project.configurations.exampleFixture - executable = new File(project.javaHome, 'bin/java') + executable = new File(project.runtimeJavaHome, 'bin/java') args '-cp', "${ -> project.configurations.exampleFixture.asPath }", 'example.ExampleTestFixture', baseDir diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 19ca4c0148256..876741260f82b 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -114,7 +114,7 @@ for (String principal : principals) { for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', 'secureHaHdfsFixture']) { project.tasks.create(fixtureName, 
org.elasticsearch.gradle.test.AntFixture) { dependsOn project.configurations.hdfsFixture - executable = new File(project.javaHome, 'bin/java') + executable = new File(project.runtimeJavaHome, 'bin/java') env 'CLASSPATH', "${ -> project.configurations.hdfsFixture.asPath }" final List miniHDFSArgs = [] @@ -124,7 +124,7 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', dependsOn krb5kdcFixture, krb5AddPrincipals Path krb5Config = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf") miniHDFSArgs.add("-Djava.security.krb5.conf=${krb5Config}"); - if (project.rootProject.ext.javaVersion == JavaVersion.VERSION_1_9) { + if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) { miniHDFSArgs.add('--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED') } } @@ -170,7 +170,7 @@ project.afterEvaluate { restIntegTestTask.clusterConfig.extraConfigFile("repository-hdfs/krb5.keytab", "${elasticsearchKT}") jvmArgs = jvmArgs + " " + "-Djava.security.krb5.conf=${krb5conf}" - if (project.rootProject.ext.javaVersion == JavaVersion.VERSION_1_9) { + if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) { jvmArgs = jvmArgs + " " + '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED' } @@ -181,7 +181,7 @@ project.afterEvaluate { restIntegTestTaskRunner.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}" restIntegTestTaskRunner.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}" restIntegTestTaskRunner.jvmArg "-Djava.security.krb5.conf=${krb5conf}" - if (project.rootProject.ext.javaVersion == JavaVersion.VERSION_1_9) { + if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) { restIntegTestTaskRunner.jvmArg '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED' } @@ -353,6 +353,7 @@ thirdPartyAudit.excludes = [ 'io.netty.handler.stream.ChunkedWriteHandler', 'io.netty.util.concurrent.GlobalEventExecutor', 'io.netty.util.ReferenceCountUtil', + 'javax.xml.bind.annotation.adapters.HexBinaryAdapter', 'javax.ws.rs.core.Context', 'javax.ws.rs.core.MediaType', 'javax.ws.rs.core.MultivaluedMap', @@ -567,7 +568,3 @@ thirdPartyAudit.excludes = [ 'com.squareup.okhttp.Response', 'com.squareup.okhttp.ResponseBody' ] - -if (JavaVersion.current() > JavaVersion.VERSION_1_8) { - thirdPartyAudit.excludes += ['javax.xml.bind.annotation.adapters.HexBinaryAdapter'] -} diff --git a/qa/reindex-from-old/build.gradle b/qa/reindex-from-old/build.gradle index adff0361e29b8..c9388c42bf54a 100644 --- a/qa/reindex-from-old/build.gradle +++ b/qa/reindex-from-old/build.gradle @@ -51,7 +51,7 @@ dependencies { es090 'org.elasticsearch:elasticsearch:0.90.13@zip' } -if (project.javaVersion >= JavaVersion.VERSION_1_9 || Os.isFamily(Os.FAMILY_WINDOWS)) { +if (project.runtimeJavaVersion >= JavaVersion.VERSION_1_9 || Os.isFamily(Os.FAMILY_WINDOWS)) { /* We can't run the dependencies with Java 9 so for now we'll skip the whole * thing. We can't get the pid files in windows so we skip that as well.... 
*/ integTest.enabled = false @@ -73,8 +73,9 @@ if (project.javaVersion >= JavaVersion.VERSION_1_9 || Os.isFamily(Os.FAMILY_WIND type: org.elasticsearch.gradle.test.AntFixture) { dependsOn project.configurations.oldesFixture dependsOn unzip - executable = new File(project.javaHome, 'bin/java') + executable = new File(project.runtimeJavaHome, 'bin/java') env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }" + env 'JAVA_HOME', project.runtimeJavaHome args 'oldes.OldElasticsearch', baseDir, unzip.temporaryDir, From e5a698447b9a474e165ff78f74495c5a9fd1e454 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 16 Jan 2018 14:11:03 -0500 Subject: [PATCH 31/94] Move the multi-get response tests to server This test file was accidentally pushed to core instead of server. This commit moves this file to its proper location. --- .../java/org/elasticsearch/action/get/MultiGetResponseTests.java | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {core => server}/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java (100%) diff --git a/core/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java similarity index 100% rename from core/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java rename to server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java From 045dd4ad4870887666c610b880a7f1b98956575b Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 16 Jan 2018 15:10:29 -0500 Subject: [PATCH 32/94] Introduce multi-release JAR This commit introduces the ability for the core Elasticsearch JAR to be a multi-release JAR containing code that is compiled for JDK 8 and code that is compiled for JDK 9. At runtime, a JDK 8 JVM will ignore the JDK 9 compiled classfiles, and a JDK 9 JVM will use the JDK 9 compiled classfiles instead of the JDK 8 compiled classfiles. With this work, we utilize the new JDK 9 API for obtaining the PID of the running JVM, instead of relying on a hack. For now, we want to keep IDEs on JDK 8 so when the build is in an IDE we ignore the JDK 9 source set (as otherwise the IDE would give compilation errors). However, with this change, running Gradle from the command-line now requires JAVA_HOME and JAVA_9_HOME to be set. This will require follow-up work in our CI infrastructure and our release builds to accommodate this change. 
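Schematically, the shipped server JAR then carries both sets of classfiles (the layout follows the jar task configuration in the diff below; the version placeholder is illustrative):

    elasticsearch-<version>.jar
    ├── META-INF/MANIFEST.MF                            (contains "Multi-Release: true")
    ├── org/elasticsearch/monitor/jvm/JvmPid.class      (JDK 8 classfile, used by a JDK 8 JVM)
    └── META-INF/versions/9/
        └── org/elasticsearch/monitor/jvm/JvmPid.class  (JDK 9 classfile, preferred by a JDK 9 JVM)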
Relates #28051 --- .../elasticsearch/gradle/BuildPlugin.groovy | 9 ++-- server/build.gradle | 27 ++++++++++ .../elasticsearch/monitor/jvm/JvmInfo.java | 12 +---- .../org/elasticsearch/monitor/jvm/JvmPid.java | 49 +++++++++++++++++++ .../org/elasticsearch/monitor/jvm/JvmPid.java | 30 ++++++++++++ 5 files changed, 113 insertions(+), 14 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/monitor/jvm/JvmPid.java create mode 100644 server/src/main/java9/org/elasticsearch/monitor/jvm/JvmPid.java diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 269f4de196a91..0c76ce4fd3a24 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -158,8 +158,10 @@ class BuildPlugin implements Plugin { project.rootProject.ext.runtimeJavaVersion = runtimeJavaVersionEnum project.rootProject.ext.buildChecksDone = true } + project.targetCompatibility = minimumRuntimeVersion project.sourceCompatibility = minimumRuntimeVersion + // set java home for each project, so they dont have to find it in the root project project.ext.compilerJavaHome = project.rootProject.ext.compilerJavaHome project.ext.runtimeJavaHome = project.rootProject.ext.runtimeJavaHome @@ -421,12 +423,12 @@ class BuildPlugin implements Plugin { } project.afterEvaluate { project.tasks.withType(JavaCompile) { - File gradleJavaHome = Jvm.current().javaHome + final JavaVersion targetCompatibilityVersion = JavaVersion.toVersion(it.targetCompatibility) // we fork because compiling lots of different classes in a shared jvm can eventually trigger GC overhead limitations options.fork = true options.forkOptions.javaHome = new File(project.compilerJavaHome) options.forkOptions.memoryMaximumSize = "1g" - if (project.targetCompatibility >= JavaVersion.VERSION_1_8) { + if (targetCompatibilityVersion == JavaVersion.VERSION_1_8) { // compile with compact 3 profile by default // NOTE: this is just a compile time check: does not replace testing with a compact3 JRE if (project.compactProfile != 'full') { @@ -449,8 +451,9 @@ class BuildPlugin implements Plugin { options.encoding = 'UTF-8' options.incremental = true + // TODO: use native Gradle support for --release when available (cf. 
https://github.com/gradle/gradle/issues/2510) - options.compilerArgs << '--release' << project.targetCompatibility.majorVersion + options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion } } } diff --git a/server/build.gradle b/server/build.gradle index 4f69c2ee159b5..327f267ee8f59 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -36,6 +36,29 @@ publishing { archivesBaseName = 'elasticsearch' +// we want to keep the JDKs in our IDEs set to JDK 8 until minimum JDK is bumped to 9 so we do not include this source set in our IDEs +if (!isEclipse && !isIdea) { + sourceSets { + java9 { + java { + srcDirs = ['src/main/java9'] + } + } + } + + compileJava9Java { + sourceCompatibility = 9 + targetCompatibility = 9 + } + + jar { + into('META-INF/versions/9') { + from sourceSets.java9.output + } + manifest.attributes('Multi-Release': 'true') + } +} + dependencies { compile "org.elasticsearch:elasticsearch-core:${version}" @@ -94,6 +117,10 @@ dependencies { // repackaged jna with native bits linked against all elastic supported platforms compile "org.elasticsearch:jna:${versions.jna}" + if (!isEclipse && !isIdea) { + java9Compile sourceSets.main.output + } + if (isEclipse == false || project.path == ":server-tests") { testCompile("org.elasticsearch.test:framework:${version}") { // tests use the locally compiled version of server diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java index c52cce0780863..87e15b910f6ad 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java @@ -47,16 +47,6 @@ public class JvmInfo implements Writeable, ToXContentFragment { RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean(); MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean(); - // returns the @ - long pid; - String xPid = runtimeMXBean.getName(); - try { - xPid = xPid.split("@")[0]; - pid = Long.parseLong(xPid); - } catch (Exception e) { - pid = -1; - } - long heapInit = memoryMXBean.getHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getInit(); long heapMax = memoryMXBean.getHeapMemoryUsage().getMax() < 0 ? 0 : memoryMXBean.getHeapMemoryUsage().getMax(); long nonHeapInit = memoryMXBean.getNonHeapMemoryUsage().getInit() < 0 ? 0 : memoryMXBean.getNonHeapMemoryUsage().getInit(); @@ -160,7 +150,7 @@ public class JvmInfo implements Writeable, ToXContentFragment { } - INSTANCE = new JvmInfo(pid, System.getProperty("java.version"), runtimeMXBean.getVmName(), runtimeMXBean.getVmVersion(), + INSTANCE = new JvmInfo(JvmPid.getPid(), System.getProperty("java.version"), runtimeMXBean.getVmName(), runtimeMXBean.getVmVersion(), runtimeMXBean.getVmVendor(), runtimeMXBean.getStartTime(), configuredInitialHeapSize, configuredMaxHeapSize, mem, inputArguments, bootClassPath, classPath, systemProperties, gcCollectors, memoryPools, onError, onOutOfMemoryError, useCompressedOops, useG1GC, useSerialGC); diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmPid.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmPid.java new file mode 100644 index 0000000000000..2b1b2a1df478a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmPid.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.monitor.jvm; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.logging.Loggers; + +import java.lang.management.ManagementFactory; + +class JvmPid { + + private static final long PID; + + static long getPid() { + return PID; + } + + static { + PID = initializePid(); + } + + private static long initializePid() { + final String name = ManagementFactory.getRuntimeMXBean().getName(); + try { + return Long.parseLong(name.split("@")[0]); + } catch (final NumberFormatException e) { + Loggers.getLogger(JvmPid.class).debug(new ParameterizedMessage("failed parsing PID from [{}]", name), e); + return -1; + } + } + +} diff --git a/server/src/main/java9/org/elasticsearch/monitor/jvm/JvmPid.java b/server/src/main/java9/org/elasticsearch/monitor/jvm/JvmPid.java new file mode 100644 index 0000000000000..5ce8959601798 --- /dev/null +++ b/server/src/main/java9/org/elasticsearch/monitor/jvm/JvmPid.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.monitor.jvm; + +import java.lang.ProcessHandle; + +class JvmPid { + + static long getPid() { + return ProcessHandle.current().pid(); + } + +} From 4ec0569a19a03667c264948ad3b2e648fe5466d7 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 16 Jan 2018 16:40:03 -0500 Subject: [PATCH 33/94] Handle 5.6.6 and 6.1.2 release Add new version constants for 5.6.6 and 6.1.2 release. 
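The numeric ids added below appear to follow the usual encoding of major * 1,000,000 + minor * 10,000 + revision * 100 + build, with 99 marking a release build. A minimal Java sketch of decoding such an id (an illustrative helper, not part of this commit):

    // Illustrative decoder for ids such as V_5_6_7_ID = 5060799.
    static String describeVersionId(final int id) {
        final int major = id / 1_000_000;
        final int minor = (id / 10_000) % 100;
        final int revision = (id / 100) % 100;
        final int build = id % 100; // 99 denotes a release build
        return major + "." + minor + "." + revision + " (build " + build + ")";
    }
    // describeVersionId(5060799) -> "5.6.7 (build 99)"
    // describeVersionId(6010399) -> "6.1.3 (build 99)"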
--- server/src/main/java/org/elasticsearch/Version.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 8a4bc0752be3f..df748e7959254 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -109,6 +109,8 @@ public class Version implements Comparable { public static final Version V_5_6_5 = new Version(V_5_6_5_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); public static final int V_5_6_6_ID = 5060699; public static final Version V_5_6_6 = new Version(V_5_6_6_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); + public static final int V_5_6_7_ID = 5060799; + public static final Version V_5_6_7 = new Version(V_5_6_7_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); @@ -142,6 +144,8 @@ public class Version implements Comparable { public static final Version V_6_1_1 = new Version(V_6_1_1_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_1_2_ID = 6010299; public static final Version V_6_1_2 = new Version(V_6_1_2_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); + public static final int V_6_1_3_ID = 6010399; + public static final Version V_6_1_3 = new Version(V_6_1_3_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_2_0_ID = 6020099; public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final int V_7_0_0_alpha1_ID = 7000001; @@ -164,6 +168,8 @@ public static Version fromId(int id) { return V_7_0_0_alpha1; case V_6_2_0_ID: return V_6_2_0; + case V_6_1_3_ID: + return V_6_1_3; case V_6_1_2_ID: return V_6_1_2; case V_6_1_1_ID: @@ -188,6 +194,8 @@ public static Version fromId(int id) { return V_6_0_0_alpha2; case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; + case V_5_6_7_ID: + return V_5_6_7; case V_5_6_6_ID: return V_5_6_6; case V_5_6_5_ID: From f2cd580332f68146c702190ca159fe3b46491529 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Tue, 16 Jan 2018 16:34:44 -0700 Subject: [PATCH 34/94] Remove duplicated javadoc `fieldType` param --- .../org/elasticsearch/index/mapper/MetadataFieldMapper.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java index 0833e8f33f30f..264c2abd56820 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java @@ -37,8 +37,6 @@ public interface TypeParser extends Mapper.TypeParser { /** * Get the default {@link MetadataFieldMapper} to use, if nothing had to be parsed. - * @param fieldType null if this is the first root mapper on this index, the existing - * fieldType for this index otherwise * @param fieldType the existing field type for this meta mapper on the current index * or null if this is the first type being introduced * @param parserContext context that may be useful to build the field like analyzers From aded32f48fc8dae8c67736e7c043a5aea665cfcd Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 16 Jan 2018 22:59:29 -0500 Subject: [PATCH 35/94] Fix third-party audit tasks on JDK 8 This one is interesting. 
The third party audit task runs inside the Gradle JVM. This means that if Gradle is started on JDK 8, the third party audit tasks will fail as a result of the changes to support building Elasticsearch with the JDK 9 compiler. This commit reverts the third party audit changes to support running this task when Gradle is started with JDK 8. Relates #28256 --- plugins/discovery-ec2/build.gradle | 9 +++++++-- plugins/ingest-attachment/build.gradle | 27 +++++++++++++++----------- plugins/repository-hdfs/build.gradle | 5 ++++- 3 files changed, 27 insertions(+), 14 deletions(-) diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 05dc07ba31ad6..7daf944f81898 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -75,11 +75,16 @@ thirdPartyAudit.excludes = [ 'software.amazon.ion.system.IonSystemBuilder', 'software.amazon.ion.system.IonTextWriterBuilder', 'software.amazon.ion.system.IonWriterBuilder', - 'javax.xml.bind.DatatypeConverter', - 'javax.xml.bind.JAXBContext', 'javax.servlet.ServletContextEvent', 'javax.servlet.ServletContextListener', 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', 'org.apache.log.Logger', ] + +if (JavaVersion.current() > JavaVersion.VERSION_1_8) { + thirdPartyAudit.excludes += [ + 'javax.xml.bind.DatatypeConverter', + 'javax.xml.bind.JAXBContext' + ] +} diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 3cd0311025044..3bca078bd59c4 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -495,17 +495,6 @@ thirdPartyAudit.excludes = [ 'de.l3s.boilerpipe.document.TextDocument', 'de.l3s.boilerpipe.extractors.DefaultExtractor', 'de.l3s.boilerpipe.sax.BoilerpipeHTMLContentHandler', - 'javax.activation.ActivationDataFlavor', - 'javax.activation.CommandMap', - 'javax.activation.DataContentHandler', - 'javax.activation.DataHandler', - 'javax.activation.DataSource', - 'javax.activation.FileDataSource', - 'javax.activation.MailcapCommandMap', - 'javax.xml.bind.DatatypeConverter', - 'javax.xml.bind.JAXBContext', - 'javax.xml.bind.JAXBElement', - 'javax.xml.bind.Unmarshaller', 'javax.mail.BodyPart', 'javax.mail.Header', 'javax.mail.Message$RecipientType', @@ -2102,3 +2091,19 @@ thirdPartyAudit.excludes = [ 'ucar.nc2.Variable', 'ucar.nc2.dataset.NetcdfDataset' ] + +if (JavaVersion.current() > JavaVersion.VERSION_1_8) { + thirdPartyAudit.excludes += [ + 'javax.activation.ActivationDataFlavor', + 'javax.activation.CommandMap', + 'javax.activation.DataContentHandler', + 'javax.activation.DataHandler', + 'javax.activation.DataSource', + 'javax.activation.FileDataSource', + 'javax.activation.MailcapCommandMap', + 'javax.xml.bind.DatatypeConverter', + 'javax.xml.bind.JAXBContext', + 'javax.xml.bind.JAXBElement', + 'javax.xml.bind.Unmarshaller' + ] +} diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 876741260f82b..631157a7e175b 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -353,7 +353,6 @@ thirdPartyAudit.excludes = [ 'io.netty.handler.stream.ChunkedWriteHandler', 'io.netty.util.concurrent.GlobalEventExecutor', 'io.netty.util.ReferenceCountUtil', - 'javax.xml.bind.annotation.adapters.HexBinaryAdapter', 'javax.ws.rs.core.Context', 'javax.ws.rs.core.MediaType', 'javax.ws.rs.core.MultivaluedMap', @@ -568,3 +567,7 @@ thirdPartyAudit.excludes = [ 'com.squareup.okhttp.Response', 'com.squareup.okhttp.ResponseBody' ] 
+ +if (JavaVersion.current() > JavaVersion.VERSION_1_8) { + thirdPartyAudit.excludes += ['javax.xml.bind.annotation.adapters.HexBinaryAdapter'] +} From d32cb8089b3d29693f050064fab4b6d2c34b00b6 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Wed, 17 Jan 2018 09:14:44 +0100 Subject: [PATCH 36/94] Tests: Decrease log level for adding a header value (#28246) This logging message adds considerable noise to many REST tests, if you are using something like HTTP basic auth in every API call or set any custom header. The log level moves from info to debug, so can still be seen if wanted. --- .../org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index 6e2f43ae75281..01fd3bad0f3e1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -165,7 +165,7 @@ public ClientYamlTestResponse callApi(String apiName, Map params Header[] requestHeaders = new Header[headers.size()]; int index = 0; for (Map.Entry header : headers.entrySet()) { - logger.info("Adding header {} with value {}", header.getKey(), header.getValue()); + logger.debug("Adding header {} with value {}", header.getKey(), header.getValue()); requestHeaders[index++] = new BasicHeader(header.getKey(), header.getValue()); } From b98514c6d905e70853d5316f823df38974f98739 Mon Sep 17 00:00:00 2001 From: olcbean <26058559+olcbean@users.noreply.github.com> Date: Wed, 17 Jan 2018 11:47:08 +0100 Subject: [PATCH 37/94] Add Close Index API to the high level REST client (#27734) Add support for _close endpoint to the high level REST client Relates to #27205 --- .../elasticsearch/client/IndicesClient.java | 35 ++++- .../org/elasticsearch/client/Request.java | 13 ++ .../client/RestHighLevelClient.java | 2 - .../elasticsearch/client/IndicesClientIT.java | 78 +++++----- .../elasticsearch/client/RequestTests.java | 56 ++++--- .../IndicesClientDocumentationIT.java | 138 ++++++++++++++++++ .../high-level/apis/close_index.asciidoc | 70 +++++++++ .../high-level/apis/createindex.asciidoc | 8 +- docs/java-rest/high-level/apis/index.asciidoc | 13 ++ .../high-level/apis/open_index.asciidoc | 81 ++++++++++ .../high-level/supported-apis.asciidoc | 2 + .../indices/close/CloseIndexResponse.java | 24 ++- .../close/TransportCloseIndexAction.java | 1 - .../admin/indices/open/OpenIndexResponse.java | 2 +- .../close/CloseIndexResponseTests.java | 60 ++++++++ 15 files changed, 512 insertions(+), 71 deletions(-) create mode 100644 docs/java-rest/high-level/apis/close_index.asciidoc create mode 100644 docs/java-rest/high-level/apis/open_index.asciidoc create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 57dafbba50994..4940267e85c22 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -21,6 +21,8 @@ import org.apache.http.Header; import org.elasticsearch.action.ActionListener; +import 
org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; @@ -51,7 +53,7 @@ public final class IndicesClient { */ public DeleteIndexResponse deleteIndex(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent, - Collections.emptySet(), headers); + Collections.emptySet(), headers); } /** @@ -60,10 +62,9 @@ public DeleteIndexResponse deleteIndex(DeleteIndexRequest deleteIndexRequest, He * See * Delete Index API on elastic.co */ - public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, - Header... headers) { + public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, Collections.emptySet(), headers); } /** @@ -83,10 +84,9 @@ public CreateIndexResponse createIndex(CreateIndexRequest createIndexRequest, He * See * Create Index API on elastic.co */ - public void createIndexAsync(CreateIndexRequest createIndexRequest, ActionListener listener, - Header... headers) { + public void createIndexAsync(CreateIndexRequest createIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, Collections.emptySet(), headers); } /** @@ -111,4 +111,25 @@ public void openIndexAsync(OpenIndexRequest openIndexRequest, ActionListener + * See + * Close Index API on elastic.co + */ + public CloseIndexResponse closeIndex(CloseIndexRequest closeIndexRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent, + Collections.emptySet(), headers); + } + + /** + * Asynchronously closes an index using the Close Index API + *
+ * See + * Close Index API on elastic.co + */ + public void closeIndexAsync(CloseIndexRequest closeIndexRequest, ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent, + listener, Collections.emptySet(), headers); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index d35db1c637d4c..e55204c3d9473 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -29,6 +29,7 @@ import org.apache.http.entity.ContentType; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -153,6 +154,18 @@ static Request openIndex(OpenIndexRequest openIndexRequest) { return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); } + static Request closeIndex(CloseIndexRequest closeIndexRequest) { + String endpoint = endpoint(closeIndexRequest.indices(), Strings.EMPTY_ARRAY, "_close"); + + Params parameters = Params.builder(); + + parameters.withTimeout(closeIndexRequest.timeout()); + parameters.withMasterTimeout(closeIndexRequest.masterNodeTimeout()); + parameters.withIndicesOptions(closeIndexRequest.indicesOptions()); + + return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); + } + static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException { String endpoint = endpoint(createIndexRequest.indices(), Strings.EMPTY_ARRAY, ""); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index cad7449c689ca..9fb53a54d8c0a 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -26,8 +26,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; -import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 5f356c4c29f5e..361b60a5218cf 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -21,6 +21,8 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import 
org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; @@ -28,21 +30,18 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.rest.RestStatus; - -import java.io.IOException; -import java.util.Locale; - -import static org.hamcrest.Matchers.equalTo; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.rest.RestStatus; +import java.io.IOException; import java.util.Map; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.hamcrest.Matchers.equalTo; public class IndicesClientIT extends ESRestHighLevelClientTestCase { @@ -136,57 +135,68 @@ public void testDeleteIndex() throws IOException { } public void testOpenExistingIndex() throws IOException { - String[] indices = randomIndices(1, 5); - for (String index : indices) { - createIndex(index); - closeIndex(index); - ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search")); - assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus())); - assertThat(exception.getMessage().contains(index), equalTo(true)); - } - - OpenIndexRequest openIndexRequest = new OpenIndexRequest(indices); + String index = "index"; + createIndex(index); + closeIndex(index); + ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search")); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus())); + assertThat(exception.getMessage().contains(index), equalTo(true)); + + OpenIndexRequest openIndexRequest = new OpenIndexRequest(index); OpenIndexResponse openIndexResponse = execute(openIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync); assertTrue(openIndexResponse.isAcknowledged()); - for (String index : indices) { - Response response = client().performRequest("GET", index + "/_search"); - assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); - } + Response response = client().performRequest("GET", index + "/_search"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); } public void testOpenNonExistentIndex() throws IOException { - String[] nonExistentIndices = randomIndices(1, 5); - for (String nonExistentIndex : nonExistentIndices) { - assertFalse(indexExists(nonExistentIndex)); - } + String nonExistentIndex = "non_existent_index"; + assertFalse(indexExists(nonExistentIndex)); - OpenIndexRequest openIndexRequest = new OpenIndexRequest(nonExistentIndices); + OpenIndexRequest openIndexRequest = new OpenIndexRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(openIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); - 
OpenIndexRequest lenientOpenIndexRequest = new OpenIndexRequest(nonExistentIndices); + OpenIndexRequest lenientOpenIndexRequest = new OpenIndexRequest(nonExistentIndex); lenientOpenIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); OpenIndexResponse lenientOpenIndexResponse = execute(lenientOpenIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync); assertThat(lenientOpenIndexResponse.isAcknowledged(), equalTo(true)); - OpenIndexRequest strictOpenIndexRequest = new OpenIndexRequest(nonExistentIndices); + OpenIndexRequest strictOpenIndexRequest = new OpenIndexRequest(nonExistentIndex); strictOpenIndexRequest.indicesOptions(IndicesOptions.strictExpandOpen()); ElasticsearchException strictException = expectThrows(ElasticsearchException.class, () -> execute(openIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync)); assertEquals(RestStatus.NOT_FOUND, strictException.status()); } - private static String[] randomIndices(int minIndicesNum, int maxIndicesNum) { - int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum); - String[] indices = new String[numIndices]; - for (int i = 0; i < numIndices; i++) { - indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); - } - return indices; + public void testCloseExistingIndex() throws IOException { + String index = "index"; + createIndex(index); + Response response = client().performRequest("GET", index + "/_search"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + + CloseIndexRequest closeIndexRequest = new CloseIndexRequest(index); + CloseIndexResponse closeIndexResponse = execute(closeIndexRequest, highLevelClient().indices()::closeIndex, + highLevelClient().indices()::closeIndexAsync); + assertTrue(closeIndexResponse.isAcknowledged()); + + ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search")); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus())); + assertThat(exception.getMessage().contains(index), equalTo(true)); + } + + public void testCloseNonExistentIndex() throws IOException { + String nonExistentIndex = "non_existent_index"; + assertFalse(indexExists(nonExistentIndex)); + + CloseIndexRequest closeIndexRequest = new CloseIndexRequest(nonExistentIndex); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> execute(closeIndexRequest, highLevelClient().indices()::closeIndex, highLevelClient().indices()::closeIndexAsync)); + assertEquals(RestStatus.NOT_FOUND, exception.status()); } private static void createIndex(String index) throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index acb27fff7e2ef..bfc868707a8c3 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -25,6 +25,7 @@ import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import 
org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -325,17 +326,10 @@ public void testCreateIndex() throws IOException { } public void testDeleteIndex() { - DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(); - - int numIndices = randomIntBetween(0, 5); - String[] indices = new String[numIndices]; - for (int i = 0; i < numIndices; i++) { - indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5); - } - deleteIndexRequest.indices(indices); + String[] indices = randomIndicesNames(0, 5); + DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indices); Map expectedParams = new HashMap<>(); - setRandomTimeout(deleteIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); setRandomMasterTimeout(deleteIndexRequest, expectedParams); @@ -349,12 +343,8 @@ public void testDeleteIndex() { } public void testOpenIndex() { - OpenIndexRequest openIndexRequest = new OpenIndexRequest(); - int numIndices = randomIntBetween(1, 5); - String[] indices = new String[numIndices]; - for (int i = 0; i < numIndices; i++) { - indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5); - } + String[] indices = randomIndicesNames(1, 5); + OpenIndexRequest openIndexRequest = new OpenIndexRequest(indices); openIndexRequest.indices(indices); Map expectedParams = new HashMap<>(); @@ -371,6 +361,23 @@ public void testOpenIndex() { assertThat(request.getEntity(), nullValue()); } + public void testCloseIndex() { + String[] indices = randomIndicesNames(1, 5); + CloseIndexRequest closeIndexRequest = new CloseIndexRequest(indices); + + Map expectedParams = new HashMap<>(); + setRandomTimeout(closeIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + setRandomMasterTimeout(closeIndexRequest, expectedParams); + setRandomIndicesOptions(closeIndexRequest::indicesOptions, closeIndexRequest::indicesOptions, expectedParams); + + Request request = Request.closeIndex(closeIndexRequest); + StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_close"); + assertThat(endpoint.toString(), equalTo(request.getEndpoint())); + assertThat(expectedParams, equalTo(request.getParameters())); + assertThat(request.getMethod(), equalTo("POST")); + assertThat(request.getEntity(), nullValue()); + } + public void testIndex() throws IOException { String index = randomAlphaOfLengthBetween(3, 10); String type = randomAlphaOfLengthBetween(3, 10); @@ -748,13 +755,9 @@ public void testBulkWithDifferentContentTypes() throws IOException { } public void testSearch() throws Exception { - SearchRequest searchRequest = new SearchRequest(); - int numIndices = randomIntBetween(0, 5); - String[] indices = new String[numIndices]; - for (int i = 0; i < numIndices; i++) { - indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5); - } - searchRequest.indices(indices); + String[] indices = randomIndicesNames(0, 5); + SearchRequest searchRequest = new SearchRequest(indices); + int numTypes = randomIntBetween(0, 5); String[] types = new String[numTypes]; for (int i = 0; i < numTypes; i++) { @@ -1130,4 +1133,13 @@ private static String randomFields(String[] fields) { } return excludesParam.toString(); } + + private static String[] randomIndicesNames(int minIndicesNum, int maxIndicesNum) { + int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum); + String[] indices = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); + } + return indices; + } } diff 
--git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 42d19fab82fe9..dd7e53eaa954c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -22,10 +22,14 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; +import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ESRestHighLevelClientTestCase; @@ -224,4 +228,138 @@ public void onFailure(Exception e) { } } + public void testOpenIndex() throws IOException { + RestHighLevelClient client = highLevelClient(); + + { + CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("index")); + assertTrue(createIndexResponse.isAcknowledged()); + } + + { + // tag::open-index-request + OpenIndexRequest request = new OpenIndexRequest("index"); // <1> + // end::open-index-request + + // tag::open-index-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::open-index-request-timeout + // tag::open-index-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::open-index-request-masterTimeout + // tag::open-index-request-waitForActiveShards + request.waitForActiveShards(2); // <1> + request.waitForActiveShards(ActiveShardCount.DEFAULT); // <2> + // end::open-index-request-waitForActiveShards + + + // tag::open-index-request-indicesOptions + request.indicesOptions(IndicesOptions.strictExpandOpen()); // <1> + // end::open-index-request-indicesOptions + + // tag::open-index-execute + OpenIndexResponse openIndexResponse = client.indices().openIndex(request); + // end::open-index-execute + + // tag::open-index-response + boolean acknowledged = openIndexResponse.isAcknowledged(); // <1> + boolean shardsAcked = openIndexResponse.isShardsAcknowledged(); // <2> + // end::open-index-response + assertTrue(acknowledged); + assertTrue(shardsAcked); + + // tag::open-index-execute-async + client.indices().openIndexAsync(request, new ActionListener() { + @Override + public void onResponse(OpenIndexResponse openIndexResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }); + // end::open-index-execute-async + } + + { + // tag::open-index-notfound + try { + OpenIndexRequest request = new OpenIndexRequest("does_not_exist"); + client.indices().openIndex(request); + } catch (ElasticsearchException exception) { + if (exception.status() == 
RestStatus.BAD_REQUEST) { + // <1> + } + } + // end::open-index-notfound + } + } + + public void testCloseIndex() throws IOException { + RestHighLevelClient client = highLevelClient(); + + { + CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("index")); + assertTrue(createIndexResponse.isAcknowledged()); + } + + { + // tag::close-index-request + CloseIndexRequest request = new CloseIndexRequest("index"); // <1> + // end::close-index-request + + // tag::close-index-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::close-index-request-timeout + // tag::close-index-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::close-index-request-masterTimeout + + // tag::close-index-request-indicesOptions + request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::close-index-request-indicesOptions + + // tag::close-index-execute + CloseIndexResponse closeIndexResponse = client.indices().closeIndex(request); + // end::close-index-execute + + // tag::close-index-response + boolean acknowledged = closeIndexResponse.isAcknowledged(); // <1> + // end::close-index-response + assertTrue(acknowledged); + + // tag::close-index-execute-async + client.indices().closeIndexAsync(request, new ActionListener<CloseIndexResponse>() { + @Override + public void onResponse(CloseIndexResponse closeIndexResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }); + // end::close-index-execute-async + } + + { + // tag::close-index-notfound + try { + CloseIndexRequest request = new CloseIndexRequest("does_not_exist"); + client.indices().closeIndex(request); + } catch (ElasticsearchException exception) { + if (exception.status() == RestStatus.BAD_REQUEST) { + // <1> + } + } + // end::close-index-notfound + } + } } diff --git a/docs/java-rest/high-level/apis/close_index.asciidoc b/docs/java-rest/high-level/apis/close_index.asciidoc new file mode 100644 index 0000000000000..a4d0f6383532e --- /dev/null +++ b/docs/java-rest/high-level/apis/close_index.asciidoc @@ -0,0 +1,70 @@ +[[java-rest-high-close-index]] +=== Close Index API + +[[java-rest-high-close-index-request]] +==== Close Index Request + +A `CloseIndexRequest` requires an `index` argument: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request] +-------------------------------------------------- +<1> The index to close + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for all the nodes to acknowledge the index is closed +as a `TimeValue` +<2> Timeout to wait for all the nodes to acknowledge the index is closed +as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + 
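The request options above compose into a short end-to-end flow. The sketch below is
illustrative only (it is not one of the tested, include-tagged snippets) and assumes an
already connected `RestHighLevelClient` named `client` and an existing index named `"index"`:

["source","java"]
--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.TimeValue;

public class CloseIndexExample {
    // Closes "index" and reports whether all nodes acknowledged the close in time.
    static boolean closeIndex(RestHighLevelClient client) throws IOException {
        CloseIndexRequest request = new CloseIndexRequest("index"); // index to close
        request.timeout(TimeValue.timeValueMinutes(2));             // acknowledgement timeout
        request.masterNodeTimeout(TimeValue.timeValueMinutes(1));   // master connection timeout
        CloseIndexResponse response = client.indices().closeIndex(request);
        return response.isAcknowledged();
    }
}
--------------------------------------------------

Note that a `false` return here generally means the timeout expired before every node
acknowledged; the close may still complete afterwards.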
+["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +[[java-rest-high-close-index-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-execute] +-------------------------------------------------- + +[[java-rest-high-close-index-async]] +==== Asynchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-execute-async] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument + +[[java-rest-high-close-index-response]] +==== Close Index Response + +The returned `CloseIndexResponse` allows you to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[close-index-response] +-------------------------------------------------- +<1> Indicates whether all of the nodes have acknowledged the request diff --git a/docs/java-rest/high-level/apis/createindex.asciidoc b/docs/java-rest/high-level/apis/createindex.asciidoc index ebd9158e19387..bfc7794c8f9a0 100644 --- a/docs/java-rest/high-level/apis/createindex.asciidoc +++ b/docs/java-rest/high-level/apis/createindex.asciidoc @@ -48,7 +48,7 @@ The following arguments can optionally be provided: include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-request-timeout] -------------------------------------------------- <1> Timeout to wait for the all the nodes to acknowledge the index creation as a `TimeValue` -<2> Timeout to wait for the all the nodes to acknowledge the index creatiom as a `String` +<2> Timeout to wait for the all the nodes to acknowledge the index creation as a `String` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -61,8 +61,10 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-reque -------------------------------------------------- include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[create-index-request-waitForActiveShards] -------------------------------------------------- -<1> The number of active shard copies to wait for before proceeding with the operation, as an `int`. -<2> The number of active shard copies to wait for before proceeding with the operation, as an `ActiveShardCount`. +<1> The number of active shard copies to wait for before the create index API returns a +response, as an `int`. +<2> The number of active shard copies to wait for before the create index API returns a +response, as an `ActiveShardCount`. 
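The `int` and `ActiveShardCount` variants express the same setting. The following fragment
is an illustrative sketch, not one of the tested snippets; it assumes `request` is a
`CreateIndexRequest` and that `ActiveShardCount.from(int)` mirrors the plain `int` overload:

["source","java"]
--------------------------------------------------
// Wait for two active copies of each shard (primary plus one replica).
request.waitForActiveShards(2);
// Equivalent, via the ActiveShardCount abstraction.
request.waitForActiveShards(ActiveShardCount.from(2));
// Named constants cover the special cases: ALL waits for every configured
// copy, DEFAULT falls back to one active copy per shard (the primary).
request.waitForActiveShards(ActiveShardCount.ALL);
request.waitForActiveShards(ActiveShardCount.DEFAULT);
--------------------------------------------------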
[[java-rest-high-create-index-sync]] ==== Synchronous Execution diff --git a/docs/java-rest/high-level/apis/index.asciidoc b/docs/java-rest/high-level/apis/index.asciidoc index 2312f28372060..f6da998a8476f 100644 --- a/docs/java-rest/high-level/apis/index.asciidoc +++ b/docs/java-rest/high-level/apis/index.asciidoc @@ -1,10 +1,23 @@ include::createindex.asciidoc[] + include::deleteindex.asciidoc[] + +include::open_index.asciidoc[] + +include::close_index.asciidoc[] + include::_index.asciidoc[] + include::get.asciidoc[] + include::delete.asciidoc[] + include::update.asciidoc[] + include::bulk.asciidoc[] + include::search.asciidoc[] + include::scroll.asciidoc[] + include::main.asciidoc[] diff --git a/docs/java-rest/high-level/apis/open_index.asciidoc b/docs/java-rest/high-level/apis/open_index.asciidoc new file mode 100644 index 0000000000000..a30e62123a814 --- /dev/null +++ b/docs/java-rest/high-level/apis/open_index.asciidoc @@ -0,0 +1,81 @@ +[[java-rest-high-open-index]] +=== Open Index API + +[[java-rest-high-open-index-request]] +==== Open Index Request + +An `OpenIndexRequest` requires an `index` argument: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request] +-------------------------------------------------- +<1> The index to open + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for all the nodes to acknowledge the index is opened +as a `TimeValue` +<2> Timeout to wait for all the nodes to acknowledge the index is opened +as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-waitForActiveShards] +-------------------------------------------------- +<1> The number of active shard copies to wait for before the open index API +returns a response, as an `int`. +<2> The number of active shard copies to wait for before the open index API +returns a response, as an `ActiveShardCount`. 
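Because opening an index starts its shards, the wait-for-active-shards value determines
when the shards-acknowledged flag of the response (shown below) is set. A hedged sketch,
not a tested snippet, assuming a connected `RestHighLevelClient` named `client`:

["source","java"]
--------------------------------------------------
OpenIndexRequest request = new OpenIndexRequest("index");
// Consider the open complete once each shard has at least one active copy.
request.waitForActiveShards(ActiveShardCount.ONE);

OpenIndexResponse response = client.indices().openIndex(request);
boolean acknowledged = response.isAcknowledged();      // nodes accepted the cluster state change
boolean shardsAcked = response.isShardsAcknowledged(); // requested copies started before the timeout
--------------------------------------------------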
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +[[java-rest-high-open-index-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-execute] +-------------------------------------------------- + +[[java-rest-high-open-index-async]] +==== Asynchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-execute-async] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument + +[[java-rest-high-open-index-response]] +==== Open Index Response + +The returned `OpenIndexResponse` allows you to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[open-index-response] +-------------------------------------------------- +<1> Indicates whether all of the nodes have acknowledged the request +<2> Indicates whether the requisite number of shard copies were started for +each shard in the index before timing out diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 7a6b55619f77a..fa71b62d64e70 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -6,6 +6,8 @@ The Java High Level REST Client supports the following APIs: Indices APIs:: * <> * <> +* <<java-rest-high-open-index>> +* <<java-rest-high-close-index>> Single document APIs:: * <> diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java index b85962c0f55ed..4607586d9fa91 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java @@ -22,13 +22,23 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; /** * A response for a close index action. 
*/ -public class CloseIndexResponse extends AcknowledgedResponse { +public class CloseIndexResponse extends AcknowledgedResponse implements ToXContentObject { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("close_index", true, + args -> new CloseIndexResponse((boolean) args[0])); + + static { + declareAcknowledgedField(PARSER); + } CloseIndexResponse() { } @@ -48,4 +58,16 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeAcknowledged(out); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + addAcknowledgedField(builder); + builder.endObject(); + return builder; + } + + public static CloseIndexResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index 244b8a24b9b67..362f54b74ab36 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.master.TransportMasterNodeAction; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java index 95fef9fc65344..4e98c60265c76 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java @@ -37,7 +37,7 @@ /** * A response for a open index action. */ -public class OpenIndexResponse extends AcknowledgedResponse implements ToXContentObject { +public class OpenIndexResponse extends AcknowledgedResponse implements ToXContentObject { private static final String SHARDS_ACKNOWLEDGED = "shards_acknowledged"; private static final ParseField SHARDS_ACKNOWLEDGED_PARSER = new ParseField(SHARDS_ACKNOWLEDGED); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java new file mode 100644 index 0000000000000..e616e0383118d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponseTests.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.close; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.hamcrest.CoreMatchers.equalTo; + +public class CloseIndexResponseTests extends ESTestCase { + + public void testFromToXContent() throws IOException { + final CloseIndexResponse closeIndexResponse = createTestItem(); + + boolean humanReadable = randomBoolean(); + final XContentType xContentType = randomFrom(XContentType.values()); + BytesReference originalBytes = toShuffledXContent(closeIndexResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + BytesReference mutated; + if (randomBoolean()) { + mutated = insertRandomFields(xContentType, originalBytes, null, random()); + } else { + mutated = originalBytes; + } + + CloseIndexResponse parsedCloseIndexResponse; + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { + parsedCloseIndexResponse = CloseIndexResponse.fromXContent(parser); + assertNull(parser.nextToken()); + } + assertThat(parsedCloseIndexResponse.isAcknowledged(), equalTo(closeIndexResponse.isAcknowledged())); + } + + private static CloseIndexResponse createTestItem() { + boolean acknowledged = randomBoolean(); + return new CloseIndexResponse(acknowledged); + } +} From 6256c330c0d4c5610ab8a41d7fceea8fec003dfe Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Wed, 17 Jan 2018 13:35:29 +0100 Subject: [PATCH 38/94] [Test] Wait for no relocating shards in indices.stats/13_fields tests MixedClusterClientYamlTestSuiteIT sometimes fails when executing the indices.stats/13_fields/* REST tests. It does not reproduce locally, but the execution logs show that it failed when a shard was relocating during the set up execution. This commit changes the set up so that it now waits for all shards to be active before executing the tests. 
closes #26732, #27146 --- .../test/indices.stats/13_fields.yml | 44 +++++++++++++++---- .../fielddata/IndexFieldDataService.java | 2 +- .../index/fielddata/ShardFieldData.java | 6 +-- 3 files changed, 40 insertions(+), 12 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml index 7be97cda1fe0c..25f0e20d0d0a9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml @@ -4,6 +4,7 @@ setup: - do: indices.create: index: test1 + wait_for_active_shards: all body: mappings: bar: @@ -20,6 +21,12 @@ setup: fields: completion: type: completion + + - do: + cluster.health: + wait_for_active_shards: all + wait_for_no_relocating_shards: true + - do: index: index: test1 @@ -29,10 +36,10 @@ setup: - do: index: - index: test2 - type: baz - id: 1 - body: { "bar": "bar", "baz": "baz" } + index: test1 + type: bar + id: 2 + body: { "bar": "foo", "baz": "foo" } - do: indices.refresh: {} @@ -57,18 +64,17 @@ setup: completion: field: baz.completion - - do: - indices.refresh: {} - - do: search: - sort: bar,baz + body: + sort: [ "bar", "baz" ] --- "Fields - blank": - do: indices.stats: {} + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields - gt: { _all.total.completion.size_in_bytes: 0 } @@ -79,6 +85,7 @@ setup: - do: indices.stats: { fields: bar } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -90,6 +97,7 @@ setup: - do: indices.stats: { fields: "bar,baz.completion" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -102,6 +110,7 @@ setup: - do: indices.stats: { fields: "*" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.baz.memory_size_in_bytes: 0 } @@ -114,6 +123,7 @@ setup: - do: indices.stats: { fields: "bar*" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -126,6 +136,7 @@ setup: - do: indices.stats: { fields: "bar*", metric: _all } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -138,6 +149,7 @@ setup: - do: indices.stats: { fields: "bar*", metric: fielddata } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -148,6 +160,7 @@ setup: - do: indices.stats: { fields: "bar*", metric: completion } + - match: { _shards.failed: 0} - is_false: _all.total.fielddata - gt: { _all.total.completion.size_in_bytes: 0 } - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } @@ -158,6 +171,7 @@ setup: - do: indices.stats: { fields: "bar*" , metric: [ completion, fielddata, search ]} + - match: { 
_shards.failed: 0} - gt: { _all.total.fielddata.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz @@ -170,6 +184,7 @@ setup: - do: indices.stats: { fielddata_fields: bar } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -179,6 +194,7 @@ setup: - do: indices.stats: { fielddata_fields: "bar,baz,baz.completion" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.baz.memory_size_in_bytes: 0 } - is_false: _all.total.completion.fields @@ -188,6 +204,7 @@ setup: - do: indices.stats: { fielddata_fields: "*" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - gt: { _all.total.fielddata.fields.baz.memory_size_in_bytes: 0 } - is_false: _all.total.completion.fields @@ -197,6 +214,7 @@ setup: - do: indices.stats: { fielddata_fields: "*r" } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -207,6 +225,7 @@ setup: - do: indices.stats: { fielddata_fields: "*r", metric: _all } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -216,6 +235,7 @@ setup: - do: indices.stats: { fielddata_fields: "*r", metric: fielddata } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -226,6 +246,7 @@ setup: - do: indices.stats: { fielddata_fields: "*r", metric: [ fielddata, search] } + - match: { _shards.failed: 0} - gt: { _all.total.fielddata.fields.bar.memory_size_in_bytes: 0 } - is_false: _all.total.fielddata.fields.baz - is_false: _all.total.completion.fields @@ -236,6 +257,7 @@ setup: - do: indices.stats: { completion_fields: bar.completion } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - is_false: _all.total.fielddata.fields @@ -245,6 +267,7 @@ setup: - do: indices.stats: { completion_fields: "bar.completion,baz,baz.completion" } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - gt: { _all.total.completion.fields.baz\.completion.size_in_bytes: 0 } - is_false: _all.total.fielddata.fields @@ -254,6 +277,7 @@ setup: - do: indices.stats: { completion_fields: "*" } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - gt: { _all.total.completion.fields.baz\.completion.size_in_bytes: 0 } - is_false: _all.total.fielddata.fields @@ -263,6 +287,7 @@ setup: - do: indices.stats: { completion_fields: "*r*" } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - is_false: _all.total.fielddata.fields @@ -272,6 +297,7 @@ setup: - do: indices.stats: { completion_fields: "*r*", metric: _all } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - 
is_false: _all.total.fielddata.fields @@ -281,6 +307,7 @@ setup: - do: indices.stats: { completion_fields: "*r*", metric: completion } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - is_false: _all.total.fielddata.fields @@ -290,6 +317,7 @@ setup: - do: indices.stats: { completion_fields: "*r*", metric: [ completion, search ] } + - match: { _shards.failed: 0} - gt: { _all.total.completion.fields.bar\.completion.size_in_bytes: 0 } - is_false: _all.total.completion.fields.baz\.completion - is_false: _all.total.fielddata.fields diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java index 8194b888615e7..a9d8df1cb264f 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java @@ -48,7 +48,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo case "none": return s; default: - throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node,node]"); + throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node,none]"); } }, Property.IndexScope); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java index 6dd9552b6903b..ed9dd14328dc5 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java @@ -32,9 +32,9 @@ public class ShardFieldData implements IndexFieldDataCache.Listener { - final CounterMetric evictionsMetric = new CounterMetric(); - final CounterMetric totalMetric = new CounterMetric(); - final ConcurrentMap perFieldTotals = ConcurrentCollections.newConcurrentMap(); + private final CounterMetric evictionsMetric = new CounterMetric(); + private final CounterMetric totalMetric = new CounterMetric(); + private final ConcurrentMap perFieldTotals = ConcurrentCollections.newConcurrentMap(); public FieldDataStats stats(String... fields) { ObjectLongHashMap fieldTotals = null; From 9bd7f2c65bb125023007dfea2934e223499ad2d0 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 17 Jan 2018 12:42:20 +0000 Subject: [PATCH 39/94] Improve wording in deprecation message (#28259) --- .../index/analysis/SynonymTokenFilterFactory.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java b/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java index bf9045c5d00e1..37e96cbb54a57 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java @@ -47,8 +47,8 @@ public SynonymTokenFilterFactory(IndexSettings indexSettings, Environment env, A if (settings.get("ignore_case") != null) { deprecationLogger.deprecated( - "This tokenize synonyms with whatever tokenizer and token filters appear before it in the chain. " + - "If you need ignore case with this filter, you should set lowercase filter before this"); + "The ignore_case option on the synonym_graph filter is deprecated. 
" + + "Instead, insert a lowercase filter in the filter chain before the synonym_graph filter."); } this.expand = settings.getAsBoolean("expand", true); From 707d8d6fe6f5f196dc073339216ebcef21722393 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Wed, 17 Jan 2018 14:58:52 +0100 Subject: [PATCH 40/94] Dependencies: Update joda time to 2.9.9 (#28261) --- server/build.gradle | 2 +- server/licenses/joda-time-2.9.5.jar.sha1 | 1 - server/licenses/joda-time-2.9.9.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 server/licenses/joda-time-2.9.5.jar.sha1 create mode 100644 server/licenses/joda-time-2.9.9.jar.sha1 diff --git a/server/build.gradle b/server/build.gradle index 327f267ee8f59..c11c88dfc6e82 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -90,7 +90,7 @@ dependencies { compile 'com.carrotsearch:hppc:0.7.1' // time handling, remove with java 8 time - compile 'joda-time:joda-time:2.9.5' + compile 'joda-time:joda-time:2.9.9' // json and yaml compile "org.yaml:snakeyaml:${versions.snakeyaml}" diff --git a/server/licenses/joda-time-2.9.5.jar.sha1 b/server/licenses/joda-time-2.9.5.jar.sha1 deleted file mode 100644 index ecf1c781556ee..0000000000000 --- a/server/licenses/joda-time-2.9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5f01da7306363fad2028b916f3eab926262de928 \ No newline at end of file diff --git a/server/licenses/joda-time-2.9.9.jar.sha1 b/server/licenses/joda-time-2.9.9.jar.sha1 new file mode 100644 index 0000000000000..4009932ea3beb --- /dev/null +++ b/server/licenses/joda-time-2.9.9.jar.sha1 @@ -0,0 +1 @@ +f7b520c458572890807d143670c9b24f4de90897 \ No newline at end of file From 1f66672d6fe43ae32f2f50a08161f56d373eb19c Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Wed, 17 Jan 2018 15:05:07 +0100 Subject: [PATCH 41/94] [Test] Fix indices.stats/13_fields Remove the wait_for_active_shards: all added by commit 6256c since it does not work when the cluster has 1 node only. --- .../rest-api-spec/test/indices.stats/13_fields.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml index 25f0e20d0d0a9..82655c5778d27 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/13_fields.yml @@ -6,6 +6,12 @@ setup: index: test1 wait_for_active_shards: all body: + settings: + # Limit the number of shards so that shards are unlikely + # to be relocated or being initialized between the test + # set up and the test execution + index.number_of_shards: 3 + index.number_of_replicas: 0 mappings: bar: properties: @@ -24,7 +30,6 @@ setup: - do: cluster.health: - wait_for_active_shards: all wait_for_no_relocating_shards: true - do: From 4ea9ddb7d32dc8afcbb3d79cdb111240fda89906 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Wed, 17 Jan 2018 09:44:21 -0700 Subject: [PATCH 42/94] Unify nio read / write channel contexts (#28160) This commit is related to #27260. Right now we have separate read and write contexts for implementing specific protocol logic. However, some protocols require a closer relationship between read and write operations than is allowed by our current model. An example is HTTP which might require a write if some problem with request parsing was encountered. Additionally, some protocols require close messages to be sent when a channel is shutdown. 
This is also problematic in our current model, where we assume that channels should simply be queued for close and forgotten. This commit transitions to a single ChannelContext which implements all read, write, and close logic for protocols. It is the job of the context to tell the selector when to close the channel. A channel can still be manually queued for close with a selector. This is how server channels are closed for now. And this route allows timeout mechanisms on normal channel closes to be implemented. --- .../elasticsearch/nio/AbstractNioChannel.java | 22 +- .../nio/BytesChannelContext.java | 169 +++++++++ .../elasticsearch/nio/BytesReadContext.java | 64 ---- .../elasticsearch/nio/BytesWriteContext.java | 111 ------ .../nio/BytesWriteOperation.java | 88 +++++ .../org/elasticsearch/nio/ChannelContext.java | 81 +++++ .../org/elasticsearch/nio/ChannelFactory.java | 3 +- .../org/elasticsearch/nio/ESSelector.java | 5 + .../org/elasticsearch/nio/NioChannel.java | 2 - .../elasticsearch/nio/NioSocketChannel.java | 64 ++-- .../org/elasticsearch/nio/ReadContext.java | 35 -- .../elasticsearch/nio/SelectionKeyUtils.java | 55 ++- .../elasticsearch/nio/SocketEventHandler.java | 41 ++- .../org/elasticsearch/nio/SocketSelector.java | 14 +- .../org/elasticsearch/nio/WriteContext.java | 37 -- .../org/elasticsearch/nio/WriteOperation.java | 74 +--- .../nio/AcceptorEventHandlerTests.java | 2 +- .../nio/BytesChannelContextTests.java | 337 ++++++++++++++++++ .../nio/BytesReadContextTests.java | 142 -------- .../nio/BytesWriteContextTests.java | 212 ----------- .../nio/ChannelFactoryTests.java | 2 +- .../nio/NioServerSocketChannelTests.java | 2 +- .../nio/NioSocketChannelTests.java | 47 ++- .../nio/SocketEventHandlerTests.java | 119 ++++--- .../nio/SocketSelectorTests.java | 48 +-- .../nio/WriteOperationTests.java | 61 ++-- .../transport/nio/NioTransport.java | 38 +- .../nio/TcpNioServerSocketChannel.java | 7 +- .../transport/nio/TcpNioSocketChannel.java | 9 +- .../transport/nio/MockNioTransport.java | 24 +- 30 files changed, 1025 insertions(+), 890 deletions(-) create mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java delete mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesReadContext.java delete mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteContext.java create mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java create mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java delete mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java delete mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteContext.java create mode 100644 libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java delete mode 100644 libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesReadContextTests.java delete mode 100644 libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteContextTests.java diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java index 8285fef6d3985..e3dcbad024cb2 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java @@ -26,7 +26,6 @@ import 
java.nio.channels.SelectableChannel; import java.nio.channels.SelectionKey; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; /** @@ -48,9 +47,6 @@ public abstract class AbstractNioChannel implements NioChannel { final S socketChannel; - // This indicates if the channel has been scheduled to be closed. Read the closeFuture to determine if - // the channel close process has completed. - final AtomicBoolean isClosing = new AtomicBoolean(false); private final InetSocketAddress localAddress; private final CompletableFuture closeContext = new CompletableFuture<>(); @@ -73,21 +69,6 @@ public InetSocketAddress getLocalAddress() { return localAddress; } - /** - * Schedules a channel to be closed by the selector event loop with which it is registered. - *
<p>
- * If the channel is open and the state can be transitioned to closed, the close operation will - * be scheduled with the event loop. - *
<p>
- * If the channel is already set to closed, it is assumed that it is already scheduled to be closed. - */ - @Override - public void close() { - if (isClosing.compareAndSet(false, true)) { - selector.queueChannelClose(this); - } - } - /** * Closes the channel synchronously. This method should only be called from the selector thread. *
<p>
@@ -95,8 +76,7 @@ public void close() { */ @Override public void closeFromSelector() throws IOException { - assert selector.isOnCurrentThread() : "Should only call from selector thread"; - isClosing.set(true); + selector.assertOnSelectorThread(); if (closeContext.isDone() == false) { try { closeRawChannel(); diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java new file mode 100644 index 0000000000000..893c6986bdda7 --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java @@ -0,0 +1,169 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.util.LinkedList; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; + +public class BytesChannelContext implements ChannelContext { + + private final NioSocketChannel channel; + private final ReadConsumer readConsumer; + private final InboundChannelBuffer channelBuffer; + private final LinkedList queued = new LinkedList<>(); + private final AtomicBoolean isClosing = new AtomicBoolean(false); + private boolean peerClosed = false; + private boolean ioException = false; + + public BytesChannelContext(NioSocketChannel channel, ReadConsumer readConsumer, InboundChannelBuffer channelBuffer) { + this.channel = channel; + this.readConsumer = readConsumer; + this.channelBuffer = channelBuffer; + } + + @Override + public void channelRegistered() throws IOException {} + + @Override + public int read() throws IOException { + if (channelBuffer.getRemaining() == 0) { + // Requiring one additional byte will ensure that a new page is allocated. 
+ channelBuffer.ensureCapacity(channelBuffer.getCapacity() + 1); + } + + int bytesRead; + try { + bytesRead = channel.read(channelBuffer.sliceBuffersFrom(channelBuffer.getIndex())); + } catch (IOException ex) { + ioException = true; + throw ex; + } + + if (bytesRead == -1) { + peerClosed = true; + return 0; + } + + channelBuffer.incrementIndex(bytesRead); + + int bytesConsumed = Integer.MAX_VALUE; + while (bytesConsumed > 0 && channelBuffer.getIndex() > 0) { + bytesConsumed = readConsumer.consumeReads(channelBuffer); + channelBuffer.release(bytesConsumed); + } + + return bytesRead; + } + + @Override + public void sendMessage(ByteBuffer[] buffers, BiConsumer listener) { + if (isClosing.get()) { + listener.accept(null, new ClosedChannelException()); + return; + } + + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); + SocketSelector selector = channel.getSelector(); + if (selector.isOnCurrentThread() == false) { + selector.queueWrite(writeOperation); + return; + } + + // TODO: Eval if we will allow writes from sendMessage + selector.queueWriteInChannelBuffer(writeOperation); + } + + @Override + public void queueWriteOperation(WriteOperation writeOperation) { + channel.getSelector().assertOnSelectorThread(); + queued.add((BytesWriteOperation) writeOperation); + } + + @Override + public void flushChannel() throws IOException { + channel.getSelector().assertOnSelectorThread(); + int ops = queued.size(); + if (ops == 1) { + singleFlush(queued.pop()); + } else if (ops > 1) { + multiFlush(); + } + } + + @Override + public boolean hasQueuedWriteOps() { + channel.getSelector().assertOnSelectorThread(); + return queued.isEmpty() == false; + } + + @Override + public void closeChannel() { + if (isClosing.compareAndSet(false, true)) { + channel.getSelector().queueChannelClose(channel); + } + } + + @Override + public boolean selectorShouldClose() { + return peerClosed || ioException || isClosing.get(); + } + + @Override + public void closeFromSelector() { + channel.getSelector().assertOnSelectorThread(); + // Set to true in order to reject new writes before queuing with selector + isClosing.set(true); + channelBuffer.close(); + for (BytesWriteOperation op : queued) { + channel.getSelector().executeFailedListener(op.getListener(), new ClosedChannelException()); + } + queued.clear(); + } + + private void singleFlush(BytesWriteOperation headOp) throws IOException { + try { + int written = channel.write(headOp.getBuffersToWrite()); + headOp.incrementIndex(written); + } catch (IOException e) { + channel.getSelector().executeFailedListener(headOp.getListener(), e); + ioException = true; + throw e; + } + + if (headOp.isFullyFlushed()) { + channel.getSelector().executeListener(headOp.getListener(), null); + } else { + queued.push(headOp); + } + } + + private void multiFlush() throws IOException { + boolean lastOpCompleted = true; + while (lastOpCompleted && queued.isEmpty() == false) { + BytesWriteOperation op = queued.pop(); + singleFlush(op); + lastOpCompleted = op.isFullyFlushed(); + } + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesReadContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesReadContext.java deleted file mode 100644 index eeda147be6c70..0000000000000 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesReadContext.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.nio; - -import java.io.IOException; - -public class BytesReadContext implements ReadContext { - - private final NioSocketChannel channel; - private final ReadConsumer readConsumer; - private final InboundChannelBuffer channelBuffer; - - public BytesReadContext(NioSocketChannel channel, ReadConsumer readConsumer, InboundChannelBuffer channelBuffer) { - this.channel = channel; - this.channelBuffer = channelBuffer; - this.readConsumer = readConsumer; - } - - @Override - public int read() throws IOException { - if (channelBuffer.getRemaining() == 0) { - // Requiring one additional byte will ensure that a new page is allocated. - channelBuffer.ensureCapacity(channelBuffer.getCapacity() + 1); - } - - int bytesRead = channel.read(channelBuffer.sliceBuffersFrom(channelBuffer.getIndex())); - - if (bytesRead == -1) { - return bytesRead; - } - - channelBuffer.incrementIndex(bytesRead); - - int bytesConsumed = Integer.MAX_VALUE; - while (bytesConsumed > 0) { - bytesConsumed = readConsumer.consumeReads(channelBuffer); - channelBuffer.release(bytesConsumed); - } - - return bytesRead; - } - - @Override - public void close() { - channelBuffer.close(); - } -} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteContext.java deleted file mode 100644 index c2816deef5343..0000000000000 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteContext.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.nio; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.util.LinkedList; -import java.util.function.BiConsumer; - -public class BytesWriteContext implements WriteContext { - - private final NioSocketChannel channel; - private final LinkedList queued = new LinkedList<>(); - - public BytesWriteContext(NioSocketChannel channel) { - this.channel = channel; - } - - @Override - public void sendMessage(Object message, BiConsumer listener) { - ByteBuffer[] buffers = (ByteBuffer[]) message; - if (channel.isWritable() == false) { - listener.accept(null, new ClosedChannelException()); - return; - } - - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); - SocketSelector selector = channel.getSelector(); - if (selector.isOnCurrentThread() == false) { - selector.queueWrite(writeOperation); - return; - } - - // TODO: Eval if we will allow writes from sendMessage - selector.queueWriteInChannelBuffer(writeOperation); - } - - @Override - public void queueWriteOperations(WriteOperation writeOperation) { - assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to queue writes"; - queued.add(writeOperation); - } - - @Override - public void flushChannel() throws IOException { - assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to flush writes"; - int ops = queued.size(); - if (ops == 1) { - singleFlush(queued.pop()); - } else if (ops > 1) { - multiFlush(); - } - } - - @Override - public boolean hasQueuedWriteOps() { - assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to access queued writes"; - return queued.isEmpty() == false; - } - - @Override - public void clearQueuedWriteOps(Exception e) { - assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to clear queued writes"; - for (WriteOperation op : queued) { - channel.getSelector().executeFailedListener(op.getListener(), e); - } - queued.clear(); - } - - private void singleFlush(WriteOperation headOp) throws IOException { - try { - headOp.flush(); - } catch (IOException e) { - channel.getSelector().executeFailedListener(headOp.getListener(), e); - throw e; - } - - if (headOp.isFullyFlushed()) { - channel.getSelector().executeListener(headOp.getListener(), null); - } else { - queued.push(headOp); - } - } - - private void multiFlush() throws IOException { - boolean lastOpCompleted = true; - while (lastOpCompleted && queued.isEmpty() == false) { - WriteOperation op = queued.pop(); - singleFlush(op); - lastOpCompleted = op.isFullyFlushed(); - } - } -} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java new file mode 100644 index 0000000000000..14e8cace66d09 --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.function.BiConsumer; + +public class BytesWriteOperation implements WriteOperation { + + private final NioSocketChannel channel; + private final BiConsumer listener; + private final ByteBuffer[] buffers; + private final int[] offsets; + private final int length; + private int internalIndex; + + public BytesWriteOperation(NioSocketChannel channel, ByteBuffer[] buffers, BiConsumer listener) { + this.channel = channel; + this.listener = listener; + this.buffers = buffers; + this.offsets = new int[buffers.length]; + int offset = 0; + for (int i = 0; i < buffers.length; i++) { + ByteBuffer buffer = buffers[i]; + offsets[i] = offset; + offset += buffer.remaining(); + } + length = offset; + } + + @Override + public BiConsumer getListener() { + return listener; + } + + @Override + public NioSocketChannel getChannel() { + return channel; + } + + public boolean isFullyFlushed() { + assert length >= internalIndex : "Should never have an index that is greater than the length [length=" + length + ", index=" + + internalIndex + "]"; + return internalIndex == length; + } + + public void incrementIndex(int delta) { + internalIndex += delta; + assert length >= internalIndex : "Should never increment index past length [length=" + length + ", post-increment index=" + + internalIndex + ", delta=" + delta + "]"; + } + + public ByteBuffer[] getBuffersToWrite() { + final int index = Arrays.binarySearch(offsets, internalIndex); + int offsetIndex = index < 0 ? (-(index + 1)) - 1 : index; + + ByteBuffer[] postIndexBuffers = new ByteBuffer[buffers.length - offsetIndex]; + + ByteBuffer firstBuffer = buffers[offsetIndex].duplicate(); + firstBuffer.position(internalIndex - offsets[offsetIndex]); + postIndexBuffers[0] = firstBuffer; + int j = 1; + for (int i = (offsetIndex + 1); i < buffers.length; ++i) { + postIndexBuffers[j++] = buffers[i].duplicate(); + } + + return postIndexBuffers; + } + +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java new file mode 100644 index 0000000000000..10afd53621dd8 --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
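The offsets/binarySearch bookkeeping in getBuffersToWrite() above is easiest to check with concrete numbers. A standalone sketch of the same arithmetic (class name invented): with buffers of 10, 20, and 30 bytes the offsets are [0, 10, 30]; after 25 bytes have been flushed, writing must resume in buffer 1 at position 15.

import java.util.Arrays;

public class OffsetMathSketch {

    public static void main(String[] args) {
        int[] offsets = {0, 10, 30};   // start offset of each buffer; total length 60
        int internalIndex = 25;        // bytes already flushed
        int index = Arrays.binarySearch(offsets, internalIndex);
        int offsetIndex = index < 0 ? (-(index + 1)) - 1 : index;  // same expression as above
        System.out.println("resume in buffer " + offsetIndex                 // 1
            + " at position " + (internalIndex - offsets[offsetIndex]));     // 15
    }
}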
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.function.BiConsumer; + +/** + * This context should implement the specific logic for a channel. When a channel receives a notification + * that it is ready to perform certain operations (read, write, etc) the {@link ChannelContext} will be + * called. This context will need to implement all protocol related logic. Additionally, if any special + * close behavior is required, it should be implemented in this context. + * + * The only methods of the context that should ever be called from a non-selector thread are + * {@link #closeChannel()} and {@link #sendMessage(ByteBuffer[], BiConsumer)}. + */ +public interface ChannelContext { + + void channelRegistered() throws IOException; + + int read() throws IOException; + + void sendMessage(ByteBuffer[] buffers, BiConsumer listener); + + void queueWriteOperation(WriteOperation writeOperation); + + void flushChannel() throws IOException; + + boolean hasQueuedWriteOps(); + + /** + * Schedules a channel to be closed by the selector event loop with which it is registered. + *
<p> + * If the channel is open and the state can be transitioned to closed, the close operation will + * be scheduled with the event loop. + * <p> + * If the channel is already set to closed, it is assumed that it is already scheduled to be closed. + * <p>
+ * Depending on the underlying protocol of the channel, a close operation might simply close the socket + * channel or may involve reading and writing messages. + */ + void closeChannel(); + + /** + * This method indicates if a selector should close this channel. + * + * @return a boolean indicating if the selector should close + */ + boolean selectorShouldClose(); + + /** + * This method cleans up any context resources that need to be released when a channel is closed. It + * should only be called by the selector thread. + * + * @throws IOException during channel / context close + */ + void closeFromSelector() throws IOException; + + @FunctionalInterface + interface ReadConsumer { + int consumeReads(InboundChannelBuffer channelBuffer) throws IOException; + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java index d90927af8b91a..a9909587453be 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java @@ -88,8 +88,7 @@ public ServerSocket openNioServerSocketChannel(InetSocketAddress address, Accept private Socket internalCreateChannel(SocketSelector selector, SocketChannel rawChannel) throws IOException { try { Socket channel = createChannel(selector, rawChannel); - assert channel.getReadContext() != null : "read context should have been set on channel"; - assert channel.getWriteContext() != null : "write context should have been set on channel"; + assert channel.getContext() != null : "channel context should have been set on channel"; assert channel.getExceptionContext() != null : "exception handler should have been set on channel"; return channel; } catch (Exception e) { diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java index ed566ffa7daf8..e923df4bfa9b8 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java @@ -163,6 +163,11 @@ public boolean isOnCurrentThread() { return Thread.currentThread() == thread; } + public void assertOnSelectorThread() { + assert isOnCurrentThread() : "Must be on selector thread to perform this operation. Currently on thread [" + + Thread.currentThread().getName() + "]."; + } + void wakeup() { // TODO: Do we need the wakeup optimizations that some other libraries use? 
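ReadConsumer is the single extension point a protocol layer has to supply to the new context. A minimal, hypothetical implementation: it claims every byte that has arrived, so BytesChannelContext.read() releases its pages immediately (class and field names invented; the int cast assumes the buffer index fits in an int).

import org.elasticsearch.nio.ChannelContext;

public class DiscardingReadConsumerSketch {

    // consumeReads reports how many bytes were handled; the caller releases
    // exactly that many bytes and loops until nothing more is consumed.
    public static final ChannelContext.ReadConsumer DISCARD_ALL =
        channelBuffer -> (int) channelBuffer.getIndex();
}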
selector.wakeup(); diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java index 433ec204e8684..438c013ecd0aa 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java @@ -32,8 +32,6 @@ public interface NioChannel { InetSocketAddress getLocalAddress(); - void close(); - void closeFromSelector() throws IOException; void register() throws ClosedChannelException; diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java index 5260c0f5fcf16..c9ea14446d935 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java @@ -19,11 +19,13 @@ package org.elasticsearch.nio; +import org.elasticsearch.nio.utils.ExceptionsHelper; + import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; import java.nio.channels.SocketChannel; +import java.util.ArrayList; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; @@ -34,8 +36,7 @@ public class NioSocketChannel extends AbstractNioChannel { private final CompletableFuture connectContext = new CompletableFuture<>(); private final SocketSelector socketSelector; private final AtomicBoolean contextsSet = new AtomicBoolean(false); - private WriteContext writeContext; - private ReadContext readContext; + private ChannelContext context; private BiConsumer exceptionContext; private Exception connectException; @@ -47,14 +48,21 @@ public NioSocketChannel(SocketChannel socketChannel, SocketSelector selector) th @Override public void closeFromSelector() throws IOException { - assert socketSelector.isOnCurrentThread() : "Should only call from selector thread"; - // Even if the channel has already been closed we will clear any pending write operations just in case - if (writeContext.hasQueuedWriteOps()) { - writeContext.clearQueuedWriteOps(new ClosedChannelException()); + getSelector().assertOnSelectorThread(); + if (isOpen()) { + ArrayList closingExceptions = new ArrayList<>(2); + try { + super.closeFromSelector(); + } catch (IOException e) { + closingExceptions.add(e); + } + try { + context.closeFromSelector(); + } catch (IOException e) { + closingExceptions.add(e); + } + ExceptionsHelper.rethrowAndSuppress(closingExceptions); } - readContext.close(); - - super.closeFromSelector(); } @Override @@ -62,6 +70,10 @@ public SocketSelector getSelector() { return socketSelector; } + public int write(ByteBuffer buffer) throws IOException { + return socketChannel.write(buffer); + } + public int write(ByteBuffer[] buffers) throws IOException { if (buffers.length == 1) { return socketChannel.write(buffers[0]); @@ -82,33 +94,17 @@ public int read(ByteBuffer[] buffers) throws IOException { } } - public int read(InboundChannelBuffer buffer) throws IOException { - int bytesRead = (int) socketChannel.read(buffer.sliceBuffersFrom(buffer.getIndex())); - - if (bytesRead == -1) { - return bytesRead; - } - - buffer.incrementIndex(bytesRead); - return bytesRead; - } - - public void setContexts(ReadContext readContext, WriteContext writeContext, BiConsumer exceptionContext) { + public 
void setContexts(ChannelContext context, BiConsumer exceptionContext) { if (contextsSet.compareAndSet(false, true)) { - this.readContext = readContext; - this.writeContext = writeContext; + this.context = context; this.exceptionContext = exceptionContext; } else { throw new IllegalStateException("Contexts on this channel were already set. They should only be set once."); } } - public WriteContext getWriteContext() { - return writeContext; - } - - public ReadContext getReadContext() { - return readContext; + public ChannelContext getContext() { + return context; } public BiConsumer getExceptionContext() { @@ -123,14 +119,6 @@ public boolean isConnectComplete() { return isConnectComplete0(); } - public boolean isWritable() { - return isClosing.get() == false; - } - - public boolean isReadable() { - return isClosing.get() == false; - } - /** * This method will attempt to complete the connection process for this channel. It should be called for * new channels or for a channel that has produced an OP_CONNECT event. If this method returns true then diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java deleted file mode 100644 index d23ce56f57ad1..0000000000000 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.nio; - -import java.io.IOException; - -public interface ReadContext extends AutoCloseable { - - int read() throws IOException; - - @Override - void close(); - - @FunctionalInterface - interface ReadConsumer { - int consumeReads(InboundChannelBuffer channelBuffer) throws IOException; - } -} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java index b6272ce713501..be2dc6f3414bc 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java @@ -26,28 +26,81 @@ public final class SelectionKeyUtils { private SelectionKeyUtils() {} + /** + * Adds an interest in writes for this channel while maintaining other interests. + * + * @param channel the channel + * @throws CancelledKeyException if the key was already cancelled + */ public static void setWriteInterested(NioChannel channel) throws CancelledKeyException { SelectionKey selectionKey = channel.getSelectionKey(); selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_WRITE); } + /** + * Removes an interest in writes for this channel while maintaining other interests.
+ * + * @param channel the channel + * @throws CancelledKeyException if the key was already cancelled + */ public static void removeWriteInterested(NioChannel channel) throws CancelledKeyException { SelectionKey selectionKey = channel.getSelectionKey(); selectionKey.interestOps(selectionKey.interestOps() & ~SelectionKey.OP_WRITE); } + /** + * Adds an interest in connects and reads for this channel while maintaining other interests. + * + * @param channel the channel + * @throws CancelledKeyException if the key was already cancelled + */ public static void setConnectAndReadInterested(NioChannel channel) throws CancelledKeyException { SelectionKey selectionKey = channel.getSelectionKey(); selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_CONNECT | SelectionKey.OP_READ); } + /** + * Adds an interest in connects, reads, and writes for this channel while maintaining other interests. + * + * @param channel the channel + * @throws CancelledKeyException if the key was already cancelled + */ + public static void setConnectReadAndWriteInterested(NioChannel channel) throws CancelledKeyException { + SelectionKey selectionKey = channel.getSelectionKey(); + selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_CONNECT | SelectionKey.OP_READ | SelectionKey.OP_WRITE); + } + + /** + * Removes an interest in connects for this channel while maintaining other interests. + * + * @param channel the channel + * @throws CancelledKeyException if the key was already cancelled + */ public static void removeConnectInterested(NioChannel channel) throws CancelledKeyException { SelectionKey selectionKey = channel.getSelectionKey(); selectionKey.interestOps(selectionKey.interestOps() & ~SelectionKey.OP_CONNECT); } - public static void setAcceptInterested(NioServerSocketChannel channel) { + /** + * Adds an interest in accepts for this channel while maintaining other interests. + * + * @param channel the channel + * @throws CancelledKeyException if the key was already cancelled + */ + public static void setAcceptInterested(NioServerSocketChannel channel) throws CancelledKeyException { SelectionKey selectionKey = channel.getSelectionKey(); selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_ACCEPT); } + + /** + * Checks for an interest in writes for this channel.
+ * + * @param channel the channel + * @return a boolean indicating if we are currently interested in writes for this channel + * @throws CancelledKeyException if the key was already cancelled + */ + public static boolean isWriteInterested(NioSocketChannel channel) throws CancelledKeyException { + return (channel.getSelectionKey().interestOps() & SelectionKey.OP_WRITE) != 0; + } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java index d3be18f377638..d5977cee851ed 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java @@ -43,8 +43,14 @@ public SocketEventHandler(Logger logger) { * * @param channel that was registered */ - protected void handleRegistration(NioSocketChannel channel) { - SelectionKeyUtils.setConnectAndReadInterested(channel); + protected void handleRegistration(NioSocketChannel channel) throws IOException { + ChannelContext context = channel.getContext(); + context.channelRegistered(); + if (context.hasQueuedWriteOps()) { + SelectionKeyUtils.setConnectReadAndWriteInterested(channel); + } else { + SelectionKeyUtils.setConnectAndReadInterested(channel); + } } /** @@ -86,10 +92,7 @@ protected void connectException(NioSocketChannel channel, Exception exception) { * @param channel that can be read */ protected void handleRead(NioSocketChannel channel) throws IOException { - int bytesRead = channel.getReadContext().read(); - if (bytesRead == -1) { - handleClose(channel); - } + channel.getContext().read(); } /** @@ -107,16 +110,11 @@ protected void readException(NioSocketChannel channel, Exception exception) { * This method is called when a channel signals it is ready to receive writes. All of the write logic * should occur in this call. 
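All of the SelectionKeyUtils helpers above are variations on one piece of bit arithmetic: OR adds an interest, AND with the complement removes it, and AND alone tests it. A self-contained demonstration against a plain int, no channel or selection key required (class name invented):

import java.nio.channels.SelectionKey;

public class InterestOpsSketch {

    public static void main(String[] args) {
        int ops = SelectionKey.OP_CONNECT | SelectionKey.OP_READ;   // state after registration
        ops |= SelectionKey.OP_WRITE;                                // setWriteInterested
        System.out.println((ops & SelectionKey.OP_WRITE) != 0);      // isWriteInterested -> true
        ops &= ~SelectionKey.OP_WRITE;                               // removeWriteInterested
        System.out.println((ops & SelectionKey.OP_WRITE) != 0);      // -> false
    }
}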
* - * @param channel that can be read + * @param channel that can be written to */ protected void handleWrite(NioSocketChannel channel) throws IOException { - WriteContext channelContext = channel.getWriteContext(); + ChannelContext channelContext = channel.getContext(); channelContext.flushChannel(); - if (channelContext.hasQueuedWriteOps()) { - SelectionKeyUtils.setWriteInterested(channel); - } else { - SelectionKeyUtils.removeWriteInterested(channel); - } } /** @@ -153,6 +151,23 @@ protected void listenerException(BiConsumer listener, Exceptio logger.warn(new ParameterizedMessage("exception while executing listener: {}", listener), exception); } + /** + * @param channel that was handled + */ + protected void postHandling(NioSocketChannel channel) { + if (channel.getContext().selectorShouldClose()) { + handleClose(channel); + } else { + boolean currentlyWriteInterested = SelectionKeyUtils.isWriteInterested(channel); + boolean pendingWrites = channel.getContext().hasQueuedWriteOps(); + if (currentlyWriteInterested == false && pendingWrites) { + SelectionKeyUtils.setWriteInterested(channel); + } else if (currentlyWriteInterested && pendingWrites == false) { + SelectionKeyUtils.removeWriteInterested(channel); + } + } + } + private void exceptionCaught(NioSocketChannel channel, Exception e) { channel.getExceptionContext().accept(channel, e); } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java index ac8ad87b726a2..e35aa7b4d226b 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java @@ -64,6 +64,8 @@ void processKey(SelectionKey selectionKey) { handleRead(nioSocketChannel); } } + + eventHandler.postHandling(nioSocketChannel); } @Override @@ -118,12 +120,12 @@ public void queueWrite(WriteOperation writeOperation) { * @param writeOperation to be queued in a channel's buffer */ public void queueWriteInChannelBuffer(WriteOperation writeOperation) { - assert isOnCurrentThread() : "Must be on selector thread"; + assertOnSelectorThread(); NioSocketChannel channel = writeOperation.getChannel(); - WriteContext context = channel.getWriteContext(); + ChannelContext context = channel.getContext(); try { SelectionKeyUtils.setWriteInterested(channel); - context.queueWriteOperations(writeOperation); + context.queueWriteOperation(writeOperation); } catch (Exception e) { executeFailedListener(writeOperation.getListener(), e); } @@ -137,7 +139,7 @@ public void queueWriteInChannelBuffer(WriteOperation writeOperation) { * @param value to provide to listener */ public void executeListener(BiConsumer listener, V value) { - assert isOnCurrentThread() : "Must be on selector thread"; + assertOnSelectorThread(); try { listener.accept(value, null); } catch (Exception e) { @@ -153,7 +155,7 @@ public void executeListener(BiConsumer listener, V value) { * @param exception to provide to listener */ public void executeFailedListener(BiConsumer listener, Exception exception) { - assert isOnCurrentThread() : "Must be on selector thread"; + assertOnSelectorThread(); try { listener.accept(null, exception); } catch (Exception e) { @@ -180,7 +182,7 @@ private void handleRead(NioSocketChannel nioSocketChannel) { private void handleQueuedWrites() { WriteOperation writeOperation; while ((writeOperation = queuedWrites.poll()) != null) { - if (writeOperation.getChannel().isWritable()) { + if 
(writeOperation.getChannel().isOpen()) { queueWriteInChannelBuffer(writeOperation); } else { executeFailedListener(writeOperation.getListener(), new ClosedChannelException()); diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteContext.java deleted file mode 100644 index 39e69e8f9a94e..0000000000000 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteContext.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.nio; - -import java.io.IOException; -import java.util.function.BiConsumer; - -public interface WriteContext { - - void sendMessage(Object message, BiConsumer listener); - - void queueWriteOperations(WriteOperation writeOperation); - - void flushChannel() throws IOException; - - boolean hasQueuedWriteOps(); - - void clearQueuedWriteOps(Exception e); - -} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java index b6fcc838a964f..09800d981bd2d 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java @@ -19,74 +19,16 @@ package org.elasticsearch.nio; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Arrays; import java.util.function.BiConsumer; -public class WriteOperation { - - private final NioSocketChannel channel; - private final BiConsumer listener; - private final ByteBuffer[] buffers; - private final int[] offsets; - private final int length; - private int internalIndex; - - public WriteOperation(NioSocketChannel channel, ByteBuffer[] buffers, BiConsumer listener) { - this.channel = channel; - this.listener = listener; - this.buffers = buffers; - this.offsets = new int[buffers.length]; - int offset = 0; - for (int i = 0; i < buffers.length; i++) { - ByteBuffer buffer = buffers[i]; - offsets[i] = offset; - offset += buffer.remaining(); - } - length = offset; - } - - public ByteBuffer[] getByteBuffers() { - return buffers; - } - - public BiConsumer getListener() { - return listener; - } - - public NioSocketChannel getChannel() { - return channel; - } - - public boolean isFullyFlushed() { - return internalIndex == length; - } - - public int flush() throws IOException { - int written = channel.write(getBuffersToWrite()); - internalIndex += written; - return written; - } - - private ByteBuffer[] getBuffersToWrite() { - int offsetIndex = getOffsetIndex(internalIndex); - - ByteBuffer[] postIndexBuffers = new ByteBuffer[buffers.length - offsetIndex]; - - ByteBuffer firstBuffer = 
buffers[offsetIndex].duplicate(); - firstBuffer.position(internalIndex - offsets[offsetIndex]); - postIndexBuffers[0] = firstBuffer; - int j = 1; - for (int i = (offsetIndex + 1); i < buffers.length; ++i) { - postIndexBuffers[j++] = buffers[i].duplicate(); - } +/** + * This is a basic write operation that can be queued with a channel. The only requirements of a write + * operation are that it has a listener and a reference to its channel. The actual conversion of the write + * operation implementation to bytes will be performed by the {@link ChannelContext}. + */ +public interface WriteOperation { - return postIndexBuffers; - } + BiConsumer getListener(); - private int getOffsetIndex(int offset) { - final int i = Arrays.binarySearch(offsets, offset); - return i < 0 ? (-(i + 1)) - 1 : i; - } + NioSocketChannel getChannel(); } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java index 9d8f47fe3ef4d..1f51fdc2017ae 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java @@ -80,7 +80,7 @@ public void testHandleAcceptCallsChannelFactory() throws IOException { @SuppressWarnings("unchecked") public void testHandleAcceptCallsServerAcceptCallback() throws IOException { NioSocketChannel childChannel = new NioSocketChannel(mock(SocketChannel.class), socketSelector); - childChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class)); + childChannel.setContexts(mock(ChannelContext.class), mock(BiConsumer.class)); when(channelFactory.acceptNioChannel(same(channel), same(socketSelector))).thenReturn(childChannel); handler.acceptChannel(channel); diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java new file mode 100644 index 0000000000000..db0e6ae80badf --- /dev/null +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java @@ -0,0 +1,337 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
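With WriteOperation reduced to an interface, a context is free to queue something richer than raw ByteBuffers and defer serialization to flush time. A hypothetical message-level implementation (class name and message type are invented; the listener generics follow how listeners are used throughout this patch):

import java.util.function.BiConsumer;

import org.elasticsearch.nio.NioSocketChannel;
import org.elasticsearch.nio.WriteOperation;

public class MessageWriteOperation implements WriteOperation {

    private final NioSocketChannel channel;
    private final BiConsumer<Void, Throwable> listener;
    private final Object message; // serialized to ByteBuffers by the owning ChannelContext at flush time

    public MessageWriteOperation(NioSocketChannel channel, Object message, BiConsumer<Void, Throwable> listener) {
        this.channel = channel;
        this.message = message;
        this.listener = listener;
    }

    @Override
    public BiConsumer<Void, Throwable> getListener() {
        return listener;
    }

    @Override
    public NioSocketChannel getChannel() {
        return channel;
    }

    public Object getMessage() {
        return message;
    }
}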
+ */ + +package org.elasticsearch.nio; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.util.function.BiConsumer; +import java.util.function.Supplier; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.isNull; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class BytesChannelContextTests extends ESTestCase { + + private ChannelContext.ReadConsumer readConsumer; + private NioSocketChannel channel; + private BytesChannelContext context; + private InboundChannelBuffer channelBuffer; + private SocketSelector selector; + private BiConsumer listener; + private int messageLength; + + @Before + @SuppressWarnings("unchecked") + public void init() { + readConsumer = mock(ChannelContext.ReadConsumer.class); + + messageLength = randomInt(96) + 20; + selector = mock(SocketSelector.class); + listener = mock(BiConsumer.class); + channel = mock(NioSocketChannel.class); + Supplier pageSupplier = () -> + new InboundChannelBuffer.Page(ByteBuffer.allocate(BigArrays.BYTE_PAGE_SIZE), () -> {}); + channelBuffer = new InboundChannelBuffer(pageSupplier); + context = new BytesChannelContext(channel, readConsumer, channelBuffer); + + when(channel.getSelector()).thenReturn(selector); + when(selector.isOnCurrentThread()).thenReturn(true); + } + + public void testSuccessfulRead() throws IOException { + byte[] bytes = createMessage(messageLength); + + when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { + ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; + buffers[0].put(bytes); + return bytes.length; + }); + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, 0); + + assertEquals(messageLength, context.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); + verify(readConsumer, times(1)).consumeReads(channelBuffer); + } + + public void testMultipleReadsConsumed() throws IOException { + byte[] bytes = createMessage(messageLength * 2); + + when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { + ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; + buffers[0].put(bytes); + return bytes.length; + }); + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, messageLength, 0); + + assertEquals(bytes.length, context.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); + verify(readConsumer, times(2)).consumeReads(channelBuffer); + } + + public void testPartialRead() throws IOException { + byte[] bytes = createMessage(messageLength); + + when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { + ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; + buffers[0].put(bytes); + return bytes.length; + }); + + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(0); + + assertEquals(messageLength, context.read()); + + assertEquals(bytes.length, channelBuffer.getIndex()); + verify(readConsumer, times(1)).consumeReads(channelBuffer); + + 
when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength * 2, 0); + + assertEquals(messageLength, context.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity()); + verify(readConsumer, times(2)).consumeReads(channelBuffer); + } + + public void testReadThrowsIOException() throws IOException { + IOException ioException = new IOException(); + when(channel.read(any(ByteBuffer[].class))).thenThrow(ioException); + + IOException ex = expectThrows(IOException.class, () -> context.read()); + assertSame(ioException, ex); + } + + public void testReadThrowsIOExceptionMeansReadyForClose() throws IOException { + when(channel.read(any(ByteBuffer[].class))).thenThrow(new IOException()); + + assertFalse(context.selectorShouldClose()); + expectThrows(IOException.class, () -> context.read()); + assertTrue(context.selectorShouldClose()); + } + + public void testReadLessThanZeroMeansReadyForClose() throws IOException { + when(channel.read(any(ByteBuffer[].class))).thenReturn(-1); + + assertEquals(0, context.read()); + + assertTrue(context.selectorShouldClose()); + } + + public void testCloseClosesChannelBuffer() throws IOException { + Runnable closer = mock(Runnable.class); + Supplier pageSupplier = () -> new InboundChannelBuffer.Page(ByteBuffer.allocate(1 << 14), closer); + InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); + buffer.ensureCapacity(1); + BytesChannelContext context = new BytesChannelContext(channel, readConsumer, buffer); + context.closeFromSelector(); + verify(closer).run(); + } + + public void testWriteFailsIfClosing() { + context.closeChannel(); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + context.sendMessage(buffers, listener); + + verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); + } + + public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception { + ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(BytesWriteOperation.class); + + when(selector.isOnCurrentThread()).thenReturn(false); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + context.sendMessage(buffers, listener); + + verify(selector).queueWrite(writeOpCaptor.capture()); + BytesWriteOperation writeOp = writeOpCaptor.getValue(); + + assertSame(listener, writeOp.getListener()); + assertSame(channel, writeOp.getChannel()); + assertEquals(buffers[0], writeOp.getBuffersToWrite()[0]); + } + + public void testSendMessageFromSameThreadIsQueuedInChannel() { + ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(BytesWriteOperation.class); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + context.sendMessage(buffers, listener); + + verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture()); + BytesWriteOperation writeOp = writeOpCaptor.getValue(); + + assertSame(listener, writeOp.getListener()); + assertSame(channel, writeOp.getChannel()); + assertEquals(buffers[0], writeOp.getBuffersToWrite()[0]); + } + + public void testWriteIsQueuedInChannel() { + assertFalse(context.hasQueuedWriteOps()); + + ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; + context.queueWriteOperation(new BytesWriteOperation(channel, buffer, listener)); + + assertTrue(context.hasQueuedWriteOps()); + } + + public void testWriteOpsClearedOnClose() throws Exception { + assertFalse(context.hasQueuedWriteOps()); + + ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; + context.queueWriteOperation(new 
BytesWriteOperation(channel, buffer, listener)); + + assertTrue(context.hasQueuedWriteOps()); + + context.closeFromSelector(); + + verify(selector).executeFailedListener(same(listener), any(ClosedChannelException.class)); + + assertFalse(context.hasQueuedWriteOps()); + } + + public void testQueuedWriteIsFlushedInFlushCall() throws Exception { + assertFalse(context.hasQueuedWriteOps()); + + ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; + BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); + context.queueWriteOperation(writeOperation); + + assertTrue(context.hasQueuedWriteOps()); + + when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(writeOperation.isFullyFlushed()).thenReturn(true); + when(writeOperation.getListener()).thenReturn(listener); + context.flushChannel(); + + verify(channel).write(buffers); + verify(selector).executeListener(listener, null); + assertFalse(context.hasQueuedWriteOps()); + } + + public void testPartialFlush() throws IOException { + assertFalse(context.hasQueuedWriteOps()); + + BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); + context.queueWriteOperation(writeOperation); + + assertTrue(context.hasQueuedWriteOps()); + + when(writeOperation.isFullyFlushed()).thenReturn(false); + context.flushChannel(); + + verify(listener, times(0)).accept(null, null); + assertTrue(context.hasQueuedWriteOps()); + } + + @SuppressWarnings("unchecked") + public void testMultipleWritesPartialFlushes() throws IOException { + assertFalse(context.hasQueuedWriteOps()); + + BiConsumer listener2 = mock(BiConsumer.class); + BytesWriteOperation writeOperation1 = mock(BytesWriteOperation.class); + BytesWriteOperation writeOperation2 = mock(BytesWriteOperation.class); + when(writeOperation1.getListener()).thenReturn(listener); + when(writeOperation2.getListener()).thenReturn(listener2); + context.queueWriteOperation(writeOperation1); + context.queueWriteOperation(writeOperation2); + + assertTrue(context.hasQueuedWriteOps()); + + when(writeOperation1.isFullyFlushed()).thenReturn(true); + when(writeOperation2.isFullyFlushed()).thenReturn(false); + context.flushChannel(); + + verify(selector).executeListener(listener, null); + verify(listener2, times(0)).accept(null, null); + assertTrue(context.hasQueuedWriteOps()); + + when(writeOperation2.isFullyFlushed()).thenReturn(true); + + context.flushChannel(); + + verify(selector).executeListener(listener2, null); + assertFalse(context.hasQueuedWriteOps()); + } + + public void testWhenIOExceptionThrownListenerIsCalled() throws IOException { + assertFalse(context.hasQueuedWriteOps()); + + ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; + BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); + context.queueWriteOperation(writeOperation); + + assertTrue(context.hasQueuedWriteOps()); + + IOException exception = new IOException(); + when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(channel.write(buffers)).thenThrow(exception); + when(writeOperation.getListener()).thenReturn(listener); + expectThrows(IOException.class, () -> context.flushChannel()); + + verify(selector).executeFailedListener(listener, exception); + assertFalse(context.hasQueuedWriteOps()); + } + + public void testWriteIOExceptionMeansChannelReadyToClose() throws IOException { + ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; + BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); + context.queueWriteOperation(writeOperation); + + IOException exception = new IOException(); + 
when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(channel.write(buffers)).thenThrow(exception); + + assertFalse(context.selectorShouldClose()); + expectThrows(IOException.class, () -> context.flushChannel()); + assertTrue(context.selectorShouldClose()); + } + + public void initiateCloseSchedulesCloseWithSelector() { + context.closeChannel(); + verify(selector).queueChannelClose(channel); + } + + private static byte[] createMessage(int length) { + byte[] bytes = new byte[length]; + for (int i = 0; i < length; ++i) { + bytes[i] = randomByte(); + } + return bytes; + } +} diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesReadContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesReadContextTests.java deleted file mode 100644 index 69f187378aca5..0000000000000 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesReadContextTests.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.nio; - -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.function.Supplier; - -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class BytesReadContextTests extends ESTestCase { - - private ReadContext.ReadConsumer readConsumer; - private NioSocketChannel channel; - private BytesReadContext readContext; - private InboundChannelBuffer channelBuffer; - private int messageLength; - - @Before - public void init() { - readConsumer = mock(ReadContext.ReadConsumer.class); - - messageLength = randomInt(96) + 20; - channel = mock(NioSocketChannel.class); - Supplier pageSupplier = () -> - new InboundChannelBuffer.Page(ByteBuffer.allocate(BigArrays.BYTE_PAGE_SIZE), () -> {}); - channelBuffer = new InboundChannelBuffer(pageSupplier); - readContext = new BytesReadContext(channel, readConsumer, channelBuffer); - } - - public void testSuccessfulRead() throws IOException { - byte[] bytes = createMessage(messageLength); - - when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { - ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; - buffers[0].put(bytes); - return bytes.length; - }); - - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, 0); - - assertEquals(messageLength, readContext.read()); - - assertEquals(0, channelBuffer.getIndex()); - assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); - verify(readConsumer, 
times(2)).consumeReads(channelBuffer); - } - - public void testMultipleReadsConsumed() throws IOException { - byte[] bytes = createMessage(messageLength * 2); - - when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { - ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; - buffers[0].put(bytes); - return bytes.length; - }); - - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, messageLength, 0); - - assertEquals(bytes.length, readContext.read()); - - assertEquals(0, channelBuffer.getIndex()); - assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); - verify(readConsumer, times(3)).consumeReads(channelBuffer); - } - - public void testPartialRead() throws IOException { - byte[] bytes = createMessage(messageLength); - - when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { - ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; - buffers[0].put(bytes); - return bytes.length; - }); - - - when(readConsumer.consumeReads(channelBuffer)).thenReturn(0, messageLength); - - assertEquals(messageLength, readContext.read()); - - assertEquals(bytes.length, channelBuffer.getIndex()); - verify(readConsumer, times(1)).consumeReads(channelBuffer); - - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength * 2, 0); - - assertEquals(messageLength, readContext.read()); - - assertEquals(0, channelBuffer.getIndex()); - assertEquals(BigArrays.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity()); - verify(readConsumer, times(3)).consumeReads(channelBuffer); - } - - public void testReadThrowsIOException() throws IOException { - IOException ioException = new IOException(); - when(channel.read(any(ByteBuffer[].class))).thenThrow(ioException); - - IOException ex = expectThrows(IOException.class, () -> readContext.read()); - assertSame(ioException, ex); - } - - public void closeClosesChannelBuffer() { - InboundChannelBuffer buffer = mock(InboundChannelBuffer.class); - BytesReadContext readContext = new BytesReadContext(channel, readConsumer, buffer); - - readContext.close(); - - verify(buffer).close(); - } - - private static byte[] createMessage(int length) { - byte[] bytes = new byte[length]; - for (int i = 0; i < length; ++i) { - bytes[i] = randomByte(); - } - return bytes; - } -} diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteContextTests.java deleted file mode 100644 index 9d5b1c92cb6b7..0000000000000 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteContextTests.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
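Note the deleted read tests above expect one more consumeReads invocation than their replacements in BytesChannelContextTests: the old loop only stopped on a zero-byte consume, while the new loop in BytesChannelContext.read() also stops once the buffer index reaches 0. A standalone trace of the new condition (numbers and class name invented):

public class ConsumeLoopSketch {

    public static void main(String[] args) {
        // Consumer returns 20 on the first call, 0 on a second call (if made).
        int[] consumes = {20, 0};
        int calls = 0;
        int index = 20; // 20 bytes arrived
        int bytesConsumed = Integer.MAX_VALUE;
        // New loop: also checks the buffer index, so a consumer that drains the
        // buffer in one pass is never asked again; this is why the times(n)
        // verifications dropped by one between the old tests and the new ones.
        while (bytesConsumed > 0 && index > 0) {
            bytesConsumed = consumes[calls++];
            index -= bytesConsumed;
        }
        System.out.println("consumer invoked " + calls + " time(s)"); // 1
    }
}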
- */ - -package org.elasticsearch.nio; - -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; -import org.mockito.ArgumentCaptor; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.util.function.BiConsumer; - -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.isNull; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class BytesWriteContextTests extends ESTestCase { - - private SocketSelector selector; - private BiConsumer listener; - private BytesWriteContext writeContext; - private NioSocketChannel channel; - - @Before - @SuppressWarnings("unchecked") - public void setUp() throws Exception { - super.setUp(); - selector = mock(SocketSelector.class); - listener = mock(BiConsumer.class); - channel = mock(NioSocketChannel.class); - writeContext = new BytesWriteContext(channel); - - when(channel.getSelector()).thenReturn(selector); - when(selector.isOnCurrentThread()).thenReturn(true); - } - - public void testWriteFailsIfChannelNotWritable() throws Exception { - when(channel.isWritable()).thenReturn(false); - - ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))}; - writeContext.sendMessage(buffers, listener); - - verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); - } - - public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception { - ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class); - - when(selector.isOnCurrentThread()).thenReturn(false); - when(channel.isWritable()).thenReturn(true); - - ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))}; - writeContext.sendMessage(buffers, listener); - - verify(selector).queueWrite(writeOpCaptor.capture()); - WriteOperation writeOp = writeOpCaptor.getValue(); - - assertSame(listener, writeOp.getListener()); - assertSame(channel, writeOp.getChannel()); - assertEquals(buffers[0], writeOp.getByteBuffers()[0]); - } - - public void testSendMessageFromSameThreadIsQueuedInChannel() throws Exception { - ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class); - - when(channel.isWritable()).thenReturn(true); - - ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))}; - writeContext.sendMessage(buffers, listener); - - verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture()); - WriteOperation writeOp = writeOpCaptor.getValue(); - - assertSame(listener, writeOp.getListener()); - assertSame(channel, writeOp.getChannel()); - assertEquals(buffers[0], writeOp.getByteBuffers()[0]); - } - - public void testWriteIsQueuedInChannel() throws Exception { - assertFalse(writeContext.hasQueuedWriteOps()); - - ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; - writeContext.queueWriteOperations(new WriteOperation(channel, buffer, listener)); - - assertTrue(writeContext.hasQueuedWriteOps()); - } - - public void testWriteOpsCanBeCleared() throws Exception { - assertFalse(writeContext.hasQueuedWriteOps()); - - ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; - writeContext.queueWriteOperations(new WriteOperation(channel, buffer, listener)); - - assertTrue(writeContext.hasQueuedWriteOps()); - - ClosedChannelException e = new ClosedChannelException(); - writeContext.clearQueuedWriteOps(e); - - verify(selector).executeFailedListener(listener, e); - - assertFalse(writeContext.hasQueuedWriteOps()); - } - - public void 
testQueuedWriteIsFlushedInFlushCall() throws Exception { - assertFalse(writeContext.hasQueuedWriteOps()); - - WriteOperation writeOperation = mock(WriteOperation.class); - writeContext.queueWriteOperations(writeOperation); - - assertTrue(writeContext.hasQueuedWriteOps()); - - when(writeOperation.isFullyFlushed()).thenReturn(true); - when(writeOperation.getListener()).thenReturn(listener); - writeContext.flushChannel(); - - verify(writeOperation).flush(); - verify(selector).executeListener(listener, null); - assertFalse(writeContext.hasQueuedWriteOps()); - } - - public void testPartialFlush() throws IOException { - assertFalse(writeContext.hasQueuedWriteOps()); - - WriteOperation writeOperation = mock(WriteOperation.class); - writeContext.queueWriteOperations(writeOperation); - - assertTrue(writeContext.hasQueuedWriteOps()); - - when(writeOperation.isFullyFlushed()).thenReturn(false); - writeContext.flushChannel(); - - verify(listener, times(0)).accept(null, null); - assertTrue(writeContext.hasQueuedWriteOps()); - } - - @SuppressWarnings("unchecked") - public void testMultipleWritesPartialFlushes() throws IOException { - assertFalse(writeContext.hasQueuedWriteOps()); - - BiConsumer listener2 = mock(BiConsumer.class); - WriteOperation writeOperation1 = mock(WriteOperation.class); - WriteOperation writeOperation2 = mock(WriteOperation.class); - when(writeOperation1.getListener()).thenReturn(listener); - when(writeOperation2.getListener()).thenReturn(listener2); - writeContext.queueWriteOperations(writeOperation1); - writeContext.queueWriteOperations(writeOperation2); - - assertTrue(writeContext.hasQueuedWriteOps()); - - when(writeOperation1.isFullyFlushed()).thenReturn(true); - when(writeOperation2.isFullyFlushed()).thenReturn(false); - writeContext.flushChannel(); - - verify(selector).executeListener(listener, null); - verify(listener2, times(0)).accept(null, null); - assertTrue(writeContext.hasQueuedWriteOps()); - - when(writeOperation2.isFullyFlushed()).thenReturn(true); - - writeContext.flushChannel(); - - verify(selector).executeListener(listener2, null); - assertFalse(writeContext.hasQueuedWriteOps()); - } - - public void testWhenIOExceptionThrownListenerIsCalled() throws IOException { - assertFalse(writeContext.hasQueuedWriteOps()); - - WriteOperation writeOperation = mock(WriteOperation.class); - writeContext.queueWriteOperations(writeOperation); - - assertTrue(writeContext.hasQueuedWriteOps()); - - IOException exception = new IOException(); - when(writeOperation.flush()).thenThrow(exception); - when(writeOperation.getListener()).thenReturn(listener); - expectThrows(IOException.class, () -> writeContext.flushChannel()); - - verify(selector).executeFailedListener(listener, exception); - assertFalse(writeContext.hasQueuedWriteOps()); - } - - private byte[] generateBytes(int n) { - n += 10; - byte[] bytes = new byte[n]; - for (int i = 0; i < n; ++i) { - bytes[i] = randomByte(); - } - return bytes; - } -} diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java index c1183af4e5b2e..e3f42139fd80e 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java @@ -139,7 +139,7 @@ private static class TestChannelFactory extends ChannelFactory closeFuture = PlainActionFuture.newFuture(); 
channel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); - channel.close(); + selector.queueChannelClose(channel); closeFuture.actionGet(); diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java index 6a32b11f18b0f..dd0956458fad3 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java @@ -35,6 +35,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -66,7 +67,7 @@ public void testClose() throws Exception { CountDownLatch latch = new CountDownLatch(1); NioSocketChannel socketChannel = new DoNotCloseChannel(mock(SocketChannel.class), selector); - socketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class)); + socketChannel.setContexts(mock(ChannelContext.class), mock(BiConsumer.class)); socketChannel.addCloseListener(ActionListener.toBiConsumer(new ActionListener() { @Override public void onResponse(Void o) { @@ -86,7 +87,45 @@ public void onFailure(Exception e) { PlainActionFuture closeFuture = PlainActionFuture.newFuture(); socketChannel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); - socketChannel.close(); + selector.queueChannelClose(socketChannel); + closeFuture.actionGet(); + + assertTrue(closedRawChannel.get()); + assertFalse(socketChannel.isOpen()); + latch.await(); + assertTrue(isClosed.get()); + } + + @SuppressWarnings("unchecked") + public void testCloseContextExceptionDoesNotStopClose() throws Exception { + AtomicBoolean isClosed = new AtomicBoolean(false); + CountDownLatch latch = new CountDownLatch(1); + + IOException ioException = new IOException(); + NioSocketChannel socketChannel = new DoNotCloseChannel(mock(SocketChannel.class), selector); + ChannelContext context = mock(ChannelContext.class); + doThrow(ioException).when(context).closeFromSelector(); + socketChannel.setContexts(context, mock(BiConsumer.class)); + socketChannel.addCloseListener(ActionListener.toBiConsumer(new ActionListener() { + @Override + public void onResponse(Void o) { + isClosed.set(true); + latch.countDown(); + } + @Override + public void onFailure(Exception e) { + isClosed.set(true); + latch.countDown(); + } + })); + + assertTrue(socketChannel.isOpen()); + assertFalse(closedRawChannel.get()); + assertFalse(isClosed.get()); + + PlainActionFuture closeFuture = PlainActionFuture.newFuture(); + socketChannel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); + selector.queueChannelClose(socketChannel); closeFuture.actionGet(); assertTrue(closedRawChannel.get()); @@ -100,7 +139,7 @@ public void testConnectSucceeds() throws Exception { SocketChannel rawChannel = mock(SocketChannel.class); when(rawChannel.finishConnect()).thenReturn(true); NioSocketChannel socketChannel = new DoNotCloseChannel(rawChannel, selector); - socketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class)); + socketChannel.setContexts(mock(ChannelContext.class), mock(BiConsumer.class)); selector.scheduleForRegistration(socketChannel); PlainActionFuture connectFuture = PlainActionFuture.newFuture(); @@ -117,7 +156,7 @@ public void testConnectFails() throws Exception { SocketChannel rawChannel = 
mock(SocketChannel.class); when(rawChannel.finishConnect()).thenThrow(new ConnectException()); NioSocketChannel socketChannel = new DoNotCloseChannel(rawChannel, selector); - socketChannel.setContexts(mock(ReadContext.class), mock(WriteContext.class), mock(BiConsumer.class)); + socketChannel.setContexts(mock(ChannelContext.class), mock(BiConsumer.class)); selector.scheduleForRegistration(socketChannel); PlainActionFuture connectFuture = PlainActionFuture.newFuture(); diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java index 2898cf18d5b9d..e0f833c9051d0 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java @@ -28,8 +28,10 @@ import java.nio.channels.SelectionKey; import java.nio.channels.SocketChannel; import java.util.function.BiConsumer; +import java.util.function.Supplier; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -39,7 +41,6 @@ public class SocketEventHandlerTests extends ESTestCase { private SocketEventHandler handler; private NioSocketChannel channel; - private ReadContext readContext; private SocketChannel rawChannel; @Before @@ -50,21 +51,37 @@ public void setUpHandler() throws IOException { handler = new SocketEventHandler(logger); rawChannel = mock(SocketChannel.class); channel = new DoNotRegisterChannel(rawChannel, socketSelector); - readContext = mock(ReadContext.class); when(rawChannel.finishConnect()).thenReturn(true); - channel.setContexts(readContext, new BytesWriteContext(channel), exceptionHandler); + Supplier pageSupplier = () -> new InboundChannelBuffer.Page(ByteBuffer.allocate(1 << 14), () -> {}); + InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); + channel.setContexts(new BytesChannelContext(channel, mock(ChannelContext.ReadConsumer.class), buffer), exceptionHandler); channel.register(); channel.finishConnect(); when(socketSelector.isOnCurrentThread()).thenReturn(true); } + public void testRegisterCallsContext() throws IOException { + NioSocketChannel channel = mock(NioSocketChannel.class); + ChannelContext channelContext = mock(ChannelContext.class); + when(channel.getContext()).thenReturn(channelContext); + when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0)); + handler.handleRegistration(channel); + verify(channelContext).channelRegistered(); + } + public void testRegisterAddsOP_CONNECTAndOP_READInterest() throws IOException { handler.handleRegistration(channel); assertEquals(SelectionKey.OP_READ | SelectionKey.OP_CONNECT, channel.getSelectionKey().interestOps()); } + public void testRegisterWithPendingWritesAddsOP_CONNECTAndOP_READAndOP_WRITEInterest() throws IOException { + channel.getContext().queueWriteOperation(mock(BytesWriteOperation.class)); + handler.handleRegistration(channel); + assertEquals(SelectionKey.OP_READ | SelectionKey.OP_CONNECT | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps()); + } + public void testRegistrationExceptionCallsExceptionHandler() throws IOException { CancelledKeyException exception = new CancelledKeyException(); handler.registrationException(channel, exception); @@ -83,68 +100,76 @@ public void testConnectExceptionCallsExceptionHandler() throws IOException { 
verify(exceptionHandler).accept(channel, exception); } - public void testHandleReadDelegatesToReadContext() throws IOException { - when(readContext.read()).thenReturn(1); + public void testHandleReadDelegatesToContext() throws IOException { + NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class)); + ChannelContext context = mock(ChannelContext.class); + channel.setContexts(context, exceptionHandler); + when(context.read()).thenReturn(1); handler.handleRead(channel); - - verify(readContext).read(); + verify(context).read(); } - public void testHandleReadMarksChannelForCloseIfPeerClosed() throws IOException { - NioSocketChannel nioSocketChannel = mock(NioSocketChannel.class); - when(nioSocketChannel.getReadContext()).thenReturn(readContext); - when(readContext.read()).thenReturn(-1); - - handler.handleRead(nioSocketChannel); - - verify(nioSocketChannel).closeFromSelector(); - } - - public void testReadExceptionCallsExceptionHandler() throws IOException { + public void testReadExceptionCallsExceptionHandler() { IOException exception = new IOException(); handler.readException(channel, exception); verify(exceptionHandler).accept(channel, exception); } - @SuppressWarnings("unchecked") - public void testHandleWriteWithCompleteFlushRemovesOP_WRITEInterest() throws IOException { - SelectionKey selectionKey = channel.getSelectionKey(); - setWriteAndRead(channel); - assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps()); + public void testWriteExceptionCallsExceptionHandler() { + IOException exception = new IOException(); + handler.writeException(channel, exception); + verify(exceptionHandler).accept(channel, exception); + } - ByteBuffer[] buffers = {ByteBuffer.allocate(1)}; - channel.getWriteContext().queueWriteOperations(new WriteOperation(channel, buffers, mock(BiConsumer.class))); + public void testPostHandlingCallWillCloseTheChannelIfReady() throws IOException { + NioSocketChannel channel = mock(NioSocketChannel.class); + ChannelContext context = mock(ChannelContext.class); + when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0)); - when(rawChannel.write(buffers[0])).thenReturn(1); - handler.handleWrite(channel); + when(channel.getContext()).thenReturn(context); + when(context.selectorShouldClose()).thenReturn(true); + handler.postHandling(channel); - assertEquals(SelectionKey.OP_READ, selectionKey.interestOps()); + verify(channel).closeFromSelector(); } - @SuppressWarnings("unchecked") - public void testHandleWriteWithInCompleteFlushLeavesOP_WRITEInterest() throws IOException { - SelectionKey selectionKey = channel.getSelectionKey(); - setWriteAndRead(channel); - assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps()); - - ByteBuffer[] buffers = {ByteBuffer.allocate(1)}; - channel.getWriteContext().queueWriteOperations(new WriteOperation(channel, buffers, mock(BiConsumer.class))); + public void testPostHandlingCallWillNotCloseTheChannelIfNotReady() throws IOException { + NioSocketChannel channel = mock(NioSocketChannel.class); + ChannelContext context = mock(ChannelContext.class); + when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0)); - when(rawChannel.write(buffers[0])).thenReturn(0); - handler.handleWrite(channel); + when(channel.getContext()).thenReturn(context); + when(context.selectorShouldClose()).thenReturn(false); + handler.postHandling(channel); - assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps()); + verify(channel, 
times(0)).closeFromSelector(); } - public void testHandleWriteWithNoOpsRemovesOP_WRITEInterest() throws IOException { - SelectionKey selectionKey = channel.getSelectionKey(); - setWriteAndRead(channel); + public void testPostHandlingWillAddWriteIfNecessary() throws IOException { + NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class)); + channel.setSelectionKey(new TestSelectionKey(SelectionKey.OP_READ)); + ChannelContext context = mock(ChannelContext.class); + channel.setContexts(context, null); + + when(context.hasQueuedWriteOps()).thenReturn(true); + + assertEquals(SelectionKey.OP_READ, channel.getSelectionKey().interestOps()); + handler.postHandling(channel); assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps()); + } + + public void testPostHandlingWillRemoveWriteIfNecessary() throws IOException { + NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class)); + channel.setSelectionKey(new TestSelectionKey(SelectionKey.OP_READ | SelectionKey.OP_WRITE)); + ChannelContext context = mock(ChannelContext.class); + channel.setContexts(context, null); - handler.handleWrite(channel); + when(context.hasQueuedWriteOps()).thenReturn(false); - assertEquals(SelectionKey.OP_READ, selectionKey.interestOps()); + assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps()); + handler.postHandling(channel); + assertEquals(SelectionKey.OP_READ, channel.getSelectionKey().interestOps()); } private void setWriteAndRead(NioChannel channel) { @@ -152,10 +177,4 @@ private void setWriteAndRead(NioChannel channel) { SelectionKeyUtils.removeConnectInterested(channel); SelectionKeyUtils.setWriteInterested(channel); } - - public void testWriteExceptionCallsExceptionHandler() throws IOException { - IOException exception = new IOException(); - handler.writeException(channel, exception); - verify(exceptionHandler).accept(channel, exception); - } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java index e50da352623b5..9197fe38dbc0a 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java @@ -49,7 +49,7 @@ public class SocketSelectorTests extends ESTestCase { private SocketEventHandler eventHandler; private NioSocketChannel channel; private TestSelectionKey selectionKey; - private WriteContext writeContext; + private ChannelContext channelContext; private BiConsumer listener; private ByteBuffer[] buffers = {ByteBuffer.allocate(1)}; private Selector rawSelector; @@ -60,7 +60,7 @@ public void setUp() throws Exception { super.setUp(); eventHandler = mock(SocketEventHandler.class); channel = mock(NioSocketChannel.class); - writeContext = mock(WriteContext.class); + channelContext = mock(ChannelContext.class); listener = mock(BiConsumer.class); selectionKey = new TestSelectionKey(0); selectionKey.attach(channel); @@ -71,7 +71,7 @@ public void setUp() throws Exception { when(channel.isOpen()).thenReturn(true); when(channel.getSelectionKey()).thenReturn(selectionKey); - when(channel.getWriteContext()).thenReturn(writeContext); + when(channel.getContext()).thenReturn(channelContext); when(channel.isConnectComplete()).thenReturn(true); when(channel.getSelector()).thenReturn(socketSelector); } @@ -129,75 +129,71 @@ public 
void testConnectIncompleteWillNotNotify() throws Exception { public void testQueueWriteWhenNotRunning() throws Exception { socketSelector.close(); - socketSelector.queueWrite(new WriteOperation(channel, buffers, listener)); + socketSelector.queueWrite(new BytesWriteOperation(channel, buffers, listener)); verify(listener).accept(isNull(Void.class), any(ClosedSelectorException.class)); } - public void testQueueWriteChannelIsNoLongerWritable() throws Exception { - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + public void testQueueWriteChannelIsClosed() throws Exception { + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); socketSelector.queueWrite(writeOperation); - when(channel.isWritable()).thenReturn(false); + when(channel.isOpen()).thenReturn(false); socketSelector.preSelect(); - verify(writeContext, times(0)).queueWriteOperations(writeOperation); + verify(channelContext, times(0)).queueWriteOperation(writeOperation); verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); } public void testQueueWriteSelectionKeyThrowsException() throws Exception { SelectionKey selectionKey = mock(SelectionKey.class); - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); CancelledKeyException cancelledKeyException = new CancelledKeyException(); socketSelector.queueWrite(writeOperation); - when(channel.isWritable()).thenReturn(true); when(channel.getSelectionKey()).thenReturn(selectionKey); when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException); socketSelector.preSelect(); - verify(writeContext, times(0)).queueWriteOperations(writeOperation); + verify(channelContext, times(0)).queueWriteOperation(writeOperation); verify(listener).accept(null, cancelledKeyException); } public void testQueueWriteSuccessful() throws Exception { - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); socketSelector.queueWrite(writeOperation); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); - when(channel.isWritable()).thenReturn(true); socketSelector.preSelect(); - verify(writeContext).queueWriteOperations(writeOperation); + verify(channelContext).queueWriteOperation(writeOperation); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0); } public void testQueueDirectlyInChannelBufferSuccessful() throws Exception { - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); - when(channel.isWritable()).thenReturn(true); socketSelector.queueWriteInChannelBuffer(writeOperation); - verify(writeContext).queueWriteOperations(writeOperation); + verify(channelContext).queueWriteOperation(writeOperation); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0); } public void testQueueDirectlyInChannelBufferSelectionKeyThrowsException() throws Exception { SelectionKey selectionKey = mock(SelectionKey.class); - WriteOperation writeOperation = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOperation = new BytesWriteOperation(channel, buffers, listener); CancelledKeyException cancelledKeyException = new CancelledKeyException(); 
- when(channel.isWritable()).thenReturn(true); when(channel.getSelectionKey()).thenReturn(selectionKey); when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException); socketSelector.queueWriteInChannelBuffer(writeOperation); - verify(writeContext, times(0)).queueWriteOperations(writeOperation); + verify(channelContext, times(0)).queueWriteOperation(writeOperation); verify(listener).accept(null, cancelledKeyException); } @@ -285,6 +281,16 @@ public void testReadEventWithException() throws Exception { verify(eventHandler).readException(channel, ioException); } + public void testWillCallPostHandleAfterChannelHandling() throws Exception { + selectionKey.setReadyOps(SelectionKey.OP_WRITE | SelectionKey.OP_READ); + + socketSelector.processKey(selectionKey); + + verify(eventHandler).handleWrite(channel); + verify(eventHandler).handleRead(channel); + verify(eventHandler).postHandling(channel); + } + public void testCleanup() throws Exception { NioSocketChannel unRegisteredChannel = mock(NioSocketChannel.class); @@ -292,7 +298,7 @@ public void testCleanup() throws Exception { socketSelector.preSelect(); - socketSelector.queueWrite(new WriteOperation(mock(NioSocketChannel.class), buffers, listener)); + socketSelector.queueWrite(new BytesWriteOperation(mock(NioSocketChannel.class), buffers, listener)); socketSelector.scheduleForRegistration(unRegisteredChannel); TestSelectionKey testSelectionKey = new TestSelectionKey(0); diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/WriteOperationTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/WriteOperationTests.java index da74269b8253a..59fb9cde4389c 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/WriteOperationTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/WriteOperationTests.java @@ -45,71 +45,58 @@ public void setFields() { } - public void testFlush() throws IOException { + public void testFullyFlushedMarker() { ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - WriteOperation writeOp = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOp = new BytesWriteOperation(channel, buffers, listener); - - when(channel.write(any(ByteBuffer[].class))).thenReturn(10); - - writeOp.flush(); + writeOp.incrementIndex(10); assertTrue(writeOp.isFullyFlushed()); } - public void testPartialFlush() throws IOException { + public void testPartiallyFlushedMarker() { ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - WriteOperation writeOp = new WriteOperation(channel, buffers, listener); - - when(channel.write(any(ByteBuffer[].class))).thenReturn(5); + BytesWriteOperation writeOp = new BytesWriteOperation(channel, buffers, listener); - writeOp.flush(); + writeOp.incrementIndex(5); assertFalse(writeOp.isFullyFlushed()); } public void testMultipleFlushesWithCompositeBuffer() throws IOException { ByteBuffer[] buffers = {ByteBuffer.allocate(10), ByteBuffer.allocate(15), ByteBuffer.allocate(3)}; - WriteOperation writeOp = new WriteOperation(channel, buffers, listener); + BytesWriteOperation writeOp = new BytesWriteOperation(channel, buffers, listener); ArgumentCaptor buffersCaptor = ArgumentCaptor.forClass(ByteBuffer[].class); - when(channel.write(buffersCaptor.capture())).thenReturn(5) - .thenReturn(5) - .thenReturn(2) - .thenReturn(15) - .thenReturn(1); - - writeOp.flush(); - assertFalse(writeOp.isFullyFlushed()); - writeOp.flush(); + writeOp.incrementIndex(5); assertFalse(writeOp.isFullyFlushed()); - writeOp.flush(); - 
assertFalse(writeOp.isFullyFlushed()); - writeOp.flush(); - assertFalse(writeOp.isFullyFlushed()); - writeOp.flush(); - assertTrue(writeOp.isFullyFlushed()); - - List values = buffersCaptor.getAllValues(); - ByteBuffer[] byteBuffers = values.get(0); - assertEquals(3, byteBuffers.length); - assertEquals(10, byteBuffers[0].remaining()); - - byteBuffers = values.get(1); + ByteBuffer[] byteBuffers = writeOp.getBuffersToWrite(); assertEquals(3, byteBuffers.length); assertEquals(5, byteBuffers[0].remaining()); - byteBuffers = values.get(2); + writeOp.incrementIndex(5); + assertFalse(writeOp.isFullyFlushed()); + byteBuffers = writeOp.getBuffersToWrite(); assertEquals(2, byteBuffers.length); assertEquals(15, byteBuffers[0].remaining()); - byteBuffers = values.get(3); + writeOp.incrementIndex(2); + assertFalse(writeOp.isFullyFlushed()); + byteBuffers = writeOp.getBuffersToWrite(); assertEquals(2, byteBuffers.length); assertEquals(13, byteBuffers[0].remaining()); - byteBuffers = values.get(4); + writeOp.incrementIndex(15); + assertFalse(writeOp.isFullyFlushed()); + byteBuffers = writeOp.getBuffersToWrite(); assertEquals(1, byteBuffers.length); assertEquals(1, byteBuffers[0].remaining()); + + writeOp.incrementIndex(1); + assertTrue(writeOp.isFullyFlushed()); + byteBuffers = writeOp.getBuffersToWrite(); + assertEquals(1, byteBuffers.length); + assertEquals(0, byteBuffers[0].remaining()); } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index 9917bf79f593b..d25d3c5974ad8 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -33,13 +33,12 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.nio.AcceptingSelector; import org.elasticsearch.nio.AcceptorEventHandler; -import org.elasticsearch.nio.BytesReadContext; -import org.elasticsearch.nio.BytesWriteContext; +import org.elasticsearch.nio.BytesChannelContext; +import org.elasticsearch.nio.ChannelContext; import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.nio.ReadContext; import org.elasticsearch.nio.SocketEventHandler; import org.elasticsearch.nio.SocketSelector; import org.elasticsearch.threadpool.ThreadPool; @@ -72,12 +71,12 @@ public class NioTransport extends TcpTransport { public static final Setting NIO_ACCEPTOR_COUNT = intSetting("transport.nio.acceptor_count", 1, 1, Setting.Property.NodeScope); - private final PageCacheRecycler pageCacheRecycler; + protected final PageCacheRecycler pageCacheRecycler; private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); private volatile NioGroup nioGroup; private volatile TcpChannelFactory clientChannelFactory; - NioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, + protected NioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { super("nio", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService); @@ -111,13 +110,13 @@ protected void doStart() { 
NioTransport.NIO_WORKER_COUNT.get(settings), SocketEventHandler::new); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); - clientChannelFactory = new TcpChannelFactory(clientProfileSettings); + clientChannelFactory = channelFactory(clientProfileSettings, true); if (useNetworkServer) { // loop through all profiles and start them up, special handling for default one for (ProfileSettings profileSettings : profileSettings) { String profileName = profileSettings.profileName; - TcpChannelFactory factory = new TcpChannelFactory(profileSettings); + TcpChannelFactory factory = channelFactory(profileSettings, false); profileToChannelFactory.putIfAbsent(profileName, factory); bindServer(profileSettings); } @@ -144,19 +143,30 @@ protected void stopInternal() { profileToChannelFactory.clear(); } - private void exceptionCaught(NioSocketChannel channel, Exception exception) { + protected void exceptionCaught(NioSocketChannel channel, Exception exception) { onException((TcpChannel) channel, exception); } - private void acceptChannel(NioSocketChannel channel) { + protected void acceptChannel(NioSocketChannel channel) { serverAcceptedChannel((TcpNioSocketChannel) channel); } - private class TcpChannelFactory extends ChannelFactory { + protected TcpChannelFactory channelFactory(ProfileSettings settings, boolean isClient) { + return new TcpChannelFactoryImpl(settings); + } + + protected abstract class TcpChannelFactory extends ChannelFactory { + + protected TcpChannelFactory(RawChannelFactory rawChannelFactory) { + super(rawChannelFactory); + } + } + + private class TcpChannelFactoryImpl extends TcpChannelFactory { private final String profileName; - TcpChannelFactory(TcpTransport.ProfileSettings profileSettings) { + private TcpChannelFactoryImpl(ProfileSettings profileSettings) { super(new RawChannelFactory(profileSettings.tcpNoDelay, profileSettings.tcpKeepAlive, profileSettings.reuseAddress, @@ -172,10 +182,10 @@ public TcpNioSocketChannel createChannel(SocketSelector selector, SocketChannel Recycler.V bytes = pageCacheRecycler.bytePage(false); return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close); }; - ReadContext.ReadConsumer nioReadConsumer = channelBuffer -> + ChannelContext.ReadConsumer nioReadConsumer = channelBuffer -> consumeNetworkReads(nioChannel, BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()))); - BytesReadContext readContext = new BytesReadContext(nioChannel, nioReadConsumer, new InboundChannelBuffer(pageSupplier)); - nioChannel.setContexts(readContext, new BytesWriteContext(nioChannel), NioTransport.this::exceptionCaught); + BytesChannelContext context = new BytesChannelContext(nioChannel, nioReadConsumer, new InboundChannelBuffer(pageSupplier)); + nioChannel.setContexts(context, NioTransport.this::exceptionCaught); return nioChannel; } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java index 7f657c763486d..f0d01bf5a7da6 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java @@ -38,7 +38,7 @@ public class TcpNioServerSocketChannel extends NioServerSocketChannel implements private final String profile; - TcpNioServerSocketChannel(String profile, ServerSocketChannel socketChannel, + 
public TcpNioServerSocketChannel(String profile, ServerSocketChannel socketChannel, ChannelFactory channelFactory, AcceptingSelector selector) throws IOException { super(socketChannel, channelFactory, selector); @@ -60,6 +60,11 @@ public InetSocketAddress getRemoteAddress() { return null; } + @Override + public void close() { + getSelector().queueChannelClose(this); + } + @Override public String getProfile() { return profile; diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java index 5633899a04b9f..c2064e53ca64f 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java @@ -33,13 +33,13 @@ public class TcpNioSocketChannel extends NioSocketChannel implements TcpChannel private final String profile; - TcpNioSocketChannel(String profile, SocketChannel socketChannel, SocketSelector selector) throws IOException { + public TcpNioSocketChannel(String profile, SocketChannel socketChannel, SocketSelector selector) throws IOException { super(socketChannel, selector); this.profile = profile; } public void sendMessage(BytesReference reference, ActionListener listener) { - getWriteContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); + getContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); } @Override @@ -59,6 +59,11 @@ public void addCloseListener(ActionListener listener) { addCloseListener(ActionListener.toBiConsumer(listener)); } + @Override + public void close() { + getContext().closeChannel(); + } + @Override public String toString() { return "TcpNioSocketChannel{" + diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index a8876453b5b2f..c5ec4c6bfb7aa 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -31,14 +31,13 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.nio.AcceptingSelector; import org.elasticsearch.nio.AcceptorEventHandler; -import org.elasticsearch.nio.BytesReadContext; -import org.elasticsearch.nio.BytesWriteContext; +import org.elasticsearch.nio.BytesChannelContext; +import org.elasticsearch.nio.ChannelContext; import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.nio.ReadContext; import org.elasticsearch.nio.SocketSelector; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TcpChannel; @@ -162,11 +161,10 @@ public MockSocketChannel createChannel(SocketSelector selector, SocketChannel ch Recycler.V bytes = pageCacheRecycler.bytePage(false); return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close); }; - ReadContext.ReadConsumer nioReadConsumer = channelBuffer -> + ChannelContext.ReadConsumer nioReadConsumer = channelBuffer -> consumeNetworkReads(nioChannel, 
BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()))); - BytesReadContext readContext = new BytesReadContext(nioChannel, nioReadConsumer, new InboundChannelBuffer(pageSupplier)); - BytesWriteContext writeContext = new BytesWriteContext(nioChannel); - nioChannel.setContexts(readContext, writeContext, MockNioTransport.this::exceptionCaught); + BytesChannelContext context = new BytesChannelContext(nioChannel, nioReadConsumer, new InboundChannelBuffer(pageSupplier)); + nioChannel.setContexts(context, MockNioTransport.this::exceptionCaught); return nioChannel; } @@ -188,6 +186,11 @@ private static class MockServerChannel extends NioServerSocketChannel implements this.profile = profile; } + @Override + public void close() { + getSelector().queueChannelClose(this); + } + @Override public String getProfile() { return profile; @@ -224,6 +227,11 @@ private MockSocketChannel(String profile, java.nio.channels.SocketChannel socket this.profile = profile; } + @Override + public void close() { + getContext().closeChannel(); + } + @Override public String getProfile() { return profile; @@ -243,7 +251,7 @@ public void setSoLinger(int value) throws IOException { @Override public void sendMessage(BytesReference reference, ActionListener listener) { - getWriteContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); + getContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); } } } From 1335232e6b602c370df237f353f38e23bcfd35f4 Mon Sep 17 00:00:00 2001 From: Tony Zeng Date: Wed, 17 Jan 2018 11:04:04 -0600 Subject: [PATCH 43/94] Add toString() implementation for UpdateRequest (#27997) --- .../action/update/UpdateRequest.java | 25 +++++++++++++++++++ .../action/update/UpdateRequestTests.java | 12 +++++++++ 2 files changed, 37 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index fa8c46edf5b7e..9bfb78f5058b4 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.update; +import java.util.Arrays; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -893,4 +894,28 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } + + @Override + public String toString() { + StringBuilder res = new StringBuilder() + .append("update {[").append(index) + .append("][").append(type) + .append("][").append(id).append("]"); + res.append(", doc_as_upsert[").append(docAsUpsert).append("]"); + if (doc != null) { + res.append(", doc[").append(doc).append("]"); + } + if (script != null) { + res.append(", script[").append(script).append("]"); + } + if (upsertRequest != null) { + res.append(", upsert[").append(upsertRequest).append("]"); + } + res.append(", scripted_upsert[").append(scriptedUpsert).append("]"); + res.append(", detect_noop[").append(detectNoop).append("]"); + if (fields != null) { + res.append(", fields[").append(Arrays.toString(fields)).append("]"); + } + return res.append("}").toString(); + } } diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java 
b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index 7049d0fa9e98e..36266026504a9 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -639,4 +639,16 @@ public void testUpdateScript() throws Exception { assertThat(result.action(), instanceOf(UpdateResponse.class)); assertThat(result.getResponseResult(), equalTo(DocWriteResponse.Result.NOOP)); } + + public void testToString() throws IOException { + UpdateRequest request = new UpdateRequest("test", "type1", "1") + .script(mockInlineScript("ctx._source.body = \"foo\"")); + assertThat(request.toString(), equalTo("update {[test][type1][1], doc_as_upsert[false], " + + "script[Script{type=inline, lang='mock', idOrCode='ctx._source.body = \"foo\"', options={}, params={}}], " + + "scripted_upsert[false], detect_noop[true]}")); + request = new UpdateRequest("test", "type1", "1").fromXContent( + createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"bar\"}}"))); + assertThat(request.toString(), equalTo("update {[test][type1][1], doc_as_upsert[false], " + + "doc[index {[null][null][null], source[{\"body\":\"bar\"}]}], scripted_upsert[false], detect_noop[true]}")); + } } From c122a6d4a09a49bf152da850baabf2e491f7d366 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Wed, 17 Jan 2018 15:01:02 -0800 Subject: [PATCH 44/94] remove recommended junit intellij setting change (#28274) The "Shorten Command Line" setting in IntelliJ's JUnit test configuration was previously recommended to be changed to `classpath file`. This setting has been causing issues with JDK9 where some modules were not being found at runtime. This PR removes the recommendation to change this setting and instead asks that users verify that it is set to `user-local default: none`. --- CONTRIBUTING.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 30e8261c87427..985c70a39a091 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -126,8 +126,8 @@ Alternatively, `idea.no.launcher=true` can be set in the [`idea.properties`](https://www.jetbrains.com/help/idea/file-idea-properties.html) file which can be accessed under Help > Edit Custom Properties (this will require a restart of IDEA). For IDEA 2017.3 and above, in addition to the JVM option, you will need to go to -`Run->Edit Configurations->...->Defaults->JUnit` and change the value for the `Shorten command line` setting from -`user-local default: none` to `classpath file`. You may also need to [remove `ant-javafx.jar` from your +`Run->Edit Configurations->...->Defaults->JUnit` and verify that the `Shorten command line` setting is set to +`user-local default: none`. You may also need to [remove `ant-javafx.jar` from your classpath](https://github.com/elastic/elasticsearch/issues/14348) if that is reported as a source of jar hell. From 6b0036e0e168c24e3ef5752430545f2786f83609 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 17 Jan 2018 21:57:03 -0500 Subject: [PATCH 45/94] Add client actions to action plugin This commit adds an extension point for client actions to action plugins. This is useful for plugins to expose the client-side actions without exposing the server-side implementations to the client. The default implementation simply extracts the client-side action from each server-side implementation.
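A minimal sketch of how a plugin might use the new hook (the plugin class and the MyAction singleton below are hypothetical, not part of this change):

    import java.util.Collections;
    import java.util.List;
    import org.elasticsearch.action.GenericAction;
    import org.elasticsearch.plugins.ActionPlugin;
    import org.elasticsearch.plugins.Plugin;

    public class MyClientSidePlugin extends Plugin implements ActionPlugin {
        @Override
        public List<GenericAction> getClientActions() {
            // Expose only the client-side action singleton; no server-side
            // TransportAction implementation is referenced here.
            return Collections.singletonList(MyAction.INSTANCE);
        }
    }

Overriding the hook this way keeps a transport client's classpath free of the server-side action implementations.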
Relates #28280 --- .../client/transport/TransportClient.java | 13 +++++++++++-- .../org/elasticsearch/plugins/ActionPlugin.java | 10 ++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java index dc0f7b015632e..c9c575df724df 100644 --- a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.GenericAction; import org.elasticsearch.client.support.AbstractClient; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -195,8 +196,16 @@ private static ClientTemplate buildTemplate(Settings providedSettings, Settings final TransportClientNodesService nodesService = new TransportClientNodesService(settings, transportService, threadPool, failureListner == null ? (t, e) -> {} : failureListner); - final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService, - actionModule.getActions().values().stream().map(x -> x.getAction()).collect(Collectors.toList())); + + // construct the list of client actions + final List actionPlugins = pluginsService.filterPlugins(ActionPlugin.class); + final List clientActions = + actionPlugins.stream().flatMap(p -> p.getClientActions().stream()).collect(Collectors.toList()); + // add all the base actions + final List> baseActions = + actionModule.getActions().values().stream().map(ActionPlugin.ActionHandler::getAction).collect(Collectors.toList()); + clientActions.addAll(baseActions); + final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService, clientActions); List pluginLifecycleComponents = new ArrayList<>(pluginsService.getGuiceServiceClasses().stream() .map(injector::getInstance).collect(Collectors.toList())); diff --git a/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java b/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java index 41f0ed86116ad..7454d74349ea6 100644 --- a/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java @@ -42,6 +42,7 @@ import java.util.Objects; import java.util.function.Supplier; import java.util.function.UnaryOperator; +import java.util.stream.Collectors; /** * An additional extension point for {@link Plugin}s that extends Elasticsearch's scripting functionality. Implement it like this: @@ -62,6 +63,15 @@ public interface ActionPlugin { default List> getActions() { return Collections.emptyList(); } + + /** + * Client actions added by this plugin. This defaults to all of the {@linkplain GenericAction} in + * {@linkplain ActionPlugin#getActions()}. + */ + default List getClientActions() { + return getActions().stream().map(a -> a.action).collect(Collectors.toList()); + } + /** * Action filters added by this plugin. */ From cefea1a7c994e68553938ead2564f2bb1fa88588 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 17 Jan 2018 19:47:37 -0800 Subject: [PATCH 46/94] Build: Add gradle plugin for configuring meta plugin (#28276) This commit adds a gradle plugin to ease development of meta plugins. 
Applying the plugin will generate the meta plugin properties based on the es_meta_plugin configuration object, which includes name and description. The plugins to include within the meta plugin are configured through the `plugins` list. An integ test task is also automatically added. --- .../plugin/MetaPluginBuildPlugin.groovy | 82 +++++++++++++++++++ .../MetaPluginPropertiesExtension.groovy | 46 +++++++++++ .../plugin/MetaPluginPropertiesTask.groovy | 68 +++++++++++++++ .../gradle/test/ClusterFormationTasks.groovy | 5 +- .../elasticsearch.es-meta-plugin.properties | 20 +++++ plugins/examples/meta-plugin/build.gradle | 38 ++------- 6 files changed, 224 insertions(+), 35 deletions(-) create mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy create mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesExtension.groovy create mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesTask.groovy create mode 100644 buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.es-meta-plugin.properties diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy new file mode 100644 index 0000000000000..4e02d22398660 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gradle.plugin + +import org.elasticsearch.gradle.test.RestIntegTestTask +import org.elasticsearch.gradle.test.RestTestPlugin +import org.elasticsearch.gradle.test.StandaloneRestTestPlugin +import org.gradle.api.Plugin +import org.gradle.api.Project +import org.gradle.api.file.FileCopyDetails +import org.gradle.api.file.RelativePath +import org.gradle.api.tasks.bundling.Zip + +class MetaPluginBuildPlugin implements Plugin { + + @Override + void apply(Project project) { + project.plugins.apply(StandaloneRestTestPlugin) + project.plugins.apply(RestTestPlugin) + + createBundleTask(project) + + project.integTestCluster { + dependsOn(project.bundlePlugin) + distribution = 'zip' + setupCommand 'installMetaPlugin', + 'bin/elasticsearch-plugin', 'install', 'file:' + project.bundlePlugin.archivePath + } + } + + private static void createBundleTask(Project project) { + + MetaPluginPropertiesTask buildProperties = project.tasks.create('pluginProperties', MetaPluginPropertiesTask.class) + + // create the actual bundle task, which zips up all the files for the plugin + Zip bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [buildProperties]) { + into('elasticsearch') { + from(buildProperties.descriptorOutput.parentFile) { + // plugin properties file + include(buildProperties.descriptorOutput.name) + } + } + + } + project.assemble.dependsOn(bundle) + + // a super hacky way to inject code to run at the end of each of the bundled plugin's configuration + // to add itself back to this meta plugin zip + project.afterEvaluate { + buildProperties.extension.plugins.each { String bundledPluginProjectName -> + Project bundledPluginProject = project.project(bundledPluginProjectName) + bundledPluginProject.afterEvaluate { + bundle.configure { + dependsOn bundledPluginProject.bundlePlugin + from(project.zipTree(bundledPluginProject.bundlePlugin.outputs.files.singleFile)) { + eachFile { FileCopyDetails details -> + details.relativePath = new RelativePath(true, 'elasticsearch', bundledPluginProjectName, details.name) + } + } + } + } + } + } + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesExtension.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesExtension.groovy new file mode 100644 index 0000000000000..e5d84002e533f --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesExtension.groovy @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gradle.plugin + +import org.gradle.api.Project +import org.gradle.api.tasks.Input + +/** + * A container for meta plugin properties that will be written to the meta plugin descriptor, for easy + * manipulation in the gradle DSL. + */ +class MetaPluginPropertiesExtension { + @Input + String name + + @Input + String description + + /** + * The plugins this meta plugin wraps. + * Note this is not written to the plugin descriptor, but used to setup the final zip file task. + */ + @Input + List plugins + + MetaPluginPropertiesExtension(Project project) { + name = project.name + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesTask.groovy new file mode 100644 index 0000000000000..e868cc2cc3128 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesTask.groovy @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gradle.plugin + +import org.gradle.api.InvalidUserDataException +import org.gradle.api.Task +import org.gradle.api.tasks.Copy +import org.gradle.api.tasks.OutputFile + +class MetaPluginPropertiesTask extends Copy { + + MetaPluginPropertiesExtension extension + + @OutputFile + File descriptorOutput = new File(project.buildDir, 'generated-resources/meta-plugin-descriptor.properties') + + MetaPluginPropertiesTask() { + File templateFile = new File(project.buildDir, "templates/${descriptorOutput.name}") + Task copyPluginPropertiesTemplate = project.tasks.create('copyPluginPropertiesTemplate') { + doLast { + InputStream resourceTemplate = PluginPropertiesTask.getResourceAsStream("/${descriptorOutput.name}") + templateFile.parentFile.mkdirs() + templateFile.setText(resourceTemplate.getText('UTF-8'), 'UTF-8') + } + } + + dependsOn(copyPluginPropertiesTemplate) + extension = project.extensions.create('es_meta_plugin', MetaPluginPropertiesExtension, project) + project.afterEvaluate { + // check require properties are set + if (extension.name == null) { + throw new InvalidUserDataException('name is a required setting for es_meta_plugin') + } + if (extension.description == null) { + throw new InvalidUserDataException('description is a required setting for es_meta_plugin') + } + // configure property substitution + from(templateFile.parentFile).include(descriptorOutput.name) + into(descriptorOutput.parentFile) + Map properties = generateSubstitutions() + expand(properties) + inputs.properties(properties) + } + } + + Map generateSubstitutions() { + return ['name': extension.name, + 'description': extension.description + ] + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index a64c39171a204..d39de58382520 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -23,6 +23,7 @@ import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties +import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.elasticsearch.gradle.plugin.PluginPropertiesExtension import org.gradle.api.AntBuilder @@ -753,9 +754,9 @@ class ClusterFormationTasks { } static void verifyProjectHasBuildPlugin(String name, String version, Project project, Project pluginProject) { - if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) { + if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false && pluginProject.plugins.hasPlugin(MetaPluginBuildPlugin) == false) { throw new GradleException("Task [${name}] cannot add plugin [${pluginProject.path}] with version [${version}] to project's " + - "[${project.path}] dependencies: the plugin is not an esplugin") + "[${project.path}] dependencies: the plugin is not an esplugin or es_meta_plugin") } } } diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.es-meta-plugin.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.es-meta-plugin.properties new file mode 100644 index 0000000000000..50240e95416c7 --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.es-meta-plugin.properties @@ -0,0 +1,20 @@ +# +# Licensed 
to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +implementation-class=org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin diff --git a/plugins/examples/meta-plugin/build.gradle b/plugins/examples/meta-plugin/build.gradle index 3674837b0b2f9..db28e6378713e 100644 --- a/plugins/examples/meta-plugin/build.gradle +++ b/plugins/examples/meta-plugin/build.gradle @@ -18,39 +18,11 @@ */ // A meta plugin packaging example that bundles multiple plugins in a single zip. -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' -File plugins = new File(buildDir, 'plugins-unzip') -subprojects { - // unzip the subproject plugins - task unzip(type:Copy, dependsOn: "${project.path}:bundlePlugin") { - File dest = new File(plugins, project.name) - from { zipTree(project(project.path).bundlePlugin.outputs.files.singleFile) } - eachFile { f -> f.path = f.path.replaceFirst('elasticsearch', '') } - into dest - } -} - -// Build the meta plugin zip from the subproject plugins (unzipped) -task buildZip(type:Zip) { - subprojects.each { dependsOn("${it.name}:unzip") } - from plugins - from 'src/main/resources/meta-plugin-descriptor.properties' - into 'elasticsearch' - includeEmptyDirs false -} - -integTestCluster { - dependsOn buildZip - - // This is important, so that all the modules are available too. - // There are index templates that use token filters that are in analysis-module and - // processors are being used that are in ingest-common module. - distribution = 'zip' +apply plugin: 'elasticsearch.es-meta-plugin' - // Install the meta plugin before start. - setupCommand 'installMetaPlugin', - 'bin/elasticsearch-plugin', 'install', 'file:' + buildZip.archivePath +es_meta_plugin { + name 'meta-plugin' + description 'example meta plugin' + plugins = ['dummy-plugin1', 'dummy-plugin2'] } -check.dependsOn integTest From 943542384437e30ecebbd1e87d48045ca11fe484 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 17 Jan 2018 19:54:43 -0800 Subject: [PATCH 47/94] Build: Automatically add projects under libs, qa, modules and plugins (#28279) This commit lessens the burden on configuring settings.gradle when new projects are added. In particular, this makes it trivial to move a plugin to a module (or vice versa). 
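As an illustration (the module name here is hypothetical): under this scheme a new module needs only its own directory and build file,

    modules/my-new-module/build.gradle   ->  discovered automatically as project ':modules:my-new-module'

and moving that directory under plugins/ re-homes it as ':plugins:my-new-module' without touching settings.gradle.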
--- libs/build.gradle | 0 qa/build.gradle | 0 settings.gradle | 156 ++++++++++++++-------------------------------- 3 files changed, 46 insertions(+), 110 deletions(-) create mode 100644 libs/build.gradle create mode 100644 qa/build.gradle diff --git a/libs/build.gradle b/libs/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/qa/build.gradle b/qa/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/settings.gradle b/settings.gradle index 46ecb3dad1c97..c135b431a2370 100644 --- a/settings.gradle +++ b/settings.gradle @@ -27,78 +27,56 @@ List projects = [ 'test:fixtures:hdfs-fixture', 'test:fixtures:krb5kdc-fixture', 'test:fixtures:old-elasticsearch', - 'test:logger-usage', - 'libs:elasticsearch-core', - 'libs:elasticsearch-nio', - 'modules:aggs-matrix-stats', - 'modules:analysis-common', - 'modules:ingest-common', - 'modules:lang-expression', - 'modules:lang-mustache', - 'modules:lang-painless', - 'modules:mapper-extras', - 'modules:parent-join', - 'modules:percolator', - 'modules:rank-eval', - 'modules:reindex', - 'modules:repository-url', - 'modules:transport-netty4', - 'modules:tribe', - 'plugins:analysis-icu', - 'plugins:analysis-kuromoji', - 'plugins:analysis-phonetic', - 'plugins:analysis-smartcn', - 'plugins:analysis-stempel', - 'plugins:analysis-ukrainian', - 'plugins:discovery-azure-classic', - 'plugins:discovery-ec2', - 'plugins:discovery-file', - 'plugins:discovery-gce', - 'plugins:ingest-geoip', - 'plugins:ingest-attachment', - 'plugins:ingest-user-agent', - 'plugins:mapper-murmur3', - 'plugins:mapper-size', - 'plugins:repository-azure', - 'plugins:repository-gcs', - 'plugins:repository-hdfs', - 'plugins:repository-s3', - 'plugins:jvm-example', - 'plugins:store-smb', - 'plugins:transport-nio', - 'qa:auto-create-index', - 'qa:ccs-unavailable-clusters', - 'qa:evil-tests', - 'qa:full-cluster-restart', - 'qa:integration-bwc', - 'qa:mixed-cluster', - 'qa:multi-cluster-search', - 'qa:no-bootstrap-tests', - 'qa:reindex-from-old', - 'qa:rolling-upgrade', - 'qa:smoke-test-client', - 'qa:smoke-test-http', - 'qa:smoke-test-ingest-with-all-dependencies', - 'qa:smoke-test-ingest-disabled', - 'qa:smoke-test-multinode', - 'qa:smoke-test-rank-eval-with-mustache', - 'qa:smoke-test-plugins', - 'qa:smoke-test-reindex-with-all-modules', - 'qa:smoke-test-tribe-node', - 'qa:vagrant', - 'qa:verify-version-constants', - 'qa:wildfly', - 'qa:query-builder-bwc' + 'test:logger-usage' ] -projects.add("libs") -File libsDir = new File(rootProject.projectDir, 'libs') -for (File libDir : new File(rootProject.projectDir, 'libs').listFiles()) { - if (libDir.isDirectory() == false) continue; - if (libDir.name.startsWith('build') || libDir.name.startsWith('.')) continue; - projects.add("libs:${libDir.name}".toString()) +/** + * Iterates over sub directories, looking for build.gradle, and adds a project if found + * for that dir with the given path prefix. Note that this requires each level + * of the dir hierarchy to have a build.gradle. Otherwise we would have to iterate + * all files/directories in the source tree to find all projects. 
+ */ +void addSubProjects(String path, File dir, List projects, List branches) { + if (dir.isDirectory() == false) return; + if (dir.name == 'buildSrc') return; + if (new File(dir, 'build.gradle').exists() == false) return; + if (findProject(dir) != null) return; + + final String projectName = "${path}:${dir.name}" + include projectName + + if (dir.name == 'bwc-snapshot-dummy-projects') { + for (final String branch : branches) { + final String snapshotProjectName = "${projectName}:bwc-snapshot-${branch}" + projects.add(snapshotProjectName) + include snapshotProjectName + project("${snapshotProjectName}").projectDir = dir + } + // TODO do we want to assert that there's nothing else in the bwc directory? + } else { + if (path.isEmpty() || path.startsWith(':example-plugins')) { + project(projectName).projectDir = dir + } + for (File subdir : dir.listFiles()) { + addSubProjects(projectName, subdir, projects, branches) + } + } } +// include example plugins first, so adding plugin dirs below won't muck with :example-plugins +File examplePluginsDir = new File(rootProject.projectDir, 'plugins/examples') +for (File example : examplePluginsDir.listFiles()) { + if (example.isDirectory() == false) continue; + if (example.name.startsWith('build') || example.name.startsWith('.')) continue; + addSubProjects(':example-plugins', example, projects, []) +} +project(':example-plugins').projectDir = new File(rootProject.projectDir, 'plugins/examples') + +addSubProjects('', new File(rootProject.projectDir, 'libs'), projects, []) +addSubProjects('', new File(rootProject.projectDir, 'modules'), projects, []) +addSubProjects('', new File(rootProject.projectDir, 'plugins'), projects, []) +addSubProjects('', new File(rootProject.projectDir, 'qa'), projects, []) + /* Create projects for building BWC snapshot distributions from the heads of other branches */ final List branches = ['5.6', '6.0', '6.1', '6.x'] for (final String branch : branches) { @@ -139,47 +117,6 @@ if (isEclipse) { project(":libs:elasticsearch-nio-tests").buildFileName = 'eclipse-build.gradle' } -/** - * Iterates over sub directories, looking for build.gradle, and adds a project if found - * for that dir with the given path prefix. Note that this requires each level - * of the dir hierarchy to have a build.gradle. Otherwise we would have to iterate - * all files/directories in the source tree to find all projects. - */ -void addSubProjects(String path, File dir, List projects, List branches) { - if (dir.isDirectory() == false) return; - if (dir.name == 'buildSrc') return; - if (new File(dir, 'build.gradle').exists() == false) return; - - final String projectName = "${path}:${dir.name}" - include projectName - - if (dir.name == 'bwc-snapshot-dummy-projects') { - for (final String branch : branches) { - final String snapshotProjectName = "${projectName}:bwc-snapshot-${branch}" - projects.add(snapshotProjectName) - include snapshotProjectName - project("${snapshotProjectName}").projectDir = dir - } - // TODO do we want to assert that there's nothing else in the bwc directory? 
- } else { - if (path.isEmpty() || path.startsWith(':example-plugins')) { - project(projectName).projectDir = dir - } - for (File subdir : dir.listFiles()) { - addSubProjects(projectName, subdir, projects, branches) - } - } -} - -// include example plugins -File examplePluginsDir = new File(rootProject.projectDir, 'plugins/examples') -for (File example : examplePluginsDir.listFiles()) { - if (example.isDirectory() == false) continue; - if (example.name.startsWith('build') || example.name.startsWith('.')) continue; - addSubProjects(':example-plugins', example, projects, []) -} -project(':example-plugins').projectDir = new File(rootProject.projectDir, 'plugins/examples') - // look for extra plugins for elasticsearch File extraProjects = new File(rootProject.projectDir.parentFile, "${dirName}-extra") if (extraProjects.exists()) { @@ -187,5 +124,4 @@ if (extraProjects.exists()) { addSubProjects('', extraProjectDir, projects, branches) } } -include 'libs' From c15ae7eb201519d43c7fe1dd99755cfee1000c82 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 17 Jan 2018 20:03:11 -0800 Subject: [PATCH 48/94] Build: Add release flag to groovy compilation (#28277) The build-tools project (ie buildSrc) has groovy code. The -source option was set to java 8, but this is not correct on java 9, and actually causes a warning about not setting boot classpath. This commit adds the --release option to groovy compilation, just like is done for java compilation. --- .../main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 0c76ce4fd3a24..08eadd0cfabb7 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -43,6 +43,7 @@ import org.gradle.api.publish.maven.MavenPublication import org.gradle.api.publish.maven.plugins.MavenPublishPlugin import org.gradle.api.publish.maven.tasks.GenerateMavenPom import org.gradle.api.tasks.bundling.Jar +import org.gradle.api.tasks.compile.GroovyCompile import org.gradle.api.tasks.compile.JavaCompile import org.gradle.api.tasks.javadoc.Javadoc import org.gradle.internal.jvm.Jvm @@ -455,6 +456,11 @@ class BuildPlugin implements Plugin { // TODO: use native Gradle support for --release when available (cf. https://github.com/gradle/gradle/issues/2510) options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion } + // also apply release flag to groovy, which is used in build-tools + project.tasks.withType(GroovyCompile) { + final JavaVersion targetCompatibilityVersion = JavaVersion.toVersion(it.targetCompatibility) + options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion + } } } From ac1c509844e843ac02812d22de081bdf58b0795e Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 17 Jan 2018 21:27:59 -0800 Subject: [PATCH 49/94] Build: Fix subdirectories in meta plugins to be copied correctly (#28282) This commit fixes the copying of files from bundled plugin zips. Previously all files within each zip would be flattened under the bundled plugin name, and the original directories would exist at the top level of the plugin. 
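For illustration, the relocation can be reproduced with a standalone Copy
task; the task name, zip path, and plugin name below are made-up examples,
not the build's real configuration:

    task relocateExample(type: Copy) {
        // re-root each zip entry under elasticsearch/<bundled-plugin-name>/
        // while dropping the leading 'elasticsearch/' segment of the source path
        from(zipTree('build/distributions/dummy-plugin1.zip')) { // hypothetical zip
            eachFile { details ->
                String tail = details.relativePath.toString().replace('elasticsearch/', '')
                details.relativePath = new org.gradle.api.file.RelativePath(true,
                        'elasticsearch', 'dummy-plugin1', tail)
            }
        }
        into 'build/relocated'
        includeEmptyDirs = false // the rewrite otherwise leaves the original dirs behind empty
    }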
---
 .../gradle/plugin/MetaPluginBuildPlugin.groovy | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy
index 4e02d22398660..7300195eaffef 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy
@@ -40,8 +40,7 @@ class MetaPluginBuildPlugin implements Plugin {
         project.integTestCluster {
             dependsOn(project.bundlePlugin)
             distribution = 'zip'
-            setupCommand 'installMetaPlugin',
-                    'bin/elasticsearch-plugin', 'install', 'file:' + project.bundlePlugin.archivePath
+            setupCommand('installMetaPlugin', 'bin/elasticsearch-plugin', 'install', 'file:' + project.bundlePlugin.archivePath)
         }
     }

@@ -57,6 +56,9 @@ class MetaPluginBuildPlugin implements Plugin {
                     include(buildProperties.descriptorOutput.name)
                 }
             }
+            // due to how the renames work for each bundled plugin, we must exclude empty dirs or every subdir
+            // within bundled plugin zips will show up at the root as an empty dir
+            includeEmptyDirs = false
         }
         project.assemble.dependsOn(bundle)

@@ -71,7 +73,10 @@ class MetaPluginBuildPlugin implements Plugin {
                 dependsOn bundledPluginProject.bundlePlugin
                 from(project.zipTree(bundledPluginProject.bundlePlugin.outputs.files.singleFile)) {
                     eachFile { FileCopyDetails details ->
-                        details.relativePath = new RelativePath(true, 'elasticsearch', bundledPluginProjectName, details.name)
+                        // paths in the individual plugins begin with elasticsearch, and we want to add in the
+                        // bundled plugin name between that and each filename
+                        details.relativePath = new RelativePath(true, 'elasticsearch', bundledPluginProjectName,
+                            details.relativePath.toString().replace('elasticsearch/', ''))
                     }
                 }
             }

From defb53a0bcd664e3d378536bdae812897bf8fd88 Mon Sep 17 00:00:00 2001
From: Jim Ferenczi
Date: Thu, 18 Jan 2018 09:23:19 +0100
Subject: [PATCH 50/94] add a note regarding rescore and sort (#28251)

---
 docs/reference/search/request/rescore.asciidoc | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/docs/reference/search/request/rescore.asciidoc b/docs/reference/search/request/rescore.asciidoc
index 204960544a688..6e1bb2a9e6ce2 100644
--- a/docs/reference/search/request/rescore.asciidoc
+++ b/docs/reference/search/request/rescore.asciidoc
@@ -15,7 +15,8 @@ Currently the rescore API has only one implementation: the query rescorer, which
 uses a query to tweak the scoring. In the future, alternative rescorers may be
 made available, for example, a pair-wise rescorer.

-NOTE: the `rescore` phase is not executed when <<search-request-sort,`sort`>> is used.
+NOTE: An error will be thrown if an explicit <<search-request-sort,`sort`>> (other than `_score`)
+is provided with a `rescore` query.

 NOTE: when exposing pagination to your users, you should not change
 `window_size` as you step through each page (by passing different

From 77dcaab34ffc849d49e19f3735e00d567674c3fd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20B=C3=BCscher?=
Date: Thu, 18 Jan 2018 09:32:27 +0100
Subject: [PATCH 51/94] Simplify RankEvalResponse output (#28266)

Currently the rest response of the ranking evaluation API wraps everything
inside an enclosing `rank_eval` object. This is redundant since it is clear
from the API call and it doesn't provide any other useful information. This
change removes the wrapper.
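For a consumer of the API the difference looks roughly like this; the payload
below is an assumed example, not captured output:

    // before: fields were nested as {"rank_eval": {"quality_level": ..., ...}}
    def body = '{"quality_level": 0.5, "details": {}, "failures": {}}' // assumed payload
    def response = new groovy.json.JsonSlurper().parseText(body)
    assert response.quality_level == 0.5 // no enclosing rank_eval object anymore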
--- .../index/rankeval/RankEvalResponse.java | 2 - .../index/rankeval/RankEvalResponseTests.java | 32 +++++------ .../rest-api-spec/test/rank_eval/10_basic.yml | 56 +++++++++---------- .../rest-api-spec/test/rank_eval/20_dcg.yml | 36 ++++++------ .../test/rank_eval/30_failures.yml | 10 ++-- .../test/rank-eval/30_template.yml | 6 +- 6 files changed, 69 insertions(+), 73 deletions(-) diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java index 30ffaeff18b92..e8fe182726825 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java @@ -112,7 +112,6 @@ public void readFrom(StreamInput in) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.startObject("rank_eval"); builder.field("quality_level", evaluationResult); builder.startObject("details"); for (String key : details.keySet()) { @@ -127,7 +126,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endObject(); builder.endObject(); - builder.endObject(); return builder; } } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java index 827f7be4442e8..881b9e04709a7 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java @@ -92,23 +92,21 @@ public void testToXContent() throws IOException { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); String xContent = response.toXContent(builder, ToXContent.EMPTY_PARAMS).bytes().utf8ToString(); assertEquals(("{" + - " \"rank_eval\": {" + - " \"quality_level\": 0.123," + - " \"details\": {" + - " \"coffee_query\": {" + - " \"quality_level\": 0.1," + - " \"unknown_docs\": [{\"_index\":\"index\",\"_id\":\"456\"}]," + - " \"hits\":[{\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"123\",\"_score\":1.0}," + - " \"rating\":5}," + - " {\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"456\",\"_score\":1.0}," + - " \"rating\":null}" + - " ]" + - " }" + - " }," + - " \"failures\": {" + - " \"beer_query\": {" + - " \"error\": \"ParsingException[someMsg]\"" + - " }" + + " \"quality_level\": 0.123," + + " \"details\": {" + + " \"coffee_query\": {" + + " \"quality_level\": 0.1," + + " \"unknown_docs\": [{\"_index\":\"index\",\"_id\":\"456\"}]," + + " \"hits\":[{\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"123\",\"_score\":1.0}," + + " \"rating\":5}," + + " {\"hit\":{\"_index\":\"index\",\"_type\":\"\",\"_id\":\"456\",\"_score\":1.0}," + + " \"rating\":null}" + + " ]" + + " }" + + " }," + + " \"failures\": {" + + " \"beer_query\": {" + + " \"error\": \"ParsingException[someMsg]\"" + " }" + " }" + "}").replaceAll("\\s+", ""), xContent); diff --git a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml index a81df5fa3fafd..2eab6e47e7ff2 100644 --- a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml +++ 
b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml @@ -64,27 +64,27 @@ "metric" : { "precision": { "ignore_unlabeled" : true }} } - - match: { rank_eval.quality_level: 1} - - match: { rank_eval.details.amsterdam_query.quality_level: 1.0} - - match: { rank_eval.details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]} - - match: { rank_eval.details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 2, "docs_retrieved": 2}} + - match: { quality_level: 1} + - match: { details.amsterdam_query.quality_level: 1.0} + - match: { details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]} + - match: { details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 2, "docs_retrieved": 2}} - - length: { rank_eval.details.amsterdam_query.hits: 3} - - match: { rank_eval.details.amsterdam_query.hits.0.hit._id: "doc2"} - - match: { rank_eval.details.amsterdam_query.hits.0.rating: 1} - - match: { rank_eval.details.amsterdam_query.hits.1.hit._id: "doc3"} - - match: { rank_eval.details.amsterdam_query.hits.1.rating: 1} - - match: { rank_eval.details.amsterdam_query.hits.2.hit._id: "doc4"} - - is_false: rank_eval.details.amsterdam_query.hits.2.rating + - length: { details.amsterdam_query.hits: 3} + - match: { details.amsterdam_query.hits.0.hit._id: "doc2"} + - match: { details.amsterdam_query.hits.0.rating: 1} + - match: { details.amsterdam_query.hits.1.hit._id: "doc3"} + - match: { details.amsterdam_query.hits.1.rating: 1} + - match: { details.amsterdam_query.hits.2.hit._id: "doc4"} + - is_false: details.amsterdam_query.hits.2.rating - - match: { rank_eval.details.berlin_query.quality_level: 1.0} - - match: { rank_eval.details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]} - - match: { rank_eval.details.berlin_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}} - - length: { rank_eval.details.berlin_query.hits: 2} - - match: { rank_eval.details.berlin_query.hits.0.hit._id: "doc1" } - - match: { rank_eval.details.berlin_query.hits.0.rating: 1} - - match: { rank_eval.details.berlin_query.hits.1.hit._id: "doc4" } - - is_false: rank_eval.details.berlin_query.hits.1.rating + - match: { details.berlin_query.quality_level: 1.0} + - match: { details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc4"}]} + - match: { details.berlin_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}} + - length: { details.berlin_query.hits: 2} + - match: { details.berlin_query.hits.0.hit._id: "doc1" } + - match: { details.berlin_query.hits.0.rating: 1} + - match: { details.berlin_query.hits.1.hit._id: "doc4" } + - is_false: details.berlin_query.hits.1.rating --- "Mean Reciprocal Rank": @@ -152,14 +152,14 @@ } # average is (1/3 + 1/2)/2 = 5/12 ~ 0.41666666666666663 - - gt: {rank_eval.quality_level: 0.416} - - lt: {rank_eval.quality_level: 0.417} - - gt: {rank_eval.details.amsterdam_query.quality_level: 0.333} - - lt: {rank_eval.details.amsterdam_query.quality_level: 0.334} - - match: {rank_eval.details.amsterdam_query.metric_details: {"first_relevant": 3}} - - match: {rank_eval.details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc2"}, + - gt: {quality_level: 0.416} + - lt: {quality_level: 0.417} + - gt: {details.amsterdam_query.quality_level: 0.333} + - lt: {details.amsterdam_query.quality_level: 0.334} + - match: {details.amsterdam_query.metric_details: {"first_relevant": 3}} + - match: {details.amsterdam_query.unknown_docs: [ {"_index": "foo", "_id": "doc2"}, {"_index": 
"foo", "_id": "doc3"} ]} - - match: {rank_eval.details.berlin_query.quality_level: 0.5} - - match: {rank_eval.details.berlin_query.metric_details: {"first_relevant": 2}} - - match: {rank_eval.details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc1"}]} + - match: {details.berlin_query.quality_level: 0.5} + - match: {details.berlin_query.metric_details: {"first_relevant": 2}} + - match: {details.berlin_query.unknown_docs: [ {"_index": "foo", "_id": "doc1"}]} diff --git a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml index 0aca6fdde9eae..3a68890dce9f7 100644 --- a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml +++ b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml @@ -69,11 +69,11 @@ "metric" : { "dcg": {}} } - - gt: {rank_eval.quality_level: 13.848263 } - - lt: {rank_eval.quality_level: 13.848264 } - - gt: {rank_eval.details.dcg_query.quality_level: 13.848263} - - lt: {rank_eval.details.dcg_query.quality_level: 13.848264} - - match: {rank_eval.details.dcg_query.unknown_docs: [ ]} + - gt: {quality_level: 13.848263 } + - lt: {quality_level: 13.848264 } + - gt: {details.dcg_query.quality_level: 13.848263} + - lt: {details.dcg_query.quality_level: 13.848264} + - match: {details.dcg_query.unknown_docs: [ ]} # reverse the order in which the results are returned (less relevant docs first) @@ -96,11 +96,11 @@ "metric" : { "dcg": { }} } - - gt: {rank_eval.quality_level: 10.299674} - - lt: {rank_eval.quality_level: 10.299675} - - gt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299674} - - lt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299675} - - match: {rank_eval.details.dcg_query_reverse.unknown_docs: [ ]} + - gt: {quality_level: 10.299674} + - lt: {quality_level: 10.299675} + - gt: {details.dcg_query_reverse.quality_level: 10.299674} + - lt: {details.dcg_query_reverse.quality_level: 10.299675} + - match: {details.dcg_query_reverse.unknown_docs: [ ]} # if we mix both, we should get the average @@ -134,11 +134,11 @@ "metric" : { "dcg": { }} } - - gt: {rank_eval.quality_level: 12.073969} - - lt: {rank_eval.quality_level: 12.073970} - - gt: {rank_eval.details.dcg_query.quality_level: 13.848263} - - lt: {rank_eval.details.dcg_query.quality_level: 13.848264} - - match: {rank_eval.details.dcg_query.unknown_docs: [ ]} - - gt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299674} - - lt: {rank_eval.details.dcg_query_reverse.quality_level: 10.299675} - - match: {rank_eval.details.dcg_query_reverse.unknown_docs: [ ]} + - gt: {quality_level: 12.073969} + - lt: {quality_level: 12.073970} + - gt: {details.dcg_query.quality_level: 13.848263} + - lt: {details.dcg_query.quality_level: 13.848264} + - match: {details.dcg_query.unknown_docs: [ ]} + - gt: {details.dcg_query_reverse.quality_level: 10.299674} + - lt: {details.dcg_query_reverse.quality_level: 10.299675} + - match: {details.dcg_query_reverse.unknown_docs: [ ]} diff --git a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml index 55efcdd104a30..48ea593712ef5 100644 --- a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml +++ b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml @@ -34,9 +34,9 @@ "metric" : { "precision": { "ignore_unlabeled" : true }} } - - match: { 
rank_eval.quality_level: 1} - - match: { rank_eval.details.amsterdam_query.quality_level: 1.0} - - match: { rank_eval.details.amsterdam_query.unknown_docs: [ ]} - - match: { rank_eval.details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}} + - match: { quality_level: 1} + - match: { details.amsterdam_query.quality_level: 1.0} + - match: { details.amsterdam_query.unknown_docs: [ ]} + - match: { details.amsterdam_query.metric_details: {"relevant_docs_retrieved": 1, "docs_retrieved": 1}} - - is_true: rank_eval.failures.invalid_query + - is_true: failures.invalid_query diff --git a/qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/30_template.yml b/qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/30_template.yml index 0a59b7d073325..9dfbecce75b53 100644 --- a/qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/30_template.yml +++ b/qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/30_template.yml @@ -67,6 +67,6 @@ "metric" : { "precision": { }} } - - match: {rank_eval.quality_level: 0.5833333333333333} - - match: {rank_eval.details.berlin_query.unknown_docs.0._id: "doc4"} - - match: {rank_eval.details.amsterdam_query.unknown_docs.0._id: "doc4"} + - match: {quality_level: 0.5833333333333333} + - match: {details.berlin_query.unknown_docs.0._id: "doc4"} + - match: {details.amsterdam_query.unknown_docs.0._id: "doc4"} From 48c8098e15b6af6f2c5f957476373351f4c61406 Mon Sep 17 00:00:00 2001 From: deepybee Date: Thu, 18 Jan 2018 06:51:53 -0200 Subject: [PATCH 52/94] Fixed several typos in analyzers section (#28247) --- docs/reference/analysis/analyzers/lang-analyzer.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 1ce44b6028db8..cb976601fdcbe 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -171,7 +171,7 @@ The `basque` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- -PUT /armenian_example +PUT /basque_example { "settings": { "analysis": { @@ -536,7 +536,7 @@ The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- -PUT /detch_example +PUT /dutch_example { "settings": { "analysis": { @@ -1554,7 +1554,7 @@ The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- -PUT /swidish_example +PUT /swedish_example { "settings": { "analysis": { From 06f931fcc415657101c8b31b7124817a066e4330 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 18 Jan 2018 01:02:27 -0800 Subject: [PATCH 53/94] Build: Add run task to meta plugins (#28283) This commit adds a run task to projects using the meta plugin plugin. It also makes the project installable via ClusterConfiguration.plugin. 
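For example, a meta plugin build script along these lines (all names here are
hypothetical) now gets a working run task for free:

    // build.gradle of a meta plugin project
    apply plugin: 'elasticsearch.es-meta-plugin'

    es_meta_plugin {
        name 'my-meta-plugin'              // hypothetical name
        plugins = ['plugin-a', 'plugin-b'] // hypothetical bundled plugins
    }
    // running `gradle run` then starts a cluster with the meta plugin installed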
--- .../gradle/plugin/MetaPluginBuildPlugin.groovy | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy index 7300195eaffef..8cdaa11c4d755 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy @@ -21,6 +21,7 @@ package org.elasticsearch.gradle.plugin import org.elasticsearch.gradle.test.RestIntegTestTask import org.elasticsearch.gradle.test.RestTestPlugin +import org.elasticsearch.gradle.test.RunTask import org.elasticsearch.gradle.test.StandaloneRestTestPlugin import org.gradle.api.Plugin import org.gradle.api.Project @@ -42,6 +43,10 @@ class MetaPluginBuildPlugin implements Plugin { distribution = 'zip' setupCommand('installMetaPlugin', 'bin/elasticsearch-plugin', 'install', 'file:' + project.bundlePlugin.archivePath) } + + RunTask run = project.tasks.create('run', RunTask) + run.dependsOn(project.bundlePlugin) + run.clusterConfig.plugin(project.path) } private static void createBundleTask(Project project) { @@ -63,6 +68,10 @@ class MetaPluginBuildPlugin implements Plugin { } project.assemble.dependsOn(bundle) + // also make the zip available as a configuration (used when depending on this project) + project.configurations.create('zip') + project.artifacts.add('zip', bundle) + // a super hacky way to inject code to run at the end of each of the bundled plugin's configuration // to add itself back to this meta plugin zip project.afterEvaluate { From c38c12e3bf6622d2877e5c3137773981f10114dd Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 18 Jan 2018 10:49:34 +0100 Subject: [PATCH 54/94] Fix simple_query_string on invalid input (#28219) This change converts any exception that occurs during the parsing of a simple_query_string to a match_no_docs query (instead of a null query) when leniency is activated. 
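As a sketch of the new behavior (field names are hypothetical, in the spirit
of the unit test added below), a prefix query over a field that cannot parse
the input now contributes a match-no-docs clause instead of a null clause:

    import org.elasticsearch.index.query.SimpleQueryStringBuilder

    def query = new SimpleQueryStringBuilder('t*')
            .field('timestamp') // hypothetical date field: parsing fails, lenient -> match_no_docs
            .field('body')      // hypothetical text field: parsed into a prefix query
            .lenient(true)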
Closes #28204 --- .../search/SimpleQueryStringQueryParser.java | 13 +++++++------ .../query/SimpleQueryStringBuilderTests.java | 18 ++++++++++++++++++ 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java index 9f91b16359287..aea3677e33e13 100644 --- a/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java @@ -33,6 +33,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -86,11 +87,11 @@ private Analyzer getAnalyzer(MappedFieldType ft) { } /** - * Rethrow the runtime exception, unless the lenient flag has been set, returns null + * Rethrow the runtime exception, unless the lenient flag has been set, returns {@link MatchNoDocsQuery} */ private Query rethrowUnlessLenient(RuntimeException e) { if (settings.lenient()) { - return null; + return Queries.newMatchNoDocsQuery("failed query, caused by " + e.getMessage()); } throw e; } @@ -115,7 +116,7 @@ public Query newDefaultQuery(String text) { try { return queryBuilder.parse(MultiMatchQueryBuilder.Type.MOST_FIELDS, weights, text, null); } catch (IOException e) { - return rethrowUnlessLenient(new IllegalArgumentException(e.getMessage())); + return rethrowUnlessLenient(new IllegalStateException(e.getMessage())); } } @@ -135,7 +136,7 @@ public Query newFuzzyQuery(String text, int fuzziness) { settings.fuzzyMaxExpansions, settings.fuzzyTranspositions); disjuncts.add(wrapWithBoost(query, entry.getValue())); } catch (RuntimeException e) { - rethrowUnlessLenient(e); + disjuncts.add(rethrowUnlessLenient(e)); } } if (disjuncts.size() == 1) { @@ -156,7 +157,7 @@ public Query newPhraseQuery(String text, int slop) { } return queryBuilder.parse(MultiMatchQueryBuilder.Type.PHRASE, phraseWeights, text, null); } catch (IOException e) { - return rethrowUnlessLenient(new IllegalArgumentException(e.getMessage())); + return rethrowUnlessLenient(new IllegalStateException(e.getMessage())); } finally { queryBuilder.setPhraseSlop(0); } @@ -184,7 +185,7 @@ public Query newPrefixQuery(String text) { disjuncts.add(wrapWithBoost(query, entry.getValue())); } } catch (RuntimeException e) { - return rethrowUnlessLenient(e); + disjuncts.add(rethrowUnlessLenient(e)); } } if (disjuncts.size() == 1) { diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index bfc6fd0600493..dc7c56ce04ebf 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -46,15 +46,18 @@ import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; import static org.hamcrest.Matchers.anyOf; +import static 
org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.either;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
@@ -607,6 +610,21 @@ public void testToFuzzyQuery() throws Exception {
         assertEquals(expected, query);
     }

+    public void testLenientToPrefixQuery() throws Exception {
+        assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
+
+        Query query = new SimpleQueryStringBuilder("t*")
+            .field(DATE_FIELD_NAME)
+            .field(STRING_FIELD_NAME)
+            .lenient(true)
+            .toQuery(createShardContext());
+        List expectedQueries = new ArrayList<>();
+        expectedQueries.add(new MatchNoDocsQuery(""));
+        expectedQueries.add(new PrefixQuery(new Term(STRING_FIELD_NAME, "t")));
+        DisjunctionMaxQuery expected = new DisjunctionMaxQuery(expectedQueries, 1.0f);
+        assertEquals(expected, query);
+    }
+
     private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) {
         Settings build = Settings.builder().put(oldIndexSettings)
             .put(indexSettings)

From b4c1c4a78cdaab678b1cc11fa1afa101591f3186 Mon Sep 17 00:00:00 2001
From: Luca Cavanna
Date: Thu, 18 Jan 2018 11:57:24 +0100
Subject: [PATCH 55/94] REST high-level client: remove index suffix from indices client method names (#28263)

Today, the way to call the APIs under the indices namespace is by doing e.g.
`client.indices().createIndex()`. Our spec defines the API under the indices
namespace as e.g. `indices.create`, hence there is no need to repeat the
index suffix for each method as that is already defined by the namespace.
Using the `index` suffix in each method was an oversight which must be
corrected.

---
 .../elasticsearch/client/IndicesClient.java   | 16 +++++-----
 .../elasticsearch/client/IndicesClientIT.java | 26 ++++++++--------
 .../IndicesClientDocumentationIT.java         | 30 +++++++++----------
 3 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java
index 4940267e85c22..2dd130fc6342e 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java
@@ -51,7 +51,7 @@ public final class IndicesClient {
      * See
      * Delete Index API on elastic.co
      */
-    public DeleteIndexResponse deleteIndex(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException {
+    public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException {
         return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent,
                 Collections.emptySet(), headers);
     }
@@ -62,7 +62,7 @@ public DeleteIndexResponse deleteIndex(DeleteIndexRequest deleteIndexRequest, He
      * See
      * Delete Index API on elastic.co
      */
-    public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, Header... headers) {
+    public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, Header...
headers) { restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent, listener, Collections.emptySet(), headers); } @@ -73,7 +73,7 @@ public void deleteIndexAsync(DeleteIndexRequest deleteIndexRequest, ActionListen * See * Create Index API on elastic.co */ - public CreateIndexResponse createIndex(CreateIndexRequest createIndexRequest, Header... headers) throws IOException { + public CreateIndexResponse create(CreateIndexRequest createIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent, Collections.emptySet(), headers); } @@ -84,7 +84,7 @@ public CreateIndexResponse createIndex(CreateIndexRequest createIndexRequest, He * See * Create Index API on elastic.co */ - public void createIndexAsync(CreateIndexRequest createIndexRequest, ActionListener listener, Header... headers) { + public void createAsync(CreateIndexRequest createIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent, listener, Collections.emptySet(), headers); } @@ -95,7 +95,7 @@ public void createIndexAsync(CreateIndexRequest createIndexRequest, ActionListen * See * Open Index API on elastic.co */ - public OpenIndexResponse openIndex(OpenIndexRequest openIndexRequest, Header... headers) throws IOException { + public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent, Collections.emptySet(), headers); } @@ -106,7 +106,7 @@ public OpenIndexResponse openIndex(OpenIndexRequest openIndexRequest, Header... * See * Open Index API on elastic.co */ - public void openIndexAsync(OpenIndexRequest openIndexRequest, ActionListener listener, Header... headers) { + public void openAsync(OpenIndexRequest openIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent, listener, Collections.emptySet(), headers); } @@ -117,7 +117,7 @@ public void openIndexAsync(OpenIndexRequest openIndexRequest, ActionListener * Close Index API on elastic.co */ - public CloseIndexResponse closeIndex(CloseIndexRequest closeIndexRequest, Header... headers) throws IOException { + public CloseIndexResponse close(CloseIndexRequest closeIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent, Collections.emptySet(), headers); } @@ -128,7 +128,7 @@ public CloseIndexResponse closeIndex(CloseIndexRequest closeIndexRequest, Header * See * Close Index API on elastic.co */ - public void closeIndexAsync(CloseIndexRequest closeIndexRequest, ActionListener listener, Header... headers) { + public void closeAsync(CloseIndexRequest closeIndexRequest, ActionListener listener, Header... 
headers) { restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent, listener, Collections.emptySet(), headers); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 361b60a5218cf..5f8702807fb30 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -55,7 +55,7 @@ public void testCreateIndex() throws IOException { CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName); CreateIndexResponse createIndexResponse = - execute(createIndexRequest, highLevelClient().indices()::createIndex, highLevelClient().indices()::createIndexAsync); + execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync); assertTrue(createIndexResponse.isAcknowledged()); assertTrue(indexExists(indexName)); @@ -83,7 +83,7 @@ public void testCreateIndex() throws IOException { createIndexRequest.mapping("type_name", mappingBuilder); CreateIndexResponse createIndexResponse = - execute(createIndexRequest, highLevelClient().indices()::createIndex, highLevelClient().indices()::createIndexAsync); + execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync); assertTrue(createIndexResponse.isAcknowledged()); Map indexMetaData = getIndexMetadata(indexName); @@ -116,7 +116,7 @@ public void testDeleteIndex() throws IOException { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indexName); DeleteIndexResponse deleteIndexResponse = - execute(deleteIndexRequest, highLevelClient().indices()::deleteIndex, highLevelClient().indices()::deleteIndexAsync); + execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync); assertTrue(deleteIndexResponse.isAcknowledged()); assertFalse(indexExists(indexName)); @@ -129,7 +129,7 @@ public void testDeleteIndex() throws IOException { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(deleteIndexRequest, highLevelClient().indices()::deleteIndex, highLevelClient().indices()::deleteIndexAsync)); + () -> execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } } @@ -143,8 +143,8 @@ public void testOpenExistingIndex() throws IOException { assertThat(exception.getMessage().contains(index), equalTo(true)); OpenIndexRequest openIndexRequest = new OpenIndexRequest(index); - OpenIndexResponse openIndexResponse = execute(openIndexRequest, highLevelClient().indices()::openIndex, - highLevelClient().indices()::openIndexAsync); + OpenIndexResponse openIndexResponse = execute(openIndexRequest, highLevelClient().indices()::open, + highLevelClient().indices()::openAsync); assertTrue(openIndexResponse.isAcknowledged()); Response response = client().performRequest("GET", index + "/_search"); @@ -157,19 +157,19 @@ public void testOpenNonExistentIndex() throws IOException { OpenIndexRequest openIndexRequest = new OpenIndexRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(openIndexRequest, 
highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync)); + () -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); OpenIndexRequest lenientOpenIndexRequest = new OpenIndexRequest(nonExistentIndex); lenientOpenIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); - OpenIndexResponse lenientOpenIndexResponse = execute(lenientOpenIndexRequest, highLevelClient().indices()::openIndex, - highLevelClient().indices()::openIndexAsync); + OpenIndexResponse lenientOpenIndexResponse = execute(lenientOpenIndexRequest, highLevelClient().indices()::open, + highLevelClient().indices()::openAsync); assertThat(lenientOpenIndexResponse.isAcknowledged(), equalTo(true)); OpenIndexRequest strictOpenIndexRequest = new OpenIndexRequest(nonExistentIndex); strictOpenIndexRequest.indicesOptions(IndicesOptions.strictExpandOpen()); ElasticsearchException strictException = expectThrows(ElasticsearchException.class, - () -> execute(openIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync)); + () -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync)); assertEquals(RestStatus.NOT_FOUND, strictException.status()); } @@ -180,8 +180,8 @@ public void testCloseExistingIndex() throws IOException { assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); CloseIndexRequest closeIndexRequest = new CloseIndexRequest(index); - CloseIndexResponse closeIndexResponse = execute(closeIndexRequest, highLevelClient().indices()::closeIndex, - highLevelClient().indices()::closeIndexAsync); + CloseIndexResponse closeIndexResponse = execute(closeIndexRequest, highLevelClient().indices()::close, + highLevelClient().indices()::closeAsync); assertTrue(closeIndexResponse.isAcknowledged()); ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search")); @@ -195,7 +195,7 @@ public void testCloseNonExistentIndex() throws IOException { CloseIndexRequest closeIndexRequest = new CloseIndexRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(closeIndexRequest, highLevelClient().indices()::closeIndex, highLevelClient().indices()::closeIndexAsync)); + () -> execute(closeIndexRequest, highLevelClient().indices()::close, highLevelClient().indices()::closeAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index dd7e53eaa954c..bc3b1698f9679 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -62,7 +62,7 @@ public void testDeleteIndex() throws IOException { RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("posts")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts")); assertTrue(createIndexResponse.isAcknowledged()); } @@ -84,7 +84,7 @@ public void testDeleteIndex() throws IOException 
{ // end::delete-index-request-indicesOptions // tag::delete-index-execute - DeleteIndexResponse deleteIndexResponse = client.indices().deleteIndex(request); + DeleteIndexResponse deleteIndexResponse = client.indices().delete(request); // end::delete-index-execute // tag::delete-index-response @@ -97,7 +97,7 @@ public void testDeleteIndex() throws IOException { // tag::delete-index-notfound try { DeleteIndexRequest request = new DeleteIndexRequest("does_not_exist"); - client.indices().deleteIndex(request); + client.indices().delete(request); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -111,7 +111,7 @@ public void testDeleteIndexAsync() throws Exception { final RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("posts")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts")); assertTrue(createIndexResponse.isAcknowledged()); } @@ -119,7 +119,7 @@ public void testDeleteIndexAsync() throws Exception { DeleteIndexRequest request = new DeleteIndexRequest("posts"); // tag::delete-index-execute-async - client.indices().deleteIndexAsync(request, new ActionListener() { + client.indices().deleteAsync(request, new ActionListener() { @Override public void onResponse(DeleteIndexResponse deleteIndexResponse) { // <1> @@ -189,7 +189,7 @@ public void testCreateIndex() throws IOException { // end::create-index-request-waitForActiveShards // tag::create-index-execute - CreateIndexResponse createIndexResponse = client.indices().createIndex(request); + CreateIndexResponse createIndexResponse = client.indices().create(request); // end::create-index-execute // tag::create-index-response @@ -207,7 +207,7 @@ public void testCreateIndexAsync() throws Exception { { CreateIndexRequest request = new CreateIndexRequest("twitter"); // tag::create-index-execute-async - client.indices().createIndexAsync(request, new ActionListener() { + client.indices().createAsync(request, new ActionListener() { @Override public void onResponse(CreateIndexResponse createIndexResponse) { // <1> @@ -232,7 +232,7 @@ public void testOpenIndex() throws IOException { RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("index")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); assertTrue(createIndexResponse.isAcknowledged()); } @@ -260,7 +260,7 @@ public void testOpenIndex() throws IOException { // end::open-index-request-indicesOptions // tag::open-index-execute - OpenIndexResponse openIndexResponse = client.indices().openIndex(request); + OpenIndexResponse openIndexResponse = client.indices().open(request); // end::open-index-execute // tag::open-index-response @@ -271,7 +271,7 @@ public void testOpenIndex() throws IOException { assertTrue(shardsAcked); // tag::open-index-execute-async - client.indices().openIndexAsync(request, new ActionListener() { + client.indices().openAsync(request, new ActionListener() { @Override public void onResponse(OpenIndexResponse openIndexResponse) { // <1> @@ -289,7 +289,7 @@ public void onFailure(Exception e) { // tag::open-index-notfound try { OpenIndexRequest request = new OpenIndexRequest("does_not_exist"); - client.indices().openIndex(request); + client.indices().open(request); } catch (ElasticsearchException exception) { if (exception.status() == 
RestStatus.BAD_REQUEST) { // <1> @@ -303,7 +303,7 @@ public void testCloseIndex() throws IOException { RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().createIndex(new CreateIndexRequest("index")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); assertTrue(createIndexResponse.isAcknowledged()); } @@ -326,7 +326,7 @@ public void testCloseIndex() throws IOException { // end::close-index-request-indicesOptions // tag::close-index-execute - CloseIndexResponse closeIndexResponse = client.indices().closeIndex(request); + CloseIndexResponse closeIndexResponse = client.indices().close(request); // end::close-index-execute // tag::close-index-response @@ -335,7 +335,7 @@ public void testCloseIndex() throws IOException { assertTrue(acknowledged); // tag::close-index-execute-async - client.indices().closeIndexAsync(request, new ActionListener() { + client.indices().closeAsync(request, new ActionListener() { @Override public void onResponse(CloseIndexResponse closeIndexResponse) { // <1> @@ -353,7 +353,7 @@ public void onFailure(Exception e) { // tag::close-index-notfound try { CloseIndexRequest request = new CloseIndexRequest("does_not_exist"); - client.indices().closeIndex(request); + client.indices().close(request); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.BAD_REQUEST) { // <1> From 0dfb65a6eea50215463d05230505d9661f5c0399 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 18 Jan 2018 12:56:54 +0100 Subject: [PATCH 56/94] [TEST] fix RequestTests#testSearch in case search source is not set The search request body can never be null as `SearchRequest` doesn't allow the inner `SearchSourceBuilder` to be null. 
Instead, when search source is not set, the request body is going to be an
empty json object (`{}`)

---
 .../elasticsearch/client/RequestTests.java | 83 +++++++++----------
 1 file changed, 41 insertions(+), 42 deletions(-)

diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java
index bfc868707a8c3..56848a905a1cd 100755
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java
@@ -794,44 +794,47 @@ public void testSearch() throws Exception {
         setRandomIndicesOptions(searchRequest::indicesOptions, searchRequest::indicesOptions, expectedParams);

-        SearchSourceBuilder searchSourceBuilder = null;
+        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
+        //rarely skip setting the search source completely
         if (frequently()) {
-            searchSourceBuilder = new SearchSourceBuilder();
-            if (randomBoolean()) {
-                searchSourceBuilder.size(randomIntBetween(0, Integer.MAX_VALUE));
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.from(randomIntBetween(0, Integer.MAX_VALUE));
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.minScore(randomFloat());
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.explain(randomBoolean());
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.profile(randomBoolean());
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.highlighter(new HighlightBuilder().field(randomAlphaOfLengthBetween(3, 10)));
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.query(new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10)));
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.aggregation(new TermsAggregationBuilder(randomAlphaOfLengthBetween(3, 10), ValueType.STRING)
-                        .field(randomAlphaOfLengthBetween(3, 10)));
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion(randomAlphaOfLengthBetween(3, 10),
-                        new CompletionSuggestionBuilder(randomAlphaOfLengthBetween(3, 10))));
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.addRescorer(new QueryRescorerBuilder(
-                        new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10))));
-            }
-            if (randomBoolean()) {
-                searchSourceBuilder.collapse(new CollapseBuilder(randomAlphaOfLengthBetween(3, 10)));
+            //frequently set the search source to have some content, otherwise leave it empty but still set it
+            if (frequently()) {
+                if (randomBoolean()) {
+                    searchSourceBuilder.size(randomIntBetween(0, Integer.MAX_VALUE));
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.from(randomIntBetween(0, Integer.MAX_VALUE));
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.minScore(randomFloat());
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.explain(randomBoolean());
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.profile(randomBoolean());
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.highlighter(new HighlightBuilder().field(randomAlphaOfLengthBetween(3, 10)));
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.query(new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10)));
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.aggregation(new TermsAggregationBuilder(randomAlphaOfLengthBetween(3, 10), ValueType.STRING)
+                            .field(randomAlphaOfLengthBetween(3, 10)));
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion(randomAlphaOfLengthBetween(3,
10),
+                            new CompletionSuggestionBuilder(randomAlphaOfLengthBetween(3, 10))));
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.addRescorer(new QueryRescorerBuilder(
+                            new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10))));
+                }
+                if (randomBoolean()) {
+                    searchSourceBuilder.collapse(new CollapseBuilder(randomAlphaOfLengthBetween(3, 10)));
+                }
             }
             searchRequest.source(searchSourceBuilder);
         }
@@ -849,11 +852,7 @@ public void testSearch() throws Exception {
         endpoint.add("_search");
         assertEquals(endpoint.toString(), request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
-        if (searchSourceBuilder == null) {
-            assertNull(request.getEntity());
-        } else {
-            assertToXContentBody(searchSourceBuilder, request.getEntity());
-        }
+        assertToXContentBody(searchSourceBuilder, request.getEntity());
     }

     public void testMultiSearch() throws IOException {

From c92b42ef840bbaf2b4b0346a7160ba5f444dd9ad Mon Sep 17 00:00:00 2001
From: David Shimon
Date: Thu, 18 Jan 2018 16:28:51 +0200
Subject: [PATCH 57/94] Docs: match snippet to its description (#28296)

s/400/200/ in the text to match a snippet.

---
 .../aggregations/pipeline/bucket-selector-aggregation.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc
index 5dc1b80d4adda..4cc532c99c5d2 100644
--- a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc
+++ b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc
@@ -41,7 +41,7 @@ for more details) |Required |
 details)|Optional |`skip`
 |===

-The following snippet only retains buckets where the total sales for the month is more than 400:
+The following snippet only retains buckets where the total sales for the month is more than 200:

 [source,js]
 --------------------------------------------------

From 66c81e7f5e2d2af8c204c816d84c28a463e5e71c Mon Sep 17 00:00:00 2001
From: Jin Liang
Date: Thu, 18 Jan 2018 11:06:20 -0600
Subject: [PATCH 58/94] [Docs] Update tophits-aggregation.asciidoc (#28273)

---
 .../reference/aggregations/metrics/tophits-aggregation.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc
index e5966e56b35dd..0c19bf172bbf0 100644
--- a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc
+++ b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc
@@ -1,5 +1,5 @@
 [[search-aggregations-metrics-top-hits-aggregation]]
-=== Top hits Aggregation
+=== Top Hits Aggregation

 A `top_hits` metric aggregator keeps track of the most relevant document being aggregated. This aggregator is intended to be
 used as a sub aggregator, so that the top matching documents can be aggregated per bucket.

From 4214a718ec6d658b4dfb19e7ce38c6c3573ec8c7 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Thu, 18 Jan 2018 12:08:32 -0500
Subject: [PATCH 59/94] Fork Groovy compiler onto compile Java home

We use the --release flag which is only available starting in JDK 9. Since
Gradle could be running on JDK 8 without forking the compiler, compilation
will occur with the Java home of Gradle. This commit adds a fork flag and
points the forked compiler at the configured compiler Java home so that we
use the right compiler.
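A minimal standalone version of the same configuration looks like this; the
JDK path is a placeholder, not a real location:

    tasks.withType(GroovyCompile) {
        // fork onto an explicit JDK instead of inheriting the JVM running Gradle
        options.fork = true
        options.forkOptions.javaHome = new File('/path/to/compile-jdk') // placeholder
        options.compilerArgs << '--release' << '8' // target version is an example
    }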
Relates #28300

---
 .../src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 08eadd0cfabb7..dd1459418d013 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -459,6 +459,8 @@ class BuildPlugin implements Plugin {
             // also apply release flag to groovy, which is used in build-tools
             project.tasks.withType(GroovyCompile) {
                 final JavaVersion targetCompatibilityVersion = JavaVersion.toVersion(it.targetCompatibility)
+                options.fork = true
+                options.forkOptions.javaHome = new File(project.compilerJavaHome)
                 options.compilerArgs << '--release' << targetCompatibilityVersion.majorVersion
             }
         }
     }

From 20fb7a6d87059f4b4eed562a67dba589c85faf61 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Thu, 18 Jan 2018 10:59:42 -0700
Subject: [PATCH 60/94] Modify Abstract transport tests to use impls (#28270)

There are a number of tests in `AbstractSimpleTransportTestCase` that create
`MockTcpTransport` impls. This commit modifies two of these tests to use the
transport implementation that is being tested.

---
 .../AbstractSimpleTransportTestCase.java | 21 ++++++-------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
index 20971b3865ea1..ed0431d96785c 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
@@ -1927,16 +1927,12 @@ public void testTimeoutPerConnection() throws IOException {

     public void testHandshakeWithIncompatVersion() {
         assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport);
-        NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList());
         Version version = Version.fromString("2.0.0");
-        try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE,
-            new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version);
-             MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, null,
-                 Collections.emptySet())) {
+        try (MockTransportService service = build(Settings.EMPTY, version, null, true)) {
            service.start();
            service.acceptIncomingRequests();
-            DiscoveryNode node =
-                new DiscoveryNode("TS_TPC", "TS_TPC", transport.boundAddress().publishAddress(), emptyMap(), emptySet(), version0);
+            TransportAddress address = service.boundAddress().publishAddress();
+            DiscoveryNode node = new DiscoveryNode("TS_TPC", "TS_TPC", address, emptyMap(), emptySet(), version0);

             ConnectionProfile.Builder builder = new ConnectionProfile.Builder();
             builder.addConnections(1,
                 TransportRequestOptions.Type.BULK,
@@ -1950,17 +1946,12 @@ public void testHandshakeWithIncompatVersion() {

     public void testHandshakeUpdatesVersion() throws IOException {
         assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport);
-        NamedWriteableRegistry namedWriteableRegistry = new
NamedWriteableRegistry(Collections.emptyList()); Version version = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); - try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version); - MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, null, - Collections.emptySet())) { + try (MockTransportService service = build(Settings.EMPTY, version, null, true)) { service.start(); service.acceptIncomingRequests(); - DiscoveryNode node = - new DiscoveryNode("TS_TPC", "TS_TPC", transport.boundAddress().publishAddress(), emptyMap(), emptySet(), - Version.fromString("2.0.0")); + TransportAddress address = service.boundAddress().publishAddress(); + DiscoveryNode node = new DiscoveryNode("TS_TPC", "TS_TPC", address, emptyMap(), emptySet(), Version.fromString("2.0.0")); ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); builder.addConnections(1, TransportRequestOptions.Type.BULK,
From e442a34acc775434e5b69d85ac5e0c06ab93de72 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 18 Jan 2018 11:14:56 -0800 Subject: [PATCH 61/94] Build: Fix meta plugin integ test installation (#28286) Integ test clusters should use the plugin method of ClusterConfiguration to install plugins. Without it, meta plugins install based on the name of the project directory, rather than the actual configured plugin name. This commit fixes that, and also changes the distribution used to the default integ-test-zip, to match that of PluginBuildPlugin. This ensures plugins are tested in isolation by default. --- .../elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy index 8cdaa11c4d755..fbef0d8e49721 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy @@ -40,8 +40,7 @@ class MetaPluginBuildPlugin implements Plugin { project.integTestCluster { dependsOn(project.bundlePlugin) - distribution = 'zip' - setupCommand('installMetaPlugin', 'bin/elasticsearch-plugin', 'install', 'file:' + project.bundlePlugin.archivePath) + plugin(project.path) } RunTask run = project.tasks.create('run', RunTask)
From de9d903b1e834184518a581d857b17fc4e21124f Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 18 Jan 2018 12:01:20 -0800 Subject: [PATCH 62/94] Plugins: Fix meta plugins to install bundled plugins with their real name (#28285) Meta plugins move the unzipped plugin as-is, but the inner plugins may have a different directory name than their corresponding plugin properties file specifies. This commit fixes installation to rename the directory if necessary.
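As a rough sketch of that renaming step (not part of the patch; the paths and names here are hypothetical), the fix boils down to an atomic in-place rename of the staged inner plugin directory before the meta plugin is moved into its final location:

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardCopyOption;

    public class RenameBundledPluginSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical staging layout: the unzipped inner plugin directory name
            // ("my-plugin-dir") may differ from the name in its plugin properties file.
            Path plugin = Paths.get("/tmp/meta-plugin-staging/my-plugin-dir");
            String declaredName = "my-plugin"; // would come from PluginInfo in the real code
            if (plugin.getFileName().toString().equals(declaredName) == false) {
                // Same idiom as the patch: rename within the same parent, atomically.
                Files.move(plugin, plugin.resolveSibling(declaredName), StandardCopyOption.ATOMIC_MOVE);
            }
        }
    }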
--- .../plugins/InstallPluginCommand.java | 41 +++++++++---------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index a8b7db48a7c1c..5675d3e80070f 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -646,9 +646,11 @@ private void installMetaPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, Environment env, List deleteOnFailure) throws Exception { final MetaPluginInfo metaInfo = MetaPluginInfo.readFromProperties(tmpRoot); verifyPluginName(env.pluginsFile(), metaInfo.getName(), tmpRoot); + final Path destination = env.pluginsFile().resolve(metaInfo.getName()); deleteOnFailure.add(destination); terminal.println(VERBOSE, metaInfo.toString()); + final List pluginPaths = new ArrayList<>(); try (DirectoryStream paths = Files.newDirectoryStream(tmpRoot)) { // Extract bundled plugins path and validate plugin names @@ -665,19 +667,11 @@ private void installMetaPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, for (Path plugin : pluginPaths) { final PluginInfo info = verify(terminal, plugin, isBatch, env); pluginInfos.add(info); - Path tmpBinDir = plugin.resolve("bin"); - if (Files.exists(tmpBinDir)) { - Path destBinDir = env.binFile().resolve(metaInfo.getName()); - deleteOnFailure.add(destBinDir); - installBin(info, tmpBinDir, destBinDir); - } - - Path tmpConfigDir = plugin.resolve("config"); - if (Files.exists(tmpConfigDir)) { - // some files may already exist, and we don't remove plugin config files on plugin removal, - // so any installed config files are left on failure too - Path destConfigDir = env.configFile().resolve(metaInfo.getName()); - installConfig(info, tmpConfigDir, destConfigDir); + installPluginSupportFiles(info, plugin, env.binFile().resolve(metaInfo.getName()), + env.configFile().resolve(metaInfo.getName()), deleteOnFailure); + // ensure the plugin dir within the tmpRoot has the correct name + if (plugin.getFileName().toString().equals(info.getName()) == false) { + Files.move(plugin, plugin.getParent().resolve(info.getName()), StandardCopyOption.ATOMIC_MOVE); } } movePlugin(tmpRoot, destination); @@ -693,7 +687,7 @@ private void installMetaPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, /** * Installs the plugin from {@code tmpRoot} into the plugins dir. - * If the plugin has a bin dir and/or a config dir, those are copied. + * If the plugin has a bin dir and/or a config dir, those are moved. 
*/ private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, Environment env, List deleteOnFailure) throws Exception { @@ -701,9 +695,20 @@ private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, final Path destination = env.pluginsFile().resolve(info.getName()); deleteOnFailure.add(destination); + installPluginSupportFiles(info, tmpRoot, env.binFile().resolve(info.getName()), + env.configFile().resolve(info.getName()), deleteOnFailure); + movePlugin(tmpRoot, destination); + if (info.requiresKeystore()) { + createKeystoreIfNeeded(terminal, env, info); + } + terminal.println("-> Installed " + info.getName()); + } + + /** Moves bin and config directories from the plugin if they exist */ + private void installPluginSupportFiles(PluginInfo info, Path tmpRoot, + Path destBinDir, Path destConfigDir, List deleteOnFailure) throws Exception { Path tmpBinDir = tmpRoot.resolve("bin"); if (Files.exists(tmpBinDir)) { - Path destBinDir = env.binFile().resolve(info.getName()); deleteOnFailure.add(destBinDir); installBin(info, tmpBinDir, destBinDir); } @@ -712,14 +717,8 @@ private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, if (Files.exists(tmpConfigDir)) { // some files may already exist, and we don't remove plugin config files on plugin removal, // so any installed config files are left on failure too - Path destConfigDir = env.configFile().resolve(info.getName()); installConfig(info, tmpConfigDir, destConfigDir); } - movePlugin(tmpRoot, destination); - if (info.requiresKeystore()) { - createKeystoreIfNeeded(terminal, env, info); - } - terminal.println("-> Installed " + info.getName()); } /** Moves the plugin directory into its final destination. **/
From a6a57a71d3adf770b4ab85aa5a2056477ff18eb3 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Thu, 18 Jan 2018 13:06:40 -0700 Subject: [PATCH 63/94] Implement socket and server ChannelContexts (#28275) This commit is related to #27260. Currently we have a channel context that implements reading and writing logic for socket channels, plus separate exception contexts to handle exceptions and accepting contexts to handle accepted channels. This PR introduces a ChannelContext that handles close and exception handling for all channel types, along with implementations that provide the specific functionality for socket channels (reading and writing) and for server channels (accepting).
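To make the new shape concrete before the diff, a minimal wiring sketch (constructor signatures taken from the change itself; the handlers are no-op placeholders): a socket channel gets a single SocketChannelContext that owns reading, writing, closing, and exception handling, while a server channel gets a ServerChannelContext that owns accepting, closing, and exception handling.

    import org.elasticsearch.nio.BytesChannelContext;
    import org.elasticsearch.nio.InboundChannelBuffer;
    import org.elasticsearch.nio.NioServerSocketChannel;
    import org.elasticsearch.nio.NioSocketChannel;
    import org.elasticsearch.nio.ServerChannelContext;
    import org.elasticsearch.nio.SocketChannelContext;

    import java.util.function.BiConsumer;

    class ContextWiringSketch {
        static void wire(NioSocketChannel socketChannel, NioServerSocketChannel serverChannel) {
            // The read consumer is handed the buffered bytes and reports how many it consumed.
            SocketChannelContext.ReadConsumer readConsumer = channelBuffer -> 0; // placeholder
            BiConsumer<NioSocketChannel, Exception> exceptionHandler = (channel, e) -> {};
            socketChannel.setContext(new BytesChannelContext(socketChannel, exceptionHandler,
                readConsumer, InboundChannelBuffer.allocatingInstance()));

            // Server channels used to take a bare accept callback; accept and exception
            // handling now travel together in one ServerChannelContext.
            serverChannel.setContext(new ServerChannelContext(serverChannel,
                acceptedChannel -> {}, (channel, e) -> {}));
        }
    }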
--- .../elasticsearch/nio/AbstractNioChannel.java | 12 +- .../nio/AcceptorEventHandler.java | 2 +- .../nio/BytesChannelContext.java | 55 ++++---- .../org/elasticsearch/nio/ChannelContext.java | 56 ++------ .../org/elasticsearch/nio/ChannelFactory.java | 1 - .../org/elasticsearch/nio/EventHandler.java | 2 +- .../nio/InboundChannelBuffer.java | 4 + .../org/elasticsearch/nio/NioChannel.java | 4 + .../nio/NioServerSocketChannel.java | 24 ++-- .../elasticsearch/nio/NioSocketChannel.java | 41 +----- .../nio/ServerChannelContext.java | 62 +++++++++ .../nio/SocketChannelContext.java | 129 ++++++++++++++++++ .../elasticsearch/nio/SocketEventHandler.java | 29 +--- .../org/elasticsearch/nio/SocketSelector.java | 2 +- .../org/elasticsearch/nio/WriteOperation.java | 2 +- .../nio/AcceptorEventHandlerTests.java | 12 +- .../nio/BytesChannelContextTests.java | 14 +- .../nio/ChannelFactoryTests.java | 3 +- .../nio/NioServerSocketChannelTests.java | 76 +++++------ .../nio/NioSocketChannelTests.java | 122 +++++------------ .../nio/SocketEventHandlerTests.java | 31 ++--- .../nio/SocketSelectorTests.java | 4 +- .../transport/nio/NioTransport.java | 21 +-- .../nio/TcpNioServerSocketChannel.java | 4 +- .../transport/nio/MockNioTransport.java | 13 +- 25 files changed, 393 insertions(+), 332 deletions(-) create mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java create mode 100644 libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java index e3dcbad024cb2..14e2365eb7e82 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AbstractNioChannel.java @@ -79,7 +79,7 @@ public void closeFromSelector() throws IOException { selector.assertOnSelectorThread(); if (closeContext.isDone() == false) { try { - closeRawChannel(); + socketChannel.close(); closeContext.complete(null); } catch (IOException e) { closeContext.completeExceptionally(e); @@ -119,13 +119,13 @@ public void addCloseListener(BiConsumer listener) { closeContext.whenComplete(listener); } + @Override + public void close() { + getContext().closeChannel(); + } + // Package visibility for testing void setSelectionKey(SelectionKey selectionKey) { this.selectionKey = selectionKey; } - // Package visibility for testing - - void closeRawChannel() throws IOException { - socketChannel.close(); - } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java index a5727d9ef597a..eb5194f21ef3b 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java @@ -67,7 +67,7 @@ protected void acceptChannel(NioServerSocketChannel nioServerChannel) throws IOE ChannelFactory channelFactory = nioServerChannel.getChannelFactory(); SocketSelector selector = selectorSupplier.get(); NioSocketChannel nioSocketChannel = channelFactory.acceptNioChannel(nioServerChannel, selector); - nioServerChannel.getAcceptContext().accept(nioSocketChannel); + nioServerChannel.getContext().acceptChannel(nioSocketChannel); } /** diff --git 
a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java index 893c6986bdda7..5d77675aa4819 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java @@ -26,25 +26,20 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; -public class BytesChannelContext implements ChannelContext { +public class BytesChannelContext extends SocketChannelContext { - private final NioSocketChannel channel; private final ReadConsumer readConsumer; private final InboundChannelBuffer channelBuffer; private final LinkedList queued = new LinkedList<>(); private final AtomicBoolean isClosing = new AtomicBoolean(false); - private boolean peerClosed = false; - private boolean ioException = false; - public BytesChannelContext(NioSocketChannel channel, ReadConsumer readConsumer, InboundChannelBuffer channelBuffer) { - this.channel = channel; + public BytesChannelContext(NioSocketChannel channel, BiConsumer exceptionHandler, + ReadConsumer readConsumer, InboundChannelBuffer channelBuffer) { + super(channel, exceptionHandler); this.readConsumer = readConsumer; this.channelBuffer = channelBuffer; } - @Override - public void channelRegistered() throws IOException {} - @Override public int read() throws IOException { if (channelBuffer.getRemaining() == 0) { @@ -52,16 +47,9 @@ public int read() throws IOException { channelBuffer.ensureCapacity(channelBuffer.getCapacity() + 1); } - int bytesRead; - try { - bytesRead = channel.read(channelBuffer.sliceBuffersFrom(channelBuffer.getIndex())); - } catch (IOException ex) { - ioException = true; - throw ex; - } + int bytesRead = readFromChannel(channelBuffer.sliceBuffersFrom(channelBuffer.getIndex())); - if (bytesRead == -1) { - peerClosed = true; + if (bytesRead == 0) { return 0; } @@ -90,7 +78,6 @@ public void sendMessage(ByteBuffer[] buffers, BiConsumer listen return; } - // TODO: Eval if we will allow writes from sendMessage selector.queueWriteInChannelBuffer(writeOperation); } @@ -126,28 +113,38 @@ public void closeChannel() { @Override public boolean selectorShouldClose() { - return peerClosed || ioException || isClosing.get(); + return isPeerClosed() || hasIOException() || isClosing.get(); } @Override - public void closeFromSelector() { + public void closeFromSelector() throws IOException { channel.getSelector().assertOnSelectorThread(); - // Set to true in order to reject new writes before queuing with selector - isClosing.set(true); - channelBuffer.close(); - for (BytesWriteOperation op : queued) { - channel.getSelector().executeFailedListener(op.getListener(), new ClosedChannelException()); + if (channel.isOpen()) { + IOException channelCloseException = null; + try { + channel.closeFromSelector(); + } catch (IOException e) { + channelCloseException = e; + } + // Set to true in order to reject new writes before queuing with selector + isClosing.set(true); + channelBuffer.close(); + for (BytesWriteOperation op : queued) { + channel.getSelector().executeFailedListener(op.getListener(), new ClosedChannelException()); + } + queued.clear(); + if (channelCloseException != null) { + throw channelCloseException; + } } - queued.clear(); } private void singleFlush(BytesWriteOperation headOp) throws IOException { try { - int written = channel.write(headOp.getBuffersToWrite()); + int written = 
flushToChannel(headOp.getBuffersToWrite()); headOp.incrementIndex(written); } catch (IOException e) { channel.getSelector().executeFailedListener(headOp.getListener(), e); - ioException = true; throw e; } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java index 10afd53621dd8..fa664484c1c59 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java @@ -20,62 +20,26 @@ package org.elasticsearch.nio; import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.function.BiConsumer; -/** - * This context should implement the specific logic for a channel. When a channel receives a notification - * that it is ready to perform certain operations (read, write, etc) the {@link ChannelContext} will be - * called. This context will need to implement all protocol related logic. Additionally, if any special - * close behavior is required, it should be implemented in this context. - * - * The only methods of the context that should ever be called from a non-selector thread are - * {@link #closeChannel()} and {@link #sendMessage(ByteBuffer[], BiConsumer)}. - */ public interface ChannelContext { - - void channelRegistered() throws IOException; - - int read() throws IOException; - - void sendMessage(ByteBuffer[] buffers, BiConsumer listener); - - void queueWriteOperation(WriteOperation writeOperation); - - void flushChannel() throws IOException; - - boolean hasQueuedWriteOps(); + /** + * This method cleans up any context resources that need to be released when a channel is closed. It + * should only be called by the selector thread. + * + * @throws IOException during channel / context close + */ + void closeFromSelector() throws IOException; /** * Schedules a channel to be closed by the selector event loop with which it is registered. - *
<p> + * If the channel is open and the state can be transitioned to closed, the close operation will + * be scheduled with the event loop. - * <p> - * If the channel is already set to closed, it is assumed that it is already scheduled to be closed. - * <p>
+ * * Depending on the underlying protocol of the channel, a close operation might simply close the socket * channel or may involve reading and writing messages. */ void closeChannel(); - /** - * This method indicates if a selector should close this channel. - * - * @return a boolean indicating if the selector should close - */ - boolean selectorShouldClose(); - - /** - * This method cleans up any context resources that need to be released when a channel is closed. It - * should only be called by the selector thread. - * - * @throws IOException during channel / context close - */ - void closeFromSelector() throws IOException; - - @FunctionalInterface - interface ReadConsumer { - int consumeReads(InboundChannelBuffer channelBuffer) throws IOException; - } + void handleException(Exception e); } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java index a9909587453be..5fc3f46f998e6 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelFactory.java @@ -89,7 +89,6 @@ private Socket internalCreateChannel(SocketSelector selector, SocketChannel rawC try { Socket channel = createChannel(selector, rawChannel); assert channel.getContext() != null : "channel context should have been set on channel"; - assert channel.getExceptionContext() != null : "exception handler should have been set on channel"; return channel; } catch (Exception e) { closeRawChannel(rawChannel, e); diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java index 42bc0555d509c..7cba9b998b311 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java @@ -69,7 +69,7 @@ protected void uncaughtException(Exception exception) { */ protected void handleClose(NioChannel channel) { try { - channel.closeFromSelector(); + channel.getContext().closeFromSelector(); } catch (IOException e) { closeException(channel, e); } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java index 07b6b68908bd1..f671b39d4d61b 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java @@ -59,6 +59,10 @@ public InboundChannelBuffer(Supplier pageSupplier) { ensureCapacity(PAGE_SIZE); } + public static InboundChannelBuffer allocatingInstance() { + return new InboundChannelBuffer(() -> new Page(ByteBuffer.allocate(PAGE_SIZE), () -> {})); + } + @Override public void close() { if (isClosed.compareAndSet(false, true)) { diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java index 438c013ecd0aa..690e3d3b38bda 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java @@ -32,6 +32,8 @@ public interface NioChannel { InetSocketAddress getLocalAddress(); + void close(); + void closeFromSelector() throws IOException; void register() throws 
ClosedChannelException; @@ -42,6 +44,8 @@ public interface NioChannel { NetworkChannel getRawChannel(); + ChannelContext getContext(); + /** * Adds a close listener to the channel. Multiple close listeners can be added. There is no guarantee * about the order in which close listeners will be executed. If the channel is already closed, the diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java index 8eb904dc74179..3d1748e413ac7 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java @@ -21,12 +21,13 @@ import java.io.IOException; import java.nio.channels.ServerSocketChannel; -import java.util.function.Consumer; +import java.util.concurrent.atomic.AtomicBoolean; public class NioServerSocketChannel extends AbstractNioChannel { private final ChannelFactory channelFactory; - private Consumer acceptContext; + private ServerChannelContext context; + private final AtomicBoolean contextSet = new AtomicBoolean(false); public NioServerSocketChannel(ServerSocketChannel socketChannel, ChannelFactory channelFactory, AcceptingSelector selector) throws IOException { @@ -39,17 +40,22 @@ public NioServerSocketChannel(ServerSocketChannel socketChannel, ChannelFactory< } /** - * This method sets the accept context for a server socket channel. The accept context is called when a - * new channel is accepted. The parameter passed to the context is the new channel. + * This method sets the context for a server socket channel. The context is called when a new channel is + * accepted, an exception occurs, or it is time to close the channel. * - * @param acceptContext to call + * @param context to call */ - public void setAcceptContext(Consumer acceptContext) { - this.acceptContext = acceptContext; + public void setContext(ServerChannelContext context) { + if (contextSet.compareAndSet(false, true)) { + this.context = context; + } else { + throw new IllegalStateException("Context on this channel were already set. 
It should only be once."); + } } - public Consumer getAcceptContext() { - return acceptContext; + @Override + public ServerChannelContext getContext() { + return context; } @Override diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java index c9ea14446d935..aba98ff0cbff0 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java @@ -19,13 +19,10 @@ package org.elasticsearch.nio; -import org.elasticsearch.nio.utils.ExceptionsHelper; - import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.nio.channels.SocketChannel; -import java.util.ArrayList; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; @@ -35,9 +32,8 @@ public class NioSocketChannel extends AbstractNioChannel { private final InetSocketAddress remoteAddress; private final CompletableFuture connectContext = new CompletableFuture<>(); private final SocketSelector socketSelector; - private final AtomicBoolean contextsSet = new AtomicBoolean(false); - private ChannelContext context; - private BiConsumer exceptionContext; + private final AtomicBoolean contextSet = new AtomicBoolean(false); + private SocketChannelContext context; private Exception connectException; public NioSocketChannel(SocketChannel socketChannel, SocketSelector selector) throws IOException { @@ -46,25 +42,6 @@ public NioSocketChannel(SocketChannel socketChannel, SocketSelector selector) th this.socketSelector = selector; } - @Override - public void closeFromSelector() throws IOException { - getSelector().assertOnSelectorThread(); - if (isOpen()) { - ArrayList closingExceptions = new ArrayList<>(2); - try { - super.closeFromSelector(); - } catch (IOException e) { - closingExceptions.add(e); - } - try { - context.closeFromSelector(); - } catch (IOException e) { - closingExceptions.add(e); - } - ExceptionsHelper.rethrowAndSuppress(closingExceptions); - } - } - @Override public SocketSelector getSelector() { return socketSelector; @@ -94,23 +71,19 @@ public int read(ByteBuffer[] buffers) throws IOException { } } - public void setContexts(ChannelContext context, BiConsumer exceptionContext) { - if (contextsSet.compareAndSet(false, true)) { + public void setContext(SocketChannelContext context) { + if (contextSet.compareAndSet(false, true)) { this.context = context; - this.exceptionContext = exceptionContext; } else { - throw new IllegalStateException("Contexts on this channel were already set. They should only be once."); + throw new IllegalStateException("Context on this channel were already set. 
It should only be once."); } } - public ChannelContext getContext() { + @Override + public SocketChannelContext getContext() { return context; } - public BiConsumer getExceptionContext() { - return exceptionContext; - } - public InetSocketAddress getRemoteAddress() { return remoteAddress; } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java new file mode 100644 index 0000000000000..551cab48e0577 --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +public class ServerChannelContext implements ChannelContext { + + private final NioServerSocketChannel channel; + private final Consumer acceptor; + private final BiConsumer exceptionHandler; + private final AtomicBoolean isClosing = new AtomicBoolean(false); + + public ServerChannelContext(NioServerSocketChannel channel, Consumer acceptor, + BiConsumer exceptionHandler) { + this.channel = channel; + this.acceptor = acceptor; + this.exceptionHandler = exceptionHandler; + } + + public void acceptChannel(NioSocketChannel acceptedChannel) { + acceptor.accept(acceptedChannel); + } + + @Override + public void closeFromSelector() throws IOException { + channel.closeFromSelector(); + } + + @Override + public void closeChannel() { + if (isClosing.compareAndSet(false, true)) { + channel.getSelector().queueChannelClose(channel); + } + } + + @Override + public void handleException(Exception e) { + exceptionHandler.accept(channel, e); + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java new file mode 100644 index 0000000000000..62f82e8995d16 --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java @@ -0,0 +1,129 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.function.BiConsumer; + +/** + * This context should implement the specific logic for a channel. When a channel receives a notification + * that it is ready to perform certain operations (read, write, etc) the {@link SocketChannelContext} will + * be called. This context will need to implement all protocol related logic. Additionally, if any special + * close behavior is required, it should be implemented in this context. + * + * The only methods of the context that should ever be called from a non-selector thread are + * {@link #closeChannel()} and {@link #sendMessage(ByteBuffer[], BiConsumer)}. + */ +public abstract class SocketChannelContext implements ChannelContext { + + protected final NioSocketChannel channel; + private final BiConsumer exceptionHandler; + private boolean ioException; + private boolean peerClosed; + + protected SocketChannelContext(NioSocketChannel channel, BiConsumer exceptionHandler) { + this.channel = channel; + this.exceptionHandler = exceptionHandler; + } + + @Override + public void handleException(Exception e) { + exceptionHandler.accept(channel, e); + } + + public void channelRegistered() throws IOException {} + + public abstract int read() throws IOException; + + public abstract void sendMessage(ByteBuffer[] buffers, BiConsumer listener); + + public abstract void queueWriteOperation(WriteOperation writeOperation); + + public abstract void flushChannel() throws IOException; + + public abstract boolean hasQueuedWriteOps(); + + /** + * This method indicates if a selector should close this channel. 
+ * + * @return a boolean indicating if the selector should close + */ + public abstract boolean selectorShouldClose(); + + protected boolean hasIOException() { + return ioException; + } + + protected boolean isPeerClosed() { + return peerClosed; + } + + protected int readFromChannel(ByteBuffer buffer) throws IOException { + try { + int bytesRead = channel.read(buffer); + if (bytesRead < 0) { + peerClosed = true; + bytesRead = 0; + } + return bytesRead; + } catch (IOException e) { + ioException = true; + throw e; + } + } + + protected int readFromChannel(ByteBuffer[] buffers) throws IOException { + try { + int bytesRead = channel.read(buffers); + if (bytesRead < 0) { + peerClosed = true; + bytesRead = 0; + } + return bytesRead; + } catch (IOException e) { + ioException = true; + throw e; + } + } + + protected int flushToChannel(ByteBuffer buffer) throws IOException { + try { + return channel.write(buffer); + } catch (IOException e) { + ioException = true; + throw e; + } + } + + protected int flushToChannel(ByteBuffer[] buffers) throws IOException { + try { + return channel.write(buffers); + } catch (IOException e) { + ioException = true; + throw e; + } + } + + @FunctionalInterface + public interface ReadConsumer { + int consumeReads(InboundChannelBuffer channelBuffer) throws IOException; + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java index d5977cee851ed..b1192f11eb120 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java @@ -44,7 +44,7 @@ public SocketEventHandler(Logger logger) { * @param channel that was registered */ protected void handleRegistration(NioSocketChannel channel) throws IOException { - ChannelContext context = channel.getContext(); + SocketChannelContext context = channel.getContext(); context.channelRegistered(); if (context.hasQueuedWriteOps()) { SelectionKeyUtils.setConnectReadAndWriteInterested(channel); @@ -61,7 +61,7 @@ protected void handleRegistration(NioSocketChannel channel) throws IOException { */ protected void registrationException(NioSocketChannel channel, Exception exception) { logger.debug(() -> new ParameterizedMessage("failed to register socket channel: {}", channel), exception); - exceptionCaught(channel, exception); + channel.getContext().handleException(exception); } /** @@ -82,7 +82,7 @@ protected void handleConnect(NioSocketChannel channel) { */ protected void connectException(NioSocketChannel channel, Exception exception) { logger.debug(() -> new ParameterizedMessage("failed to connect to socket channel: {}", channel), exception); - exceptionCaught(channel, exception); + channel.getContext().handleException(exception); } /** @@ -103,7 +103,7 @@ protected void handleRead(NioSocketChannel channel) throws IOException { */ protected void readException(NioSocketChannel channel, Exception exception) { logger.debug(() -> new ParameterizedMessage("exception while reading from socket channel: {}", channel), exception); - exceptionCaught(channel, exception); + channel.getContext().handleException(exception); } /** @@ -113,7 +113,7 @@ protected void readException(NioSocketChannel channel, Exception exception) { * @param channel that can be written to */ protected void handleWrite(NioSocketChannel channel) throws IOException { - ChannelContext channelContext = channel.getContext(); + 
SocketChannelContext channelContext = channel.getContext(); channelContext.flushChannel(); } @@ -125,20 +125,7 @@ protected void handleWrite(NioSocketChannel channel) throws IOException { */ protected void writeException(NioSocketChannel channel, Exception exception) { logger.debug(() -> new ParameterizedMessage("exception while writing to socket channel: {}", channel), exception); - exceptionCaught(channel, exception); - } - - /** - * This method is called when handling an event from a channel fails due to an unexpected exception. - * An example would be if checking ready ops on a {@link java.nio.channels.SelectionKey} threw - * {@link java.nio.channels.CancelledKeyException}. - * - * @param channel that caused the exception - * @param exception that was thrown - */ - protected void genericChannelException(NioChannel channel, Exception exception) { - super.genericChannelException(channel, exception); - exceptionCaught((NioSocketChannel) channel, exception); + channel.getContext().handleException(exception); } /** @@ -167,8 +154,4 @@ protected void postHandling(NioSocketChannel channel) { } } } - - private void exceptionCaught(NioSocketChannel channel, Exception e) { - channel.getExceptionContext().accept(channel, e); - } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java index e35aa7b4d226b..2de48fb8899e2 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java @@ -122,7 +122,7 @@ public void queueWrite(WriteOperation writeOperation) { public void queueWriteInChannelBuffer(WriteOperation writeOperation) { assertOnSelectorThread(); NioSocketChannel channel = writeOperation.getChannel(); - ChannelContext context = channel.getContext(); + SocketChannelContext context = channel.getContext(); try { SelectionKeyUtils.setWriteInterested(channel); context.queueWriteOperation(writeOperation); diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java index 09800d981bd2d..d2dfe4f37a007 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java @@ -24,7 +24,7 @@ /** * This is a basic write operation that can be queued with a channel. The only requirements of a write * operation is that is has a listener and a reference to its channel. The actual conversion of the write - * operation implementation to bytes will be performed by the {@link ChannelContext}. + * operation implementation to bytes will be performed by the {@link SocketChannelContext}. 
*/ public interface WriteOperation { diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java index 1f51fdc2017ae..23ab3bb3e1d62 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java @@ -27,8 +27,6 @@ import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.ArrayList; -import java.util.function.BiConsumer; -import java.util.function.Consumer; import static org.mockito.Matchers.same; import static org.mockito.Mockito.mock; @@ -41,21 +39,21 @@ public class AcceptorEventHandlerTests extends ESTestCase { private SocketSelector socketSelector; private ChannelFactory channelFactory; private NioServerSocketChannel channel; - private Consumer acceptedChannelCallback; + private ServerChannelContext context; @Before @SuppressWarnings("unchecked") public void setUpHandler() throws IOException { channelFactory = mock(ChannelFactory.class); socketSelector = mock(SocketSelector.class); - acceptedChannelCallback = mock(Consumer.class); + context = mock(ServerChannelContext.class); ArrayList selectors = new ArrayList<>(); selectors.add(socketSelector); handler = new AcceptorEventHandler(logger, new RoundRobinSupplier<>(selectors.toArray(new SocketSelector[selectors.size()]))); AcceptingSelector selector = mock(AcceptingSelector.class); channel = new DoNotRegisterServerChannel(mock(ServerSocketChannel.class), channelFactory, selector); - channel.setAcceptContext(acceptedChannelCallback); + channel.setContext(context); channel.register(); } @@ -80,11 +78,11 @@ public void testHandleAcceptCallsChannelFactory() throws IOException { @SuppressWarnings("unchecked") public void testHandleAcceptCallsServerAcceptCallback() throws IOException { NioSocketChannel childChannel = new NioSocketChannel(mock(SocketChannel.class), socketSelector); - childChannel.setContexts(mock(ChannelContext.class), mock(BiConsumer.class)); + childChannel.setContext(mock(SocketChannelContext.class)); when(channelFactory.acceptNioChannel(same(channel), same(socketSelector))).thenReturn(childChannel); handler.acceptChannel(channel); - verify(acceptedChannelCallback).accept(childChannel); + verify(context).acceptChannel(childChannel); } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java index db0e6ae80badf..68ae1f2e50304 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java @@ -40,7 +40,7 @@ public class BytesChannelContextTests extends ESTestCase { - private ChannelContext.ReadConsumer readConsumer; + private SocketChannelContext.ReadConsumer readConsumer; private NioSocketChannel channel; private BytesChannelContext context; private InboundChannelBuffer channelBuffer; @@ -51,16 +51,14 @@ public class BytesChannelContextTests extends ESTestCase { @Before @SuppressWarnings("unchecked") public void init() { - readConsumer = mock(ChannelContext.ReadConsumer.class); + readConsumer = mock(SocketChannelContext.ReadConsumer.class); messageLength = randomInt(96) + 20; selector = mock(SocketSelector.class); listener = mock(BiConsumer.class); 
channel = mock(NioSocketChannel.class); - Supplier pageSupplier = () -> - new InboundChannelBuffer.Page(ByteBuffer.allocate(BigArrays.BYTE_PAGE_SIZE), () -> {}); - channelBuffer = new InboundChannelBuffer(pageSupplier); - context = new BytesChannelContext(channel, readConsumer, channelBuffer); + channelBuffer = InboundChannelBuffer.allocatingInstance(); + context = new BytesChannelContext(channel, null, readConsumer, channelBuffer); when(channel.getSelector()).thenReturn(selector); when(selector.isOnCurrentThread()).thenReturn(true); @@ -153,11 +151,12 @@ public void testReadLessThanZeroMeansReadyForClose() throws IOException { } public void testCloseClosesChannelBuffer() throws IOException { + when(channel.isOpen()).thenReturn(true); Runnable closer = mock(Runnable.class); Supplier pageSupplier = () -> new InboundChannelBuffer.Page(ByteBuffer.allocate(1 << 14), closer); InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); buffer.ensureCapacity(1); - BytesChannelContext context = new BytesChannelContext(channel, readConsumer, buffer); + BytesChannelContext context = new BytesChannelContext(channel, null, readConsumer, buffer); context.closeFromSelector(); verify(closer).run(); } @@ -218,6 +217,7 @@ public void testWriteOpsClearedOnClose() throws Exception { assertTrue(context.hasQueuedWriteOps()); + when(channel.isOpen()).thenReturn(true); context.closeFromSelector(); verify(selector).executeFailedListener(same(listener), any(ClosedChannelException.class)); diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java index e3f42139fd80e..1c8a8a130ccfa 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java @@ -28,7 +28,6 @@ import java.net.InetSocketAddress; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; -import java.util.function.BiConsumer; import static org.mockito.Matchers.any; import static org.mockito.Matchers.same; @@ -139,7 +138,7 @@ private static class TestChannelFactory extends ChannelFactory() { - @Override - public void onResponse(Void o) { - isClosed.set(true); - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - isClosed.set(true); - latch.countDown(); - } - })); - - assertTrue(channel.isOpen()); - assertFalse(closedRawChannel.get()); - assertFalse(isClosed.get()); - - PlainActionFuture closeFuture = PlainActionFuture.newFuture(); - channel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); - selector.queueChannelClose(channel); - closeFuture.actionGet(); - - - assertTrue(closedRawChannel.get()); - assertFalse(channel.isOpen()); - latch.await(); - assertTrue(isClosed.get()); - } - - private class DoNotCloseServerChannel extends DoNotRegisterServerChannel { - - private DoNotCloseServerChannel(ServerSocketChannel channel, ChannelFactory channelFactory, AcceptingSelector selector) - throws IOException { - super(channel, channelFactory, selector); - } - - @Override - void closeRawChannel() throws IOException { - closedRawChannel.set(true); + try (ServerSocketChannel rawChannel = ServerSocketChannel.open()) { + NioServerSocketChannel channel = new NioServerSocketChannel(rawChannel, mock(ChannelFactory.class), selector); + channel.setContext(new ServerChannelContext(channel, mock(Consumer.class), mock(BiConsumer.class))); + 
channel.addCloseListener(ActionListener.toBiConsumer(new ActionListener() { + @Override + public void onResponse(Void o) { + isClosed.set(true); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + isClosed.set(true); + latch.countDown(); + } + })); + + assertTrue(channel.isOpen()); + assertTrue(rawChannel.isOpen()); + assertFalse(isClosed.get()); + + PlainActionFuture closeFuture = PlainActionFuture.newFuture(); + channel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); + selector.queueChannelClose(channel); + closeFuture.actionGet(); + + + assertFalse(rawChannel.isOpen()); + assertFalse(channel.isOpen()); + latch.await(); + assertTrue(isClosed.get()); } } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java index dd0956458fad3..bbda9233bbb80 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioSocketChannelTests.java @@ -35,14 +35,12 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; -import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class NioSocketChannelTests extends ESTestCase { private SocketSelector selector; - private AtomicBoolean closedRawChannel; private Thread thread; @Before @@ -50,7 +48,6 @@ public class NioSocketChannelTests extends ESTestCase { public void startSelector() throws IOException { selector = new SocketSelector(new SocketEventHandler(logger)); thread = new Thread(selector::runLoop); - closedRawChannel = new AtomicBoolean(false); thread.start(); FutureUtils.get(selector.isRunningFuture()); } @@ -66,80 +63,46 @@ public void testClose() throws Exception { AtomicBoolean isClosed = new AtomicBoolean(false); CountDownLatch latch = new CountDownLatch(1); - NioSocketChannel socketChannel = new DoNotCloseChannel(mock(SocketChannel.class), selector); - socketChannel.setContexts(mock(ChannelContext.class), mock(BiConsumer.class)); - socketChannel.addCloseListener(ActionListener.toBiConsumer(new ActionListener() { - @Override - public void onResponse(Void o) { - isClosed.set(true); - latch.countDown(); - } - @Override - public void onFailure(Exception e) { - isClosed.set(true); - latch.countDown(); - } - })); - - assertTrue(socketChannel.isOpen()); - assertFalse(closedRawChannel.get()); - assertFalse(isClosed.get()); - - PlainActionFuture closeFuture = PlainActionFuture.newFuture(); - socketChannel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); - selector.queueChannelClose(socketChannel); - closeFuture.actionGet(); - - assertTrue(closedRawChannel.get()); - assertFalse(socketChannel.isOpen()); - latch.await(); - assertTrue(isClosed.get()); - } - - @SuppressWarnings("unchecked") - public void testCloseContextExceptionDoesNotStopClose() throws Exception { - AtomicBoolean isClosed = new AtomicBoolean(false); - CountDownLatch latch = new CountDownLatch(1); - - IOException ioException = new IOException(); - NioSocketChannel socketChannel = new DoNotCloseChannel(mock(SocketChannel.class), selector); - ChannelContext context = mock(ChannelContext.class); - doThrow(ioException).when(context).closeFromSelector(); - socketChannel.setContexts(context, mock(BiConsumer.class)); - socketChannel.addCloseListener(ActionListener.toBiConsumer(new ActionListener() { - @Override - 
public void onResponse(Void o) { - isClosed.set(true); - latch.countDown(); - } - @Override - public void onFailure(Exception e) { - isClosed.set(true); - latch.countDown(); - } - })); - - assertTrue(socketChannel.isOpen()); - assertFalse(closedRawChannel.get()); - assertFalse(isClosed.get()); - - PlainActionFuture closeFuture = PlainActionFuture.newFuture(); - socketChannel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); - selector.queueChannelClose(socketChannel); - closeFuture.actionGet(); - - assertTrue(closedRawChannel.get()); - assertFalse(socketChannel.isOpen()); - latch.await(); - assertTrue(isClosed.get()); + try(SocketChannel rawChannel = SocketChannel.open()) { + NioSocketChannel socketChannel = new NioSocketChannel(rawChannel, selector); + socketChannel.setContext(new BytesChannelContext(socketChannel, mock(BiConsumer.class), + mock(SocketChannelContext.ReadConsumer.class), InboundChannelBuffer.allocatingInstance())); + socketChannel.addCloseListener(ActionListener.toBiConsumer(new ActionListener() { + @Override + public void onResponse(Void o) { + isClosed.set(true); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + isClosed.set(true); + latch.countDown(); + } + })); + + assertTrue(socketChannel.isOpen()); + assertTrue(rawChannel.isOpen()); + assertFalse(isClosed.get()); + + PlainActionFuture closeFuture = PlainActionFuture.newFuture(); + socketChannel.addCloseListener(ActionListener.toBiConsumer(closeFuture)); + selector.queueChannelClose(socketChannel); + closeFuture.actionGet(); + + assertFalse(rawChannel.isOpen()); + assertFalse(socketChannel.isOpen()); + latch.await(); + assertTrue(isClosed.get()); + } } @SuppressWarnings("unchecked") public void testConnectSucceeds() throws Exception { SocketChannel rawChannel = mock(SocketChannel.class); when(rawChannel.finishConnect()).thenReturn(true); - NioSocketChannel socketChannel = new DoNotCloseChannel(rawChannel, selector); - socketChannel.setContexts(mock(ChannelContext.class), mock(BiConsumer.class)); + NioSocketChannel socketChannel = new DoNotRegisterChannel(rawChannel, selector); + socketChannel.setContext(mock(SocketChannelContext.class)); selector.scheduleForRegistration(socketChannel); PlainActionFuture connectFuture = PlainActionFuture.newFuture(); @@ -148,15 +111,14 @@ public void testConnectSucceeds() throws Exception { assertTrue(socketChannel.isConnectComplete()); assertTrue(socketChannel.isOpen()); - assertFalse(closedRawChannel.get()); } @SuppressWarnings("unchecked") public void testConnectFails() throws Exception { SocketChannel rawChannel = mock(SocketChannel.class); when(rawChannel.finishConnect()).thenThrow(new ConnectException()); - NioSocketChannel socketChannel = new DoNotCloseChannel(rawChannel, selector); - socketChannel.setContexts(mock(ChannelContext.class), mock(BiConsumer.class)); + NioSocketChannel socketChannel = new DoNotRegisterChannel(rawChannel, selector); + socketChannel.setContext(mock(SocketChannelContext.class)); selector.scheduleForRegistration(socketChannel); PlainActionFuture connectFuture = PlainActionFuture.newFuture(); @@ -168,16 +130,4 @@ public void testConnectFails() throws Exception { // Even if connection fails the channel is 'open' until close() is called assertTrue(socketChannel.isOpen()); } - - private class DoNotCloseChannel extends DoNotRegisterChannel { - - private DoNotCloseChannel(SocketChannel channel, SocketSelector selector) throws IOException { - super(channel, selector); - } - - @Override - void closeRawChannel() throws 
IOException { - closedRawChannel.set(true); - } - } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java index e0f833c9051d0..d74214636dbdd 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java @@ -53,9 +53,8 @@ public void setUpHandler() throws IOException { channel = new DoNotRegisterChannel(rawChannel, socketSelector); when(rawChannel.finishConnect()).thenReturn(true); - Supplier pageSupplier = () -> new InboundChannelBuffer.Page(ByteBuffer.allocate(1 << 14), () -> {}); - InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); - channel.setContexts(new BytesChannelContext(channel, mock(ChannelContext.ReadConsumer.class), buffer), exceptionHandler); + InboundChannelBuffer buffer = InboundChannelBuffer.allocatingInstance(); + channel.setContext(new BytesChannelContext(channel, exceptionHandler, mock(SocketChannelContext.ReadConsumer.class), buffer)); channel.register(); channel.finishConnect(); @@ -64,7 +63,7 @@ public void setUpHandler() throws IOException { public void testRegisterCallsContext() throws IOException { NioSocketChannel channel = mock(NioSocketChannel.class); - ChannelContext channelContext = mock(ChannelContext.class); + SocketChannelContext channelContext = mock(SocketChannelContext.class); when(channel.getContext()).thenReturn(channelContext); when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0)); handler.handleRegistration(channel); @@ -102,8 +101,8 @@ public void testConnectExceptionCallsExceptionHandler() throws IOException { public void testHandleReadDelegatesToContext() throws IOException { NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class)); - ChannelContext context = mock(ChannelContext.class); - channel.setContexts(context, exceptionHandler); + SocketChannelContext context = mock(SocketChannelContext.class); + channel.setContext(context); when(context.read()).thenReturn(1); handler.handleRead(channel); @@ -124,19 +123,19 @@ public void testWriteExceptionCallsExceptionHandler() { public void testPostHandlingCallWillCloseTheChannelIfReady() throws IOException { NioSocketChannel channel = mock(NioSocketChannel.class); - ChannelContext context = mock(ChannelContext.class); + SocketChannelContext context = mock(SocketChannelContext.class); when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0)); when(channel.getContext()).thenReturn(context); when(context.selectorShouldClose()).thenReturn(true); handler.postHandling(channel); - verify(channel).closeFromSelector(); + verify(context).closeFromSelector(); } public void testPostHandlingCallWillNotCloseTheChannelIfNotReady() throws IOException { NioSocketChannel channel = mock(NioSocketChannel.class); - ChannelContext context = mock(ChannelContext.class); + SocketChannelContext context = mock(SocketChannelContext.class); when(channel.getSelectionKey()).thenReturn(new TestSelectionKey(0)); when(channel.getContext()).thenReturn(context); @@ -149,8 +148,8 @@ public void testPostHandlingCallWillNotCloseTheChannelIfNotReady() throws IOExce public void testPostHandlingWillAddWriteIfNecessary() throws IOException { NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class)); channel.setSelectionKey(new TestSelectionKey(SelectionKey.OP_READ)); - 
ChannelContext context = mock(ChannelContext.class); - channel.setContexts(context, null); + SocketChannelContext context = mock(SocketChannelContext.class); + channel.setContext(context); when(context.hasQueuedWriteOps()).thenReturn(true); @@ -162,8 +161,8 @@ public void testPostHandlingWillAddWriteIfNecessary() throws IOException { public void testPostHandlingWillRemoveWriteIfNecessary() throws IOException { NioSocketChannel channel = new DoNotRegisterChannel(rawChannel, mock(SocketSelector.class)); channel.setSelectionKey(new TestSelectionKey(SelectionKey.OP_READ | SelectionKey.OP_WRITE)); - ChannelContext context = mock(ChannelContext.class); - channel.setContexts(context, null); + SocketChannelContext context = mock(SocketChannelContext.class); + channel.setContext(context); when(context.hasQueuedWriteOps()).thenReturn(false); @@ -171,10 +170,4 @@ public void testPostHandlingWillRemoveWriteIfNecessary() throws IOException { handler.postHandling(channel); assertEquals(SelectionKey.OP_READ, channel.getSelectionKey().interestOps()); } - - private void setWriteAndRead(NioChannel channel) { - SelectionKeyUtils.setConnectAndReadInterested(channel); - SelectionKeyUtils.removeConnectInterested(channel); - SelectionKeyUtils.setWriteInterested(channel); - } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java index 9197fe38dbc0a..5992244b2f930 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java @@ -49,7 +49,7 @@ public class SocketSelectorTests extends ESTestCase { private SocketEventHandler eventHandler; private NioSocketChannel channel; private TestSelectionKey selectionKey; - private ChannelContext channelContext; + private SocketChannelContext channelContext; private BiConsumer listener; private ByteBuffer[] buffers = {ByteBuffer.allocate(1)}; private Selector rawSelector; @@ -60,7 +60,7 @@ public void setUp() throws Exception { super.setUp(); eventHandler = mock(SocketEventHandler.class); channel = mock(NioSocketChannel.class); - channelContext = mock(ChannelContext.class); + channelContext = mock(SocketChannelContext.class); listener = mock(BiConsumer.class); selectionKey = new TestSelectionKey(0); selectionKey.attach(channel); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index d25d3c5974ad8..acea1ca5d482e 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -31,14 +31,15 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.nio.AcceptingSelector; import org.elasticsearch.nio.AcceptorEventHandler; -import org.elasticsearch.nio.BytesChannelContext; -import org.elasticsearch.nio.ChannelContext; import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.nio.BytesChannelContext; import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioSocketChannel; +import 
org.elasticsearch.nio.ServerChannelContext; import org.elasticsearch.nio.SocketEventHandler; import org.elasticsearch.nio.SocketSelector; import org.elasticsearch.threadpool.ThreadPool; @@ -52,6 +53,7 @@ import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.concurrent.ConcurrentMap; +import java.util.function.BiConsumer; import java.util.function.Supplier; import static org.elasticsearch.common.settings.Setting.intSetting; @@ -182,18 +184,21 @@ public TcpNioSocketChannel createChannel(SocketSelector selector, SocketChannel Recycler.V bytes = pageCacheRecycler.bytePage(false); return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close); }; - ChannelContext.ReadConsumer nioReadConsumer = channelBuffer -> + SocketChannelContext.ReadConsumer nioReadConsumer = channelBuffer -> consumeNetworkReads(nioChannel, BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()))); - BytesChannelContext context = new BytesChannelContext(nioChannel, nioReadConsumer, new InboundChannelBuffer(pageSupplier)); - nioChannel.setContexts(context, NioTransport.this::exceptionCaught); + BiConsumer exceptionHandler = NioTransport.this::exceptionCaught; + BytesChannelContext context = new BytesChannelContext(nioChannel, exceptionHandler, nioReadConsumer, + new InboundChannelBuffer(pageSupplier)); + nioChannel.setContext(context); return nioChannel; } @Override public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { - TcpNioServerSocketChannel nioServerChannel = new TcpNioServerSocketChannel(profileName, channel, this, selector); - nioServerChannel.setAcceptContext(NioTransport.this::acceptChannel); - return nioServerChannel; + TcpNioServerSocketChannel nioChannel = new TcpNioServerSocketChannel(profileName, channel, this, selector); + ServerChannelContext context = new ServerChannelContext(nioChannel, NioTransport.this::acceptChannel, (c, e) -> {}); + nioChannel.setContext(context); + return nioChannel; } } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java index f0d01bf5a7da6..683ae146cfb9c 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java @@ -39,8 +39,8 @@ public class TcpNioServerSocketChannel extends NioServerSocketChannel implements private final String profile; public TcpNioServerSocketChannel(String profile, ServerSocketChannel socketChannel, - ChannelFactory channelFactory, - AcceptingSelector selector) throws IOException { + ChannelFactory channelFactory, + AcceptingSelector selector) throws IOException { super(socketChannel, channelFactory, selector); this.profile = profile; } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index c5ec4c6bfb7aa..ec262261e54c8 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -32,12 +32,13 @@ import org.elasticsearch.nio.AcceptingSelector; import org.elasticsearch.nio.AcceptorEventHandler; import 
org.elasticsearch.nio.BytesChannelContext; -import org.elasticsearch.nio.ChannelContext; import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.ServerChannelContext; +import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.nio.SocketSelector; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TcpChannel; @@ -161,17 +162,19 @@ public MockSocketChannel createChannel(SocketSelector selector, SocketChannel ch Recycler.V bytes = pageCacheRecycler.bytePage(false); return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close); }; - ChannelContext.ReadConsumer nioReadConsumer = channelBuffer -> + SocketChannelContext.ReadConsumer nioReadConsumer = channelBuffer -> consumeNetworkReads(nioChannel, BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()))); - BytesChannelContext context = new BytesChannelContext(nioChannel, nioReadConsumer, new InboundChannelBuffer(pageSupplier)); - nioChannel.setContexts(context, MockNioTransport.this::exceptionCaught); + BytesChannelContext context = new BytesChannelContext(nioChannel, MockNioTransport.this::exceptionCaught, nioReadConsumer, + new InboundChannelBuffer(pageSupplier)); + nioChannel.setContext(context); return nioChannel; } @Override public MockServerChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { MockServerChannel nioServerChannel = new MockServerChannel(profileName, channel, this, selector); - nioServerChannel.setAcceptContext(MockNioTransport.this::acceptChannel); + ServerChannelContext context = new ServerChannelContext(nioServerChannel, MockNioTransport.this::acceptChannel, (c, e) -> {}); + nioServerChannel.setContext(context); return nioServerChannel; } } From 9db9bd52f7729904d11c3188618260683d5c7e78 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 18 Jan 2018 15:45:06 -0500 Subject: [PATCH 64/94] Clean up commits when global checkpoint advanced (#28140) Today we keep multiple index commits based on the current global checkpoint, but only clean up unneeded index commits when we have a new index commit. However, we can release the old index commits earlier once the global checkpoint has advanced enough. This commit makes an engine revisit the index deletion policy whenever a new global checkpoint value is persisted and advanced enough. 
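To make the mechanism concrete, here is a minimal, self-contained sketch of the decision the new code makes (illustrative names only -- `CommitCleanupSketch` and its fields are hypothetical; the real logic lives in `CombinedDeletionPolicy.hasUnreferencedCommits` and the `InternalEngine` sync paths in the diff below):

[source,java]
--------------------------------------------------
import java.util.Arrays;
import java.util.List;
import java.util.function.LongSupplier;

// Minimal model of the cleanup trigger added in this patch: older commits
// become releasable once the persisted global checkpoint reaches the
// max_seqno of the most recent commit. Illustrative only, not the real API.
class CommitCleanupSketch {
    static class Commit {
        final long maxSeqNo;
        Commit(long maxSeqNo) { this.maxSeqNo = maxSeqNo; }
    }

    private final LongSupplier persistedGlobalCheckpoint;

    CommitCleanupSketch(LongSupplier persistedGlobalCheckpoint) {
        this.persistedGlobalCheckpoint = persistedGlobalCheckpoint;
    }

    /** True once the last commit is safe, so any older commits can go. */
    boolean hasUnreferencedCommits(List<Commit> commits) {
        if (commits.size() < 2) {
            return false; // a single commit is always kept
        }
        long lastMaxSeqNo = commits.get(commits.size() - 1).maxSeqNo;
        return persistedGlobalCheckpoint.getAsLong() >= lastMaxSeqNo;
    }

    public static void main(String[] args) {
        long[] gcp = {5};
        CommitCleanupSketch sketch = new CommitCleanupSketch(() -> gcp[0]);
        List<Commit> commits = Arrays.asList(new Commit(3), new Commit(10));
        System.out.println(sketch.hasUnreferencedCommits(commits)); // false: 5 < 10
        gcp[0] = 12; // a translog sync persists an advanced global checkpoint
        System.out.println(sketch.hasUnreferencedCommits(commits)); // true: clean up
    }
}
--------------------------------------------------

In the actual change, this check runs from the translog sync paths (`syncTranslog` and `ensureTranslogSynced`), and a positive answer triggers `indexWriter.deleteUnusedFiles()` so that Lucene re-invokes the deletion policy.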
Relates #10708 --- .../org/elasticsearch/index/IndexService.java | 2 +- .../index/engine/CombinedDeletionPolicy.java | 19 +++++++++- .../elasticsearch/index/engine/Engine.java | 8 ++++ .../index/engine/InternalEngine.java | 23 ++++++++++- .../seqno/GlobalCheckpointSyncAction.java | 5 +-- .../elasticsearch/index/shard/IndexShard.java | 7 ++-- .../engine/CombinedDeletionPolicyTests.java | 38 +++++++++++++++++++ .../index/engine/InternalEngineTests.java | 25 ++++++++++++ .../ESIndexLevelReplicationTestCase.java | 2 +- .../GlobalCheckpointSyncActionTests.java | 4 +- 10 files changed, 119 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 2d5a1dda46460..0285dcf93c1ea 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -700,7 +700,7 @@ private void maybeFSyncTranslogs() { try { Translog translog = shard.getTranslog(); if (translog.syncNeeded()) { - translog.sync(); + shard.sync(); } } catch (AlreadyClosedException ex) { // fine - continue; diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index ca0d93fa7c5aa..48a3caf0ea32b 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -47,8 +47,8 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy { private final LongSupplier globalCheckpointSupplier; private final IndexCommit startingCommit; private final ObjectIntHashMap snapshottedCommits; // Number of snapshots held against each commit point. - private IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint. - private IndexCommit lastCommit; // the most recent commit point + private volatile IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint. + private volatile IndexCommit lastCommit; // the most recent commit point CombinedDeletionPolicy(EngineConfig.OpenMode openMode, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier, IndexCommit startingCommit) { @@ -214,6 +214,21 @@ private static int indexOfKeptCommits(List commits, long return 0; } + /** + * Checks if the deletion policy can release some index commits with the latest global checkpoint. + */ + boolean hasUnreferencedCommits() throws IOException { + final IndexCommit lastCommit = this.lastCommit; + if (safeCommit != lastCommit) { // Race condition can happen but harmless + if (lastCommit.getUserData().containsKey(SequenceNumbers.MAX_SEQ_NO)) { + final long maxSeqNoFromLastCommit = Long.parseLong(lastCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO)); + // We can clean up the current safe commit if the last commit is safe + return globalCheckpointSupplier.getAsLong() >= maxSeqNoFromLastCommit; + } + } + return false; + } + /** * A wrapper of an index commit that prevents it from being deleted. 
*/ diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index b73bfb78f3cb9..7feaeb63ac36f 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -91,6 +91,7 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.BiFunction; +import java.util.stream.Stream; public abstract class Engine implements Closeable { @@ -549,6 +550,13 @@ public enum SearcherScope { /** returns the translog for this engine */ public abstract Translog getTranslog(); + /** + * Ensures that all locations in the given stream have been written to the underlying storage. + */ + public abstract boolean ensureTranslogSynced(Stream locations) throws IOException; + + public abstract void syncTranslog() throws IOException; + protected void ensureOpen() { if (isClosed.get()) { throw new AlreadyClosedException(shardId + " engine is closed", failedEngine.get()); diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 1efbd0706d156..97a6403ec3b23 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -31,7 +31,6 @@ import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ReferenceManager; @@ -94,6 +93,7 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiFunction; import java.util.function.LongSupplier; +import java.util.stream.Stream; public class InternalEngine extends Engine { @@ -520,6 +520,27 @@ public Translog getTranslog() { return translog; } + @Override + public boolean ensureTranslogSynced(Stream locations) throws IOException { + final boolean synced = translog.ensureSynced(locations); + if (synced) { + revisitIndexDeletionPolicyOnTranslogSynced(); + } + return synced; + } + + @Override + public void syncTranslog() throws IOException { + translog.sync(); + revisitIndexDeletionPolicyOnTranslogSynced(); + } + + private void revisitIndexDeletionPolicyOnTranslogSynced() throws IOException { + if (combinedDeletionPolicy.hasUnreferencedCommits()) { + indexWriter.deleteUnusedFiles(); + } + } + @Override public String getHistoryUUID() { return historyUUID; diff --git a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java index 95e3505e7467e..0ec03cb7a8f5e 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java @@ -130,10 +130,9 @@ protected ReplicaResult shardOperationOnReplica(final Request request, final Ind } private void maybeSyncTranslog(final IndexShard indexShard) throws IOException { - final Translog translog = indexShard.getTranslog(); if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && - translog.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) { - indexShard.getTranslog().sync(); + 
indexShard.getTranslog().getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) { + indexShard.sync(); } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index b5d28b3a9ecce..3ace9ededc5b3 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -2316,8 +2316,7 @@ public int getActiveOperationsCount() { @Override protected void write(List>> candidates) throws IOException { try { - final Engine engine = getEngine(); - engine.getTranslog().ensureSynced(candidates.stream().map(Tuple::v1)); + getEngine().ensureTranslogSynced(candidates.stream().map(Tuple::v1)); } catch (AlreadyClosedException ex) { // that's fine since we already synced everything on engine close - this also is conform with the methods // documentation @@ -2342,9 +2341,9 @@ public final void sync(Translog.Location location, Consumer syncListe translogSyncProcessor.put(location, syncListener); } - public final void sync() throws IOException { + public void sync() throws IOException { verifyNotClosed(); - getEngine().getTranslog().sync(); + getEngine().syncTranslog(); } /** diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index ca6059dae0067..d4af783681029 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -244,6 +244,44 @@ public void testKeepOnlyStartingCommitOnInit() throws Exception { equalTo(Long.parseLong(startingCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); } + public void testCheckUnreferencedCommits() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + final UUID translogUUID = UUID.randomUUID(); + final TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy( + OPEN_INDEX_AND_TRANSLOG, translogPolicy, globalCheckpoint::get, null); + final List commitList = new ArrayList<>(); + int totalCommits = between(2, 20); + long lastMaxSeqNo = between(1, 1000); + long lastTranslogGen = between(1, 50); + for (int i = 0; i < totalCommits; i++) { + lastMaxSeqNo += between(1, 10000); + lastTranslogGen += between(1, 100); + commitList.add(mockIndexCommit(lastMaxSeqNo, translogUUID, lastTranslogGen)); + } + IndexCommit safeCommit = randomFrom(commitList); + globalCheckpoint.set(Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO))); + indexPolicy.onCommit(commitList); + if (safeCommit == commitList.get(commitList.size() - 1)) { + // Safe commit is the last commit - no need to clean up + assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(lastTranslogGen)); + assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); + assertThat(indexPolicy.hasUnreferencedCommits(), equalTo(false)); + } else { + // Advanced but not enough + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), lastMaxSeqNo - 1)); + assertThat(indexPolicy.hasUnreferencedCommits(), equalTo(false)); + // Advanced enough + globalCheckpoint.set(randomLongBetween(lastMaxSeqNo, Long.MAX_VALUE)); + assertThat(indexPolicy.hasUnreferencedCommits(), equalTo(true)); 
+ indexPolicy.onCommit(commitList); + // Safe commit is the last commit - no need to clean up + assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(lastTranslogGen)); + assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); + assertThat(indexPolicy.hasUnreferencedCommits(), equalTo(false)); + } + } + IndexCommit mockIndexCommit(long maxSeqNo, UUID translogUUID, long translogGen) throws IOException { final Map userData = new HashMap<>(); userData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index db62db7e01b46..2a7e49aa66b61 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -4414,4 +4414,29 @@ public void testOpenIndexCreateTranslogKeepOnlyLastCommit() throws Exception { assertThat(userData.get(Translog.TRANSLOG_GENERATION_KEY), equalTo("1")); } } + + public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { + IOUtils.close(engine, store); + store = createStore(); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { + final int numDocs = scaledRandomIntBetween(10, 100); + for (int docId = 0; docId < numDocs; docId++) { + index(engine, docId); + if (frequently()) { + engine.flush(randomBoolean(), randomBoolean()); + } + } + engine.flush(false, randomBoolean()); + List commits = DirectoryReader.listCommits(store.directory()); + // Global checkpoint advanced but not enough - all commits are kept. + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpointTracker().getCheckpoint() - 1)); + engine.syncTranslog(); + assertThat(DirectoryReader.listCommits(store.directory()), equalTo(commits)); + // Global checkpoint advanced enough - only the last commit is kept. 
+ globalCheckpoint.set(randomLongBetween(engine.getLocalCheckpointTracker().getCheckpoint(), Long.MAX_VALUE)); + engine.syncTranslog(); + assertThat(DirectoryReader.listCommits(store.directory()), contains(commits.get(commits.size() - 1))); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index e80c2df4ea060..a091cd44c4a7f 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -657,7 +657,7 @@ protected PrimaryResult performOnPrimary( @Override protected void performOnReplica(final GlobalCheckpointSyncAction.Request request, final IndexShard replica) throws IOException { - replica.getTranslog().sync(); + replica.sync(); } } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java index 618714fc9d959..3fc62673de0ce 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -123,9 +123,9 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception { } if (durability == Translog.Durability.ASYNC || lastSyncedGlobalCheckpoint == globalCheckpoint) { - verify(translog, never()).sync(); + verify(indexShard, never()).sync(); } else { - verify(translog).sync(); + verify(indexShard).sync(); } } From 19a2b01e435041984b4a0a6b4459e8783d479a89 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 18 Jan 2018 14:15:44 -0800 Subject: [PATCH 65/94] Build: Omit dependency licenses check for elasticsearch deps (#28304) Sometimes modules/plugins depend on locally built elasticsearch jars. This means not only that the jar is constantly changing (so no need for a sha check), but also that the license falls under the Elasticsearch license, and there is no need to keep another copy. This commit updates the dependencies checked by dependencyLicenses to exclude those that are built by elasticsearch. 
--- .../groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 5 ++++- modules/reindex/build.gradle | 7 ------- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index dd1459418d013..0df80116099e3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -659,7 +659,10 @@ class BuildPlugin implements Plugin { Task precommit = PrecommitTasks.create(project, true) project.check.dependsOn(precommit) project.test.mustRunAfter(precommit) - project.dependencyLicenses.dependencies = project.configurations.runtime - project.configurations.provided + // only require dependency licenses for non-elasticsearch deps + project.dependencyLicenses.dependencies = project.configurations.runtime.fileCollection { + it.group.startsWith('org.elasticsearch') == false + } - project.configurations.provided } private static configureDependenciesInfo(Project project) { diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index f29daf799122d..479fe78cc8071 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -52,13 +52,6 @@ dependencies { testCompile project(path: ':modules:parent-join', configuration: 'runtime') } -dependencyLicenses { - // Don't check the client's license. We know it. - dependencies = project.configurations.runtime.fileCollection { - it.group.startsWith('org.elasticsearch') == false - } - project.configurations.provided -} - thirdPartyAudit.excludes = [ // Commons logging 'javax.servlet.ServletContextEvent', From 531c58cf81cb48f7c883d1b41e34903ae1074508 Mon Sep 17 00:00:00 2001 From: David Kemp Date: Fri, 19 Jan 2018 09:19:01 +1100 Subject: [PATCH 66/94] Documents applicability of term query to range type (#28166) Closes #27030 --- docs/reference/mapping/types/range.asciidoc | 71 ++++++++++++++++++-- docs/reference/query-dsl/term-query.asciidoc | 2 + 2 files changed, 66 insertions(+), 7 deletions(-) diff --git a/docs/reference/mapping/types/range.asciidoc b/docs/reference/mapping/types/range.asciidoc index 76aba68277121..0ef4a463c9bb1 100644 --- a/docs/reference/mapping/types/range.asciidoc +++ b/docs/reference/mapping/types/range.asciidoc @@ -47,18 +47,16 @@ PUT range_index/_doc/1 -------------------------------------------------- //CONSOLE -The following is an example of a `date_range` query over the `date_range` field named "time_frame". +The following is an example of a <> on the `integer_range` field named "expected_attendees". [source,js] -------------------------------------------------- -POST range_index/_search +GET range_index/_search { "query" : { - "range" : { - "time_frame" : { <5> - "gte" : "2015-10-31", - "lte" : "2015-11-01", - "relation" : "within" <6> + "term" : { + "expected_attendees" : { + "value": 12 } } } @@ -104,6 +102,27 @@ The result produced by the above query. -------------------------------------------------- // TESTRESPONSE[s/"took": 13/"took" : $body.took/] + +The following is an example of a `date_range` query over the `date_range` field named "time_frame". 
+ +[source,js] +-------------------------------------------------- +GET range_index/_search +{ + "query" : { + "range" : { + "time_frame" : { <5> + "gte" : "2015-10-31", + "lte" : "2015-11-01", + "relation" : "within" <6> + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:range_index] + <1> `date_range` types accept the same field parameters defined by the <> type. <2> Example indexing a meeting with 10 to 20 attendees. <3> Date ranges accept the same format as described in <>. @@ -112,6 +131,44 @@ The result produced by the above query. <6> Range queries over range <> support a `relation` parameter which can be one of `WITHIN`, `CONTAINS`, `INTERSECTS` (default). +This query produces a similar result: + +[source,js] +-------------------------------------------------- +{ + "took": 13, + "timed_out": false, + "_shards" : { + "total": 2, + "successful": 2, + "skipped" : 0, + "failed": 0 + }, + "hits" : { + "total" : 1, + "max_score" : 1.0, + "hits" : [ + { + "_index" : "range_index", + "_type" : "_doc", + "_id" : "1", + "_score" : 1.0, + "_source" : { + "expected_attendees" : { + "gte" : 10, "lte" : 20 + }, + "time_frame" : { + "gte" : "2015-10-31 12:00:00", "lte" : "2015-11-01" + } + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 13/"took" : $body.took/] + + [[range-params]] ==== Parameters for range fields diff --git a/docs/reference/query-dsl/term-query.asciidoc b/docs/reference/query-dsl/term-query.asciidoc index 4b668203a33ad..f1224f33ca7d4 100644 --- a/docs/reference/query-dsl/term-query.asciidoc +++ b/docs/reference/query-dsl/term-query.asciidoc @@ -51,6 +51,8 @@ GET _search as the query clause for `normal`. <2> The `normal` clause has the default neutral boost of `1.0`. +A `term` query can also match against <>. + .Why doesn't the `term` query match my document? ************************************************** From ef468327e9b07cf25640fc52d37c4594709a4c28 Mon Sep 17 00:00:00 2001 From: Andrew Kramarev Date: Thu, 18 Jan 2018 14:27:30 -0800 Subject: [PATCH 67/94] mistyping in one of the highlighting examples comment -> content (#28139) --- docs/reference/search/request/highlighting.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index fd680bb6d6c2d..4552366de9800 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -24,7 +24,7 @@ GET /_search }, "highlight" : { "fields" : { - "comment" : {} + "content" : {} } } } From ba9c9e08e74f35ca7522e1a087216497bae9fa38 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 18 Jan 2018 19:16:26 -0800 Subject: [PATCH 68/94] Painless: Add spi jar that will be published for extending whitelists (#28302) In order to build a plugin that extends the painless whitelist, the spi classes must be available to the plugin at compile time. This commit moves the spi classes into a separate jar which will be published. Any plugin authors wishing to extend painless through spi would then add a compileOnly dependency on this jar.
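For illustration only, a plugin consuming this new jar might register additional whitelists roughly as follows -- `ExampleWhitelistExtension`, the resource file name, and the use of `SearchScript.CONTEXT` are hypothetical, and the sketch assumes the moved `PainlessExtension` interface exposes a `getContextWhitelists()` map (the spi sources are renamed below without showing their contents):

[source,java]
--------------------------------------------------
import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.elasticsearch.painless.spi.PainlessExtension;
import org.elasticsearch.painless.spi.Whitelist;
import org.elasticsearch.painless.spi.WhitelistLoader;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.SearchScript;

// Hypothetical extension: loads extra whitelist entries from a resource file
// next to the class and exposes them to the search script context. It would
// be discovered via a service file at
// META-INF/services/org.elasticsearch.painless.spi.PainlessExtension.
public class ExampleWhitelistExtension implements PainlessExtension {

    private static final Whitelist WHITELIST =
        WhitelistLoader.loadFromResourceFiles(ExampleWhitelistExtension.class, "example_whitelist.txt");

    @Override
    public Map<ScriptContext<?>, List<Whitelist>> getContextWhitelists() {
        return Collections.singletonMap(SearchScript.CONTEXT, Collections.singletonList(WHITELIST));
    }
}
--------------------------------------------------

Such a plugin would declare its `compileOnly` dependency on the artifact published below (group `org.elasticsearch.plugin`, name `elasticsearch-scripting-painless-spi`).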
--- distribution/build.gradle | 2 +- modules/build.gradle | 2 +- modules/lang-painless/build.gradle | 1 + modules/lang-painless/spi/build.gradle | 40 +++++++++++++++++++ .../painless/spi/PainlessExtension.java | 0 .../elasticsearch/painless/spi/Whitelist.java | 0 .../painless/spi/WhitelistLoader.java | 0 7 files changed, 43 insertions(+), 2 deletions(-) create mode 100644 modules/lang-painless/spi/build.gradle rename modules/lang-painless/{ => spi}/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java (100%) rename modules/lang-painless/{ => spi}/src/main/java/org/elasticsearch/painless/spi/Whitelist.java (100%) rename modules/lang-painless/{ => spi}/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java (100%) diff --git a/distribution/build.gradle b/distribution/build.gradle index c6fc9d5b6946f..d322aa9c1ff12 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -83,7 +83,7 @@ ext.restTestExpansions = [ // we create the buildModules task above so the distribution subprojects can // depend on it, but we don't actually configure it until here so we can do a single // loop over modules to also setup cross task dependencies and increment our modules counter -project.rootProject.subprojects.findAll { it.path.startsWith(':modules:') }.each { Project module -> +project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { Project module -> buildFullNotice { def defaultLicensesDir = new File(module.projectDir, 'licenses') if (defaultLicensesDir.exists()) { diff --git a/modules/build.gradle b/modules/build.gradle index b3dbde24936e9..7f7e7e0965bc9 100644 --- a/modules/build.gradle +++ b/modules/build.gradle @@ -17,7 +17,7 @@ * under the License. */ -subprojects { +configure(subprojects.findAll { it.parent.path == project.path }) { group = 'org.elasticsearch.plugin' // for modules which publish client jars apply plugin: 'elasticsearch.esplugin' diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 0bd96725c66b4..d287d7ee02378 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -31,6 +31,7 @@ integTestCluster { dependencies { compile 'org.antlr:antlr4-runtime:4.5.3' compile 'org.ow2.asm:asm-debug-all:5.1' + compile project('spi') } dependencyLicenses { diff --git a/modules/lang-painless/spi/build.gradle b/modules/lang-painless/spi/build.gradle new file mode 100644 index 0000000000000..7e43a242a23a9 --- /dev/null +++ b/modules/lang-painless/spi/build.gradle @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +apply plugin: 'elasticsearch.build' +apply plugin: 'nebula.maven-base-publish' +apply plugin: 'nebula.maven-scm' + +group = 'org.elasticsearch.plugin' +archivesBaseName = 'elasticsearch-scripting-painless-spi' + +publishing { + publications { + nebula { + artifactId = archivesBaseName + } + } +} + +dependencies { + compile "org.elasticsearch:elasticsearch:${version}" +} + +// no tests...yet? +test.enabled = false diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java similarity index 100% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java rename to modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/PainlessExtension.java diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java similarity index 100% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/Whitelist.java rename to modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java similarity index 100% rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java rename to modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java From 7a82bb94b39e62d04c7e4b4e25e212dc364a7477 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 18 Jan 2018 21:13:39 -0800 Subject: [PATCH 69/94] Build: Fix meta plugin usage in integ test clusters (#28307) This commit fixes places handling plugin projects in cluster formation to work with both esplugin and es_meta_plugin. 
--- .../gradle/test/ClusterFormationTasks.groovy | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index d39de58382520..593a08c873594 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -24,6 +24,7 @@ import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin +import org.elasticsearch.gradle.plugin.MetaPluginPropertiesExtension import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.elasticsearch.gradle.plugin.PluginPropertiesExtension import org.gradle.api.AntBuilder @@ -139,8 +140,8 @@ class ClusterFormationTasks { /** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */ static void configureBwcPluginDependency(String name, Project project, Project pluginProject, Configuration configuration, String elasticsearchVersion) { verifyProjectHasBuildPlugin(name, elasticsearchVersion, project, pluginProject) - PluginPropertiesExtension extension = pluginProject.extensions.findByName('esplugin'); - project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${extension.name}:${elasticsearchVersion}@zip") + final String pluginName = findPluginName(pluginProject) + project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip") } /** @@ -450,7 +451,7 @@ class ClusterFormationTasks { configuration = project.configurations.create(configurationName) } - final String depName = pluginProject.extensions.findByName('esplugin').name + final String depName = findPluginName(pluginProject) Dependency dep = bwcPlugins.dependencies.find { it.name == depName @@ -759,4 +760,14 @@ class ClusterFormationTasks { "[${project.path}] dependencies: the plugin is not an esplugin or es_meta_plugin") } } + + /** Find the plugin name in the given project, whether a regular plugin or meta plugin. */ + static String findPluginName(Project pluginProject) { + PluginPropertiesExtension extension = pluginProject.extensions.findByName('esplugin') + if (extension != null) { + return extension.name + } else { + return pluginProject.extensions.findByName('es_meta_plugin').name + } + } } From b7e1d6fe3ecc94eb74f33d6444bea81af1fb3634 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 18 Jan 2018 19:43:23 +0100 Subject: [PATCH 70/94] [Docs] Remove typo in painless-getting-started.asciidoc --- docs/painless/painless-getting-started.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/painless/painless-getting-started.asciidoc b/docs/painless/painless-getting-started.asciidoc index 155b5f272b426..7898631416b6b 100644 --- a/docs/painless/painless-getting-started.asciidoc +++ b/docs/painless/painless-getting-started.asciidoc @@ -320,7 +320,7 @@ POST hockey/player/_update_by_query Note: all of the `_update_by_query` examples above could really do with a `query` to limit the data that they pull back. 
While you *could* use a -See {ref}/query-dsl-script-query.html[script query] it wouldn't be as efficient +{ref}/query-dsl-script-query.html[script query] it wouldn't be as efficient as using any other query because script queries aren't able to use the inverted index to limit the documents that they have to check. From 0a4a4c8a0e679d79428353e1f7436d8d523557ab Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 19 Jan 2018 10:17:22 +0000 Subject: [PATCH 71/94] Minor improvements to translog docs (#28237) The use of the phrase "translog" vs "transaction log" was inconsistent, and it was apparently unclear that the translog was stored on every shard copy. --- .../reference/index-modules/translog.asciidoc | 82 +++++++++++-------- 1 file changed, 46 insertions(+), 36 deletions(-) diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc index 31d529b6c4436..b1eb36e346d9f 100644 --- a/docs/reference/index-modules/translog.asciidoc +++ b/docs/reference/index-modules/translog.asciidoc @@ -1,41 +1,44 @@ [[index-modules-translog]] == Translog -Changes to Lucene are only persisted to disk during a Lucene commit, -which is a relatively heavy operation and so cannot be performed after every -index or delete operation. Changes that happen after one commit and before another -will be lost in the event of process exit or HW failure. - -To prevent this data loss, each shard has a _transaction log_ or write ahead -log associated with it. Any index or delete operation is written to the -translog after being processed by the internal Lucene index. - -In the event of a crash, recent transactions can be replayed from the -transaction log when the shard recovers. +Changes to Lucene are only persisted to disk during a Lucene commit, which is a +relatively expensive operation and so cannot be performed after every index or +delete operation. Changes that happen after one commit and before another will +be removed from the index by Lucene in the event of process exit or hardware +failure. + +Because Lucene commits are too expensive to perform on every individual change, +each shard copy also has a _transaction log_ known as its _translog_ associated +with it. All index and delete operations are written to the translog after +being processed by the internal Lucene index but before they are acknowledged. +In the event of a crash, recent transactions that have been acknowledged but +not yet included in the last Lucene commit can instead be recovered from the +translog when the shard recovers. An Elasticsearch flush is the process of performing a Lucene commit and -starting a new translog. It is done automatically in the background in order -to make sure the transaction log doesn't grow too large, which would make +starting a new translog. Flushes are performed automatically in the background +in order to make sure the translog doesn't grow too large, which would make replaying its operations take a considerable amount of time during recovery. -It is also exposed through an API, though its rarely needed to be performed -manually. +The ability to perform a flush manually is also exposed through an API, +although this is rarely needed. [float] === Translog settings -The data in the transaction log is only persisted to disk when the translog is +The data in the translog is only persisted to disk when the translog is ++fsync++ed and committed. In the event of hardware failure, any data written since the previous translog commit will be lost. 
-By default, Elasticsearch ++fsync++s and commits the translog every 5 seconds if `index.translog.durability` is set -to `async` or if set to `request` (default) at the end of every <>, <>, -<>, or <> request. In fact, Elasticsearch -will only report success of an index, delete, update, or bulk request to the -client after the transaction log has been successfully ++fsync++ed and committed -on the primary and on every allocated replica. +By default, Elasticsearch ++fsync++s and commits the translog every 5 seconds +if `index.translog.durability` is set to `async` or if set to `request` +(default) at the end of every <>, <>, +<>, or <> request. More precisely, if set +to `request`, Elasticsearch will only report success of an index, delete, +update, or bulk request to the client after the translog has been successfully +++fsync++ed and committed on the primary and on every allocated replica. -The following <> per-index settings -control the behaviour of the transaction log: +The following <> per-index +settings control the behaviour of the translog: `index.translog.sync_interval`:: @@ -64,17 +67,20 @@ update, or bulk request. This setting accepts the following parameters: `index.translog.flush_threshold_size`:: -The translog stores all operations that are not yet safely persisted in Lucene (i.e., are -not part of a lucene commit point). Although these operations are available for reads, they will -need to be reindexed if the shard was to shutdown and has to be recovered. This settings controls -the maximum total size of these operations, to prevent recoveries from taking too long. Once the -maximum size has been reached a flush will happen, generating a new Lucene commit. Defaults to `512mb`. +The translog stores all operations that are not yet safely persisted in Lucene +(i.e., are not part of a Lucene commit point). Although these operations are +available for reads, they will need to be reindexed if the shard was to +shutdown and has to be recovered. This settings controls the maximum total size +of these operations, to prevent recoveries from taking too long. Once the +maximum size has been reached a flush will happen, generating a new Lucene +commit point. Defaults to `512mb`. `index.translog.retention.size`:: -The total size of translog files to keep. Keeping more translog files increases the chance of performing -an operation based sync when recovering replicas. If the translog files are not sufficient, replica recovery -will fall back to a file based sync. Defaults to `512mb` +The total size of translog files to keep. Keeping more translog files increases +the chance of performing an operation based sync when recovering replicas. If +the translog files are not sufficient, replica recovery will fall back to a +file based sync. Defaults to `512mb` `index.translog.retention.age`:: @@ -86,10 +92,14 @@ The maximum duration for which translog files will be kept. Defaults to `12h`. [[corrupt-translog-truncation]] === What to do if the translog becomes corrupted? -In some cases (a bad drive, user error) the translog can become corrupted. When -this corruption is detected by Elasticsearch due to mismatching checksums, -Elasticsearch will fail the shard and refuse to allocate that copy of the data -to the node, recovering from a replica if available. +In some cases (a bad drive, user error) the translog on a shard copy can become +corrupted. 
When this corruption is detected by Elasticsearch due to mismatching +checksums, Elasticsearch will fail that shard copy and refuse to use that copy +of the data. If there are other copies of the shard available then +Elasticsearch will automatically recover from one of them using the normal +shard allocation and recovery mechanism. In particular, if the corrupt shard +copy was the primary when the corruption was detected then one of its replicas +will be promoted in its place. If there is no copy of the data from which Elasticsearch can recover successfully, a user may want to recover the data that is part of the shard at From ef76d99d863b5c5c136019087cc7f1fcc86e0fa7 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 20 Jan 2018 22:16:59 -0500 Subject: [PATCH 72/94] Add 6.3 version constant to master This commit adds the 6.3 version constant to the master branch after 6.2 was cut from 6.x. --- server/src/main/java/org/elasticsearch/Version.java | 4 ++++ settings.gradle | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index df748e7959254..b741c34fab98a 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -148,6 +148,8 @@ public class Version implements Comparable { public static final Version V_6_1_3 = new Version(V_6_1_3_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_2_0_ID = 6020099; public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final int V_6_3_0_ID = 6030099; + public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); @@ -166,6 +168,8 @@ public static Version fromId(int id) { switch (id) { case V_7_0_0_alpha1_ID: return V_7_0_0_alpha1; + case V_6_3_0_ID: + return V_6_3_0; case V_6_2_0_ID: return V_6_2_0; case V_6_1_3_ID: diff --git a/settings.gradle b/settings.gradle index c135b431a2370..e3a24ea148d95 100644 --- a/settings.gradle +++ b/settings.gradle @@ -78,7 +78,7 @@ addSubProjects('', new File(rootProject.projectDir, 'plugins'), projects, []) addSubProjects('', new File(rootProject.projectDir, 'qa'), projects, []) /* Create projects for building BWC snapshot distributions from the heads of other branches */ -final List branches = ['5.6', '6.0', '6.1', '6.x'] +final List branches = ['5.6', '6.0', '6.1', '6.2', '6.x'] for (final String branch : branches) { projects.add("distribution:bwc-snapshot-${branch}".toString()) } From 3a43bb1ba9c04274406ecb0ef74e91f0f21fec4b Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Sun, 21 Jan 2018 15:37:31 -0800 Subject: [PATCH 73/94] Build: Add pom generation to meta plugins (#28321) This commit adds pom generation to meta plugins by using the same hacks that PluginBuildPlugin already uses to get around "pom" type poms (ie zip files). 
--- .../gradle/plugin/MetaPluginBuildPlugin.groovy | 6 +++++- .../elasticsearch/gradle/plugin/PluginBuildPlugin.groovy | 4 +++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy index fbef0d8e49721..3df9b604c1309 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy @@ -19,7 +19,7 @@ package org.elasticsearch.gradle.plugin -import org.elasticsearch.gradle.test.RestIntegTestTask +import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.test.RestTestPlugin import org.elasticsearch.gradle.test.RunTask import org.elasticsearch.gradle.test.StandaloneRestTestPlugin @@ -42,6 +42,10 @@ class MetaPluginBuildPlugin implements Plugin { dependsOn(project.bundlePlugin) plugin(project.path) } + BuildPlugin.configurePomGeneration(project) + project.afterEvaluate { + PluginBuildPlugin.addZipPomGeneration(project) + } RunTask run = project.tasks.create('run', RunTask) run.dependsOn(project.bundlePlugin) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index f342a68707ed6..950acad9a5eb4 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle.plugin +import nebula.plugin.info.scm.ScmInfoPlugin import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.NoticeTask import org.elasticsearch.gradle.test.RestIntegTestTask @@ -220,7 +221,8 @@ public class PluginBuildPlugin extends BuildPlugin { } /** Adds a task to generate a pom file for the zip distribution. */ - protected void addZipPomGeneration(Project project) { + public static void addZipPomGeneration(Project project) { + project.plugins.apply(ScmInfoPlugin.class) project.plugins.apply(MavenPublishPlugin.class) project.publishing { From 1ae920cb90047d3e2a908fc5526de069e0542442 Mon Sep 17 00:00:00 2001 From: Peter Dyson Date: Mon, 22 Jan 2018 18:39:21 +1000 Subject: [PATCH 74/94] Provide explanation of dangling indices, fixes #26008 (#26999) * Provide explanation of dangling indices, fixes #26008 Adjusted from PR review comments * updates to suggested wording and minor typo fix. --- docs/reference/modules/gateway.asciidoc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/reference/modules/gateway.asciidoc b/docs/reference/modules/gateway.asciidoc index 0af0d31fba2c5..76e0840793996 100644 --- a/docs/reference/modules/gateway.asciidoc +++ b/docs/reference/modules/gateway.asciidoc @@ -48,3 +48,12 @@ as long as the following conditions are met: Recover as long as this many data nodes have joined the cluster. NOTE: These settings only take effect on a full cluster restart. + +=== Dangling indices + +When a node joins the cluster, any shards stored in its local data +directory which do not already exist in the cluster will be imported into the +cluster. This functionality is intended as a best effort to help users who +lose all master nodes.
If a new master node is started which is unaware of +the other indices in the cluster, adding the old nodes will cause the old +indices to be imported, instead of being deleted. From 119b1b5c2b26a6adc2df7a95396da7b2411b5f09 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 22 Jan 2018 09:52:57 +0100 Subject: [PATCH 75/94] Add information when master node left to DiscoveryNodes' shortSummary() (#28197) This commit changes `DiscoveryNodes.Delta.shortSummary()` in order to add information to the summary when the master node left. --- .../cluster/node/DiscoveryNodes.java | 92 +++++++++---------- .../cluster/node/DiscoveryNodesTests.java | 7 +- 2 files changed, 45 insertions(+), 54 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 4373069a5f77c..057d37d5999a2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -39,6 +39,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; /** * This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to @@ -205,12 +206,14 @@ public DiscoveryNode getLocalNode() { } /** - * Get the master node - * - * @return master node + * Returns the master node, or {@code null} if there is no master node */ + @Nullable public DiscoveryNode getMasterNode() { - return nodes.get(masterNodeId); + if (masterNodeId != null) { + return nodes.get(masterNodeId); + } + return null; } /** @@ -385,27 +388,20 @@ public DiscoveryNodes newNode(DiscoveryNode node) { * Returns the changes comparing this nodes to the provided nodes. 
*/ public Delta delta(DiscoveryNodes other) { - List removed = new ArrayList<>(); - List added = new ArrayList<>(); + final List removed = new ArrayList<>(); + final List added = new ArrayList<>(); for (DiscoveryNode node : other) { - if (!this.nodeExists(node)) { + if (this.nodeExists(node) == false) { removed.add(node); } } for (DiscoveryNode node : this) { - if (!other.nodeExists(node)) { + if (other.nodeExists(node) == false) { added.add(node); } } - DiscoveryNode previousMasterNode = null; - DiscoveryNode newMasterNode = null; - if (masterNodeId != null) { - if (other.masterNodeId == null || !other.masterNodeId.equals(masterNodeId)) { - previousMasterNode = other.getMasterNode(); - newMasterNode = getMasterNode(); - } - } - return new Delta(previousMasterNode, newMasterNode, localNodeId, Collections.unmodifiableList(removed), + + return new Delta(other.getMasterNode(), getMasterNode(), localNodeId, Collections.unmodifiableList(removed), Collections.unmodifiableList(added)); } @@ -429,8 +425,8 @@ public String toString() { public static class Delta { private final String localNodeId; - private final DiscoveryNode previousMasterNode; - private final DiscoveryNode newMasterNode; + @Nullable private final DiscoveryNode previousMasterNode; + @Nullable private final DiscoveryNode newMasterNode; private final List removed; private final List added; @@ -448,13 +444,15 @@ public boolean hasChanges() { } public boolean masterNodeChanged() { - return newMasterNode != null; + return Objects.equals(newMasterNode, previousMasterNode) == false; } + @Nullable public DiscoveryNode previousMasterNode() { return previousMasterNode; } + @Nullable public DiscoveryNode newMasterNode() { return newMasterNode; } @@ -476,51 +474,45 @@ public List addedNodes() { } public String shortSummary() { - StringBuilder sb = new StringBuilder(); - if (!removed() && masterNodeChanged()) { - if (newMasterNode.getId().equals(localNodeId)) { - // we are the master, no nodes we removed, we are actually the first master - sb.append("new_master ").append(newMasterNode()); - } else { - // we are not the master, so we just got this event. 
No nodes were removed, so its not a *new* master - sb.append("detected_master ").append(newMasterNode()); + final StringBuilder summary = new StringBuilder(); + if (masterNodeChanged()) { + summary.append("master node changed {previous ["); + if (previousMasterNode() != null) { + summary.append(previousMasterNode()); } - } else { - if (masterNodeChanged()) { - sb.append("master {new ").append(newMasterNode()); - if (previousMasterNode() != null) { - sb.append(", previous ").append(previousMasterNode()); - } - sb.append("}"); + summary.append("], current ["); + if (newMasterNode() != null) { + summary.append(newMasterNode()); } - if (removed()) { - if (masterNodeChanged()) { - sb.append(", "); - } - sb.append("removed {"); - for (DiscoveryNode node : removedNodes()) { - sb.append(node).append(','); - } - sb.append("}"); + summary.append("]}"); + } + if (removed()) { + if (summary.length() > 0) { + summary.append(", "); + } + summary.append("removed {"); + for (DiscoveryNode node : removedNodes()) { + summary.append(node).append(','); } + summary.append("}"); } if (added()) { // don't print if there is one added, and it is us if (!(addedNodes().size() == 1 && addedNodes().get(0).getId().equals(localNodeId))) { - if (removed() || masterNodeChanged()) { - sb.append(", "); + if (summary.length() > 0) { + summary.append(", "); } - sb.append("added {"); + summary.append("added {"); for (DiscoveryNode node : addedNodes()) { if (!node.getId().equals(localNodeId)) { // don't print ourself - sb.append(node).append(','); + summary.append(node).append(','); } } - sb.append("}"); + summary.append("}"); } } - return sb.toString(); + return summary.toString(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java index 9200e04c7127a..6bfb78a2ade9c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java @@ -140,15 +140,14 @@ public void testDeltas() { DiscoveryNodes.Delta delta = discoNodesB.delta(discoNodesA); - if (masterB == null || Objects.equals(masterAId, masterBId)) { + if (Objects.equals(masterAId, masterBId)) { assertFalse(delta.masterNodeChanged()); assertThat(delta.previousMasterNode(), nullValue()); assertThat(delta.newMasterNode(), nullValue()); } else { assertTrue(delta.masterNodeChanged()); - assertThat(delta.newMasterNode().getId(), equalTo(masterBId)); - assertThat(delta.previousMasterNode() != null ? delta.previousMasterNode().getId() : null, - equalTo(masterAId)); + assertThat(delta.newMasterNode() != null ? delta.newMasterNode().getId() : null, equalTo(masterBId)); + assertThat(delta.previousMasterNode() != null ? delta.previousMasterNode().getId() : null, equalTo(masterAId)); } Set newNodes = new HashSet<>(nodesB); From 700d9ecc953fa30df0d12d086f0e9d9322446459 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 22 Jan 2018 12:03:07 +0100 Subject: [PATCH 76/94] Remove the `update_all_types` option. (#28288) This option is not useful in 7.x since no indices may have more than one type anymore. 
--- .../org/elasticsearch/client/Request.java | 8 -- .../elasticsearch/client/RequestTests.java | 8 -- .../migration/migrate_7_0/mappings.asciidoc | 6 +- .../index/mapper/ScaledFloatFieldMapper.java | 8 +- .../index/mapper/TokenCountFieldMapper.java | 4 +- .../mapper/TokenCountFieldMapperTests.java | 4 +- .../join/mapper/ParentIdFieldMapper.java | 4 +- .../join/mapper/ParentJoinFieldMapper.java | 8 +- .../mapper/ParentJoinFieldMapperTests.java | 34 ++++---- .../join/query/HasChildQueryBuilderTests.java | 2 +- .../query/HasParentQueryBuilderTests.java | 2 +- .../LegacyHasChildQueryBuilderTests.java | 4 +- .../LegacyHasParentQueryBuilderTests.java | 6 +- .../LegacyParentIdQueryBuilderTests.java | 4 +- .../join/query/ParentIdQueryBuilderTests.java | 2 +- .../percolator/CandidateQueryTests.java | 4 +- .../PercolateQueryBuilderTests.java | 4 +- .../PercolatorFieldMapperTests.java | 10 +-- .../ICUCollationKeywordFieldMapper.java | 8 +- .../ICUCollationKeywordFieldMapperTests.java | 4 +- .../index/mapper/size/SizeFieldMapper.java | 2 +- .../index/mapper/size/SizeMappingTests.java | 2 +- .../rest-api-spec/api/indices.create.json | 4 - .../api/indices.put_mapping.json | 4 - .../CreateIndexClusterStateUpdateRequest.java | 10 +-- .../indices/create/CreateIndexRequest.java | 21 ++--- .../create/CreateIndexRequestBuilder.java | 6 -- .../create/TransportCreateIndexAction.java | 2 +- .../PutMappingClusterStateUpdateRequest.java | 11 --- .../mapping/put/PutMappingRequest.java | 20 ++--- .../mapping/put/PutMappingRequestBuilder.java | 6 -- .../put/TransportPutMappingAction.java | 1 - .../rollover/TransportRolloverAction.java | 2 +- .../indices/shrink/TransportResizeAction.java | 2 +- .../metadata/MetaDataCreateIndexService.java | 2 +- .../metadata/MetaDataIndexAliasesService.java | 2 +- .../MetaDataIndexTemplateService.java | 2 +- .../metadata/MetaDataIndexUpgradeService.java | 2 +- .../metadata/MetaDataMappingService.java | 8 +- .../index/mapper/CompletionFieldMapper.java | 8 +- .../index/mapper/DateFieldMapper.java | 8 +- .../index/mapper/DocumentMapper.java | 4 +- .../index/mapper/DocumentParser.java | 4 +- .../index/mapper/FieldMapper.java | 9 +- .../index/mapper/FieldNamesFieldMapper.java | 11 --- .../index/mapper/FieldTypeLookup.java | 83 ++++--------------- .../index/mapper/GeoPointFieldMapper.java | 4 +- .../index/mapper/GeoShapeFieldMapper.java | 17 +--- .../index/mapper/IdFieldMapper.java | 2 +- .../index/mapper/IndexFieldMapper.java | 2 +- .../index/mapper/IpFieldMapper.java | 4 +- .../index/mapper/KeywordFieldMapper.java | 8 +- .../index/mapper/MappedFieldType.java | 23 +---- .../elasticsearch/index/mapper/Mapper.java | 2 +- .../index/mapper/MapperService.java | 35 ++++---- .../elasticsearch/index/mapper/Mapping.java | 8 +- .../index/mapper/MetadataFieldMapper.java | 4 +- .../index/mapper/NumberFieldMapper.java | 4 +- .../index/mapper/ObjectMapper.java | 11 ++- .../index/mapper/ParentFieldMapper.java | 4 +- .../index/mapper/ParsedDocument.java | 2 +- .../index/mapper/RangeFieldMapper.java | 27 +----- .../index/mapper/RootObjectMapper.java | 8 +- .../index/mapper/RoutingFieldMapper.java | 2 +- .../index/mapper/SeqNoFieldMapper.java | 2 +- .../index/mapper/SourceFieldMapper.java | 2 +- .../index/mapper/TextFieldMapper.java | 29 +------ .../index/mapper/TypeFieldMapper.java | 2 +- .../index/mapper/UidFieldMapper.java | 2 +- .../index/mapper/VersionFieldMapper.java | 2 +- .../index/shard/StoreRecovery.java | 2 +- .../elasticsearch/indices/IndicesService.java | 2 +- 
.../admin/indices/RestCreateIndexAction.java | 1 - .../admin/indices/RestPutMappingAction.java | 1 - .../admin/indices/create/CreateIndexIT.java | 2 +- .../metadata/IndexCreationTaskTests.java | 4 +- .../index/mapper/AllFieldMapperTests.java | 2 +- .../index/mapper/BooleanFieldMapperTests.java | 2 +- .../index/mapper/CopyToMapperTests.java | 20 ++--- .../index/mapper/DateFieldMapperTests.java | 6 +- .../mapper/DocumentMapperMergeTests.java | 34 ++++---- .../index/mapper/DynamicMappingTests.java | 12 +-- .../index/mapper/ExternalMapper.java | 2 +- .../index/mapper/FakeStringFieldMapper.java | 4 +- .../mapper/FieldNamesFieldMapperTests.java | 6 +- .../index/mapper/FieldTypeLookupTests.java | 56 ++++--------- .../mapper/GeoShapeFieldMapperTests.java | 6 +- .../index/mapper/IdFieldMapperTests.java | 4 +- .../mapper/JavaMultiFieldMergeTests.java | 16 ++-- .../index/mapper/KeywordFieldMapperTests.java | 6 +- .../index/mapper/MapperServiceTests.java | 36 ++++---- .../index/mapper/NestedObjectMapperTests.java | 16 ++-- .../index/mapper/ObjectMapperTests.java | 4 +- .../index/mapper/ParentFieldMapperTests.java | 12 +-- ...angeFieldQueryStringQueryBuilderTests.java | 2 +- .../index/mapper/RootObjectMapperTests.java | 24 +++--- .../index/mapper/SourceFieldMapperTests.java | 10 +-- .../mapper/StoredNumericValuesTests.java | 2 +- .../index/mapper/TextFieldMapperTests.java | 4 +- .../index/mapper/TypeFieldMapperTests.java | 6 +- .../index/mapper/UidFieldMapperTests.java | 4 +- .../index/mapper/UpdateMappingTests.java | 30 +++---- .../index/query/MatchQueryBuilderTests.java | 2 +- .../index/query/NestedQueryBuilderTests.java | 2 +- .../query/QueryStringQueryBuilderTests.java | 4 +- .../index/query/RangeQueryRewriteTests.java | 4 +- .../query/TermsSetQueryBuilderTests.java | 2 +- .../index/search/MultiMatchQueryTests.java | 2 +- .../mapping/UpdateMappingIntegrationIT.java | 1 - .../search/child/ParentFieldLoadingIT.java | 1 - .../index/engine/TranslogHandler.java | 2 +- .../index/mapper/FieldTypeTestCase.java | 46 ++++------ .../index/shard/IndexShardTestCase.java | 4 +- .../test/AbstractQueryTestCase.java | 4 +- 114 files changed, 368 insertions(+), 620 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index e55204c3d9473..cc3b0deff52c6 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -173,7 +173,6 @@ static Request createIndex(CreateIndexRequest createIndexRequest) throws IOExcep parameters.withTimeout(createIndexRequest.timeout()); parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout()); parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards()); - parameters.withUpdateAllTypes(createIndexRequest.updateAllTypes()); HttpEntity entity = createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE); return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity); @@ -585,13 +584,6 @@ Params withTimeout(TimeValue timeout) { return putParam("timeout", timeout); } - Params withUpdateAllTypes(boolean updateAllTypes) { - if (updateAllTypes) { - return putParam("update_all_types", Boolean.TRUE.toString()); - } - return this; - } - Params withVersion(long version) { if (version != Versions.MATCH_ANY) { return putParam("version", Long.toString(version)); diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index 56848a905a1cd..49667a3dee289 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -310,14 +310,6 @@ public void testCreateIndex() throws IOException { setRandomMasterTimeout(createIndexRequest, expectedParams); setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams); - if (randomBoolean()) { - boolean updateAllTypes = randomBoolean(); - createIndexRequest.updateAllTypes(updateAllTypes); - if (updateAllTypes) { - expectedParams.put("update_all_types", Boolean.TRUE.toString()); - } - } - Request request = Request.createIndex(createIndexRequest); assertEquals("/" + indexName, request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); diff --git a/docs/reference/migration/migrate_7_0/mappings.asciidoc b/docs/reference/migration/migrate_7_0/mappings.asciidoc index 215282c49d7bd..ece9cac5962ab 100644 --- a/docs/reference/migration/migrate_7_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_7_0/mappings.asciidoc @@ -13,4 +13,8 @@ The `index_options` field for numeric fields has been deprecated in 6 and has n To safeguard against out of memory errors, the number of nested json objects within a single document across all fields has been limited to 10000. This default limit can be changed with -the index setting `index.mapping.nested_objects.limit`. \ No newline at end of file +the index setting `index.mapping.nested_objects.limit`. + +==== The `update_all_types` option has been removed + +This option is useless now that all indices have at most one type. 
diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java index 96ec29e2aa695..5770c91cfdb7e 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java @@ -207,8 +207,8 @@ public String typeName() { } @Override - public void checkCompatibility(MappedFieldType other, List conflicts, boolean strict) { - super.checkCompatibility(other, conflicts, strict); + public void checkCompatibility(MappedFieldType other, List conflicts) { + super.checkCompatibility(other, conflicts); if (scalingFactor != ((ScaledFloatFieldType) other).getScalingFactor()) { conflicts.add("mapper [" + name() + "] has different [scaling_factor] values"); } @@ -424,8 +424,8 @@ protected void parseCreateField(ParseContext context, List field } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); ScaledFloatFieldMapper other = (ScaledFloatFieldMapper) mergeWith; if (other.ignoreMalformed.explicit()) { this.ignoreMalformed = other.ignoreMalformed; diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java index c18b66cf61855..7a777963baa4e 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/TokenCountFieldMapper.java @@ -202,8 +202,8 @@ protected String contentType() { } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer; this.enablePositionIncrements = ((TokenCountFieldMapper) mergeWith).enablePositionIncrements; } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java index 633f10276096c..13c4e87f95efc 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/TokenCountFieldMapperTests.java @@ -63,7 +63,7 @@ public void testMerge() throws IOException { .endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); DocumentMapper stage1 = mapperService.merge("person", - new CompressedXContent(stage1Mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(stage1Mapping), MapperService.MergeReason.MAPPING_UPDATE); String stage2Mapping = XContentFactory.jsonBuilder().startObject() .startObject("person") @@ -75,7 +75,7 @@ public void testMerge() throws IOException { .endObject() .endObject().endObject().string(); DocumentMapper stage2 = mapperService.merge("person", - new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE); // previous mapper has not been modified 
assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("keyword")); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java index 21078c2763f1c..8130acac1af72 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java @@ -194,8 +194,8 @@ protected void parseCreateField(ParseContext context, List field @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); ParentIdFieldMapper parentMergeWith = (ParentIdFieldMapper) mergeWith; this.children = parentMergeWith.children; } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java index b2ec28cf0c86b..d3164ae6a12da 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java @@ -316,8 +316,8 @@ public ParentIdFieldMapper getParentIdFieldMapper(String name, boolean isParent) } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); ParentJoinFieldMapper joinMergeWith = (ParentJoinFieldMapper) mergeWith; List conflicts = new ArrayList<>(); for (ParentIdFieldMapper mapper : parentIdFields) { @@ -347,7 +347,7 @@ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { conflicts.add("cannot remove child [" + child + "] in join field [" + name() + "]"); } } - ParentIdFieldMapper merged = (ParentIdFieldMapper) self.merge(mergeWithMapper, updateAllTypes); + ParentIdFieldMapper merged = (ParentIdFieldMapper) self.merge(mergeWithMapper); newParentIdFields.add(merged); } } @@ -356,7 +356,7 @@ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { } this.eagerGlobalOrdinals = joinMergeWith.eagerGlobalOrdinals; this.parentIdFields = Collections.unmodifiableList(newParentIdFields); - this.uniqueFieldMapper = (MetaJoinFieldMapper) uniqueFieldMapper.merge(joinMergeWith.uniqueFieldMapper, updateAllTypes); + this.uniqueFieldMapper = (MetaJoinFieldMapper) uniqueFieldMapper.merge(joinMergeWith.uniqueFieldMapper); uniqueFieldMapper.setFieldMapper(this); } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java index 6bfc9b87b78ce..285e7e80195af 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java @@ -57,7 +57,7 @@ public void testSingleLevel() throws Exception { .endObject().string(); IndexService service = createIndex("test"); DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); assertTrue(docMapper.mappers().getMapper("join_field") == 
ParentJoinFieldMapper.getMapper(service.mapperService())); // Doc without join @@ -106,7 +106,7 @@ public void testParentIdSpecifiedAsNumber() throws Exception { .endObject().string(); IndexService service = createIndex("test"); DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "2", XContentFactory.jsonBuilder().startObject() .startObject("join_field") @@ -141,7 +141,7 @@ public void testMultipleLevels() throws Exception { .endObject().string(); IndexService service = createIndex("test"); DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(service.mapperService())); // Doc without join @@ -221,7 +221,7 @@ public void testUpdateRelations() throws Exception { .endObject().endObject().string(); IndexService indexService = createIndex("test"); DocumentMapper docMapper = indexService.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(indexService.mapperService())); { @@ -235,7 +235,7 @@ public void testUpdateRelations() throws Exception { .endObject().endObject().string(); IllegalStateException exc = expectThrows(IllegalStateException.class, () -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping), - MapperService.MergeReason.MAPPING_UPDATE, false)); + MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getMessage(), containsString("cannot remove parent [parent] in join field [join_field]")); } @@ -251,7 +251,7 @@ public void testUpdateRelations() throws Exception { .endObject().endObject().string(); IllegalStateException exc = expectThrows(IllegalStateException.class, () -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping), - MapperService.MergeReason.MAPPING_UPDATE, false)); + MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getMessage(), containsString("cannot remove child [grand_child2] in join field [join_field]")); } @@ -268,7 +268,7 @@ public void testUpdateRelations() throws Exception { .endObject().endObject().string(); IllegalStateException exc = expectThrows(IllegalStateException.class, () -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping), - MapperService.MergeReason.MAPPING_UPDATE, false)); + MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getMessage(), containsString("cannot create child [parent] from an existing parent")); } @@ -285,7 +285,7 @@ public void testUpdateRelations() throws Exception { .endObject().endObject().string(); IllegalStateException exc = expectThrows(IllegalStateException.class, () -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping), - MapperService.MergeReason.MAPPING_UPDATE, false)); + MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getMessage(), containsString("cannot create parent [grand_child2] from an existing child]")); } @@ -300,7 +300,7 @@ public void testUpdateRelations() throws Exception { .endObject() .endObject().endObject().string(); docMapper = 
indexService.mapperService().merge("type", new CompressedXContent(updateMapping), - MapperService.MergeReason.MAPPING_UPDATE, true); + MapperService.MergeReason.MAPPING_UPDATE); assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(indexService.mapperService())); ParentJoinFieldMapper mapper = ParentJoinFieldMapper.getMapper(indexService.mapperService()); assertTrue(mapper.hasChild("child2")); @@ -321,7 +321,7 @@ public void testUpdateRelations() throws Exception { .endObject() .endObject().endObject().string(); docMapper = indexService.mapperService().merge("type", new CompressedXContent(updateMapping), - MapperService.MergeReason.MAPPING_UPDATE, true); + MapperService.MergeReason.MAPPING_UPDATE); assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(indexService.mapperService())); ParentJoinFieldMapper mapper = ParentJoinFieldMapper.getMapper(indexService.mapperService()); assertTrue(mapper.hasParent("other")); @@ -349,7 +349,7 @@ public void testInvalidJoinFieldInsideObject() throws Exception { IndexService indexService = createIndex("test"); MapperParsingException exc = expectThrows(MapperParsingException.class, () -> indexService.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false)); + MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getRootCause().getMessage(), containsString("join field [object.join_field] cannot be added inside an object or in a multi-field")); } @@ -371,7 +371,7 @@ public void testInvalidJoinFieldInsideMultiFields() throws Exception { IndexService indexService = createIndex("test"); MapperParsingException exc = expectThrows(MapperParsingException.class, () -> indexService.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false)); + MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getRootCause().getMessage(), containsString("join field [number.join_field] cannot be added inside an object or in a multi-field")); } @@ -397,7 +397,7 @@ public void testMultipleJoinFields() throws Exception { .endObject() .endObject().string(); IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("type", - new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false)); + new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getMessage(), containsString("Field [_parent_join] is defined twice in [type]")); } @@ -414,7 +414,7 @@ public void testMultipleJoinFields() throws Exception { .endObject() .endObject().string(); indexService.mapperService().merge("type", - new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); String updateMapping = XContentFactory.jsonBuilder().startObject() .startObject("properties") .startObject("another_join_field") @@ -423,7 +423,7 @@ public void testMultipleJoinFields() throws Exception { .endObject() .endObject().string(); IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("type", - new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE, false)); + new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE)); assertThat(exc.getMessage(), containsString("Field [_parent_join] is defined twice in [type]")); } } @@ -442,7 
+442,7 @@ public void testEagerGlobalOrdinals() throws Exception { .endObject().string(); IndexService service = createIndex("test"); DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(service.mapperService())); assertFalse(service.mapperService().fullName("join_field").eagerGlobalOrdinals()); assertNotNull(service.mapperService().fullName("join_field#parent")); @@ -463,7 +463,7 @@ public void testEagerGlobalOrdinals() throws Exception { .endObject() .endObject().string(); service.mapperService().merge("type", new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); assertFalse(service.mapperService().fullName("join_field").eagerGlobalOrdinals()); assertNotNull(service.mapperService().fullName("join_field#parent")); assertFalse(service.mapperService().fullName("join_field#parent").eagerGlobalOrdinals()); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index 885c19c6cd45a..0ec6bec977e2e 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -132,7 +132,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws .endObject().endObject().endObject(); mapperService.merge(TYPE, - new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE); } /** diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java index 1776df49e1850..67b0051358b17 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java @@ -112,7 +112,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws .endObject().endObject().endObject(); mapperService.merge(TYPE, - new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE); } /** diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java index d30ddf98661de..a0883d5090adb 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java @@ -97,7 +97,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", OBJECT_FIELD_NAME, "type=object" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); mapperService.merge(CHILD_TYPE, new 
CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE, "_parent", "type=" + PARENT_TYPE, STRING_FIELD_NAME, "type=text", @@ -107,7 +107,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", OBJECT_FIELD_NAME, "type=object" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); } @Override diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasParentQueryBuilderTests.java index 8517348721e30..bd2c816b56566 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasParentQueryBuilderTests.java @@ -88,7 +88,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", OBJECT_FIELD_NAME, "type=object" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); mapperService.merge(CHILD_TYPE, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE, "_parent", "type=" + PARENT_TYPE, STRING_FIELD_NAME, "type=text", @@ -98,9 +98,9 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", OBJECT_FIELD_NAME, "type=object" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); mapperService.merge("just_a_type", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("just_a_type" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); } /** diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java index f613e58498ace..d88f5b944c32d 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java @@ -72,7 +72,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", OBJECT_FIELD_NAME, "type=object" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); mapperService.merge(CHILD_TYPE, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE, "_parent", "type=" + PARENT_TYPE, STRING_FIELD_NAME, "type=text", @@ -81,7 +81,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", OBJECT_FIELD_NAME, "type=object" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); } @Override diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java index 375923ebd9ab2..7c6dea967f344 100644 --- 
a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java @@ -104,7 +104,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws .endObject().endObject().endObject(); mapperService.merge(TYPE, - new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE); } @Override diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 93b2eb9d5550f..38844b2352b6e 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -136,13 +136,13 @@ public void init() throws Exception { .startObject("ip_field").field("type", "ip").endObject() .startObject("field").field("type", "keyword").endObject() .endObject().endObject().endObject().string(); - documentMapper = mapperService.merge("type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE, true); + documentMapper = mapperService.merge("type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE); String queryField = "query_field"; String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject(queryField).field("type", "percolator").endObject().endObject() .endObject().endObject().string(); - mapperService.merge("type", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true); + mapperService.merge("type", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE); fieldMapper = (PercolatorFieldMapper) mapperService.documentMapper("type").mappers().getMapper(queryField); fieldType = (PercolatorFieldMapper.FieldType) fieldMapper.fieldType(); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index 122cabc79eab6..428a10b809d68 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -98,10 +98,10 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws String docType = "_doc"; mapperService.merge(docType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(docType, queryField, "type=percolator" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); mapperService.merge(docType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(docType, STRING_FIELD_NAME, "type=text" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); if (mapperService.getIndexSettings().isSingleType() == false) { PercolateQueryBuilderTests.docType = docType; } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index c8ac4212258a3..65cf23f8d6026 
100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -156,7 +156,7 @@ public void init() throws Exception { .startObject("number_field7").field("type", "ip").endObject() .startObject("date_field").field("type", "date").endObject() .endObject().endObject().endObject().string(); - mapperService.merge("doc", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("doc", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE); } private void addQueryFieldMappings() throws Exception { @@ -164,7 +164,7 @@ private void addQueryFieldMappings() throws Exception { String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject("doc") .startObject("properties").startObject(fieldName).field("type", "percolator").endObject().endObject() .endObject().endObject().string(); - mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE); fieldType = (PercolatorFieldMapper.FieldType) mapperService.fullName(fieldName); } @@ -578,7 +578,7 @@ public void testAllowNoAdditionalSettings() throws Exception { .startObject("properties").startObject(fieldName).field("type", "percolator").field("index", "no").endObject().endObject() .endObject().endObject().string(); MapperParsingException e = expectThrows(MapperParsingException.class, () -> - mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true)); + mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), containsString("Mapping definition for [" + fieldName + "] has unsupported parameters: [index : no]")); } @@ -592,7 +592,7 @@ public void testMultiplePercolatorFields() throws Exception { .startObject("query_field2").field("type", "percolator").endObject() .endObject() .endObject().endObject().string(); - mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true); + mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE); QueryBuilder queryBuilder = matchQuery("field", "value"); ParsedDocument doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", @@ -623,7 +623,7 @@ public void testNestedPercolatorField() throws Exception { .endObject() .endObject() .endObject().endObject().string(); - mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true); + mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE); QueryBuilder queryBuilder = matchQuery("field", "value"); ParsedDocument doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java index f927f920f9097..a4502a953dbe0 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java +++ 
b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java @@ -99,8 +99,8 @@ public boolean equals(Object o) { } @Override - public void checkCompatibility(MappedFieldType otherFT, List conflicts, boolean strict) { - super.checkCompatibility(otherFT, conflicts, strict); + public void checkCompatibility(MappedFieldType otherFT, List conflicts) { + super.checkCompatibility(otherFT, conflicts); CollationFieldType other = (CollationFieldType) otherFT; if (!Objects.equals(collator, other.collator)) { conflicts.add("mapper [" + name() + "] has different [collator]"); @@ -619,8 +619,8 @@ protected String contentType() { } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); List conflicts = new ArrayList<>(); ICUCollationKeywordFieldMapper icuMergeWith = (ICUCollationKeywordFieldMapper) mergeWith; diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java index 060a94a9d27b4..88f92d0aad8ba 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperTests.java @@ -434,7 +434,7 @@ public void testUpdateCollator() throws IOException { .field("language", "tr") .field("strength", "primary") .endObject().endObject().endObject().endObject().string(); - indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); + indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") @@ -443,7 +443,7 @@ public void testUpdateCollator() throws IOException { .endObject().endObject().endObject().endObject().string(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("type", - new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, randomBoolean())); + new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); assertEquals("Can't merge because of conflicts: [Cannot update language setting for [" + FIELD_TYPE + "], Cannot update strength setting for [" + FIELD_TYPE + "]]", e.getMessage()); } diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java index b2974f139fb35..d3830ab210662 100644 --- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java +++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java @@ -183,7 +183,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { SizeFieldMapper sizeFieldMapperMergeWith = (SizeFieldMapper) mergeWith; if (sizeFieldMapperMergeWith.enabledState != enabledState && !sizeFieldMapperMergeWith.enabledState.unset()) { this.enabledState = sizeFieldMapperMergeWith.enabledState; diff --git 
a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java index 2cde1b1bd07d2..c433f0d256a97 100644 --- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java +++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java @@ -110,7 +110,7 @@ public void testThatDisablingWorksWhenMerging() throws Exception { .startObject("_size").field("enabled", false).endObject() .endObject().endObject().string(); docMapper = service.mapperService().merge("type", new CompressedXContent(disabledMapping), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); assertThat(docMapper.metadataMapper(SizeFieldMapper.class).enabled(), is(false)); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json index 1433c893e251e..f876df36f882b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json @@ -24,10 +24,6 @@ "master_timeout": { "type" : "time", "description" : "Specify timeout for connection to master" - }, - "update_all_types": { - "type": "boolean", - "description": "Whether to update the mapping for all fields with the same name across all types or not" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json index 5fce0bcefc89a..c6b547914ef79 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json @@ -38,10 +38,6 @@ "options" : ["open","closed","none","all"], "default" : "open", "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." 
- }, - "update_all_types": { - "type": "boolean", - "description": "Whether to update the mapping for all fields with the same name across all types or not" } } }, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java index 1734c340bd4ef..4e2e257887512 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java @@ -43,7 +43,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ private final String cause; private final String index; private final String providedName; - private final boolean updateAllTypes; private Index recoverFrom; private ResizeType resizeType; @@ -61,12 +60,10 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; - public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, String providedName, - boolean updateAllTypes) { + public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, String providedName) { this.originalMessage = originalMessage; this.cause = cause; this.index = index; - this.updateAllTypes = updateAllTypes; this.providedName = providedName; } @@ -155,11 +152,6 @@ public Index recoverFrom() { return recoverFrom; } - /** True if all fields that span multiple types should be updated, false otherwise */ - public boolean updateAllTypes() { - return updateAllTypes; - } - /** * The name that was provided by the user. This might contain a date math expression. 
* @see IndexMetaData#SETTING_INDEX_PROVIDED_NAME diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 17941b582ec31..12f9f75619412 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -85,8 +85,6 @@ public class CreateIndexRequest extends AcknowledgedRequest private final Map customs = new HashMap<>(); - private boolean updateAllTypes = false; - private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; public CreateIndexRequest() { @@ -429,17 +427,6 @@ public Map customs() { return this.customs; } - /** True if all fields that span multiple types should be updated, false otherwise */ - public boolean updateAllTypes() { - return updateAllTypes; - } - - /** See {@link #updateAllTypes()} */ - public CreateIndexRequest updateAllTypes(boolean updateAllTypes) { - this.updateAllTypes = updateAllTypes; - return this; - } - public ActiveShardCount waitForActiveShards() { return waitForActiveShards; } @@ -499,7 +486,9 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < aliasesSize; i++) { aliases.add(Alias.read(in)); } - updateAllTypes = in.readBoolean(); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + in.readBoolean(); // updateAllTypes + } waitForActiveShards = ActiveShardCount.readFrom(in); } @@ -523,7 +512,9 @@ public void writeTo(StreamOutput out) throws IOException { for (Alias alias : aliases) { alias.writeTo(out); } - out.writeBoolean(updateAllTypes); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeBoolean(true); // updateAllTypes + } waitForActiveShards.writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java index fabe269124e9e..b42b4e9236f0e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -239,12 +239,6 @@ public CreateIndexRequestBuilder setSource(XContentBuilder source) { return this; } - /** True if all fields that span multiple types should be updated, false otherwise */ - public CreateIndexRequestBuilder setUpdateAllTypes(boolean updateAllTypes) { - request.updateAllTypes(updateAllTypes); - return this; - } - /** * Sets the number of shard copies that should be active for index creation to return. 
* Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 372c2eb861237..4cf159c439cb5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -72,7 +72,7 @@ protected void masterOperation(final CreateIndexRequest request, final ClusterSt } final String indexName = indexNameExpressionResolver.resolveDateMathExpression(request.index()); - final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index(), request.updateAllTypes()) + final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index()) .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .settings(request.settings()).mappings(request.mappings()) .aliases(request.aliases()).customs(request.customs()) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java index 0f396afa5513b..0deb63ba285ff 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java @@ -30,8 +30,6 @@ public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpda private String source; - private boolean updateAllTypes = false; - public PutMappingClusterStateUpdateRequest() { } @@ -53,13 +51,4 @@ public PutMappingClusterStateUpdateRequest source(String source) { this.source = source; return this; } - - public boolean updateAllTypes() { - return updateAllTypes; - } - - public PutMappingClusterStateUpdateRequest updateAllTypes(boolean updateAllTypes) { - this.updateAllTypes = updateAllTypes; - return this; - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index eecbbc453ee4d..7b6c8f6eb6f40 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -72,7 +72,6 @@ public class PutMappingRequest extends AcknowledgedRequest im private String source; - private boolean updateAllTypes = false; private Index concreteIndex; public PutMappingRequest() { @@ -290,17 +289,6 @@ public PutMappingRequest source(BytesReference mappingSource, XContentType xCont } } - /** True if all fields that span multiple types should be updated, false otherwise */ - public boolean updateAllTypes() { - return updateAllTypes; - } - - /** See {@link #updateAllTypes()} */ - public PutMappingRequest updateAllTypes(boolean updateAllTypes) { - this.updateAllTypes = updateAllTypes; - return this; - } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -312,7 +300,9 @@ public void readFrom(StreamInput 
in) throws IOException { // we do not know the format from earlier versions so convert if necessary source = XContentHelper.convertToJson(new BytesArray(source), false, false, XContentFactory.xContentType(source)); } - updateAllTypes = in.readBoolean(); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + in.readBoolean(); // updateAllTypes + } concreteIndex = in.readOptionalWriteable(Index::new); } @@ -323,7 +313,9 @@ public void writeTo(StreamOutput out) throws IOException { indicesOptions.writeIndicesOptions(out); out.writeOptionalString(type); out.writeString(source); - out.writeBoolean(updateAllTypes); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeBoolean(true); // updateAllTypes + } out.writeOptionalWriteable(concreteIndex); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java index 43bfe78c4871b..7baba39d96a29 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java @@ -98,10 +98,4 @@ public PutMappingRequestBuilder setSource(Object... source) { return this; } - /** True if all fields that span multiple types should be updated, false otherwise */ - public PutMappingRequestBuilder setUpdateAllTypes(boolean updateAllTypes) { - request.updateAllTypes(updateAllTypes); - return this; - } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index d9ebf88fda6d7..e10a20096fa30 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -82,7 +82,6 @@ protected void masterOperation(final PutMappingRequest request, final ClusterSta PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .indices(concreteIndices).type(request.type()) - .updateAllTypes(request.updateAllTypes()) .source(request.source()); metaDataMappingService.putMapping(updateRequest, new ActionListener() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index 2ed5192e6cfb2..ded01077da2af 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -232,7 +232,7 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final Stri createIndexRequest.cause("rollover_index"); createIndexRequest.index(targetIndexName); return new CreateIndexClusterStateUpdateRequest(createIndexRequest, - "rollover_index", targetIndexName, providedIndexName, true) + "rollover_index", targetIndexName, providedIndexName) .ackTimeout(createIndexRequest.timeout()) .masterNodeTimeout(createIndexRequest.masterNodeTimeout()) .settings(createIndexRequest.settings()) diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index 688d33a0be734..28fc994a4677e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -179,7 +179,7 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final Resi targetIndex.settings(settingsBuilder); return new CreateIndexClusterStateUpdateRequest(targetIndex, - cause, targetIndex.index(), targetIndexName, true) + cause, targetIndex.index(), targetIndexName) // mappings are updated on the node when creating in the shards, this prevents race-conditions since all mapping must be // applied once we took the snapshot and if somebody messes things up and switches the index read/write and adds docs we miss // the mappings for everything is corrupted and hard to debug diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 28a7570ca5582..344c424a62484 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -444,7 +444,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { // now add the mappings MapperService mapperService = indexService.mapperService(); try { - mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, request.updateAllTypes()); + mapperService.merge(mappings, MergeReason.MAPPING_UPDATE); } catch (Exception e) { removalExtraInfo = "failed on parsing default mapping/mappings on index creation"; throw e; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 0949032db1665..37831f977aec7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -144,7 +144,7 @@ ClusterState innerExecute(ClusterState currentState, Iterable actio } catch (IOException e) { throw new ElasticsearchException("Failed to create temporary index for parsing the alias", e); } - indexService.mapperService().merge(index, MapperService.MergeReason.MAPPING_RECOVERY, false); + indexService.mapperService().merge(index, MapperService.MergeReason.MAPPING_RECOVERY); } indices.put(action.getIndex(), indexService); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 9d8da37cbeeba..de065a4b922f6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -250,7 +250,7 @@ private static void validateAndAddTemplate(final PutRequest request, IndexTempla mappingsForValidation.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue())); } - dummyIndexService.mapperService().merge(mappingsForValidation, MergeReason.MAPPING_UPDATE, false); + 
dummyIndexService.mapperService().merge(mappingsForValidation, MergeReason.MAPPING_UPDATE); } finally { if (createdIndex != null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index e17b9fbb4d56a..a9301056f5ae0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -187,7 +187,7 @@ public Set> entrySet() { try (IndexAnalyzers fakeIndexAnalzyers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap)) { MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalzyers, xContentRegistry, similarityService, mapperRegistry, () -> null); - mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, false); + mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY); } } catch (Exception ex) { // Wrap the inner exception so we have the index name in the exception message diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 12a56f00bd4f0..a116bc369b5e4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -147,7 +147,7 @@ ClusterState executeRefresh(final ClusterState currentState, final List execute(ClusterSt MapperService mapperService = indicesService.createIndexMapperService(indexMetaData); indexMapperServices.put(index, mapperService); // add mappings for all types, we need them for cross-type validation - mapperService.merge(indexMetaData, MergeReason.MAPPING_RECOVERY, request.updateAllTypes()); + mapperService.merge(indexMetaData, MergeReason.MAPPING_RECOVERY); } } currentState = applyRequest(currentState, request, indexMapperServices); @@ -264,7 +264,7 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt newMapper = mapperService.parse(request.type(), mappingUpdateSource, existingMapper == null); if (existingMapper != null) { // first, simulate: just call merge and ignore the result - existingMapper.merge(newMapper.mapping(), request.updateAllTypes()); + existingMapper.merge(newMapper.mapping()); } else { // TODO: can we find a better place for this validation? 
// The reason this validation is here is that the mapper service doesn't learn about @@ -310,7 +310,7 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt if (existingMapper != null) { existingSource = existingMapper.mappingSource(); } - DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MergeReason.MAPPING_UPDATE, request.updateAllTypes()); + DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MergeReason.MAPPING_UPDATE); CompressedXContent updatedSource = mergedMapper.mappingSource(); if (existingSource != null) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java index 186334c85cb33..0c03e8a551f6f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java @@ -324,8 +324,8 @@ public String typeName() { } @Override - public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { - super.checkCompatibility(fieldType, conflicts, strict); + public void checkCompatibility(MappedFieldType fieldType, List conflicts) { + super.checkCompatibility(fieldType, conflicts); CompletionFieldType other = (CompletionFieldType)fieldType; if (preservePositionIncrements != other.preservePositionIncrements) { @@ -607,8 +607,8 @@ protected String contentType() { } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith; this.maxInputLength = fieldMergeWith.maxInputLength; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 3b21a3bd7400b..00e09112deed2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -219,8 +219,8 @@ public String typeName() { } @Override - public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { - super.checkCompatibility(fieldType, conflicts, strict); + public void checkCompatibility(MappedFieldType fieldType, List conflicts) { + super.checkCompatibility(fieldType, conflicts); DateFieldType other = (DateFieldType) fieldType; if (Objects.equals(dateTimeFormatter().format(), other.dateTimeFormatter().format()) == false) { conflicts.add("mapper [" + name() + "] has different [format] values"); @@ -472,8 +472,8 @@ protected void parseCreateField(ParseContext context, List field } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); final DateFieldMapper other = (DateFieldMapper) mergeWith; if (other.ignoreMalformed.explicit()) { this.ignoreMalformed = other.ignoreMalformed; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index e1e33739ac4b5..42f842e612803 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java 
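The DocumentMapper hunk below follows the same pattern: merging a mapping update is now a single-argument call. A usage sketch (parser and the two mapping sources are assumed to exist, as in the tests further down):

    // Merge an update into an existing DocumentMapper; both inputs stay unmodified.
    DocumentMapper existing = parser.parse("my_type", mappingSource1);   // placeholder names
    DocumentMapper update = parser.parse("my_type", mappingSource2);
    DocumentMapper merged = existing.merge(update.mapping());
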
@@ -296,8 +296,8 @@ public boolean isParent(String type) { return mapperService.getParentTypes().contains(type); } - public DocumentMapper merge(Mapping mapping, boolean updateAllTypes) { - Mapping merged = this.mapping.merge(mapping, updateAllTypes); + public DocumentMapper merge(Mapping mapping) { + Mapping merged = this.mapping.merge(mapping); return new DocumentMapper(mapperService, merged); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 596581a15a22c..aa286b883468f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -218,7 +218,7 @@ static Mapping createDynamicUpdate(Mapping mapping, DocumentMapper docMapper, Li // We can see the same mapper more than once, for example, if we had foo.bar and foo.baz, where // foo did not yet exist. This will create 2 copies in dynamic mappings, which should be identical. // Here we just skip over the duplicates, but we merge them to ensure there are no conflicts. - newMapper.merge(previousMapper, false); + newMapper.merge(previousMapper); continue; } previousMapper = newMapper; @@ -275,7 +275,7 @@ private static void addToLastMapper(List parentMappers, Mapper map int lastIndex = parentMappers.size() - 1; ObjectMapper withNewMapper = parentMappers.get(lastIndex).mappingUpdate(mapper); if (merge) { - withNewMapper = parentMappers.get(lastIndex).merge(withNewMapper, false); + withNewMapper = parentMappers.get(lastIndex).merge(withNewMapper); } parentMappers.set(lastIndex, withNewMapper); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index c6e0dd9c00b72..f23a8d0ce96aa 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -312,17 +312,16 @@ protected FieldMapper clone() { } @Override - public FieldMapper merge(Mapper mergeWith, boolean updateAllTypes) { + public FieldMapper merge(Mapper mergeWith) { FieldMapper merged = clone(); - merged.doMerge(mergeWith, updateAllTypes); + merged.doMerge(mergeWith); return merged; } /** * Merge changes coming from {@code mergeWith} in place. 
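The many per-mapper doMerge hunks in the rest of this patch all reduce to one subclass pattern: call super for the shared checks, then copy over explicitly-set settings. A sketch for a hypothetical MyFieldMapper (not a class in this patch; the ignoreMalformed handling mirrors the date/ip/number mappers below):

    // Post-patch doMerge contract for FieldMapper subclasses.
    @Override
    protected void doMerge(Mapper mergeWith) {
        super.doMerge(mergeWith);                  // shared type and conflict checks
        MyFieldMapper other = (MyFieldMapper) mergeWith;
        if (other.ignoreMalformed.explicit()) {    // only adopt explicitly configured values
            this.ignoreMalformed = other.ignoreMalformed;
        }
    }
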
- * @param updateAllTypes TODO */ - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { if (!this.getClass().equals(mergeWith.getClass())) { String mergedType = mergeWith.getClass().getSimpleName(); if (mergeWith instanceof FieldMapper) { @@ -553,7 +552,7 @@ public MultiFields merge(MultiFields mergeWith) { if (mergeIntoMapper == null) { newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper); } else { - FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper, false); + FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper); newMappersBuilder.put(merged.simpleName(), merged); // override previous definition } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index 8482a94cfc74c..ada640f873975 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -165,17 +165,6 @@ public String typeName() { return CONTENT_TYPE; } - @Override - public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { - super.checkCompatibility(fieldType, conflicts, strict); - if (strict) { - FieldNamesFieldType other = (FieldNamesFieldType)fieldType; - if (isEnabled() != other.isEnabled()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [enabled] across all types."); - } - } - } - public void setEnabled(boolean enabled) { checkIfFrozen(); this.enabled = enabled; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java index fee41e43f2a3c..069468ddb7a25 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java @@ -24,7 +24,6 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -39,37 +38,13 @@ class FieldTypeLookup implements Iterable { /** Full field name to field type */ final CopyOnWriteHashMap fullNameToFieldType; - /** Full field name to types containing a mapping for this full name. */ - final CopyOnWriteHashMap> fullNameToTypes; - /** Create a new empty instance. */ FieldTypeLookup() { fullNameToFieldType = new CopyOnWriteHashMap<>(); - fullNameToTypes = new CopyOnWriteHashMap<>(); } - private FieldTypeLookup( - CopyOnWriteHashMap fullName, - CopyOnWriteHashMap> fullNameToTypes) { + private FieldTypeLookup(CopyOnWriteHashMap fullName) { this.fullNameToFieldType = fullName; - this.fullNameToTypes = fullNameToTypes; - } - - private static CopyOnWriteHashMap> addType(CopyOnWriteHashMap> map, String key, String type) { - Set types = map.get(key); - if (types == null) { - return map.copyAndPut(key, Collections.singleton(type)); - } else if (types.contains(type)) { - // noting to do - return map; - } else { - Set newTypes = new HashSet<>(types.size() + 1); - newTypes.addAll(types); - newTypes.add(type); - assert newTypes.size() == types.size() + 1; - newTypes = Collections.unmodifiableSet(newTypes); - return map.copyAndPut(key, newTypes); - } } /** @@ -77,58 +52,41 @@ private static CopyOnWriteHashMap> addType(CopyOnWriteHashMa * from the provided fields. 
If a field already exists, the field type will be updated * to use the new mappers field type. */ - public FieldTypeLookup copyAndAddAll(String type, Collection fieldMappers, boolean updateAllTypes) { + public FieldTypeLookup copyAndAddAll(String type, Collection fieldMappers) { Objects.requireNonNull(type, "type must not be null"); if (MapperService.DEFAULT_MAPPING.equals(type)) { throw new IllegalArgumentException("Default mappings should not be added to the lookup"); } CopyOnWriteHashMap fullName = this.fullNameToFieldType; - CopyOnWriteHashMap> fullNameToTypes = this.fullNameToTypes; for (FieldMapper fieldMapper : fieldMappers) { MappedFieldType fieldType = fieldMapper.fieldType(); MappedFieldType fullNameFieldType = fullName.get(fieldType.name()); - // is the update even legal? - checkCompatibility(type, fieldMapper, updateAllTypes); - - if (fieldType.equals(fullNameFieldType) == false) { + if (fullNameFieldType == null) { + // introduction of a new field fullName = fullName.copyAndPut(fieldType.name(), fieldMapper.fieldType()); + } else { + // modification of an existing field + checkCompatibility(fullNameFieldType, fieldType); + if (fieldType.equals(fullNameFieldType) == false) { + fullName = fullName.copyAndPut(fieldType.name(), fieldMapper.fieldType()); + } } - - fullNameToTypes = addType(fullNameToTypes, fieldType.name(), type); - } - return new FieldTypeLookup(fullName, fullNameToTypes); - } - - private static boolean beStrict(String type, Set types, boolean updateAllTypes) { - assert types.size() >= 1; - if (updateAllTypes) { - return false; - } else if (types.size() == 1 && types.contains(type)) { - // we are implicitly updating all types - return false; - } else { - return true; } + return new FieldTypeLookup(fullName); } /** * Checks if the given field type is compatible with an existing field type. * An IllegalArgumentException is thrown in case of incompatibility. - * If updateAllTypes is true, only basic compatibility is checked. */ - private void checkCompatibility(String type, FieldMapper fieldMapper, boolean updateAllTypes) { - MappedFieldType fieldType = fullNameToFieldType.get(fieldMapper.fieldType().name()); - if (fieldType != null) { - List conflicts = new ArrayList<>(); - final Set types = fullNameToTypes.get(fieldMapper.fieldType().name()); - boolean strict = beStrict(type, types, updateAllTypes); - fieldType.checkCompatibility(fieldMapper.fieldType(), conflicts, strict); - if (conflicts.isEmpty() == false) { - throw new IllegalArgumentException("Mapper for [" + fieldMapper.fieldType().name() + "] conflicts with existing mapping in other types:\n" + conflicts.toString()); - } + private void checkCompatibility(MappedFieldType existingFieldType, MappedFieldType newFieldType) { + List conflicts = new ArrayList<>(); + existingFieldType.checkCompatibility(newFieldType, conflicts); + if (conflicts.isEmpty() == false) { + throw new IllegalArgumentException("Mapper for [" + newFieldType.name() + "] conflicts with existing mapping:\n" + conflicts.toString()); } } @@ -137,15 +95,6 @@ public MappedFieldType get(String field) { return fullNameToFieldType.get(field); } - /** Get the set of types that have a mapping for the given field. */ - public Set getTypes(String field) { - Set types = fullNameToTypes.get(field); - if (types == null) { - types = Collections.emptySet(); - } - return types; - } - /** * Returns a list of the full names of a simple match regex like pattern against full name and index name. 
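With the per-type bookkeeping gone, the FieldTypeLookup update above reduces to two cases per field. A condensed restatement of the new copyAndAddAll flow (variable names as in the diff):

    // New field: just add it. Existing field: check compatibility, then update if changed.
    MappedFieldType fullNameFieldType = fullName.get(fieldType.name());
    if (fullNameFieldType == null) {
        fullName = fullName.copyAndPut(fieldType.name(), fieldMapper.fieldType());
    } else {
        checkCompatibility(fullNameFieldType, fieldType);   // throws IllegalArgumentException on conflict
        if (fieldType.equals(fullNameFieldType) == false) {
            fullName = fullName.copyAndPut(fieldType.name(), fieldMapper.fieldType());
        }
    }
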
*/ diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 45237eb572d2c..7b9eb5f067a67 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -142,8 +142,8 @@ public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedF } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); GeoPointFieldMapper gpfmMergeWith = (GeoPointFieldMapper) mergeWith; if (gpfmMergeWith.ignoreMalformed.explicit()) { this.ignoreMalformed = gpfmMergeWith.ignoreMalformed; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index 68d6ac66678e7..9e2a17817acde 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -309,8 +309,8 @@ public void freeze() { } @Override - public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { - super.checkCompatibility(fieldType, conflicts, strict); + public void checkCompatibility(MappedFieldType fieldType, List conflicts) { + super.checkCompatibility(fieldType, conflicts); GeoShapeFieldType other = (GeoShapeFieldType)fieldType; // prevent user from changing strategies if (strategyName().equals(other.strategyName()) == false) { @@ -334,15 +334,6 @@ public void checkCompatibility(MappedFieldType fieldType, List conflicts if (precisionInMeters() != other.precisionInMeters()) { conflicts.add("mapper [" + name() + "] has different [precision]"); } - - if (strict) { - if (orientation() != other.orientation()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [orientation] across all types."); - } - if (distanceErrorPct() != other.distanceErrorPct()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. 
Set update_all_types to true to update [distance_error_pct] across all types."); - } - } } private static int getLevels(int treeLevels, double precisionInMeters, int defaultLevels, boolean geoHash) { @@ -511,8 +502,8 @@ protected void parseCreateField(ParseContext context, List field } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper)mergeWith; if (gsfm.coerce.explicit()) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java index 41256d3a5bb58..e60b27fce7239 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java @@ -314,7 +314,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // do nothing here, no merging, but also no exception } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java index 1bdb125b4e7cb..8e92ecc8bf686 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java @@ -189,7 +189,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // nothing to do } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index bc811d041e313..c10c2339b895e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -390,8 +390,8 @@ protected void parseCreateField(ParseContext context, List field } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); IpFieldMapper other = (IpFieldMapper) mergeWith; if (other.ignoreMalformed.explicit()) { this.ignoreMalformed = other.ignoreMalformed; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index cb2c4b6b6fddf..76163929e68ae 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -187,8 +187,8 @@ public boolean equals(Object o) { } @Override - public void checkCompatibility(MappedFieldType otherFT, List conflicts, boolean strict) { - super.checkCompatibility(otherFT, conflicts, strict); + public void checkCompatibility(MappedFieldType otherFT, List conflicts) { + super.checkCompatibility(otherFT, conflicts); KeywordFieldType other = (KeywordFieldType) otherFT; if (Objects.equals(normalizer, other.normalizer) == false) { conflicts.add("mapper [" + name() + "] has different [normalizer]"); @@ -352,8 +352,8 @@ protected String contentType() { } @Override - 
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); this.ignoreAbove = ((KeywordFieldMapper) mergeWith).ignoreAbove; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 6eab90875345b..69189ab129762 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -157,7 +157,7 @@ private void checkTypeName(MappedFieldType other) { * If strict is true, all properties must be equal. * Otherwise, only properties which must never change in an index are checked. */ - public void checkCompatibility(MappedFieldType other, List conflicts, boolean strict) { + public void checkCompatibility(MappedFieldType other, List conflicts) { checkTypeName(other); boolean indexed = indexOptions() != IndexOptions.NONE; @@ -202,27 +202,6 @@ public void checkCompatibility(MappedFieldType other, List conflicts, bo if (Objects.equals(similarity(), other.similarity()) == false) { conflicts.add("mapper [" + name() + "] has different [similarity]"); } - - if (strict) { - if (omitNorms() != other.omitNorms()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [omit_norms] across all types."); - } - if (boost() != other.boost()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [boost] across all types."); - } - if (Objects.equals(searchAnalyzer(), other.searchAnalyzer()) == false) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [search_analyzer] across all types."); - } - if (Objects.equals(searchQuoteAnalyzer(), other.searchQuoteAnalyzer()) == false) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [search_quote_analyzer] across all types."); - } - if (Objects.equals(nullValue(), other.nullValue()) == false) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [null_value] across all types."); - } - if (eagerGlobalOrdinals() != other.eagerGlobalOrdinals()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [eager_global_ordinals] across all types."); - } - } } public String name() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 4f78ec0ad9561..051ac9da7f2ec 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -175,7 +175,7 @@ public final String simpleName() { /** Return the merge of {@code mergeWith} into this. * Both {@code this} and {@code mergeWith} will be left unmodified. */ - public abstract Mapper merge(Mapper mergeWith, boolean updateAllTypes); + public abstract Mapper merge(Mapper mergeWith); /** * Update the field type of this mapper. 
This is necessary because some mapping updates diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 51ebe9d980b7a..a04673eca4c38 100755 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -215,7 +215,7 @@ public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { final Map updatedEntries; try { // only update entries if needed - updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true, true); + updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true); } catch (Exception e) { logger.warn((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e); throw e; @@ -250,7 +250,7 @@ public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { return requireRefresh; } - public void merge(Map> mappings, MergeReason reason, boolean updateAllTypes) { + public void merge(Map> mappings, MergeReason reason) { Map mappingSourcesCompressed = new LinkedHashMap<>(mappings.size()); for (Map.Entry> entry : mappings.entrySet()) { try { @@ -260,19 +260,18 @@ public void merge(Map> mappings, MergeReason reason, } } - internalMerge(mappingSourcesCompressed, reason, updateAllTypes); + internalMerge(mappingSourcesCompressed, reason); } - public void merge(IndexMetaData indexMetaData, MergeReason reason, boolean updateAllTypes) { - internalMerge(indexMetaData, reason, updateAllTypes, false); + public void merge(IndexMetaData indexMetaData, MergeReason reason) { + internalMerge(indexMetaData, reason, false); } - public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason, boolean updateAllTypes) { - return internalMerge(Collections.singletonMap(type, mappingSource), reason, updateAllTypes).get(type); + public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason) { + return internalMerge(Collections.singletonMap(type, mappingSource), reason).get(type); } - private synchronized Map internalMerge(IndexMetaData indexMetaData, MergeReason reason, boolean updateAllTypes, - boolean onlyUpdateIfNeeded) { + private synchronized Map internalMerge(IndexMetaData indexMetaData, MergeReason reason, boolean onlyUpdateIfNeeded) { Map map = new LinkedHashMap<>(); for (ObjectCursor cursor : indexMetaData.getMappings().values()) { MappingMetaData mappingMetaData = cursor.value; @@ -285,10 +284,10 @@ private synchronized Map internalMerge(IndexMetaData ind map.put(mappingMetaData.type(), mappingMetaData.source()); } } - return internalMerge(map, reason, updateAllTypes); + return internalMerge(map, reason); } - private synchronized Map internalMerge(Map mappings, MergeReason reason, boolean updateAllTypes) { + private synchronized Map internalMerge(Map mappings, MergeReason reason) { DocumentMapper defaultMapper = null; String defaultMappingSource = null; @@ -336,7 +335,7 @@ private synchronized Map internalMerge(Map internalMerge(@Nullable DocumentMapper defaultMapper, @Nullable String defaultMappingSource, - List documentMappers, MergeReason reason, boolean updateAllTypes) { + List documentMappers, MergeReason reason) { boolean hasNested = this.hasNested; Map fullPathObjectMappers = this.fullPathObjectMappers; FieldTypeLookup fieldTypes = this.fieldTypes; @@ -392,7 +391,7 @@ private synchronized Map 
internalMerge(@Nullable Documen DocumentMapper oldMapper = mappers.get(mapper.type()); DocumentMapper newMapper; if (oldMapper != null) { - newMapper = oldMapper.merge(mapper.mapping(), updateAllTypes); + newMapper = oldMapper.merge(mapper.mapping()); } else { newMapper = mapper; } @@ -403,12 +402,12 @@ private synchronized Map internalMerge(@Nullable Documen Collections.addAll(fieldMappers, newMapper.mapping().metadataMappers); MapperUtils.collect(newMapper.mapping().root(), objectMappers, fieldMappers); checkFieldUniqueness(newMapper.type(), objectMappers, fieldMappers, fullPathObjectMappers, fieldTypes); - checkObjectsCompatibility(objectMappers, updateAllTypes, fullPathObjectMappers); + checkObjectsCompatibility(objectMappers, fullPathObjectMappers); checkPartitionedIndexConstraints(newMapper); // update lookup data-structures // this will in particular make sure that the merged fields are compatible with other types - fieldTypes = fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers, updateAllTypes); + fieldTypes = fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers); for (ObjectMapper objectMapper : objectMappers) { if (fullPathObjectMappers == this.fullPathObjectMappers) { @@ -575,14 +574,14 @@ private static void checkFieldUniqueness(String type, Collection o } } - private static void checkObjectsCompatibility(Collection objectMappers, boolean updateAllTypes, + private static void checkObjectsCompatibility(Collection objectMappers, Map fullPathObjectMappers) { for (ObjectMapper newObjectMapper : objectMappers) { ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath()); if (existingObjectMapper != null) { // simulate a merge and ignore the result, we are just interested // in exceptions here - existingObjectMapper.merge(newObjectMapper, updateAllTypes); + existingObjectMapper.merge(newObjectMapper); } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java index 8a90de4d47aa5..bd92cf6d00970 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -84,9 +84,9 @@ public T metadataMapper(Class clazz) { return (T) metadataMappersMap.get(clazz); } - /** @see DocumentMapper#merge(Mapping, boolean) */ - public Mapping merge(Mapping mergeWith, boolean updateAllTypes) { - RootObjectMapper mergedRoot = root.merge(mergeWith.root, updateAllTypes); + /** @see DocumentMapper#merge(Mapping) */ + public Mapping merge(Mapping mergeWith) { + RootObjectMapper mergedRoot = root.merge(mergeWith.root); Map, MetadataFieldMapper> mergedMetaDataMappers = new HashMap<>(metadataMappersMap); for (MetadataFieldMapper metaMergeWith : mergeWith.metadataMappers) { MetadataFieldMapper mergeInto = mergedMetaDataMappers.get(metaMergeWith.getClass()); @@ -94,7 +94,7 @@ public Mapping merge(Mapping mergeWith, boolean updateAllTypes) { if (mergeInto == null) { merged = metaMergeWith; } else { - merged = mergeInto.merge(metaMergeWith, updateAllTypes); + merged = mergeInto.merge(metaMergeWith); } mergedMetaDataMappers.put(merged.getClass(), merged); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java index 264c2abd56820..1240250a74743 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java +++ 
b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java @@ -67,7 +67,7 @@ protected MetadataFieldMapper(String simpleName, MappedFieldType fieldType, Mapp public abstract void postParse(ParseContext context) throws IOException; @Override - public MetadataFieldMapper merge(Mapper mergeWith, boolean updateAllTypes) { - return (MetadataFieldMapper) super.merge(mergeWith, updateAllTypes); + public MetadataFieldMapper merge(Mapper mergeWith) { + return (MetadataFieldMapper) super.merge(mergeWith); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index a44611d6406a1..92cb44cfd147f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -1019,8 +1019,8 @@ protected void parseCreateField(ParseContext context, List field } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); NumberFieldMapper other = (NumberFieldMapper) mergeWith; if (other.ignoreMalformed.explicit()) { this.ignoreMalformed = other.ignoreMalformed; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index d83ce173d6896..c96d8bb384bb6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.fielddata.ScriptDocValues; import java.io.IOException; import java.util.ArrayList; @@ -139,7 +138,7 @@ public Y build(BuilderContext context) { Mapper mapper = builder.build(context); Mapper existing = mappers.get(mapper.simpleName()); if (existing != null) { - mapper = existing.merge(mapper, false); + mapper = existing.merge(mapper); } mappers.put(mapper.simpleName(), mapper); } @@ -426,17 +425,17 @@ public boolean parentObjectMapperAreNested(MapperService mapperService) { } @Override - public ObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) { + public ObjectMapper merge(Mapper mergeWith) { if (!(mergeWith instanceof ObjectMapper)) { throw new IllegalArgumentException("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]"); } ObjectMapper mergeWithObject = (ObjectMapper) mergeWith; ObjectMapper merged = clone(); - merged.doMerge(mergeWithObject, updateAllTypes); + merged.doMerge(mergeWithObject); return merged; } - protected void doMerge(final ObjectMapper mergeWith, boolean updateAllTypes) { + protected void doMerge(final ObjectMapper mergeWith) { if (nested().isNested()) { if (!mergeWith.nested().isNested()) { throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from nested to non-nested"); @@ -459,7 +458,7 @@ protected void doMerge(final ObjectMapper mergeWith, boolean updateAllTypes) { merged = mergeWithMapper; } else { // root mappers can only exist here for backcompat, and are merged in Mapping - merged = mergeIntoMapper.merge(mergeWithMapper, updateAllTypes); + merged = mergeIntoMapper.merge(mergeWithMapper); } putMapper(merged); } diff --git 
a/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java index 34eaf569ca949..1d3588ae5a745 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java @@ -301,7 +301,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith; if (fieldMergeWith.parentType != null && Objects.equals(parentType, fieldMergeWith.parentType) == false) { throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]"); @@ -310,7 +310,7 @@ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // update that does not explicitly configure the _parent field, so we // ignore it. if (fieldMergeWith.active()) { - super.doMerge(mergeWith, updateAllTypes); + super.doMerge(mergeWith); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java index 11804c2e88e1d..0c740a0af7c8b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java @@ -131,7 +131,7 @@ public void addDynamicMappingsUpdate(Mapping update) { if (dynamicMappingsUpdate == null) { dynamicMappingsUpdate = update; } else { - dynamicMappingsUpdate = dynamicMappingsUpdate.merge(update, false); + dynamicMappingsUpdate = dynamicMappingsUpdate.merge(update); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java index 9a00ddebe83ba..1536db6510fc7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java @@ -256,29 +256,6 @@ public String typeName() { return rangeType.name; } - @Override - public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { - super.checkCompatibility(fieldType, conflicts, strict); - if (strict) { - RangeFieldType other = (RangeFieldType)fieldType; - if (this.rangeType != other.rangeType) { - conflicts.add("mapper [" + name() - + "] is attempting to update from type [" + rangeType.name - + "] to incompatible type [" + other.rangeType.name + "]."); - } - if (this.rangeType == RangeType.DATE) { - if (Objects.equals(dateTimeFormatter().format(), other.dateTimeFormatter().format()) == false) { - conflicts.add("mapper [" + name() - + "] is used by multiple types. Set update_all_types to true to update [format] across all types."); - } - if (Objects.equals(dateTimeFormatter().locale(), other.dateTimeFormatter().locale()) == false) { - conflicts.add("mapper [" + name() - + "] is used by multiple types. 
Set update_all_types to true to update [locale] across all types."); - } - } - } - } - public FormatDateTimeFormatter dateTimeFormatter() { return dateTimeFormatter; } @@ -416,8 +393,8 @@ protected void parseCreateField(ParseContext context, List field } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); RangeFieldMapper other = (RangeFieldMapper) mergeWith; if (other.coerce.explicit()) { this.coerce = other.coerce; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index 42341bfb96b2d..009caf2b8e814 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -268,13 +268,13 @@ public DynamicTemplate findTemplate(ContentPath path, String name, XContentField } @Override - public RootObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) { - return (RootObjectMapper) super.merge(mergeWith, updateAllTypes); + public RootObjectMapper merge(Mapper mergeWith) { + return (RootObjectMapper) super.merge(mergeWith); } @Override - protected void doMerge(ObjectMapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(ObjectMapper mergeWith) { + super.doMerge(mergeWith); RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith; if (mergeWithObject.numericDetection.explicit()) { this.numericDetection = mergeWithObject.numericDetection; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java index a4b009f9f1fa3..25cfc71261b0a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java @@ -201,7 +201,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // do nothing here, no merging, but also no exception } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java index 01b302797e2ec..197d555736343 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java @@ -278,7 +278,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // nothing to do } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 47d5e64438e57..b4a8330e23803 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -291,7 +291,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { 
SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith; List conflicts = new ArrayList<>(); if (this.enabled != sourceMergeWith.enabled) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index ae99f743fe57f..4d67ec4cfbc19 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -212,31 +212,6 @@ public int hashCode() { fielddataMinFrequency, fielddataMaxFrequency, fielddataMinSegmentSize); } - @Override - public void checkCompatibility(MappedFieldType other, - List conflicts, boolean strict) { - super.checkCompatibility(other, conflicts, strict); - TextFieldType otherType = (TextFieldType) other; - if (strict) { - if (fielddata() != otherType.fielddata()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [fielddata] " - + "across all types."); - } - if (fielddataMinFrequency() != otherType.fielddataMinFrequency()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update " - + "[fielddata_frequency_filter.min] across all types."); - } - if (fielddataMaxFrequency() != otherType.fielddataMaxFrequency()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update " - + "[fielddata_frequency_filter.max] across all types."); - } - if (fielddataMinSegmentSize() != otherType.fielddataMinSegmentSize()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update " - + "[fielddata_frequency_filter.min_segment_size] across all types."); - } - } - } - public boolean fielddata() { return fielddata; } @@ -357,8 +332,8 @@ protected String contentType() { } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java index 712e9edec9e27..b47242d02b0f3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java @@ -316,7 +316,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // do nothing here, no merging, but also no exception } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java index 95dc40bca637a..04e791b8cee1e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java @@ -229,7 +229,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // do nothing here, no merging, but also no exception } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java 
b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java index c5ead1327cc9b..bedb98e2126ac 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java @@ -145,7 +145,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // nothing to do } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 81ffbea642c58..c3b4525924ae3 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -112,7 +112,7 @@ boolean recoverFromLocalShards(BiConsumer mappingUpdate for (ObjectObjectCursor mapping : sourceMetaData.getMappings()) { mappingUpdateConsumer.accept(mapping.key, mapping.value); } - indexShard.mapperService().merge(sourceMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true); + indexShard.mapperService().merge(sourceMetaData, MapperService.MergeReason.MAPPING_RECOVERY); // now that the mapping is merged we can validate the index sort configuration. Sort indexSort = indexShard.getIndexSort(); final boolean hasNested = indexShard.mapperService().hasNested(); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 7e0bff5384183..f7c4a39eee380 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -495,7 +495,7 @@ public synchronized void verifyIndexMetadata(IndexMetaData metaData, IndexMetaDa final IndexService service = createIndexService("metadata verification", metaData, indicesQueryCache, indicesFieldDataCache, emptyList()); closeables.add(() -> service.close("metadata verification", false)); - service.mapperService().merge(metaData, MapperService.MergeReason.MAPPING_RECOVERY, true); + service.mapperService().merge(metaData, MapperService.MergeReason.MAPPING_RECOVERY); if (metaData.equals(metaDataUpdate) == false) { service.updateMetaData(metaDataUpdate); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java index 6a741fd3951d3..0934d8557158b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java @@ -49,7 +49,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (request.hasContent()) { createIndexRequest.source(request.content(), request.getXContentType()); } - createIndexRequest.updateAllTypes(request.paramAsBoolean("update_all_types", false)); createIndexRequest.timeout(request.paramAsTime("timeout", createIndexRequest.timeout())); createIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createIndexRequest.masterNodeTimeout())); createIndexRequest.waitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java index 8d7e4a9e6c836..cdac83037db30 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java @@ -70,7 +70,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC PutMappingRequest putMappingRequest = putMappingRequest(Strings.splitStringByCommaToArray(request.param("index"))); putMappingRequest.type(request.param("type")); putMappingRequest.source(request.requiredContent(), request.getXContentType()); - putMappingRequest.updateAllTypes(request.paramAsBoolean("update_all_types", false)); putMappingRequest.timeout(request.paramAsTime("timeout", putMappingRequest.timeout())); putMappingRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putMappingRequest.masterNodeTimeout())); putMappingRequest.indicesOptions(IndicesOptions.fromRequest(request, putMappingRequest.indicesOptions())); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 14d6647071453..df63613b5b97d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -280,7 +280,7 @@ public void testMappingConflictRootCause() throws Exception { .field("type", "text") .endObject().endObject().endObject()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> b.get()); - assertThat(e.getMessage(), containsString("mapper [text] is used by multiple types")); + assertThat(e.getMessage(), containsString("Mapper for [text] conflicts with existing mapping:")); } public void testRestartIndexCreationAfterFullClusterRestart() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index f44d0b7c4036e..a315cdc820678 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -249,7 +249,7 @@ public void testRequestStateOpen() throws Exception { @SuppressWarnings("unchecked") public void testIndexRemovalOnFailure() throws Exception { - doThrow(new RuntimeException("oops")).when(mapper).merge(anyMap(), anyObject(), anyBoolean()); + doThrow(new RuntimeException("oops")).when(mapper).merge(anyMap(), anyObject()); expectThrows(RuntimeException.class, this::executeTask); @@ -333,7 +333,7 @@ private void addMatchingTemplate(MetaDataBuilderConfigurator configurator) throw @SuppressWarnings("unchecked") private Map> getMappingsFromResponse() { final ArgumentCaptor argument = ArgumentCaptor.forClass(Map.class); - verify(mapper).merge(argument.capture(), anyObject(), anyBoolean()); + verify(mapper).merge(argument.capture(), anyObject()); return argument.getValue(); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java index 4ccc8bc215fd7..b33d98c9b0015 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java @@ -33,7 +33,7 @@ public void testUpdateDefaultSearchAnalyzer() throws Exception { .put("index.analysis.analyzer.default_search.type", "custom") .put("index.analysis.analyzer.default_search.tokenizer", "standard").build()); String mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc").endObject().endObject().string(); - indexService.mapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, indexService.mapperService().documentMapper("_doc").mapping().toString()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java index e6a1c0a69d81a..bb839d8e57361 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java @@ -211,7 +211,7 @@ public void testMultiFields() throws IOException { .endObject().endObject() .endObject().endObject().string(); DocumentMapper mapper = indexService.mapperService() - .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); BytesReference source = XContentFactory.jsonBuilder() .startObject() diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java index a0b6a1458e24f..b4c698fa26d0b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java @@ -306,11 +306,11 @@ public void testCopyToFieldMerge() throws Exception { .endObject().endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper docMapperBefore = mapperService.merge("type1", new CompressedXContent(mappingBefore), MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper docMapperBefore = mapperService.merge("type1", new CompressedXContent(mappingBefore), MapperService.MergeReason.MAPPING_UPDATE); assertEquals(Arrays.asList("foo", "bar"), docMapperBefore.mappers().getMapper("copy_test").copyTo().copyToFields()); - DocumentMapper docMapperAfter = mapperService.merge("type1", new CompressedXContent(mappingAfter), MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper docMapperAfter = mapperService.merge("type1", new CompressedXContent(mappingAfter), MapperService.MergeReason.MAPPING_UPDATE); assertEquals(Arrays.asList("baz", "bar"), docMapperAfter.mappers().getMapper("copy_test").copyTo().copyToFields()); assertEquals(Arrays.asList("foo", "bar"), docMapperBefore.mappers().getMapper("copy_test").copyTo().copyToFields()); @@ -438,7 +438,7 @@ public void testCopyToChildNested() throws Exception { .endObject(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("_doc", new CompressedXContent(rootToNestedMapping.bytes()), - MergeReason.MAPPING_UPDATE, false)); + MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), Matchers.startsWith("Illegal combination of [copy_to] 
and [nested] mappings")); XContentBuilder nestedToNestedMapping = jsonBuilder().startObject() @@ -466,7 +466,7 @@ public void testCopyToChildNested() throws Exception { .endObject(); e = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("_doc", new CompressedXContent(nestedToNestedMapping.bytes()), - MergeReason.MAPPING_UPDATE, false)); + MergeReason.MAPPING_UPDATE)); } public void testCopyToSiblingNested() throws Exception { @@ -496,7 +496,7 @@ public void testCopyToSiblingNested() throws Exception { .endObject(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("_doc", new CompressedXContent(rootToNestedMapping.bytes()), - MergeReason.MAPPING_UPDATE, false)); + MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), Matchers.startsWith("Illegal combination of [copy_to] and [nested] mappings")); } @@ -517,7 +517,7 @@ public void testCopyToObject() throws Exception { .endObject(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("_doc", new CompressedXContent(rootToNestedMapping.bytes()), - MergeReason.MAPPING_UPDATE, false)); + MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), Matchers.startsWith("Cannot copy to field [target] since it is mapped as an object")); } @@ -585,7 +585,7 @@ public void testCopyToMultiField() throws Exception { MapperService mapperService = createIndex("test").mapperService(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean())); + () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE)); assertEquals("[copy_to] may not be used to copy to a multi-field: [my_field.bar]", e.getMessage()); } @@ -608,7 +608,7 @@ public void testNestedCopyTo() throws Exception { .endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); // no exception + mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); // no exception } public void testNestedCopyToMultiField() throws Exception { @@ -633,7 +633,7 @@ public void testNestedCopyToMultiField() throws Exception { MapperService mapperService = createIndex("test").mapperService(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean())); + () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE)); assertEquals("[copy_to] may not be used to copy to a multi-field: [n.my_field.bar]", e.getMessage()); } @@ -654,7 +654,7 @@ public void testCopyFromMultiField() throws Exception { MapperService mapperService = createIndex("test").mapperService(); MapperParsingException e = expectThrows(MapperParsingException.class, - () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean())); + () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), Matchers.containsString("copy_to in multi fields is not allowed. 
Found the copy_to in field [bar] " + "which is within a multi field.")); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index 5776e9d618e3b..23bcba4cda76b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -380,7 +380,7 @@ public void testMergeDate() throws IOException { .startObject("release_date").field("type", "date").field("format", "yyyy/MM/dd").endObject() .endObject().endObject().endObject().string(); DocumentMapper initMapper = indexService.mapperService().merge("movie", new CompressedXContent(initMapping), - MapperService.MergeReason.MAPPING_UPDATE, randomBoolean()); + MapperService.MergeReason.MAPPING_UPDATE); assertThat(initMapper.mappers().getMapper("release_date"), notNullValue()); assertFalse(initMapper.mappers().getMapper("release_date").fieldType().stored()); @@ -392,7 +392,7 @@ public void testMergeDate() throws IOException { Exception e = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("movie", new CompressedXContent(updateFormatMapping), - MapperService.MergeReason.MAPPING_UPDATE, randomBoolean())); + MapperService.MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), containsString("[mapper [release_date] has different [format] values]")); } @@ -408,7 +408,7 @@ public void testMergeText() throws Exception { DocumentMapper update = indexService.mapperService().parse("_doc", new CompressedXContent(mappingUpdate), false); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> mapper.merge(update.mapping(), randomBoolean())); + () -> mapper.merge(update.mapping())); assertEquals("mapper [date] of different type, current_type [date], merged_type [text]", e.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java index 68389112bfd51..b528c2119cfe1 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java @@ -53,7 +53,7 @@ public void test1Merge() throws Exception { .endObject().endObject().endObject().string(); DocumentMapper stage2 = parser.parse("person", new CompressedXContent(stage2Mapping)); - DocumentMapper merged = stage1.merge(stage2.mapping(), false); + DocumentMapper merged = stage1.merge(stage2.mapping()); // stage1 mapping should not have been modified assertThat(stage1.mappers().smartNameFieldMapper("age"), nullValue()); assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), nullValue()); @@ -72,7 +72,7 @@ public void testMergeObjectDynamic() throws Exception { DocumentMapper withDynamicMapper = parser.parse("type1", new CompressedXContent(withDynamicMapping)); assertThat(withDynamicMapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE)); - DocumentMapper merged = mapper.merge(withDynamicMapper.mapping(), false); + DocumentMapper merged = mapper.merge(withDynamicMapper.mapping()); assertThat(merged.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE)); } @@ -88,14 +88,14 @@ public void testMergeObjectAndNested() throws Exception { DocumentMapper nestedMapper = parser.parse("type1", new CompressedXContent(nestedMapping)); try { - objectMapper.merge(nestedMapper.mapping(), 
false); + objectMapper.merge(nestedMapper.mapping()); fail(); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("object mapping [obj] can't be changed from non-nested to nested")); } try { - nestedMapper.merge(objectMapper.mapping(), false); + nestedMapper.merge(objectMapper.mapping()); fail(); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("object mapping [obj] can't be changed from nested to non-nested")); @@ -115,7 +115,7 @@ public void testMergeSearchAnalyzer() throws Exception { DocumentMapper changed = parser.parse("type", new CompressedXContent(mapping2)); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("whitespace")); - DocumentMapper merged = existing.merge(changed.mapping(), false); + DocumentMapper merged = existing.merge(changed.mapping()); assertThat(((NamedAnalyzer) merged.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("keyword")); } @@ -129,8 +129,8 @@ public void testChangeSearchAnalyzerToDefault() throws Exception { .startObject("properties").startObject("field").field("type", "text").field("analyzer", "standard").endObject().endObject() .endObject().endObject().string(); - DocumentMapper existing = mapperService.merge("type", new CompressedXContent(mapping1), MapperService.MergeReason.MAPPING_UPDATE, false); - DocumentMapper merged = mapperService.merge("type", new CompressedXContent(mapping2), MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper existing = mapperService.merge("type", new CompressedXContent(mapping1), MapperService.MergeReason.MAPPING_UPDATE); + DocumentMapper merged = mapperService.merge("type", new CompressedXContent(mapping2), MapperService.MergeReason.MAPPING_UPDATE); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("whitespace")); @@ -139,7 +139,7 @@ public void testChangeSearchAnalyzerToDefault() throws Exception { public void testConcurrentMergeTest() throws Throwable { final MapperService mapperService = createIndex("test").mapperService(); - mapperService.merge("test", new CompressedXContent("{\"test\":{}}"), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("test", new CompressedXContent("{\"test\":{}}"), MapperService.MergeReason.MAPPING_UPDATE); final DocumentMapper documentMapper = mapperService.documentMapper("test"); DocumentFieldMappers dfm = documentMapper.mappers(); @@ -169,7 +169,7 @@ public void run() { Mapping update = doc.dynamicMappingsUpdate(); assert update != null; lastIntroducedFieldName.set(fieldName); - mapperService.merge("test", new CompressedXContent(update.toString()), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("test", new CompressedXContent(update.toString()), MapperService.MergeReason.MAPPING_UPDATE); } } catch (Exception e) { error.set(e); @@ -213,7 +213,7 @@ public void testDoNotRepeatOriginalMapping() throws IOException { .endObject() .endObject().endObject().bytes()); MapperService mapperService = createIndex("test").mapperService(); - mapperService.merge("type", mapping, MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type", mapping, MapperService.MergeReason.MAPPING_UPDATE); CompressedXContent update = new CompressedXContent(XContentFactory.jsonBuilder().startObject() .startObject("type") @@ -223,7 +223,7 @@ public void testDoNotRepeatOriginalMapping() throws IOException { .endObject() .endObject() 
.endObject().endObject().bytes()); - DocumentMapper mapper = mapperService.merge("type", update, MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", update, MapperService.MergeReason.MAPPING_UPDATE); assertNotNull(mapper.mappers().getMapper("foo")); assertFalse(mapper.sourceMapper().enabled()); @@ -244,7 +244,7 @@ public void testMergeChildType() throws IOException { .startObject("name").field("type", "text").endObject() .endObject().endObject().endObject().string(); DocumentMapper updatedMapper1 = parser.parse("child", new CompressedXContent(updatedMapping1)); - DocumentMapper mergedMapper1 = initMapper.merge(updatedMapper1.mapping(), false); + DocumentMapper mergedMapper1 = initMapper.merge(updatedMapper1.mapping()); assertThat(mergedMapper1.mappers().getMapper("_parent#parent"), notNullValue()); assertThat(mergedMapper1.mappers().getMapper("name"), notNullValue()); @@ -255,7 +255,7 @@ public void testMergeChildType() throws IOException { .startObject("age").field("type", "byte").endObject() .endObject().endObject().endObject().string(); DocumentMapper updatedMapper2 = parser.parse("child", new CompressedXContent(updatedMapping2)); - DocumentMapper mergedMapper2 = mergedMapper1.merge(updatedMapper2.mapping(), false); + DocumentMapper mergedMapper2 = mergedMapper1.merge(updatedMapper2.mapping()); assertThat(mergedMapper2.mappers().getMapper("_parent#parent"), notNullValue()); assertThat(mergedMapper2.mappers().getMapper("name"), notNullValue()); @@ -265,7 +265,7 @@ public void testMergeChildType() throws IOException { .startObject("_parent").field("type", "new_parent").endObject() .endObject().endObject().string(); DocumentMapper modParentMapper = parser.parse("child", new CompressedXContent(modParentMapping)); - Exception e = expectThrows(IllegalArgumentException.class, () -> initMapper.merge(modParentMapper.mapping(), false)); + Exception e = expectThrows(IllegalArgumentException.class, () -> initMapper.merge(modParentMapper.mapping())); assertThat(e.getMessage(), containsString("The _parent field's type option can't be changed: [parent]->[new_parent]")); } @@ -286,7 +286,7 @@ public void testMergeAddingParent() throws IOException { .startObject("age").field("type", "byte").endObject() .endObject().endObject().endObject().string(); DocumentMapper updatedMapper = parser.parse("cowboy", new CompressedXContent(updatedMapping)); - Exception e = expectThrows(IllegalArgumentException.class, () -> initMapper.merge(updatedMapper.mapping(), false)); + Exception e = expectThrows(IllegalArgumentException.class, () -> initMapper.merge(updatedMapper.mapping())); assertThat(e.getMessage(), containsString("The _parent field's type option can't be changed: [null]->[parent]")); } @@ -317,7 +317,7 @@ public void testMergeMeta() throws IOException { .string(); DocumentMapper updatedMapper = parser.parse("test", new CompressedXContent(updateMapping)); - assertThat(initMapper.merge(updatedMapper.mapping(), true).meta().get("foo"), equalTo("bar")); + assertThat(initMapper.merge(updatedMapper.mapping()).meta().get("foo"), equalTo("bar")); updateMapping = XContentFactory.jsonBuilder() .startObject() @@ -330,6 +330,6 @@ public void testMergeMeta() throws IOException { .string(); updatedMapper = parser.parse("test", new CompressedXContent(updateMapping)); - assertThat(initMapper.merge(updatedMapper.mapping(), true).meta().get("foo"), equalTo("new_bar")); + assertThat(initMapper.merge(updatedMapper.mapping()).meta().get("foo"), equalTo("new_bar")); } } diff 
--git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index b227833f3444d..fd61afc566efc 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -552,7 +552,7 @@ public void testMixTemplateMultiFieldAndMappingReuse() throws Exception { .endArray() .endObject().endObject(); indexService.mapperService().merge("_doc", new CompressedXContent(mappings1.bytes()), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); XContentBuilder json = XContentFactory.jsonBuilder().startObject() .field("field", "foo") @@ -564,7 +564,7 @@ public void testMixTemplateMultiFieldAndMappingReuse() throws Exception { assertNotNull(parsed.dynamicMappingsUpdate()); indexService.mapperService().merge("_doc", new CompressedXContent(parsed.dynamicMappingsUpdate().toString()), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); mapper = indexService.mapperService().documentMapper("_doc"); assertNotNull(mapper.mappers().getMapper("field.raw")); parsed = mapper.parse(source); @@ -591,7 +591,7 @@ public void testMixTemplateMultiFieldMultiTypeAndMappingReuse() throws Exception .endObject() .endArray() .endObject().endObject(); - indexService.mapperService().merge("type1", new CompressedXContent(mappings1.bytes()), MapperService.MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("type1", new CompressedXContent(mappings1.bytes()), MapperService.MergeReason.MAPPING_UPDATE); XContentBuilder mappings2 = jsonBuilder().startObject() .startObject("type2") .startObject("properties") @@ -600,7 +600,7 @@ public void testMixTemplateMultiFieldMultiTypeAndMappingReuse() throws Exception .endObject() .endObject() .endObject().endObject(); - indexService.mapperService().merge("type2", new CompressedXContent(mappings2.bytes()), MapperService.MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("type2", new CompressedXContent(mappings2.bytes()), MapperService.MergeReason.MAPPING_UPDATE); XContentBuilder json = XContentFactory.jsonBuilder().startObject() .field("field", "foo") @@ -611,7 +611,7 @@ public void testMixTemplateMultiFieldMultiTypeAndMappingReuse() throws Exception ParsedDocument parsed = mapper.parse(source); assertNotNull(parsed.dynamicMappingsUpdate()); - indexService.mapperService().merge("type1", new CompressedXContent(parsed.dynamicMappingsUpdate().toString()), MapperService.MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("type1", new CompressedXContent(parsed.dynamicMappingsUpdate().toString()), MapperService.MergeReason.MAPPING_UPDATE); mapper = indexService.mapperService().documentMapper("type1"); assertNotNull(mapper.mappers().getMapper("field.raw")); parsed = mapper.parse(source); @@ -624,7 +624,7 @@ public void testDefaultFloatingPointMappings() throws IOException { .startObject("type") .field("numeric_detection", true) .endObject().endObject().string(); - mapperService.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); DocumentMapper mapper = mapperService.documentMapper("type"); doTestDefaultFloatingPointMappings(mapper, XContentFactory.jsonBuilder()); 
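// [editor's note] Every hunk in this patch applies the same mechanical change:
// the trailing updateAllTypes boolean is dropped from MapperService.merge,
// DocumentMapper.merge and FieldTypeLookup.copyAndAddAll, since a single-type
// index leaves no "other types" to update. A minimal before/after sketch of a
// call site, using only names that already appear in these hunks:
//
//     // before: every caller had to thread an updateAllTypes flag through
//     mapperService.merge("type", mapping, MergeReason.MAPPING_UPDATE, false);
//     // after: the flag is gone; conflict checks always apply to the one type
//     mapperService.merge("type", mapping, MergeReason.MAPPING_UPDATE);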
doTestDefaultFloatingPointMappings(mapper, XContentFactory.yamlBuilder()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java index 67c8435520aef..8c2e6d475414d 100755 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java @@ -199,7 +199,7 @@ protected void parseCreateField(ParseContext context, List<IndexableField> field } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + protected void doMerge(Mapper mergeWith) { // ignore this for now } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FakeStringFieldMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/FakeStringFieldMapper.java index 464b0d9f8406a..efb2023ee6919 100755 --- a/server/src/test/java/org/elasticsearch/index/mapper/FakeStringFieldMapper.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FakeStringFieldMapper.java @@ -156,8 +156,8 @@ protected String contentType() { } @Override - protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { - super.doMerge(mergeWith, updateAllTypes); + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java index 3655f04fcbba1..f075353736672 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java @@ -137,11 +137,11 @@ public void testMergingMappings() throws Exception { .endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapperEnabled = mapperService.merge("type", new CompressedXContent(enabledMapping), MapperService.MergeReason.MAPPING_UPDATE, false); - DocumentMapper mapperDisabled = mapperService.merge("type", new CompressedXContent(disabledMapping), MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapperEnabled = mapperService.merge("type", new CompressedXContent(enabledMapping), MapperService.MergeReason.MAPPING_UPDATE); + DocumentMapper mapperDisabled = mapperService.merge("type", new CompressedXContent(disabledMapping), MapperService.MergeReason.MAPPING_UPDATE); assertFalse(mapperDisabled.metadataMapper(FieldNamesFieldMapper.class).fieldType().isEnabled()); - mapperEnabled = mapperService.merge("type", new CompressedXContent(enabledMapping), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperEnabled = mapperService.merge("type", new CompressedXContent(enabledMapping), MapperService.MergeReason.MAPPING_UPDATE); assertTrue(mapperEnabled.metadataMapper(FieldNamesFieldMapper.class).fieldType().isEnabled()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java index fe885a46b87ef..39753548ee390 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java @@ -39,7 +39,6 @@ public class FieldTypeLookupTests extends ESTestCase { public void testEmpty() { FieldTypeLookup lookup = new FieldTypeLookup(); assertNull(lookup.get("foo")); - assertEquals(Collections.emptySet(),
lookup.getTypes("foo")); Collection<String> names = lookup.simpleMatchToFullName("foo"); assertNotNull(names); assertTrue(names.isEmpty()); @@ -51,7 +50,7 @@ public void testEmpty() { public void testDefaultMapping() { FieldTypeLookup lookup = new FieldTypeLookup(); try { - lookup.copyAndAddAll(MapperService.DEFAULT_MAPPING, Collections.emptyList(), randomBoolean()); + lookup.copyAndAddAll(MapperService.DEFAULT_MAPPING, Collections.emptyList()); fail(); } catch (IllegalArgumentException expected) { assertEquals("Default mappings should not be added to the lookup", expected.getMessage()); @@ -61,15 +60,11 @@ public void testAddNewField() { FieldTypeLookup lookup = new FieldTypeLookup(); MockFieldMapper f = new MockFieldMapper("foo"); - FieldTypeLookup lookup2 = lookup.copyAndAddAll("type", newList(f), randomBoolean()); + FieldTypeLookup lookup2 = lookup.copyAndAddAll("type", newList(f)); assertNull(lookup.get("foo")); assertNull(lookup.get("bar")); assertEquals(f.fieldType(), lookup2.get("foo")); assertNull(lookup.get("bar")); - assertEquals(Collections.emptySet(), lookup.getTypes("foo")); - assertEquals(Collections.emptySet(), lookup.getTypes("bar")); - assertEquals(Collections.singleton("type"), lookup2.getTypes("foo")); - assertEquals(Collections.emptySet(), lookup2.getTypes("bar")); assertEquals(1, size(lookup2.iterator())); } @@ -77,8 +72,8 @@ public void testAddExistingField() { MockFieldMapper f = new MockFieldMapper("foo"); MockFieldMapper f2 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll("type1", newList(f), true); - FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2), true); + lookup = lookup.copyAndAddAll("type1", newList(f)); + FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2)); assertEquals(1, size(lookup2.iterator())); assertSame(f.fieldType(), lookup2.get("foo")); @@ -89,8 +84,8 @@ public void testAddExistingIndexName() { MockFieldMapper f = new MockFieldMapper("foo"); MockFieldMapper f2 = new MockFieldMapper("bar"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll("type1", newList(f), randomBoolean()); - FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2), randomBoolean()); + lookup = lookup.copyAndAddAll("type1", newList(f)); + FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2)); assertSame(f.fieldType(), lookup2.get("foo")); assertSame(f2.fieldType(), lookup2.get("bar")); @@ -102,7 +97,7 @@ public void testAddExistingFullName() { MockFieldMapper f2 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); try { - lookup.copyAndAddAll("type2", newList(f2), randomBoolean()); + lookup.copyAndAddAll("type2", newList(f2)); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("mapper [foo] has different [index_name]")); } @@ -111,20 +106,13 @@ public void testCheckCompatibilityMismatchedTypes() { FieldMapper f1 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll("type", newList(f1), randomBoolean()); + lookup = lookup.copyAndAddAll("type", newList(f1)); OtherFakeFieldType ft2 = new OtherFakeFieldType(); ft2.setName("foo"); FieldMapper f2 = new MockFieldMapper("foo", ft2); try { - lookup.copyAndAddAll("type2", newList(f2), false); - fail("expected type mismatch"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("cannot be
changed from type [faketype] to [otherfaketype]")); - } - // fails even if updateAllTypes == true - try { - lookup.copyAndAddAll("type2", newList(f2), true); + lookup.copyAndAddAll("type2", newList(f2)); fail("expected type mismatch"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("cannot be changed from type [faketype] to [otherfaketype]")); @@ -134,35 +122,21 @@ public void testCheckCompatibilityConflict() { FieldMapper f1 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll("type", newList(f1), randomBoolean()); + lookup = lookup.copyAndAddAll("type", newList(f1)); MappedFieldType ft2 = new MockFieldMapper.FakeFieldType(); ft2.setName("foo"); ft2.setBoost(2.0f); FieldMapper f2 = new MockFieldMapper("foo", ft2); - try { - // different type - lookup.copyAndAddAll("type2", newList(f2), false); - fail("expected conflict"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("to update [boost] across all types")); - } - lookup.copyAndAddAll("type", newList(f2), false); // boost is updateable, so ok since we are implicitly updating all types - lookup.copyAndAddAll("type2", newList(f2), true); // boost is updateable, so ok if forcing + lookup.copyAndAddAll("type", newList(f2)); // boost is updateable, so the update is accepted + lookup.copyAndAddAll("type2", newList(f2)); // and it is accepted for a second type as well // now with a non changeable setting MappedFieldType ft3 = new MockFieldMapper.FakeFieldType(); ft3.setName("foo"); ft3.setStored(true); FieldMapper f3 = new MockFieldMapper("foo", ft3); try { - lookup.copyAndAddAll("type2", newList(f3), false); - fail("expected conflict"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("has different [store] values")); - } - // even with updateAllTypes == true, incompatible - try { - lookup.copyAndAddAll("type2", newList(f3), true); + lookup.copyAndAddAll("type2", newList(f3)); fail("expected conflict"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("has different [store] values")); @@ -173,7 +147,7 @@ public void testSimpleMatchFullNames() { MockFieldMapper f1 = new MockFieldMapper("foo"); MockFieldMapper f2 = new MockFieldMapper("bar"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll("type", newList(f1, f2), randomBoolean()); + lookup = lookup.copyAndAddAll("type", newList(f1, f2)); Collection<String> names = lookup.simpleMatchToFullName("b*"); assertFalse(names.contains("foo")); assertTrue(names.contains("bar")); @@ -182,7 +156,7 @@ public void testIteratorImmutable() { MockFieldMapper f1 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll("type", newList(f1), randomBoolean()); + lookup = lookup.copyAndAddAll("type", newList(f1)); try { Iterator<MappedFieldType> itr = lookup.iterator(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java index e43cfbe1fd1c1..a9a830a4141e9 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java @@ -417,13 +417,13 @@ public void testGeoShapeMapperMerge() throws Exception { .field("precision", "1m").field("tree_levels",
8).field("distance_error_pct", 0.01).field("orientation", "ccw") .endObject().endObject().endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper docMapper = mapperService.merge("type", new CompressedXContent(stage1Mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper docMapper = mapperService.merge("type", new CompressedXContent(stage1Mapping), MapperService.MergeReason.MAPPING_UPDATE); String stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("shape").field("type", "geo_shape").field("tree", "quadtree") .field("strategy", "term").field("precision", "1km").field("tree_levels", 26).field("distance_error_pct", 26) .field("orientation", "cw").endObject().endObject().endObject().endObject().string(); try { - mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("mapper [shape] has different [strategy]")); @@ -449,7 +449,7 @@ public void testGeoShapeMapperMerge() throws Exception { stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("shape").field("type", "geo_shape").field("precision", "1m") .field("tree_levels", 8).field("distance_error_pct", 0.001).field("orientation", "cw").endObject().endObject().endObject().endObject().string(); - docMapper = mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + docMapper = mapperService.merge("type", new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE); fieldMapper = docMapper.mappers().getMapper("shape"); assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java index ec07c4d92be3e..111389336f291 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java @@ -62,7 +62,7 @@ public void testDefaultsMultipleTypes() throws IOException { .put("index.version.created", Version.V_5_6_0) .build(); MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); assertEquals(Collections.emptyList(), Arrays.asList(document.rootDoc().getFields(IdFieldMapper.NAME))); } @@ -70,7 +70,7 @@ public void testDefaultsMultipleTypes() throws IOException { public void testDefaultsSingleType() throws IOException { Settings indexSettings = Settings.EMPTY; MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new 
CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); IndexableField[] fields = document.rootDoc().getFields(IdFieldMapper.NAME); assertEquals(1, fields.length); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java index c17df90b5a21d..49034a0b28785 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java @@ -40,7 +40,7 @@ public void testMergeMultiField() throws Exception { String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json"); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); assertThat(docMapper.mappers().getMapper("name.indexed"), nullValue()); @@ -53,7 +53,7 @@ public void testMergeMultiField() throws Exception { assertThat(f, nullValue()); mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json"); - docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -70,7 +70,7 @@ public void testMergeMultiField() throws Exception { assertThat(f, notNullValue()); mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json"); - docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -81,7 +81,7 @@ public void testMergeMultiField() throws Exception { assertThat(docMapper.mappers().getMapper("name.not_indexed3"), nullValue()); mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json"); - docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -96,7 +96,7 @@ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception { String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json"); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + DocumentMapper 
docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); assertThat(docMapper.mappers().getMapper("name.indexed"), nullValue()); @@ -110,7 +110,7 @@ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json"); - docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -127,7 +127,7 @@ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception { assertThat(f, notNullValue()); mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json"); - docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + docMapper = mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -140,7 +140,7 @@ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json"); try { - mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("mapper [name] has different [index] values")); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index e67b25b051b4e..aa663ed5699a0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -358,7 +358,7 @@ public void testUpdateNormalizer() throws IOException { .startObject("properties").startObject("field") .field("type", "keyword").field("normalizer", "my_lowercase").endObject().endObject() .endObject().endObject().string(); - indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); + indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") @@ -366,9 +366,9 @@ public void testUpdateNormalizer() throws IOException { .endObject().endObject().string(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("type", - new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, randomBoolean())); + new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); assertEquals( - "Mapper for [field] conflicts with existing mapping in other types:\n[mapper [field] has different [normalizer]]", + "Mapper for [field] conflicts with existing mapping:\n[mapper 
[field] has different [normalizer]]", e.getMessage()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 95183cc854a38..6d7665d889563 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -89,15 +89,15 @@ public void testTypes() throws Exception { MapperService mapperService = indexService1.mapperService(); assertEquals(Collections.emptySet(), mapperService.types()); - mapperService.merge("type1", new CompressedXContent("{\"type1\":{}}"), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type1", new CompressedXContent("{\"type1\":{}}"), MapperService.MergeReason.MAPPING_UPDATE); assertNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING)); assertEquals(Collections.singleton("type1"), mapperService.types()); - mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent("{\"_default_\":{}}"), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent("{\"_default_\":{}}"), MapperService.MergeReason.MAPPING_UPDATE); assertNotNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING)); assertEquals(Collections.singleton("type1"), mapperService.types()); - mapperService.merge("type2", new CompressedXContent("{\"type2\":{}}"), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type2", new CompressedXContent("{\"type2\":{}}"), MapperService.MergeReason.MAPPING_UPDATE); assertNotNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING)); assertEquals(new HashSet<>(Arrays.asList("type1", "type2")), mapperService.types()); } @@ -148,11 +148,11 @@ public void testTotalFieldsExceedsLimit() throws Throwable { throw new UncheckedIOException(e); } }; - createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false); + createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE); //set total number of fields to 1 to trigger an exception IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { createIndex("test2", Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 1).build()) - .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false); + .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE); }); assertTrue(e.getMessage(), e.getMessage().contains("Limit of total fields [1] in index [test2] has been exceeded")); } @@ -166,7 +166,7 @@ public void testMappingDepthExceedsLimit() throws Throwable { .endObject().endObject().bytes()); IndexService indexService1 = createIndex("test1", Settings.builder().put(MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getKey(), 1).build()); // no exception - indexService1.mapperService().merge("type", simpleMapping, MergeReason.MAPPING_UPDATE, false); + indexService1.mapperService().merge("type", simpleMapping, MergeReason.MAPPING_UPDATE); CompressedXContent objectMapping = new CompressedXContent(XContentFactory.jsonBuilder().startObject() .startObject("properties") @@ -177,10 +177,10 @@ public void testMappingDepthExceedsLimit() throws Throwable { IndexService indexService2 = 
createIndex("test2"); // no exception - indexService2.mapperService().merge("type", objectMapping, MergeReason.MAPPING_UPDATE, false); + indexService2.mapperService().merge("type", objectMapping, MergeReason.MAPPING_UPDATE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> indexService1.mapperService().merge("type2", objectMapping, MergeReason.MAPPING_UPDATE, false)); + () -> indexService1.mapperService().merge("type2", objectMapping, MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), containsString("Limit of mapping depth [1] in index [test1] has been exceeded")); } @@ -200,14 +200,14 @@ public void testMergeWithMap() throws Throwable { mappings.put(MapperService.DEFAULT_MAPPING, MapperService.parseMapping(xContentRegistry(), "{}")); MapperException e = expectThrows(MapperParsingException.class, - () -> mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, false)); + () -> mapperService.merge(mappings, MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), startsWith("Failed to parse mapping [" + MapperService.DEFAULT_MAPPING + "]: ")); mappings.clear(); mappings.put("type1", MapperService.parseMapping(xContentRegistry(), "{}")); e = expectThrows( MapperParsingException.class, - () -> mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, false)); + () -> mapperService.merge(mappings, MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), startsWith("Failed to parse mapping [type1]: ")); } @@ -223,7 +223,7 @@ public void testMergeParentTypesSame() { Set parentTypes = mapperService.getParentTypes(); Map> mappings = new HashMap<>(); - mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, false); + mapperService.merge(mappings, MergeReason.MAPPING_UPDATE); assertSame(parentTypes, mapperService.getParentTypes()); } @@ -238,10 +238,10 @@ public void testOtherDocumentMappersOnlyUpdatedWhenChangingFieldType() throws IO .endObject() .endObject().endObject().bytes()); - indexService.mapperService().merge("type1", simpleMapping, MergeReason.MAPPING_UPDATE, true); + indexService.mapperService().merge("type1", simpleMapping, MergeReason.MAPPING_UPDATE); DocumentMapper documentMapper = indexService.mapperService().documentMapper("type1"); - indexService.mapperService().merge("type2", simpleMapping, MergeReason.MAPPING_UPDATE, true); + indexService.mapperService().merge("type2", simpleMapping, MergeReason.MAPPING_UPDATE); assertSame(indexService.mapperService().documentMapper("type1"), documentMapper); CompressedXContent normsDisabledMapping = new CompressedXContent(XContentFactory.jsonBuilder().startObject() @@ -252,7 +252,7 @@ public void testOtherDocumentMappersOnlyUpdatedWhenChangingFieldType() throws IO .endObject() .endObject().endObject().bytes()); - indexService.mapperService().merge("type3", normsDisabledMapping, MergeReason.MAPPING_UPDATE, true); + indexService.mapperService().merge("type3", normsDisabledMapping, MergeReason.MAPPING_UPDATE); assertNotSame(indexService.mapperService().documentMapper("type1"), documentMapper); } @@ -307,7 +307,7 @@ public void testIndexSortWithNestedFields() throws IOException { .endObject().endObject().bytes()); invalidNestedException = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("t", nestedFieldMapping, - MergeReason.MAPPING_UPDATE, true)); + MergeReason.MAPPING_UPDATE)); assertThat(invalidNestedException.getMessage(), containsString("cannot have nested fields when index sort is activated")); } @@ -315,18 +315,18 @@ public void testIndexSortWithNestedFields() 
throws IOException { public void testForbidMultipleTypes() throws IOException { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); + mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type2").endObject().endObject().string(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> mapperService.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, randomBoolean())); + () -> mapperService.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), Matchers.startsWith("Rejecting mapping update to [test] as the final mapping would have more than 1 type: ")); } public void testDefaultMappingIsDeprecated() throws IOException { String mapping = XContentFactory.jsonBuilder().startObject().startObject("_default_").endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - mapperService.merge("_default_", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); + mapperService.merge("_default_", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertWarnings("[_default_] mapping is deprecated since it is not useful anymore now that indexes " + "cannot have more than one type"); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index 8da4b302a6f8f..bbcad5b7203a2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -444,23 +444,23 @@ public void testLimitOfNestedFieldsPerIndex() throws Exception { }; // default limit allows at least two nested fields - createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false); + createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE); // explicitly setting limit to 0 prevents nested fields Exception e = expectThrows(IllegalArgumentException.class, () -> createIndex("test2", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build()) - .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false)); + .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), containsString("Limit of nested fields [0] in index [test2] has been exceeded")); // setting limit to 1 with 2 nested fields fails e = expectThrows(IllegalArgumentException.class, () -> createIndex("test3", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 1).build()) - .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false)); + .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), containsString("Limit of nested fields [1] in index [test3] has been 
exceeded")); // do not check nested fields limit if mapping is not updated createIndex("test4", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build()) - .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_RECOVERY, false); + .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_RECOVERY); } public void testLimitOfNestedFieldsWithMultiTypePerIndex() throws Exception { @@ -479,19 +479,19 @@ public void testLimitOfNestedFieldsWithMultiTypePerIndex() throws Exception { MapperService mapperService = createIndex("test4", Settings.builder() .put("index.version.created", Version.V_5_6_0) .put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 2).build()).mapperService(); - mapperService.merge("type1", new CompressedXContent(mapping.apply("type1")), MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type1", new CompressedXContent(mapping.apply("type1")), MergeReason.MAPPING_UPDATE); // merging same fields, but different type is ok - mapperService.merge("type2", new CompressedXContent(mapping.apply("type2")), MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type2", new CompressedXContent(mapping.apply("type2")), MergeReason.MAPPING_UPDATE); // adding new fields from different type is not ok String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type3").startObject("properties").startObject("nested3") .field("type", "nested").startObject("properties").endObject().endObject().endObject().endObject().endObject().string(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - mapperService.merge("type3", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false)); + mapperService.merge("type3", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), containsString("Limit of nested fields [2] in index [test4] has been exceeded")); // do not check nested fields limit if mapping is not updated createIndex("test5", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build()) - .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_RECOVERY, false); + .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_RECOVERY); } public void testParentObjectMapperAreNested() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index bba2007285bcc..ea8f63345a183 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -181,13 +181,13 @@ public void testMerge() throws IOException { .endObject() .endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertNull(mapper.root().dynamic()); String update = XContentFactory.jsonBuilder().startObject() .startObject("type") .field("dynamic", "strict") .endObject().endObject().string(); - mapper = mapperService.merge("type", new CompressedXContent(update), 
MergeReason.MAPPING_UPDATE, false); + mapper = mapperService.merge("type", new CompressedXContent(update), MergeReason.MAPPING_UPDATE); assertEquals(Dynamic.STRICT, mapper.root().dynamic()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java index d21827ee18cea..dee554449bcc4 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java @@ -78,8 +78,8 @@ public void testJoinFieldSet() throws Exception { .startObject("_parent").field("type", "parent_type").endObject() .endObject().endObject().string(); IndexService indexService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build()); - indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE, false); - indexService.mapperService().merge("child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE); + indexService.mapperService().merge("child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE); // Indexing parent doc: DocumentMapper parentDocMapper = indexService.mapperService().documentMapper("parent_type"); @@ -121,7 +121,7 @@ public void testNoParentNullFieldCreatedIfNoParentSpecified() throws Exception { .startObject("properties") .endObject() .endObject().endObject(); - mapperService.merge("some_type", new CompressedXContent(mappingSource.string()), MergeReason.MAPPING_UPDATE, false); + mapperService.merge("some_type", new CompressedXContent(mappingSource.string()), MergeReason.MAPPING_UPDATE); Set<String> allFields = new HashSet<>(mapperService.simpleMatchToIndexNames("*")); assertTrue(allFields.contains("_parent")); assertFalse(allFields.contains("_parent#null")); @@ -146,15 +15,15 @@ public void testUpdateEagerGlobalOrds() throws IOException { .startObject("_parent").field("type", "parent_type").endObject() .endObject().endObject().string(); IndexService indexService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build()); - indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE, false); - indexService.mapperService().merge("child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE); + indexService.mapperService().merge("child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE); assertTrue(indexService.mapperService().documentMapper("child_type").parentFieldMapper().fieldType().eagerGlobalOrdinals()); String childMappingUpdate = XContentFactory.jsonBuilder().startObject().startObject("child_type") .startObject("_parent").field("type", "parent_type").field("eager_global_ordinals", false).endObject() .endObject().endObject().string(); - indexService.mapperService().merge("child_type", new CompressedXContent(childMappingUpdate), MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("child_type", new CompressedXContent(childMappingUpdate), MergeReason.MAPPING_UPDATE);
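// [editor's note] Several older tests touched above (for example the
// testMergeObjectAndNested and testGeoShapeMapperMerge hunks) still assert
// failures with the try { ... fail(); } catch (IllegalArgumentException e)
// idiom. A sketch of the equivalent expectThrows form, which newer hunks in
// this same patch already use; the message and call shapes below are copied
// from the DocumentMapperMergeTests hunk earlier in this diff:
//
//     IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
//         () -> objectMapper.merge(nestedMapper.mapping()));
//     assertThat(e.getMessage(),
//         containsString("object mapping [obj] can't be changed from non-nested to nested"));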
assertFalse(indexService.mapperService().documentMapper("child_type").parentFieldMapper().fieldType().eagerGlobalOrdinals()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java index 72195fbd954fc..c93d968b448fe 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java @@ -61,7 +61,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws DOUBLE_RANGE_FIELD_NAME, "type=double_range", DATE_RANGE_FIELD_NAME, "type=date_range", IP_RANGE_FIELD_NAME, "type=ip_range" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java index a76d5d01316fb..d55b90573e9cd 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RootObjectMapperTests.java @@ -36,7 +36,7 @@ public void testNumericDetection() throws Exception { .endObject() .endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); // update with a different explicit value @@ -46,7 +46,7 @@ public void testNumericDetection() throws Exception { .field("numeric_detection", true) .endObject() .endObject().string(); - mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false); + mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); assertEquals(mapping2, mapper.mappingSource().toString()); // update with an implicit value: no change @@ -55,7 +55,7 @@ public void testNumericDetection() throws Exception { .startObject("type") .endObject() .endObject().string(); - mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false); + mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); assertEquals(mapping2, mapper.mappingSource().toString()); } @@ -67,7 +67,7 @@ public void testDateDetection() throws Exception { .endObject() .endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); // update with a different explicit value @@ -77,7 +77,7 @@ public void testDateDetection() throws Exception { .field("date_detection", false) .endObject() .endObject().string(); - mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false); + mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); 
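// [editor's note] With updateAllTypes gone, MergeReason is the remaining knob
// on merge(). The NestedObjectMapperTests hunks above show the distinction
// this patch preserves: MAPPING_UPDATE enforces limits such as
// index.mapping.nested_fields.limit, while MAPPING_RECOVERY skips those
// checks because the mapping was already validated when it was first
// accepted. A sketch, where nestedMapping stands for the CompressedXContent
// built in those tests:
//
//     // rejected: "Limit of nested fields [0] in index [...] has been exceeded"
//     mapperService.merge("type", nestedMapping, MergeReason.MAPPING_UPDATE);
//     // accepted: recovery replays an already-validated mapping unchecked
//     mapperService.merge("type", nestedMapping, MergeReason.MAPPING_RECOVERY);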
assertEquals(mapping2, mapper.mappingSource().toString()); // update with an implicit value: no change @@ -86,7 +86,7 @@ public void testDateDetection() throws Exception { .startObject("type") .endObject() .endObject().string(); - mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false); + mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); assertEquals(mapping2, mapper.mappingSource().toString()); } @@ -98,7 +98,7 @@ public void testDateFormatters() throws Exception { .endObject() .endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); // no update if formatters are not set explicitly @@ -107,7 +107,7 @@ public void testDateFormatters() throws Exception { .startObject("type") .endObject() .endObject().string(); - mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false); + mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); String mapping3 = XContentFactory.jsonBuilder() @@ -116,7 +116,7 @@ public void testDateFormatters() throws Exception { .field("dynamic_date_formats", Arrays.asList()) .endObject() .endObject().string(); - mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false); + mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); assertEquals(mapping3, mapper.mappingSource().toString()); } @@ -137,7 +137,7 @@ public void testDynamicTemplates() throws Exception { .endObject() .endObject().string(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); // no update if templates are not set explicitly @@ -146,7 +146,7 @@ public void testDynamicTemplates() throws Exception { .startObject("type") .endObject() .endObject().string(); - mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false); + mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); String mapping3 = XContentFactory.jsonBuilder() @@ -155,7 +155,7 @@ public void testDynamicTemplates() throws Exception { .field("dynamic_templates", Arrays.asList()) .endObject() .endObject().string(); - mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false); + mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); assertEquals(mapping3, mapper.mappingSource().toString()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java index 85017cb35cd39..f40229e9ebe78 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java @@ -164,7 +164,7 @@ public void testDefaultMappingAndNoMappingWithMapperService() throws Exception { Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_6_0).build(); MapperService mapperService = createIndex("test", settings).mapperService(); - mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(defaultMapping), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(defaultMapping), MapperService.MergeReason.MAPPING_UPDATE); DocumentMapper mapper = mapperService.documentMapperWithAutoCreate("my_type").getDocumentMapper(); assertThat(mapper.type(), equalTo("my_type")); @@ -178,12 +178,12 @@ public void testDefaultMappingAndWithMappingOverrideWithMapperService() throws E Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_6_0).build(); MapperService mapperService = createIndex("test", settings).mapperService(); - mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(defaultMapping), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(defaultMapping), MapperService.MergeReason.MAPPING_UPDATE); String mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type") .startObject("_source").field("enabled", true).endObject() .endObject().endObject().string(); - mapperService.merge("my_type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("my_type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); DocumentMapper mapper = mapperService.documentMapper("my_type"); assertThat(mapper.type(), equalTo("my_type")); @@ -194,10 +194,10 @@ void assertConflicts(String mapping1, String mapping2, DocumentMapperParser pars DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping1)); docMapper = parser.parse("type", docMapper.mappingSource()); if (conflicts.length == 0) { - docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping(), false); + docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping()); } else { try { - docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping(), false); + docMapper.merge(parser.parse("type", new CompressedXContent(mapping2)).mapping()); fail(); } catch (IllegalArgumentException e) { for (String conflict : conflicts) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/StoredNumericValuesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/StoredNumericValuesTests.java index 438ccd5fa8688..2ff2bda01df25 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/StoredNumericValuesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/StoredNumericValuesTests.java @@ -62,7 +62,7 @@ public void testBytesAndNumericRepresentation() throws Exception { .endObject() .string(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); ParsedDocument doc = 
mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 3f0f4a87792d3..f5b83d1bd6eb1 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -205,7 +205,7 @@ public void testDefaultPositionIncrementGap() throws IOException { .endObject().endObject().string(); DocumentMapper mapper = indexService.mapperService().merge("type", - new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); @@ -247,7 +247,7 @@ public void testPositionIncrementGap() throws IOException { .endObject().endObject().string(); DocumentMapper mapper = indexService.mapperService().merge("type", - new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java index d941b5a0469b9..4687a3a24ef74 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java @@ -66,7 +66,7 @@ public void testDocValues(boolean singleType) throws IOException { .put("index.version.created", Version.V_5_6_0) .build(); MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); Directory dir = newDirectory(); @@ -93,7 +93,7 @@ public void testDefaultsMultipleTypes() throws IOException { .put("index.version.created", Version.V_5_6_0) .build(); MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); IndexableField[] fields = document.rootDoc().getFields(TypeFieldMapper.NAME); assertEquals(IndexOptions.DOCS, fields[0].fieldType().indexOptions()); @@ -103,7 +103,7 @@ public void testDefaultsMultipleTypes() throws IOException { public void testDefaultsSingleType() throws IOException { Settings indexSettings = Settings.EMPTY; MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); ParsedDocument document = 
mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); assertEquals(Collections.emptyList(), Arrays.asList(document.rootDoc().getFields(TypeFieldMapper.NAME))); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java index c5816de2e1920..4128cec082e0a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java @@ -48,7 +48,7 @@ public void testDefaultsMultipleTypes() throws IOException { .put("index.version.created", Version.V_5_6_0) .build(); MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); IndexableField[] fields = document.rootDoc().getFields(UidFieldMapper.NAME); assertEquals(1, fields.length); @@ -60,7 +60,7 @@ public void testDefaultsMultipleTypes() throws IOException { public void testDefaultsSingleType() throws IOException { Settings indexSettings = Settings.EMPTY; MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); assertEquals(Collections.emptyList(), Arrays.asList(document.rootDoc().getFields(UidFieldMapper.NAME))); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java index c6a1eae036ada..c21fffc1bb167 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java @@ -76,7 +76,7 @@ protected void testConflictWhileMergingAndMappingUnchanged(XContentBuilder mappi CompressedXContent mappingBeforeUpdate = indexService.mapperService().documentMapper("type").mappingSource(); // simulate like in MetaDataMappingService#putMapping try { - indexService.mapperService().merge("type", new CompressedXContent(mappingUpdate.bytes()), MapperService.MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("type", new CompressedXContent(mappingUpdate.bytes()), MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { // expected @@ -97,14 +97,14 @@ public void testConflictSameType() throws Exception { .endObject().endObject().endObject(); try { - mapperService.merge("type", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("mapper [foo] cannot be changed from type [long] to [double]")); } try { - 
mapperService.merge("type", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("mapper [foo] cannot be changed from type [long] to [double]")); @@ -125,7 +125,7 @@ public void testConflictNewType() throws Exception { .endObject().endObject().endObject(); try { - mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { // expected @@ -133,7 +133,7 @@ public void testConflictNewType() throws Exception { } try { - mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { // expected @@ -154,15 +154,15 @@ public void testConflictNewTypeUpdate() throws Exception { MapperService mapperService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build()).mapperService(); - mapperService.merge("type1", new CompressedXContent(mapping1.string()), MapperService.MergeReason.MAPPING_UPDATE, false); - mapperService.merge("type2", new CompressedXContent(mapping2.string()), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type1", new CompressedXContent(mapping1.string()), MapperService.MergeReason.MAPPING_UPDATE); + mapperService.merge("type2", new CompressedXContent(mapping2.string()), MapperService.MergeReason.MAPPING_UPDATE); XContentBuilder update = XContentFactory.jsonBuilder().startObject().startObject("type2") .startObject("properties").startObject("foo").field("type", "double").endObject() .endObject().endObject().endObject(); try { - mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { // expected @@ -170,7 +170,7 @@ public void testConflictNewTypeUpdate() throws Exception { } try { - mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type2", new CompressedXContent(update.string()), MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { // expected @@ -190,14 +190,14 @@ public void testReuseMetaField() throws IOException { MapperService mapperService = createIndex("test", Settings.builder().build()).mapperService(); try { - mapperService.merge("type", new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type", new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]")); } try { - mapperService.merge("type", new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false); + mapperService.merge("type", new CompressedXContent(mapping.string()), 
MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]")); @@ -223,16 +223,16 @@ public void testRejectFieldDefinedTwice() throws IOException { .endObject().endObject().string(); MapperService mapperService1 = createIndex("test1").mapperService(); - mapperService1.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE, false); + mapperService1.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> mapperService1.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false)); + () -> mapperService1.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), equalTo("[foo] is defined as a field in mapping [type2" + "] but this name is already used for an object in other types")); MapperService mapperService2 = createIndex("test2").mapperService(); - mapperService2.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false); + mapperService2.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); e = expectThrows(IllegalArgumentException.class, - () -> mapperService2.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE, false)); + () -> mapperService2.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), equalTo("[foo] is defined as an object in mapping [type1" + "] but this name is already used for a field in other types")); } diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index 8849c91ddb368..00a9753b6f874 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -355,7 +355,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws "string_boost", "type=text,boost=4", "string_no_pos", "type=text,index_options=docs").string() ), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); } public void testMatchPhrasePrefixWithBoost() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index 31a749161074a..c199bf02dd264 100644 --- a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -67,7 +67,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws OBJECT_FIELD_NAME, "type=object", GEO_POINT_FIELD_NAME, "type=geo_point", "nested1", "type=nested" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); } /** diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 1b8cef1ab5355..3093031fbca96 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -835,7 
+835,7 @@ public void testDisabledFieldNamesField() throws Exception { PutMappingRequest.buildFromSimplifiedDef("_doc", "foo", "type=text", "_field_names", "enabled=false").string()), - MapperService.MergeReason.MAPPING_UPDATE, true); + MapperService.MergeReason.MAPPING_UPDATE); try { QueryStringQueryBuilder queryBuilder = new QueryStringQueryBuilder("foo:*"); Query query = queryBuilder.toQuery(context); @@ -848,7 +848,7 @@ public void testDisabledFieldNamesField() throws Exception { PutMappingRequest.buildFromSimplifiedDef("_doc", "foo", "type=text", "_field_names", "enabled=true").string()), - MapperService.MergeReason.MAPPING_UPDATE, true); + MapperService.MergeReason.MAPPING_UPDATE); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java b/server/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java index 1a28441dc170a..95eb8a2d6325f 100644 --- a/server/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java @@ -52,7 +52,7 @@ public void testRewriteMissingReader() throws Exception { .endObject() .endObject().endObject().string(); indexService.mapperService().merge("type", - new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); QueryRewriteContext context = new QueryShardContext(0, indexService.getIndexSettings(), null, null, indexService.mapperService(), null, null, xContentRegistry(), writableRegistry(), null, null, null, null); RangeQueryBuilder range = new RangeQueryBuilder("foo"); @@ -70,7 +70,7 @@ public void testRewriteEmptyReader() throws Exception { .endObject() .endObject().endObject().string(); indexService.mapperService().merge("type", - new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); + new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); IndexReader reader = new MultiReader(); QueryRewriteContext context = new QueryShardContext(0, indexService.getIndexSettings(), null, null, indexService.mapperService(), null, null, xContentRegistry(), writableRegistry(), null, reader, null, null); diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java index 61336028779d9..91de39ecffff4 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java @@ -76,7 +76,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws String docType = "_doc"; mapperService.merge(docType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(docType, "m_s_m", "type=long" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java index 1f033b5fb4187..ffd6c347e21e9 100644 --- a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java @@ -87,7 +87,7 @@ public void setup() throws IOException { " }\n" + " }\n" + "}"; - mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, 
false); + mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); this.indexService = indexService; } diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index 9c501f6c02c05..029950fda54b9 100644 --- a/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -353,7 +353,6 @@ public void testUpdateMappingOnAllTypes() { assertAcked(client().admin().indices().preparePutMapping("index") .setType("type1") - .setUpdateAllTypes(true) .setSource("f", "type=keyword,null_value=n/a") .get()); diff --git a/server/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java b/server/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java index cd15c9668348b..36d672e40f278 100644 --- a/server/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java +++ b/server/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java @@ -129,7 +129,6 @@ public void testChangingEagerParentFieldLoadingAtRuntime() throws Exception { PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("child") .setSource(childMapping(true)) - .setUpdateAllTypes(true) .get(); assertAcked(putMappingResponse); Index test = resolveIndex("test"); diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java index 6834d124c499a..4bdd9b84ec463 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java @@ -80,7 +80,7 @@ private void applyOperation(Engine engine, Engine.Operation operation) throws IO Engine.Index engineIndex = (Engine.Index) operation; Mapping update = engineIndex.parsedDoc().dynamicMappingsUpdate(); if (engineIndex.parsedDoc().dynamicMappingsUpdate() != null) { - recoveredTypes.compute(engineIndex.type(), (k, mapping) -> mapping == null ? update : mapping.merge(update, false)); + recoveredTypes.compute(engineIndex.type(), (k, mapping) -> mapping == null ? update : mapping.merge(update)); } engine.index(engineIndex); break; diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index 2b6f4c38a902b..818594d3bf7fd 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -42,12 +42,12 @@ public abstract class FieldTypeTestCase extends ESTestCase { public abstract static class Modifier { /** The name of the property that is being modified. Used in test failure messages. */ public final String property; - /** true if this modifier only makes types incompatible in strict mode, false otherwise */ - public final boolean strictOnly; + /** True if this property is updateable, false otherwise. 
*/ + public final boolean updateable; - public Modifier(String property, boolean strictOnly) { + public Modifier(String property, boolean updateable) { this.property = property; - this.strictOnly = strictOnly; + this.updateable = updateable; } /** Modifies the property */ @@ -189,16 +189,16 @@ protected void assertFieldTypeNotEquals(String property, MappedFieldType ft1, Ma } } - protected void assertCompatible(String msg, MappedFieldType ft1, MappedFieldType ft2, boolean strict) { + protected void assertCompatible(String msg, MappedFieldType ft1, MappedFieldType ft2) { List conflicts = new ArrayList<>(); - ft1.checkCompatibility(ft2, conflicts, strict); + ft1.checkCompatibility(ft2, conflicts); assertTrue("Found conflicts for " + msg + ": " + conflicts, conflicts.isEmpty()); } - protected void assertNotCompatible(String msg, MappedFieldType ft1, MappedFieldType ft2, boolean strict, String... messages) { + protected void assertNotCompatible(String msg, MappedFieldType ft1, MappedFieldType ft2, String... messages) { assert messages.length != 0; List conflicts = new ArrayList<>(); - ft1.checkCompatibility(ft2, conflicts, strict); + ft1.checkCompatibility(ft2, conflicts); for (String message : messages) { boolean found = false; for (String conflict : conflicts) { @@ -279,7 +279,7 @@ public void testFreeze() { public void testCheckTypeName() { final MappedFieldType fieldType = createNamedDefaultFieldType(); List conflicts = new ArrayList<>(); - fieldType.checkCompatibility(fieldType, conflicts, random().nextBoolean()); // no exception + fieldType.checkCompatibility(fieldType, conflicts); // no exception assertTrue(conflicts.toString(), conflicts.isEmpty()); MappedFieldType bogus = new TermBasedFieldType() { @@ -291,7 +291,7 @@ public void testCheckTypeName() { public Query existsQuery(QueryShardContext context) { return null; } }; try { - fieldType.checkCompatibility(bogus, conflicts, random().nextBoolean()); + fieldType.checkCompatibility(bogus, conflicts); fail("expected bad types exception"); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("Type names equal")); @@ -307,7 +307,7 @@ public void testCheckTypeName() { public Query existsQuery(QueryShardContext context) { return null; } }; try { - fieldType.checkCompatibility(other, conflicts, random().nextBoolean()); + fieldType.checkCompatibility(other, conflicts); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage(), e.getMessage().contains("cannot be changed from type")); @@ -318,32 +318,22 @@ public void testCheckTypeName() { public void testCheckCompatibility() { MappedFieldType ft1 = createNamedDefaultFieldType(); MappedFieldType ft2 = createNamedDefaultFieldType(); - assertCompatible("default", ft1, ft2, true); - assertCompatible("default", ft1, ft2, false); - assertCompatible("default", ft2, ft1, true); - assertCompatible("default", ft2, ft1, false); + assertCompatible("default", ft1, ft2); + assertCompatible("default", ft2, ft1); for (Modifier modifier : modifiers) { ft1 = createNamedDefaultFieldType(); ft2 = createNamedDefaultFieldType(); modifier.normalizeOther(ft1); modifier.modify(ft2); - if (modifier.strictOnly) { - String[] conflicts = { - "mapper [foo] is used by multiple types", - "update [" + modifier.property + "]" - }; - assertCompatible(modifier.property, ft1, ft2, false); - assertNotCompatible(modifier.property, ft1, ft2, true, conflicts); - assertCompatible(modifier.property, ft2, ft1, false); // always symmetric when not strict - assertNotCompatible(modifier.property, ft2, ft1, 
true, conflicts); + if (modifier.updateable) { + assertCompatible(modifier.property, ft1, ft2); + assertCompatible(modifier.property, ft2, ft1); // always symmetric } else { // never compatible String conflict = "different [" + modifier.property + "]"; - assertNotCompatible(modifier.property, ft1, ft2, true, conflict); - assertNotCompatible(modifier.property, ft1, ft2, false, conflict); - assertNotCompatible(modifier.property, ft2, ft1, true, conflict); - assertNotCompatible(modifier.property, ft2, ft1, false, conflict); + assertNotCompatible(modifier.property, ft1, ft2, conflict); + assertNotCompatible(modifier.property, ft2, ft1, conflict); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index cd55c1126eb1c..ca9d1728d92bb 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -287,7 +287,7 @@ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMe IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), indexSettings.getSettings(), "index"); - mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true); + mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY); SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); final IndexEventListener indexEventListener = new IndexEventListener() { }; @@ -579,7 +579,7 @@ protected Consumer getMappingUpdater(IndexShard shard, String type) { protected void updateMappings(IndexShard shard, IndexMetaData indexMetadata) { shard.indexSettings().updateIndexMetaData(indexMetadata); - shard.mapperService().merge(indexMetadata, MapperService.MergeReason.MAPPING_UPDATE, true); + shard.mapperService().merge(indexMetadata, MapperService.MergeReason.MAPPING_UPDATE); } protected Engine.DeleteResult deleteDoc(IndexShard shard, String type, String id) throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index f833e3c61002c..4d30bddb3a45f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -1083,12 +1083,12 @@ public void onRemoval(ShardId shardId, Accountable accountable) { OBJECT_FIELD_NAME, "type=object", GEO_POINT_FIELD_NAME, "type=geo_point", GEO_SHAPE_FIELD_NAME, "type=geo_shape" - ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); + ).string()), MapperService.MergeReason.MAPPING_UPDATE); // also add mappings for two inner fields in the object field mapperService.merge(type, new CompressedXContent("{\"properties\":{\"" + OBJECT_FIELD_NAME + "\":{\"type\":\"object\"," + "\"properties\":{\"" + DATE_FIELD_NAME + "\":{\"type\":\"date\"},\"" + INT_FIELD_NAME + "\":{\"type\":\"integer\"}}}}}"), - MapperService.MergeReason.MAPPING_UPDATE, false); + MapperService.MergeReason.MAPPING_UPDATE); } testCase.initializeAdditionalMappings(mapperService); } From 452c36c5523b59ac3ce1e951bde63fa528e7a913 Mon Sep 17 00:00:00 2001 From: kel
Date: Mon, 22 Jan 2018 19:42:56 +0800 Subject: [PATCH 77/94] Calculate sum in Kahan summation algorithm in aggregations (#27807) (#27848) --- .../metrics/avg/AvgAggregator.java | 24 +++++-- .../aggregations/metrics/avg/InternalAvg.java | 15 ++++- .../metrics/stats/InternalStats.java | 13 +++- .../metrics/stats/StatsAggregator.java | 23 +++++-- .../extended/ExtendedStatsAggregator.java | 41 ++++++++++-- .../stats/extended/InternalExtendedStats.java | 15 ++++- .../aggregations/metrics/sum/InternalSum.java | 15 ++++- .../metrics/sum/SumAggregator.java | 24 +++++-- .../metrics/ExtendedStatsAggregatorTests.java | 65 +++++++++++++++++++ .../metrics/InternalExtendedStatsTests.java | 42 ++++++++++++ .../metrics/InternalStatsTests.java | 52 ++++++++++++++- .../metrics/InternalSumTests.java | 45 ++++++++++++- .../metrics/StatsAggregatorTests.java | 63 ++++++++++++++++++ .../metrics/SumAggregatorTests.java | 54 ++++++++++++++- .../metrics/avg/AvgAggregatorTests.java | 61 +++++++++++++++-- .../metrics/avg/InternalAvgTests.java | 41 ++++++++++++ .../test/InternalAggregationTestCase.java | 1 + 17 files changed, 557 insertions(+), 37 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java index 0decfa05575e4..27890efbff182 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java @@ -44,6 +44,7 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue { LongArray counts; DoubleArray sums; + DoubleArray compensations; DocValueFormat format; public AvgAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext context, @@ -55,6 +56,7 @@ public AvgAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFor final BigArrays bigArrays = context.bigArrays(); counts = bigArrays.newLongArray(1, true); sums = bigArrays.newDoubleArray(1, true); + compensations = bigArrays.newDoubleArray(1, true); } } @@ -76,15 +78,29 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, public void collect(int doc, long bucket) throws IOException { counts = bigArrays.grow(counts, bucket + 1); sums = bigArrays.grow(sums, bucket + 1); + compensations = bigArrays.grow(compensations, bucket + 1); if (values.advanceExact(doc)) { final int valueCount = values.docValueCount(); counts.increment(bucket, valueCount); - double sum = 0; + // Compute the sum of double values with Kahan summation algorithm which is more + // accurate than naive summation. 
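The collector here replaces the naive "sum += value" with a compensated update. For reference, a minimal standalone sketch of the same Kahan step, stripped of the BigArrays and per-bucket bookkeeping; the KahanSketch class and kahanSum method are illustrative names, not part of this patch:

final class KahanSketch {
    // Kahan (compensated) summation: keep a compensation term holding the
    // low-order bits lost when a small value is added to a large sum, and
    // fold it back into the next addition.
    static double kahanSum(double[] values) {
        double sum = 0;
        double compensation = 0;
        for (double value : values) {
            if (Double.isFinite(value) == false) {
                // NaN/Infinity cannot be compensated; accumulate naively so
                // special values propagate exactly as in a plain loop.
                sum += value;
            } else if (Double.isFinite(sum)) {
                double corrected = value - compensation;
                double newSum = sum + corrected;
                compensation = (newSum - sum) - corrected; // bits the add rounded away
                sum = newSum;
            }
        }
        return sum;
    }
}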
+ double sum = sums.get(bucket); + double compensation = compensations.get(bucket); + for (int i = 0; i < valueCount; i++) { - sum += values.nextValue(); + double value = values.nextValue(); + if (Double.isFinite(value) == false) { + sum += value; + } else if (Double.isFinite(sum)) { + double corrected = value - compensation; + double newSum = sum + corrected; + compensation = (newSum - sum) - corrected; + sum = newSum; + } } - sums.increment(bucket, sum); + sums.set(bucket, sum); + compensations.set(bucket, compensation); } } }; @@ -113,7 +129,7 @@ public InternalAggregation buildEmptyAggregation() { @Override public void doClose() { - Releasables.close(counts, sums); + Releasables.close(counts, sums, compensations); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java index 7fdcc6396b8c1..c30574c576de8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java @@ -91,9 +91,20 @@ public String getWriteableName() { public InternalAvg doReduce(List aggregations, ReduceContext reduceContext) { long count = 0; double sum = 0; + double compensation = 0; + // Compute the sum of double values with Kahan summation algorithm which is more + // accurate than naive summation. for (InternalAggregation aggregation : aggregations) { - count += ((InternalAvg) aggregation).count; - sum += ((InternalAvg) aggregation).sum; + InternalAvg avg = (InternalAvg) aggregation; + count += avg.count; + if (Double.isFinite(avg.sum) == false) { + sum += avg.sum; + } else if (Double.isFinite(sum)) { + double corrected = avg.sum - compensation; + double newSum = sum + corrected; + compensation = (newSum - sum) - corrected; + sum = newSum; + } } return new InternalAvg(getName(), sum, count, format, pipelineAggregators(), getMetaData()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java index 6d7ae0cddc0df..19f74cd72c821 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java @@ -152,12 +152,23 @@ public InternalStats doReduce(List aggregations, ReduceCont double min = Double.POSITIVE_INFINITY; double max = Double.NEGATIVE_INFINITY; double sum = 0; + double compensation = 0; for (InternalAggregation aggregation : aggregations) { InternalStats stats = (InternalStats) aggregation; count += stats.getCount(); min = Math.min(min, stats.getMin()); max = Math.max(max, stats.getMax()); - sum += stats.getSum(); + // Compute the sum of double values with Kahan summation algorithm which is more + // accurate than naive summation. 
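Note that only the per-shard partial sums travel over the wire: the compensation term is local to each collector and to doReduce, which starts again from zero when combining shard results. A sketch of that two-level flow, reusing the hypothetical kahanSum from the sketch above (reduceShards is likewise an illustrative name):

// Two-level compensated summation: each shard sums its own values, then the
// coordinating node sums the per-shard partials, each pass with a fresh
// compensation term.
static double reduceShards(double[][] perShardValues) {
    double[] partialSums = new double[perShardValues.length];
    for (int i = 0; i < perShardValues.length; i++) {
        partialSums[i] = KahanSketch.kahanSum(perShardValues[i]); // collect pass
    }
    return KahanSketch.kahanSum(partialSums); // reduce pass
}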
+ double value = stats.getSum(); + if (Double.isFinite(value) == false) { + sum += value; + } else if (Double.isFinite(sum)) { + double corrected = value - compensation; + double newSum = sum + corrected; + compensation = (newSum - sum) - corrected; + sum = newSum; + } } return new InternalStats(name, count, sum, min, max, format, pipelineAggregators(), getMetaData()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java index cca176bd1ad5f..321e9e10f0fe8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java @@ -45,6 +45,7 @@ public class StatsAggregator extends NumericMetricsAggregator.MultiValue { LongArray counts; DoubleArray sums; + DoubleArray compensations; DoubleArray mins; DoubleArray maxes; @@ -59,6 +60,7 @@ public StatsAggregator(String name, ValuesSource.Numeric valuesSource, DocValueF final BigArrays bigArrays = context.bigArrays(); counts = bigArrays.newLongArray(1, true); sums = bigArrays.newDoubleArray(1, true); + compensations = bigArrays.newDoubleArray(1, true); mins = bigArrays.newDoubleArray(1, false); mins.fill(0, mins.size(), Double.POSITIVE_INFINITY); maxes = bigArrays.newDoubleArray(1, false); @@ -88,6 +90,7 @@ public void collect(int doc, long bucket) throws IOException { final long overSize = BigArrays.overSize(bucket + 1); counts = bigArrays.resize(counts, overSize); sums = bigArrays.resize(sums, overSize); + compensations = bigArrays.resize(compensations, overSize); mins = bigArrays.resize(mins, overSize); maxes = bigArrays.resize(maxes, overSize); mins.fill(from, overSize, Double.POSITIVE_INFINITY); @@ -97,16 +100,28 @@ public void collect(int doc, long bucket) throws IOException { if (values.advanceExact(doc)) { final int valuesCount = values.docValueCount(); counts.increment(bucket, valuesCount); - double sum = 0; double min = mins.get(bucket); double max = maxes.get(bucket); + // Compute the sum of double values with Kahan summation algorithm which is more + // accurate than naive summation. 
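For intuition about what the compensation buys, a quick demonstration against the kahanSum sketch above; the exact naive error varies with the input, but it is reliably visible, while the compensated result is exact or within a rounding error of the final value:

// Summing 0.1 ten million times: the naive loop drifts away from the
// product n * 0.1, the compensated loop does not.
public static void main(String[] args) {
    int n = 10_000_000;
    double naive = 0;
    for (int i = 0; i < n; i++) {
        naive += 0.1;
    }
    double[] values = new double[n];
    java.util.Arrays.fill(values, 0.1);
    double compensated = KahanSketch.kahanSum(values);
    System.out.println(Math.abs(naive - n * 0.1));       // visibly nonzero
    System.out.println(Math.abs(compensated - n * 0.1)); // zero or tiny
}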
+ double sum = sums.get(bucket); + double compensation = compensations.get(bucket); + for (int i = 0; i < valuesCount; i++) { double value = values.nextValue(); - sum += value; + if (Double.isFinite(value) == false) { + sum += value; + } else if (Double.isFinite(sum)) { + double corrected = value - compensation; + double newSum = sum + corrected; + compensation = (newSum - sum) - corrected; + sum = newSum; + } min = Math.min(min, value); max = Math.max(max, value); } - sums.increment(bucket, sum); + sums.set(bucket, sum); + compensations.set(bucket, compensation); mins.set(bucket, min); maxes.set(bucket, max); } @@ -164,6 +179,6 @@ public InternalAggregation buildEmptyAggregation() { @Override public void doClose() { - Releasables.close(counts, maxes, mins, sums); + Releasables.close(counts, maxes, mins, sums, compensations); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java index 8dd78bf13730b..8339c06aefdcc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java @@ -49,9 +49,11 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue LongArray counts; DoubleArray sums; + DoubleArray compensations; DoubleArray mins; DoubleArray maxes; DoubleArray sumOfSqrs; + DoubleArray compensationOfSqrs; public ExtendedStatsAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext context, Aggregator parent, double sigma, List pipelineAggregators, @@ -65,11 +67,13 @@ public ExtendedStatsAggregator(String name, ValuesSource.Numeric valuesSource, D final BigArrays bigArrays = context.bigArrays(); counts = bigArrays.newLongArray(1, true); sums = bigArrays.newDoubleArray(1, true); + compensations = bigArrays.newDoubleArray(1, true); mins = bigArrays.newDoubleArray(1, false); mins.fill(0, mins.size(), Double.POSITIVE_INFINITY); maxes = bigArrays.newDoubleArray(1, false); maxes.fill(0, maxes.size(), Double.NEGATIVE_INFINITY); sumOfSqrs = bigArrays.newDoubleArray(1, true); + compensationOfSqrs = bigArrays.newDoubleArray(1, true); } } @@ -95,9 +99,11 @@ public void collect(int doc, long bucket) throws IOException { final long overSize = BigArrays.overSize(bucket + 1); counts = bigArrays.resize(counts, overSize); sums = bigArrays.resize(sums, overSize); + compensations = bigArrays.resize(compensations, overSize); mins = bigArrays.resize(mins, overSize); maxes = bigArrays.resize(maxes, overSize); sumOfSqrs = bigArrays.resize(sumOfSqrs, overSize); + compensationOfSqrs = bigArrays.resize(compensationOfSqrs, overSize); mins.fill(from, overSize, Double.POSITIVE_INFINITY); maxes.fill(from, overSize, Double.NEGATIVE_INFINITY); } @@ -105,19 +111,40 @@ public void collect(int doc, long bucket) throws IOException { if (values.advanceExact(doc)) { final int valuesCount = values.docValueCount(); counts.increment(bucket, valuesCount); - double sum = 0; - double sumOfSqr = 0; double min = mins.get(bucket); double max = maxes.get(bucket); + // Compute the sum and sum of squares for double values with Kahan summation algorithm + // which is more accurate than naive summation.
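extended_stats derives its variance from these two accumulators (variance = (sumOfSqrs - sum * sum / count) / count), so the sum of squares carries its own, independent compensation term. A condensed sketch of the double bookkeeping (sumAndSumOfSquares is an illustrative name, not from the patch):

// Track sum and sum-of-squares with separate Kahan compensation terms,
// mirroring the sums/compensations and sumOfSqrs/compensationOfSqrs arrays.
static double[] sumAndSumOfSquares(double[] values) {
    double sum = 0, compensation = 0;
    double sumOfSqr = 0, compensationOfSqr = 0;
    for (double value : values) {
        if (Double.isFinite(value) == false) {
            sum += value;
            sumOfSqr += value * value;
        } else {
            if (Double.isFinite(sum)) {
                double corrected = value - compensation;
                double newSum = sum + corrected;
                compensation = (newSum - sum) - corrected;
                sum = newSum;
            }
            if (Double.isFinite(sumOfSqr)) {
                double correctedOfSqr = value * value - compensationOfSqr;
                double newSumOfSqr = sumOfSqr + correctedOfSqr;
                compensationOfSqr = (newSumOfSqr - sumOfSqr) - correctedOfSqr;
                sumOfSqr = newSumOfSqr;
            }
        }
    }
    return new double[] { sum, sumOfSqr };
}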
+ double sum = sums.get(bucket); + double compensation = compensations.get(bucket); + double sumOfSqr = sumOfSqrs.get(bucket); + double compensationOfSqr = compensationOfSqrs.get(bucket); for (int i = 0; i < valuesCount; i++) { double value = values.nextValue(); - sum += value; - sumOfSqr += value * value; + if (Double.isFinite(value) == false) { + sum += value; + sumOfSqr += value * value; + } else { + if (Double.isFinite(sum)) { + double corrected = value - compensation; + double newSum = sum + corrected; + compensation = (newSum - sum) - corrected; + sum = newSum; + } + if (Double.isFinite(sumOfSqr)) { + double correctedOfSqr = value * value - compensationOfSqr; + double newSumOfSqr = sumOfSqr + correctedOfSqr; + compensationOfSqr = (newSumOfSqr - sumOfSqr) - correctedOfSqr; + sumOfSqr = newSumOfSqr; + } + } min = Math.min(min, value); max = Math.max(max, value); } - sums.increment(bucket, sum); - sumOfSqrs.increment(bucket, sumOfSqr); + sums.set(bucket, sum); + compensations.set(bucket, compensation); + sumOfSqrs.set(bucket, sumOfSqr); + compensationOfSqrs.set(bucket, compensationOfSqr); mins.set(bucket, min); maxes.set(bucket, max); } @@ -196,6 +223,6 @@ public InternalAggregation buildEmptyAggregation() { @Override public void doClose() { - Releasables.close(counts, maxes, mins, sumOfSqrs, sums); + Releasables.close(counts, maxes, mins, sumOfSqrs, compensationOfSqrs, sums, compensations); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java index 6e06a88cccd32..1f259fbe87d9f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java @@ -45,7 +45,7 @@ public static Metrics resolve(String name) { private final double sigma; public InternalExtendedStats(String name, long count, double sum, double min, double max, double sumOfSqrs, double sigma, - DocValueFormat formatter, List pipelineAggregators, Map metaData) { + DocValueFormat formatter, List pipelineAggregators, Map metaData) { super(name, count, sum, min, max, formatter, pipelineAggregators, metaData); this.sumOfSqrs = sumOfSqrs; this.sigma = sigma; @@ -142,16 +142,25 @@ public String getStdDeviationBoundAsString(Bounds bound) { @Override public InternalExtendedStats doReduce(List aggregations, ReduceContext reduceContext) { double sumOfSqrs = 0; + double compensationOfSqrs = 0; for (InternalAggregation aggregation : aggregations) { InternalExtendedStats stats = (InternalExtendedStats) aggregation; if (stats.sigma != sigma) { throw new IllegalStateException("Cannot reduce other stats aggregations that have a different sigma"); } - sumOfSqrs += stats.getSumOfSquares(); + double value = stats.getSumOfSquares(); + if (Double.isFinite(value) == false) { + sumOfSqrs += value; + } else if (Double.isFinite(sumOfSqrs)) { + double correctedOfSqrs = value - compensationOfSqrs; + double newSumOfSqrs = sumOfSqrs + correctedOfSqrs; + compensationOfSqrs = (newSumOfSqrs - sumOfSqrs) - correctedOfSqrs; + sumOfSqrs = newSumOfSqrs; + } } final InternalStats stats = super.doReduce(aggregations, reduceContext); return new InternalExtendedStats(name, stats.getCount(), stats.getSum(), stats.getMin(), stats.getMax(), sumOfSqrs, sigma, - format, pipelineAggregators(), getMetaData()); + format, 
pipelineAggregators(), getMetaData()); } static class Fields { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java index 6f723f4fbcb28..fb64d168db6aa 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java @@ -35,7 +35,7 @@ public class InternalSum extends InternalNumericMetricsAggregation.SingleValue i private final double sum; public InternalSum(String name, double sum, DocValueFormat formatter, List pipelineAggregators, - Map metaData) { + Map metaData) { super(name, pipelineAggregators, metaData); this.sum = sum; this.format = formatter; @@ -73,9 +73,20 @@ public double getValue() { @Override public InternalSum doReduce(List aggregations, ReduceContext reduceContext) { + // Compute the sum of double values with Kahan summation algorithm which is more + // accurate than naive summation. double sum = 0; + double compensation = 0; for (InternalAggregation aggregation : aggregations) { - sum += ((InternalSum) aggregation).sum; + double value = ((InternalSum) aggregation).sum; + if (Double.isFinite(value) == false) { + sum += value; + } else if (Double.isFinite(sum)) { + double corrected = value - compensation; + double newSum = sum + corrected; + compensation = (newSum - sum) - corrected; + sum = newSum; + } } return new InternalSum(name, sum, format, pipelineAggregators(), getMetaData()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java index bd325b39373e5..9ed8103a1e1ee 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java @@ -43,6 +43,7 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue { private final DocValueFormat format; private DoubleArray sums; + private DoubleArray compensations; SumAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext context, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { @@ -51,6 +52,7 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue { this.format = formatter; if (valuesSource != null) { sums = context.bigArrays().newDoubleArray(1, true); + compensations = context.bigArrays().newDoubleArray(1, true); } } @@ -71,13 +73,27 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, @Override public void collect(int doc, long bucket) throws IOException { sums = bigArrays.grow(sums, bucket + 1); + compensations = bigArrays.grow(compensations, bucket + 1); + if (values.advanceExact(doc)) { final int valuesCount = values.docValueCount(); - double sum = 0; + // Compute the sum of double values with Kahan summation algorithm which is more + // accurate than naive summation. 
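The isFinite guards preserve one property the testSummationAccuracy cases below depend on: with NaN or infinite values in play, the loop falls back to plain accumulation, so overflow and NaN propagation match naive summation. A small check against the kahanSum sketch above:

// Once a non-finite value appears (or the running sum overflows), the
// accumulation is effectively naive, keeping IEEE-754 special-value semantics.
public static void main(String[] args) {
    double[] overflow = { Double.MAX_VALUE, Double.MAX_VALUE };
    System.out.println(KahanSketch.kahanSum(overflow)); // Infinity, as with naive addition
    double[] withNaN = { 1.0, Double.NaN, 2.0 };
    System.out.println(KahanSketch.kahanSum(withNaN));  // NaN, as with naive addition
}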
+ double sum = sums.get(bucket); + double compensation = compensations.get(bucket); for (int i = 0; i < valuesCount; i++) { - sum += values.nextValue(); + double value = values.nextValue(); + if (Double.isFinite(value) == false) { + sum += value; + } else if (Double.isFinite(sum)) { + double corrected = value - compensation; + double newSum = sum + corrected; + compensation = (newSum - sum) - corrected; + sum = newSum; + } } - sums.increment(bucket, sum); + compensations.set(bucket, compensation); + sums.set(bucket, sum); } } }; @@ -106,6 +122,6 @@ public InternalAggregation buildEmptyAggregation() { @Override public void doClose() { - Releasables.close(sums); + Releasables.close(sums, compensations); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java index 10b306ad7177c..144305647ebaf 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.document.Document; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; @@ -38,6 +39,8 @@ import java.io.IOException; import java.util.function.Consumer; +import static java.util.Collections.singleton; + public class ExtendedStatsAggregatorTests extends AggregatorTestCase { private static final double TOLERANCE = 1e-5; @@ -132,6 +135,68 @@ public void testRandomLongs() throws IOException { ); } + public void testSummationAccuracy() throws IOException { + double[] values = new double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.9, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifyStatsOfDoubles(values, 13.5, 16.21, 0d); + + // Summing up an array which contains NaN and infinities and expect a result same as naive summation + int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + double sumOfSqrs = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? 
randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + sumOfSqrs += values[i] * values[i]; + } + verifyStatsOfDoubles(values, sum, sumOfSqrs, TOLERANCE); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifyStatsOfDoubles(largeValues, Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifyStatsOfDoubles(largeValues, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, 0d); + } + + private void verifyStatsOfDoubles(double[] values, double expectedSum, + double expectedSumOfSqrs, double delta) throws IOException { + MappedFieldType ft = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); + final String fieldName = "field"; + ft.setName(fieldName); + double max = Double.NEGATIVE_INFINITY; + double min = Double.POSITIVE_INFINITY; + for (double value : values) { + max = Math.max(max, value); + min = Math.min(min, value); + } + double expectedMax = max; + double expectedMin = min; + testCase(ft, + iw -> { + for (double value : values) { + iw.addDocument(singleton(new NumericDocValuesField(fieldName, NumericUtils.doubleToSortableLong(value)))); + } + }, + stats -> { + assertEquals(values.length, stats.getCount()); + assertEquals(expectedSum / values.length, stats.getAvg(), delta); + assertEquals(expectedSum, stats.getSum(), delta); + assertEquals(expectedSumOfSqrs, stats.getSumOfSquares(), delta); + assertEquals(expectedMax, stats.getMax(), 0d); + assertEquals(expectedMin, stats.getMin(), 0d); + } + ); + } + public void testCase(MappedFieldType ft, CheckedConsumer buildIndex, Consumer verify) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java index 143ad4553c7dd..6178a72c83e3e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats.Bounds; import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExtendedStats; @@ -28,6 +29,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -188,4 +190,44 @@ protected InternalExtendedStats mutateInstance(InternalExtendedStats instance) { } return new InternalExtendedStats(name, count, sum, min, max, sumOfSqrs, sigma, formatter, pipelineAggregators, metaData); } + + public void testSummationAccuracy() { + double[] values = new double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.9, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifySumOfSqrsOfDoubles(values, 13.5, 0d); + + int n = randomIntBetween(5, 10); + values = new double[n]; + 
double sum = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + } + verifySumOfSqrsOfDoubles(values, sum, TOLERANCE); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifySumOfSqrsOfDoubles(largeValues, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifySumOfSqrsOfDoubles(largeValues, Double.NEGATIVE_INFINITY, 0d); + } + + private void verifySumOfSqrsOfDoubles(double[] values, double expectedSumOfSqrs, double delta) { + List aggregations = new ArrayList<>(values.length); + double sigma = randomDouble(); + for (double sumOfSqrs : values) { + aggregations.add(new InternalExtendedStats("dummy1", 1, 0.0, 0.0, 0.0, sumOfSqrs, sigma, null, null, null)); + } + InternalExtendedStats stats = new InternalExtendedStats("dummy", 1, 0.0, 0.0, 0.0, 0.0, sigma, null, null, null); + InternalExtendedStats reduced = stats.doReduce(aggregations, null); + assertEquals(expectedSumOfSqrs, reduced.getSumOfSquares(), delta); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java index 4ce29e4e0ed83..369fd671eb93c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.metrics.stats.InternalStats; import org.elasticsearch.search.aggregations.metrics.stats.ParsedStats; @@ -30,6 +31,7 @@ import org.elasticsearch.test.InternalAggregationTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -48,7 +50,7 @@ protected InternalStats createTestInstance(String name, List } protected InternalStats createInstance(String name, long count, double sum, double min, double max, DocValueFormat formatter, - List pipelineAggregators, Map metaData) { + List pipelineAggregators, Map metaData) { return new InternalStats(name, count, sum, min, max, formatter, pipelineAggregators, metaData); } @@ -74,6 +76,54 @@ protected void assertReduced(InternalStats reduced, List inputs) assertEquals(expectedMax, reduced.getMax(), 0d); } + public void testSummationAccuracy() { + double[] values = new double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.9, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifyStatsOfDoubles(values, 13.5, 0.9, 0d); + + int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? 
randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + } + verifyStatsOfDoubles(values, sum, sum / n, TOLERANCE); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifyStatsOfDoubles(largeValues, Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifyStatsOfDoubles(largeValues, Double.NEGATIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d); + } + + private void verifyStatsOfDoubles(double[] values, double expectedSum, double expectedAvg, double delta) { + List aggregations = new ArrayList<>(values.length); + double max = Double.NEGATIVE_INFINITY; + double min = Double.POSITIVE_INFINITY; + for (double value : values) { + max = Math.max(max, value); + min = Math.min(min, value); + aggregations.add(new InternalStats("dummy1", 1, value, value, value, null, null, null)); + } + InternalStats internalStats = new InternalStats("dummy2", 0, 0.0, 2.0, 0.0, null, null, null); + InternalStats reduced = internalStats.doReduce(aggregations, null); + assertEquals("dummy2", reduced.getName()); + assertEquals(values.length, reduced.getCount()); + assertEquals(expectedSum, reduced.getSum(), delta); + assertEquals(expectedAvg, reduced.getAvg(), delta); + assertEquals(min, reduced.getMin(), 0d); + assertEquals(max, reduced.getMax(), 0d); + } + @Override protected void assertFromXContent(InternalStats aggregation, ParsedAggregation parsedAggregation) { assertTrue(parsedAggregation instanceof ParsedStats); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java index feeefac4daa55..884f9bfbe0d20 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java @@ -20,12 +20,14 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.metrics.sum.InternalSum; import org.elasticsearch.search.aggregations.metrics.sum.ParsedSum; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -34,7 +36,7 @@ public class InternalSumTests extends InternalAggregationTestCase { @Override protected InternalSum createTestInstance(String name, List pipelineAggregators, Map metaData) { - double value = frequently() ? randomDouble() : randomFrom(new Double[] { Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY }); + double value = frequently() ? 
randomDouble() : randomFrom(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.NaN); DocValueFormat formatter = randomFrom(new DocValueFormat.Decimal("###.##"), DocValueFormat.BOOLEAN, DocValueFormat.RAW); return new InternalSum(name, value, formatter, pipelineAggregators, metaData); } @@ -50,6 +52,47 @@ protected void assertReduced(InternalSum reduced, List inputs) { assertEquals(expectedSum, reduced.getValue(), 0.0001d); } + public void testSummationAccuracy() { + // Summing up a normal array and expect an accurate value + double[] values = new double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.9, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifySummationOfDoubles(values, 13.5, 0d); + + // Summing up an array which contains NaN and infinities and expect a result same as naive summation + int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + } + verifySummationOfDoubles(values, sum, TOLERANCE); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifySummationOfDoubles(largeValues, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifySummationOfDoubles(largeValues, Double.NEGATIVE_INFINITY, 0d); + } + + private void verifySummationOfDoubles(double[] values, double expected, double delta) { + List aggregations = new ArrayList<>(values.length); + for (double value : values) { + aggregations.add(new InternalSum("dummy1", value, null, null, null)); + } + InternalSum internalSum = new InternalSum("dummy", 0, null, null, null); + InternalSum reduced = internalSum.doReduce(aggregations, null); + assertEquals(expected, reduced.value(), delta); + } + @Override protected void assertFromXContent(InternalSum sum, ParsedAggregation parsedAggregation) { ParsedSum parsed = ((ParsedSum) parsedAggregation); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java index 7286c7de0fed5..c5c1420fb2265 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.document.Document; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; @@ -36,6 +37,8 @@ import java.io.IOException; import java.util.function.Consumer; +import static java.util.Collections.singleton; + public class StatsAggregatorTests extends AggregatorTestCase { static final double TOLERANCE = 1e-10; @@ -113,6 +116,66 @@ public void testRandomLongs() throws IOException { ); } + public void testSummationAccuracy() throws IOException { + // Summing up a normal array and expect an accurate value + double[] values = new double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifySummationOfDoubles(values, 
15.3, 0.9, 0d); + + // Summing up an array which contains NaN and infinities and expect a result same as naive summation + int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + } + verifySummationOfDoubles(values, sum, sum / n, TOLERANCE); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifySummationOfDoubles(largeValues, Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifySummationOfDoubles(largeValues, Double.NEGATIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d); + } + + private void verifySummationOfDoubles(double[] values, double expectedSum, + double expectedAvg, double delta) throws IOException { + MappedFieldType ft = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); + ft.setName("field"); + + double max = Double.NEGATIVE_INFINITY; + double min = Double.POSITIVE_INFINITY; + for (double value : values) { + max = Math.max(max, value); + min = Math.min(min, value); + } + double expectedMax = max; + double expectedMin = min; + testCase(ft, + iw -> { + for (double value : values) { + iw.addDocument(singleton(new NumericDocValuesField("field", NumericUtils.doubleToSortableLong(value)))); + } + }, + stats -> { + assertEquals(values.length, stats.getCount()); + assertEquals(expectedAvg, stats.getAvg(), delta); + assertEquals(expectedSum, stats.getSum(), delta); + assertEquals(expectedMax, stats.getMax(), 0d); + assertEquals(expectedMin, stats.getMin(), 0d); + } + ); + } + public void testCase(MappedFieldType ft, CheckedConsumer buildIndex, Consumer verify) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java index ff9888a4981d3..edaf5ae03f99b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; @@ -107,7 +108,7 @@ public void testQueryFiltering() throws IOException { } public void testStringField() throws IOException { - IllegalStateException e = expectThrows(IllegalStateException.class , () -> { + IllegalStateException e = expectThrows(IllegalStateException.class, () -> { testCase(new MatchAllDocsQuery(), iw -> { iw.addDocument(singleton(new SortedDocValuesField(FIELD_NAME, new BytesRef("1")))); }, count -> assertEquals(0L, count.getValue(), 0d)); @@ -116,10 +117,59 @@ public void testStringField() throws IOException { "Re-index with correct docvalues type.", e.getMessage()); } + public void testSummationAccuracy() throws IOException { + // Summing up a normal array and expect an accurate value + double[] values = new 
double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifySummationOfDoubles(values, 15.3, 0d); + + // Summing up an array which contains NaN and infinities and expect a result same as naive summation + int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + } + verifySummationOfDoubles(values, sum, 1e-10); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifySummationOfDoubles(largeValues, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifySummationOfDoubles(largeValues, Double.NEGATIVE_INFINITY, 0d); + } + + private void verifySummationOfDoubles(double[] values, double expected, double delta) throws IOException { + testCase(new MatchAllDocsQuery(), + iw -> { + for (double value : values) { + iw.addDocument(singleton(new NumericDocValuesField(FIELD_NAME, NumericUtils.doubleToSortableLong(value)))); + } + }, + result -> assertEquals(expected, result.getValue(), delta), + NumberFieldMapper.NumberType.DOUBLE + ); + } + private void testCase(Query query, CheckedConsumer indexer, Consumer verify) throws IOException { + testCase(query, indexer, verify, NumberFieldMapper.NumberType.LONG); + } + private void testCase(Query query, + CheckedConsumer indexer, + Consumer verify, + NumberFieldMapper.NumberType fieldNumberType) throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { indexer.accept(indexWriter); @@ -128,7 +178,7 @@ private void testCase(Query query, try (IndexReader indexReader = DirectoryReader.open(directory)) { IndexSearcher indexSearcher = newSearcher(indexReader, true, true); - MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(fieldNumberType); fieldType.setName(FIELD_NAME); fieldType.setHasDocValues(true); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java index 2849ede447b60..7835bf75e721f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java @@ -30,13 +30,11 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregator; -import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg; import java.io.IOException; import java.util.Arrays; @@ -103,8 +101,59 @@ 
public void testQueryFiltersAll() throws IOException { }); } - private void testCase(Query query, CheckedConsumer buildIndex, Consumer verify) - throws IOException { + public void testSummationAccuracy() throws IOException { + // Summing up a normal array and expect an accurate value + double[] values = new double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.9, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifyAvgOfDoubles(values, 0.9, 0d); + + // Summing up an array which contains NaN and infinities and expect a result same as naive summation + int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + } + verifyAvgOfDoubles(values, sum / n, 1e-10); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifyAvgOfDoubles(largeValues, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifyAvgOfDoubles(largeValues, Double.NEGATIVE_INFINITY, 0d); + } + + private void verifyAvgOfDoubles(double[] values, double expected, double delta) throws IOException { + testCase(new MatchAllDocsQuery(), + iw -> { + for (double value : values) { + iw.addDocument(singleton(new NumericDocValuesField("number", NumericUtils.doubleToSortableLong(value)))); + } + }, + avg -> assertEquals(expected, avg.getValue(), delta), + NumberFieldMapper.NumberType.DOUBLE + ); + } + + private void testCase(Query query, + CheckedConsumer buildIndex, + Consumer verify) throws IOException { + testCase(query, buildIndex, verify, NumberFieldMapper.NumberType.LONG); + } + + private void testCase(Query query, + CheckedConsumer buildIndex, + Consumer verify, + NumberFieldMapper.NumberType fieldNumberType) throws IOException { Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); buildIndex.accept(indexWriter); @@ -114,7 +163,7 @@ private void testCase(Query query, CheckedConsumer inputs) { assertEquals(sum / counts, reduced.value(), 0.0000001); } + public void testSummationAccuracy() { + double[] values = new double[]{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.9, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7}; + verifyAvgOfDoubles(values, 0.9, 0d); + + int n = randomIntBetween(5, 10); + values = new double[n]; + double sum = 0; + for (int i = 0; i < n; i++) { + values[i] = frequently() + ? 
randomFrom(Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) + : randomDoubleBetween(Double.MIN_VALUE, Double.MAX_VALUE, true); + sum += values[i]; + } + verifyAvgOfDoubles(values, sum / n, TOLERANCE); + + // Summing up some big double values and expect infinity result + n = randomIntBetween(5, 10); + double[] largeValues = new double[n]; + for (int i = 0; i < n; i++) { + largeValues[i] = Double.MAX_VALUE; + } + verifyAvgOfDoubles(largeValues, Double.POSITIVE_INFINITY, 0d); + + for (int i = 0; i < n; i++) { + largeValues[i] = -Double.MAX_VALUE; + } + verifyAvgOfDoubles(largeValues, Double.NEGATIVE_INFINITY, 0d); + } + + private void verifyAvgOfDoubles(double[] values, double expected, double delta) { + List aggregations = new ArrayList<>(values.length); + for (double value : values) { + aggregations.add(new InternalAvg("dummy1", value, 1, null, null, null)); + } + InternalAvg internalAvg = new InternalAvg("dummy2", 0, 0, null, null, null); + InternalAvg reduced = internalAvg.doReduce(aggregations, null); + assertEquals(expected, reduced.getValue(), delta); + } + @Override protected void assertFromXContent(InternalAvg avg, ParsedAggregation parsedAggregation) { ParsedAvg parsed = ((ParsedAvg) parsedAggregation); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java index ea846c5dd1841..8f5fe5d5622e7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java @@ -150,6 +150,7 @@ public abstract class InternalAggregationTestCase extends AbstractWireSerializingTestCase { public static final int DEFAULT_MAX_BUCKETS = 100000; + protected static final double TOLERANCE = 1e-10; private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry( new SearchModule(Settings.EMPTY, false, emptyList()).getNamedWriteables()); From 8d195c86ded108b59a4c01cbc9905f131b8e129c Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 22 Jan 2018 12:43:34 +0100 Subject: [PATCH 78/94] CountedBitSet doesn't need to extend BitSet. (#28239) --- .../index/seqno/CountedBitSet.java | 36 +++---------------- .../index/seqno/LocalCheckpointTracker.java | 13 ++++--- .../index/translog/MultiSnapshot.java | 5 ++- .../index/seqno/CountedBitSetTests.java | 5 --- .../seqno/LocalCheckpointTrackerTests.java | 3 +- 5 files changed, 13 insertions(+), 49 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/seqno/CountedBitSet.java b/server/src/main/java/org/elasticsearch/index/seqno/CountedBitSet.java index 54270de1b01c8..d1f6f4a3a3745 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/CountedBitSet.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/CountedBitSet.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.seqno; -import org.apache.lucene.util.BitSet; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.RamUsageEstimator; @@ -28,7 +27,7 @@ * when all bits are set to reduce memory usage. This structure can work well for sequence numbers as * these numbers are likely to form contiguous ranges (eg. filling all bits). */ -public final class CountedBitSet extends BitSet { +public final class CountedBitSet { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(CountedBitSet.class); private short onBits; // Number of bits are set. 
private FixedBitSet bitset; @@ -41,14 +40,12 @@ public CountedBitSet(short numBits) { this.bitset = new FixedBitSet(numBits); } - @Override public boolean get(int index) { assert 0 <= index && index < this.length(); assert bitset == null || onBits < bitset.length() : "Bitset should be released when all bits are set"; return bitset == null ? true : bitset.get(index); } - @Override public void set(int index) { assert 0 <= index && index < this.length(); assert bitset == null || onBits < bitset.length() : "Bitset should be released when all bits are set"; @@ -67,41 +64,16 @@ public void set(int index) { } } - @Override - public void clear(int startIndex, int endIndex) { - throw new UnsupportedOperationException(); - } - - @Override - public void clear(int index) { - throw new UnsupportedOperationException(); - } + // Below methods are pkg-private for testing - @Override - public int cardinality() { + int cardinality() { return onBits; } - @Override - public int length() { + int length() { return bitset == null ? onBits : bitset.length(); } - @Override - public int prevSetBit(int index) { - throw new UnsupportedOperationException(); - } - - @Override - public int nextSetBit(int index) { - throw new UnsupportedOperationException(); - } - - @Override - public long ramBytesUsed() { - return BASE_RAM_BYTES_USED + (bitset == null ? 0 : bitset.ramBytesUsed()); - } - boolean isInternalBitsetReleased() { return bitset == null; } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java index 34926a36f4573..cd33c1bf046ed 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.seqno; import com.carrotsearch.hppc.LongObjectHashMap; -import org.apache.lucene.util.BitSet; import org.elasticsearch.common.SuppressForbidden; /** @@ -39,7 +38,7 @@ public class LocalCheckpointTracker { * A collection of bit sets representing pending sequence numbers. Each sequence number is mapped to a bit set by dividing by the * bit set size. */ - final LongObjectHashMap<BitSet> processedSeqNo = new LongObjectHashMap<>(); + final LongObjectHashMap<CountedBitSet> processedSeqNo = new LongObjectHashMap<>(); /** * The current local checkpoint, i.e., all sequence numbers no more than this number have been completed. 
@@ -96,7 +95,7 @@ public synchronized void markSeqNoAsCompleted(final long seqNo) { // this is possible during recovery where we might replay an operation that was also replicated return; } - final BitSet bitSet = getBitSetForSeqNo(seqNo); + final CountedBitSet bitSet = getBitSetForSeqNo(seqNo); final int offset = seqNoToBitSetOffset(seqNo); bitSet.set(offset); if (seqNo == checkpoint + 1) { @@ -170,7 +169,7 @@ assert getBitSetForSeqNo(checkpoint + 1).get(seqNoToBitSetOffset(checkpoint + 1) try { // keep it simple for now, get the checkpoint one by one; in the future we can optimize and read words long bitSetKey = getBitSetKey(checkpoint); - BitSet current = processedSeqNo.get(bitSetKey); + CountedBitSet current = processedSeqNo.get(bitSetKey); if (current == null) { // the bit set corresponding to the checkpoint has already been removed, set ourselves up for the next bit set assert checkpoint % BIT_SET_SIZE == BIT_SET_SIZE - 1; @@ -184,7 +183,7 @@ assert getBitSetForSeqNo(checkpoint + 1).get(seqNoToBitSetOffset(checkpoint + 1) */ if (checkpoint == lastSeqNoInBitSet(bitSetKey)) { assert current != null; - final BitSet removed = processedSeqNo.remove(bitSetKey); + final CountedBitSet removed = processedSeqNo.remove(bitSetKey); assert removed == current; current = processedSeqNo.get(++bitSetKey); } @@ -210,11 +209,11 @@ private long getBitSetKey(final long seqNo) { return seqNo / BIT_SET_SIZE; } - private BitSet getBitSetForSeqNo(final long seqNo) { + private CountedBitSet getBitSetForSeqNo(final long seqNo) { assert Thread.holdsLock(this); final long bitSetKey = getBitSetKey(seqNo); final int index = processedSeqNo.indexOf(bitSetKey); - final BitSet bitSet; + final CountedBitSet bitSet; if (processedSeqNo.indexExists(index)) { bitSet = processedSeqNo.indexGet(index); } else { diff --git a/server/src/main/java/org/elasticsearch/index/translog/MultiSnapshot.java b/server/src/main/java/org/elasticsearch/index/translog/MultiSnapshot.java index 910d71a51a0a7..7ea241958f87c 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/MultiSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/translog/MultiSnapshot.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.translog; import com.carrotsearch.hppc.LongObjectHashMap; -import org.apache.lucene.util.BitSet; import org.elasticsearch.index.seqno.CountedBitSet; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -85,7 +84,7 @@ public void close() throws IOException { static final class SeqNoSet { static final short BIT_SET_SIZE = 1024; - private final LongObjectHashMap<BitSet> bitSets = new LongObjectHashMap<>(); + private final LongObjectHashMap<CountedBitSet> bitSets = new LongObjectHashMap<>(); /** * Marks this sequence number and returns true if it is seen before. 
@@ -93,7 +92,7 @@ static final class SeqNoSet { boolean getAndSet(long value) { assert value >= 0; final long key = value / BIT_SET_SIZE; - BitSet bitset = bitSets.get(key); + CountedBitSet bitset = bitSets.get(key); if (bitset == null) { bitset = new CountedBitSet(BIT_SET_SIZE); bitSets.put(key, bitset); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/CountedBitSetTests.java b/server/src/test/java/org/elasticsearch/index/seqno/CountedBitSetTests.java index b014f82740640..bc4f58034d1dc 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/CountedBitSetTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/CountedBitSetTests.java @@ -26,9 +26,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.lessThan; public class CountedBitSetTests extends ESTestCase { @@ -55,7 +53,6 @@ public void testReleaseInternalBitSet() { int numBits = (short) randomIntBetween(8, 4096); final CountedBitSet countedBitSet = new CountedBitSet((short) numBits); final List values = IntStream.range(0, numBits).boxed().collect(Collectors.toList()); - final long ramBytesUsedWithBitSet = countedBitSet.ramBytesUsed(); for (int i = 1; i < numBits; i++) { final int value = values.get(i); @@ -68,7 +65,6 @@ public void testReleaseInternalBitSet() { assertThat(countedBitSet.isInternalBitsetReleased(), equalTo(false)); assertThat(countedBitSet.length(), equalTo(numBits)); assertThat(countedBitSet.cardinality(), equalTo(i)); - assertThat(countedBitSet.ramBytesUsed(), equalTo(ramBytesUsedWithBitSet)); } // The missing piece to fill all bits. @@ -83,7 +79,6 @@ public void testReleaseInternalBitSet() { assertThat(countedBitSet.isInternalBitsetReleased(), equalTo(true)); assertThat(countedBitSet.length(), equalTo(numBits)); assertThat(countedBitSet.cardinality(), equalTo(numBits)); - assertThat(countedBitSet.ramBytesUsed(), allOf(equalTo(CountedBitSet.BASE_RAM_BYTES_USED), lessThan(ramBytesUsedWithBitSet))); } // Tests with released internal bitset. 
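The idea behind CountedBitSet can be sketched in isolation. Below is a minimal toy, assuming java.util.BitSet as a stand-in for Lucene's FixedBitSet; ToyCountedBitSet and its members are illustrative names, not the class patched above. Since sequence numbers tend to fill contiguous ranges, most of these bit sets eventually saturate, at which point the backing storage can be released and every query answered from the count alone:

import java.util.BitSet;

// Toy counted bit set: count the set bits and drop the backing bit set once
// the whole range is set, since a fully-set range is described by its count.
final class ToyCountedBitSet {
    private final int numBits;
    private int onBits;   // number of bits currently set
    private BitSet bits;  // null once all bits are set

    ToyCountedBitSet(int numBits) {
        this.numBits = numBits;
        this.bits = new BitSet(numBits);
    }

    boolean get(int index) {
        assert 0 <= index && index < numBits;
        return bits == null || bits.get(index); // a saturated set contains every index
    }

    void set(int index) {
        assert 0 <= index && index < numBits;
        if (bits != null && bits.get(index) == false) {
            bits.set(index);
            onBits++;
            if (onBits == numBits) {
                bits = null; // all bits set: release the backing storage
            }
        }
    }

    int cardinality() {
        return onBits;
    }
}

Dropping the BitSet inheritance, as this patch does, removes the unsupported-operation overrides without losing any of this behavior.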
diff --git a/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java b/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java index 31b8c23bf1c79..932fb71790800 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.seqno; import com.carrotsearch.hppc.LongObjectHashMap; -import org.apache.lucene.util.BitSet; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -260,7 +259,7 @@ public void testResetCheckpoint() { tracker.resetCheckpoint(localCheckpoint); assertThat(tracker.getCheckpoint(), equalTo((long) localCheckpoint)); assertThat(tracker.getMaxSeqNo(), equalTo((long) maxSeqNo)); - assertThat(tracker.processedSeqNo, new BaseMatcher<LongObjectHashMap<BitSet>>() { + assertThat(tracker.processedSeqNo, new BaseMatcher<LongObjectHashMap<CountedBitSet>>() { @Override public boolean matches(Object item) { return (item instanceof LongObjectHashMap && ((LongObjectHashMap) item).isEmpty()); From a6bfe67f8b1247068759a392d2e73ee3eaa9ac06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Mon, 22 Jan 2018 13:33:35 +0100 Subject: [PATCH 79/94] [Test] Lower bwc version for rank-eval rest tests The API was backported to 6.2 so the version we test against on master can be lowered to that. --- .../test/resources/rest-api-spec/test/rank_eval/10_basic.yml | 4 ++-- .../test/resources/rest-api-spec/test/rank_eval/20_dcg.yml | 4 ++-- .../resources/rest-api-spec/test/rank_eval/30_failures.yml | 4 ++-- .../resources/rest-api-spec/test/rank-eval/30_template.yml | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml index 2eab6e47e7ff2..4a244dcb9e5e9 100644 --- a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml +++ b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml @@ -2,8 +2,8 @@ "Response format": - skip: - version: " - 6.99.99" - reason: the ranking evaluation feature is only available on 7.0 + version: " - 6.1.99" + reason: the ranking evaluation feature is available since 6.2 - do: indices.create: diff --git a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml index 3a68890dce9f7..fc5e6576ad4d1 100644 --- a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml +++ b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/20_dcg.yml @@ -2,8 +2,8 @@ "Response format": - skip: - version: " - 6.99.99" - reason: the ranking evaluation feature is only available on 7.0 + version: " - 6.1.99" + reason: the ranking evaluation feature is available since 6.2 - do: index: diff --git a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml index 48ea593712ef5..24902253eb0d0 100644 --- a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml +++ b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/30_failures.yml @@ -2,8 +2,8 @@ "Response format": - skip: - version: " - 6.99.99" - reason: the ranking 
evaluation feature is only available on 7.0 + version: " - 6.1.99" + reason: the ranking evaluation feature is available since 6.2 - do: index: diff --git a/qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/30_template.yml b/qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/30_template.yml index 9dfbecce75b53..692a2e2123058 100644 --- a/qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/30_template.yml +++ b/qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/30_template.yml @@ -2,8 +2,8 @@ "Template request": - skip: - version: " - 6.99.99" - reason: the ranking evaluation feature is only available on 7.0 + version: " - 6.1.99" + reason: the ranking evaluation feature is available since 6.2 - do: indices.create: From 509ecf2aa63560e0e0cb12f89f7f5d7ef09a42f9 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 18 Jan 2018 13:26:16 +0100 Subject: [PATCH 80/94] Do not return all indices if a specific alias is requested via the get aliases API. If a get aliases API call requests a specific alias pattern, indices without any matching aliases should not be included in the response. Closes #27763 --- .../alias/get/TransportGetAliasesAction.java | 3 +-- .../cluster/metadata/MetaData.java | 12 +++++------- .../admin/indices/RestGetAliasesAction.java | 1 - .../elasticsearch/aliases/IndexAliasesIT.java | 18 ++++-------------- 4 files changed, 10 insertions(+), 24 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index b7ce0407681cf..6edc95f649d40 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -62,8 +62,7 @@ protected GetAliasesResponse newResponse() { @Override protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<GetAliasesResponse> listener) { String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request); - @SuppressWarnings("unchecked") - ImmutableOpenMap<String, List<AliasMetaData>> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices); + ImmutableOpenMap<String, List<AliasMetaData>> result = state.metaData().findAliases(request.aliases(), concreteIndices); listener.onResponse(new GetAliasesResponse(result)); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 0e9bcf8f11a8b..98afe41c59697 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -275,14 +275,12 @@ public ImmutableOpenMap<String, List<AliasMetaData>> findAliases(final String[] if (!filteredValues.isEmpty()) { // Make the list order deterministic - CollectionUtil.timSort(filteredValues, new Comparator<AliasMetaData>() { - @Override - public int compare(AliasMetaData o1, AliasMetaData o2) { - return o1.alias().compareTo(o2.alias()); - } - }); + CollectionUtil.timSort(filteredValues, Comparator.comparing(AliasMetaData::alias)); + mapBuilder.put(index, Collections.unmodifiableList(filteredValues)); + } else if (matchAllAliases) { + // in case all aliases are requested then it is desired to return the concrete index with no aliases 
(#25114): + mapBuilder.put(index, Collections.emptyList()); } - mapBuilder.put(index, Collections.unmodifiableList(filteredValues)); } return mapBuilder.build(); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java index 51ff743d2d128..8cf4707262ed6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.admin.indices; -import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; diff --git a/server/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java b/server/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java index 8bf074be551b1..dae421db97f31 100644 --- a/server/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/server/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -570,24 +570,20 @@ public void testIndicesGetAliases() throws Exception { logger.info("--> getting alias1"); GetAliasesResponse getResponse = admin().indices().prepareGetAliases("alias1").get(); assertThat(getResponse, notNullValue()); - assertThat(getResponse.getAliases().size(), equalTo(5)); + assertThat(getResponse.getAliases().size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("alias1")); assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); - assertTrue(getResponse.getAliases().get("test").isEmpty()); - assertTrue(getResponse.getAliases().get("test123").isEmpty()); - assertTrue(getResponse.getAliases().get("foobarbaz").isEmpty()); - assertTrue(getResponse.getAliases().get("bazbar").isEmpty()); AliasesExistResponse existsResponse = admin().indices().prepareAliasesExist("alias1").get(); assertThat(existsResponse.exists(), equalTo(true)); logger.info("--> getting all aliases that start with alias*"); getResponse = admin().indices().prepareGetAliases("alias*").get(); assertThat(getResponse, notNullValue()); - assertThat(getResponse.getAliases().size(), equalTo(5)); + assertThat(getResponse.getAliases().size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(2)); assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("alias1")); @@ -599,10 +595,6 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(1).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(1).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(1).getSearchRouting(), nullValue()); - assertTrue(getResponse.getAliases().get("test").isEmpty()); - assertTrue(getResponse.getAliases().get("test123").isEmpty()); - 
assertTrue(getResponse.getAliases().get("foobarbaz").isEmpty()); - assertTrue(getResponse.getAliases().get("bazbar").isEmpty()); existsResponse = admin().indices().prepareAliasesExist("alias*").get(); assertThat(existsResponse.exists(), equalTo(true)); @@ -687,13 +679,12 @@ public void testIndicesGetAliases() throws Exception { logger.info("--> getting f* for index *bar"); getResponse = admin().indices().prepareGetAliases("f*").addIndices("*bar").get(); assertThat(getResponse, notNullValue()); - assertThat(getResponse.getAliases().size(), equalTo(2)); + assertThat(getResponse.getAliases().size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo")); assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); - assertTrue(getResponse.getAliases().get("bazbar").isEmpty()); existsResponse = admin().indices().prepareAliasesExist("f*") .addIndices("*bar").get(); assertThat(existsResponse.exists(), equalTo(true)); @@ -702,14 +693,13 @@ public void testIndicesGetAliases() throws Exception { logger.info("--> getting f* for index *bac"); getResponse = admin().indices().prepareGetAliases("foo").addIndices("*bac").get(); assertThat(getResponse, notNullValue()); - assertThat(getResponse.getAliases().size(), equalTo(2)); + assertThat(getResponse.getAliases().size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo")); assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); - assertTrue(getResponse.getAliases().get("bazbar").isEmpty()); existsResponse = admin().indices().prepareAliasesExist("foo") .addIndices("*bac").get(); assertThat(existsResponse.exists(), equalTo(true)); From 0c83ee2a5dc13cbf9069f02b007b89459373b477 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Mon, 22 Jan 2018 15:51:46 +0100 Subject: [PATCH 81/94] Trim down usages of `ShardOperationFailedException` interface (#28312) In many cases we use the `ShardOperationFailedException` interface to abstract an exception that can only be of one type, namely `DefaultShardOperationFailedException`. There is no need to use the interface in such cases; the concrete type should be used instead. 
That has the additional advantage of simplifying parsing such exceptions back from rest responses for the high-level REST client --- .../clear/ClearIndicesCacheResponse.java | 5 ++-- .../TransportClearIndicesCacheAction.java | 4 +-- .../admin/indices/flush/FlushResponse.java | 4 +-- .../indices/flush/TransportFlushAction.java | 5 ++-- .../forcemerge/ForceMergeResponse.java | 4 +-- .../forcemerge/TransportForceMergeAction.java | 4 +-- .../indices/recovery/RecoveryResponse.java | 5 ++-- .../recovery/TransportRecoveryAction.java | 4 +-- .../indices/refresh/RefreshResponse.java | 4 +-- .../refresh/TransportRefreshAction.java | 5 ++-- .../segments/IndicesSegmentResponse.java | 5 ++-- .../TransportIndicesSegmentsAction.java | 4 +-- .../shards/IndicesShardStoresResponse.java | 5 ++-- .../indices/stats/IndicesStatsResponse.java | 5 ++-- .../stats/TransportIndicesStatsAction.java | 4 +-- .../get/TransportUpgradeStatusAction.java | 4 +-- .../upgrade/get/UpgradeStatusResponse.java | 6 ++-- .../upgrade/post/TransportUpgradeAction.java | 4 +-- .../indices/upgrade/post/UpgradeResponse.java | 5 ++-- .../query/TransportValidateQueryAction.java | 3 +- .../validate/query/ValidateQueryResponse.java | 5 ++-- .../support/broadcast/BroadcastResponse.java | 29 +++++++------------ .../node/TransportBroadcastByNodeAction.java | 5 ++-- .../TransportBroadcastReplicationAction.java | 8 ++--- .../indices/stats/IndicesStatsTests.java | 5 ++-- .../TransportBroadcastByNodeActionTests.java | 6 ++-- .../BroadcastReplicationTests.java | 4 +-- .../org/elasticsearch/get/GetActionIT.java | 4 +-- .../indices/stats/IndexStatsIT.java | 7 +++-- .../action/cat/RestRecoveryActionTests.java | 4 +-- .../elasticsearch/test/ESIntegTestCase.java | 4 +-- .../hamcrest/ElasticsearchAssertions.java | 6 ++-- 32 files changed, 87 insertions(+), 89 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java index cd3355cae8766..d0f4b3cc20beb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.admin.indices.cache.clear; -import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -38,7 +38,8 @@ public class ClearIndicesCacheResponse extends BroadcastResponse { } - ClearIndicesCacheResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { + ClearIndicesCacheResponse(int totalShards, int successfulShards, int failedShards, + List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index 0ad94db7b1f30..eda82fb710ca0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -19,8 +19,8 @@ package org.elasticsearch.action.admin.indices.cache.clear; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -65,7 +65,7 @@ protected EmptyResult readShardResult(StreamInput in) throws IOException { @Override protected ClearIndicesCacheResponse newResponse(ClearIndicesCacheRequest request, int totalShards, int successfulShards, int failedShards, List responses, - List shardFailures, ClusterState clusterState) { + List shardFailures, ClusterState clusterState) { return new ClearIndicesCacheResponse(totalShards, successfulShards, failedShards, shardFailures); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java index c2ac70026454c..273fc3e817d46 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import java.util.List; @@ -35,7 +35,7 @@ public class FlushResponse extends BroadcastResponse { } - FlushResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { + FlushResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index a29918b438ef3..91755388320a3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -19,8 +19,8 @@ package org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -57,7 +57,8 @@ protected ShardFlushRequest newShardRequest(FlushRequest request, ShardId shardI } @Override - protected FlushResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List shardFailures) { + protected FlushResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List + shardFailures) { return new FlushResponse(totalNumCopies, successfulShards, failedShards, shardFailures); } } diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java index 3844f00193c5f..f77bb5d6a57de 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.admin.indices.forcemerge; -import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import java.util.List; @@ -32,7 +32,7 @@ public class ForceMergeResponse extends BroadcastResponse { ForceMergeResponse() { } - ForceMergeResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { + ForceMergeResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index 18ac88e1b3056..94f27a93624d5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -19,8 +19,8 @@ package org.elasticsearch.action.admin.indices.forcemerge; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -62,7 +62,7 @@ protected EmptyResult readShardResult(StreamInput in) throws IOException { } @Override - protected ForceMergeResponse newResponse(ForceMergeRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { + protected ForceMergeResponse newResponse(ForceMergeRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { return new ForceMergeResponse(totalShards, successfulShards, failedShards, shardFailures); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java index a19393ebd5beb..1a9c86049f8c6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.admin.indices.recovery; -import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -56,7 +56,8 @@ public RecoveryResponse() { } * @param shardFailures List of 
failures processing shards */ public RecoveryResponse(int totalShards, int successfulShards, int failedShards, boolean detailed, - Map> shardRecoveryStates, List shardFailures) { + Map> shardRecoveryStates, + List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); this.shardRecoveryStates = shardRecoveryStates; this.detailed = detailed; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java index 01f37527374fc..0e11aed9d24fd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -19,8 +19,8 @@ package org.elasticsearch.action.admin.indices.recovery; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -69,7 +69,7 @@ protected RecoveryState readShardResult(StreamInput in) throws IOException { @Override - protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { + protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { Map> shardResponses = new HashMap<>(); for (RecoveryState recoveryState : responses) { if (recoveryState == null) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java index ba3ec31c6a544..b629ac22b89a9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.admin.indices.refresh; -import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import java.util.List; @@ -32,7 +32,7 @@ public class RefreshResponse extends BroadcastResponse { RefreshResponse() { } - RefreshResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { + RefreshResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index 9752e68517e15..d44783d3c64f1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.indices.refresh; -import 
org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; @@ -61,7 +61,8 @@ protected BasicReplicationRequest newShardRequest(RefreshRequest request, ShardI } @Override - protected RefreshResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List shardFailures) { + protected RefreshResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, + List shardFailures) { return new RefreshResponse(totalNumCopies, successfulShards, failedShards, shardFailures); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index 2e241ef1614b9..b9296c0242fdb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -24,7 +24,7 @@ import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.util.Accountable; -import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -53,7 +53,8 @@ public class IndicesSegmentResponse extends BroadcastResponse implements ToXCont } - IndicesSegmentResponse(ShardSegments[] shards, int totalShards, int successfulShards, int failedShards, List shardFailures) { + IndicesSegmentResponse(ShardSegments[] shards, int totalShards, int successfulShards, int failedShards, + List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); this.shards = shards; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java index 350e8dffa1999..94b12c9ab17d5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java @@ -19,8 +19,8 @@ package org.elasticsearch.action.admin.indices.segments; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -77,7 +77,7 @@ protected ShardSegments readShardResult(StreamInput in) throws IOException { } @Override - protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int totalShards, int successfulShards, int failedShards, List results, 
List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardSegments> results, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
         return new IndicesSegmentResponse(results.toArray(new ShardSegments[results.size()]), totalShards, successfulShards, failedShards, shardFailures);
     }
 
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java
index 70624380e8611..6cf160897482c 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java
@@ -25,7 +25,6 @@
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.collect.ImmutableOpenIntMap;
@@ -348,7 +347,7 @@ public void writeTo(StreamOutput out) throws IOException {
             }
         }
         out.writeVInt(failures.size());
-        for (ShardOperationFailedException failure : failures) {
+        for (Failure failure : failures) {
             failure.writeTo(out);
         }
     }
@@ -357,7 +356,7 @@ public void writeTo(StreamOutput out) throws IOException {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         if (failures.size() > 0) {
             builder.startArray(Fields.FAILURES);
-            for (ShardOperationFailedException failure : failures) {
+            for (Failure failure : failures) {
                 builder.startObject();
                 failure.toXContent(builder, params);
                 builder.endObject();
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
index 5fcd4e5e62e9f..24a0e10e86695 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.action.admin.indices.stats;
 
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -48,7 +48,8 @@ public class IndicesStatsResponse extends BroadcastResponse implements ToXConten
 
     }
 
-    IndicesStatsResponse(ShardStats[] shards, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    IndicesStatsResponse(ShardStats[] shards, int totalShards, int successfulShards, int failedShards,
+                         List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
         this.shards = shards;
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java
index bed820189d1a8..50d7712da11d0 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java
@@ -19,8 +19,8 @@
 
 package org.elasticsearch.action.admin.indices.stats;
 
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -79,7 +79,7 @@ protected ShardStats readShardResult(StreamInput in) throws IOException {
     }
 
     @Override
-    protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardStats> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardStats> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
         return new IndicesStatsResponse(responses.toArray(new ShardStats[responses.size()]), totalShards, successfulShards, failedShards, shardFailures);
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java
index c2c4424d4c897..19566acaf7af4 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java
@@ -20,8 +20,8 @@
 package org.elasticsearch.action.admin.indices.upgrade.get;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -79,7 +79,7 @@ protected ShardUpgradeStatus readShardResult(StreamInput in) throws IOException
     }
 
     @Override
-    protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeStatus> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeStatus> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
         return new UpgradeStatusResponse(responses.toArray(new ShardUpgradeStatus[responses.size()]), totalShards, successfulShards, failedShards, shardFailures);
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java
index 565348f5ac22b..71110f18b875c 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java
@@ -19,11 +19,10 @@
 
 package org.elasticsearch.action.admin.indices.upgrade.get;
 
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent.Params;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
@@ -43,7 +42,8 @@ public class UpgradeStatusResponse extends BroadcastResponse implements ToXConte
     UpgradeStatusResponse() {
     }
 
-    UpgradeStatusResponse(ShardUpgradeStatus[] shards, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    UpgradeStatusResponse(ShardUpgradeStatus[] shards, int totalShards, int successfulShards, int failedShards,
+                          List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
         this.shards = shards;
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java
index 87f39336047b2..67e51c8e5575c 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java
@@ -22,8 +22,8 @@
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.PrimaryMissingActionException;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -71,7 +71,7 @@ public TransportUpgradeAction(Settings settings, ThreadPool threadPool, ClusterS
     }
 
     @Override
-    protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeResult> shardUpgradeResults, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeResult> shardUpgradeResults, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
         Map<String, Integer> successfulPrimaryShards = new HashMap<>();
         Map<String, Tuple<Version, org.apache.lucene.util.Version>> versions = new HashMap<>();
         for (ShardUpgradeResult result : shardUpgradeResults) {
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java
index 64e958372cdc8..db49921d43532 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java
@@ -20,7 +20,7 @@
 package org.elasticsearch.action.admin.indices.upgrade.post;
 
 import org.elasticsearch.Version;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -44,7 +44,8 @@ public class UpgradeResponse extends BroadcastResponse {
 
     }
 
-    UpgradeResponse(Map<String, Tuple<Version, String>> versions, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    UpgradeResponse(Map<String, Tuple<Version, String>> versions, int totalShards, int successfulShards, int failedShards,
+                    List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
         this.versions = versions;
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java
index c4369a30586d0..0513a37e4fe0e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java
@@ -22,7 +22,6 @@
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
@@ -115,7 +114,7 @@ protected ValidateQueryResponse newResponse(ValidateQueryRequest request, Atomic
         int successfulShards = 0;
         int failedShards = 0;
         boolean valid = true;
-        List<ShardOperationFailedException> shardFailures = null;
+        List<DefaultShardOperationFailedException> shardFailures = null;
         List<QueryExplanation> queryExplanations = null;
         for (int i = 0; i < shardsResponses.length(); i++) {
             Object shardResponse = shardsResponses.get(i);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java
index 2d3c0a0a90eff..eff37ff4b0cb4 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.action.admin.indices.validate.query;
 
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -46,7 +46,8 @@ public class ValidateQueryResponse extends BroadcastResponse {
 
     }
 
-    ValidateQueryResponse(boolean valid, List<QueryExplanation> queryExplanations, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    ValidateQueryResponse(boolean valid, List<QueryExplanation> queryExplanations, int totalShards, int successfulShards, int failedShards,
+                          List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
         this.valid = valid;
         this.queryExplanations = queryExplanations;
diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
index e608e8e0ab7d6..2baf5a1d50ec1 100644
--- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java
@@ -20,11 +20,10 @@
 package org.elasticsearch.action.support.broadcast;
 
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.index.shard.ShardNotFoundException;
 
 import java.io.IOException;
 import java.util.List;
@@ -35,30 +34,24 @@
  * Base class for all broadcast operation based responses.
  */
 public class BroadcastResponse extends ActionResponse {
-    private static final ShardOperationFailedException[] EMPTY = new ShardOperationFailedException[0];
+    private static final DefaultShardOperationFailedException[] EMPTY = new DefaultShardOperationFailedException[0];
     private int totalShards;
     private int successfulShards;
     private int failedShards;
-    private ShardOperationFailedException[] shardFailures = EMPTY;
+    private DefaultShardOperationFailedException[] shardFailures = EMPTY;
 
     public BroadcastResponse() {
     }
 
     public BroadcastResponse(int totalShards, int successfulShards, int failedShards,
-                             List<ShardOperationFailedException> shardFailures) {
-        assertNoShardNotAvailableFailures(shardFailures);
+                             List<DefaultShardOperationFailedException> shardFailures) {
         this.totalShards = totalShards;
         this.successfulShards = successfulShards;
         this.failedShards = failedShards;
-        this.shardFailures = shardFailures == null ? EMPTY :
-            shardFailures.toArray(new ShardOperationFailedException[shardFailures.size()]);
-    }
-
-    private void assertNoShardNotAvailableFailures(List<ShardOperationFailedException> shardFailures) {
-        if (shardFailures != null) {
-            for (Object e : shardFailures) {
-                assert (e instanceof ShardNotFoundException) == false : "expected no ShardNotFoundException failures, but got " + e;
-            }
+        if (shardFailures == null) {
+            this.shardFailures = EMPTY;
+        } else {
+            this.shardFailures = shardFailures.toArray(new DefaultShardOperationFailedException[shardFailures.size()]);
         }
     }
 
@@ -97,7 +90,7 @@ public RestStatus getStatus() {
 
     /**
      * The list of shard failures exception.
      */
-    public ShardOperationFailedException[] getShardFailures() {
+    public DefaultShardOperationFailedException[] getShardFailures() {
         return shardFailures;
     }
 
@@ -109,7 +102,7 @@ public void readFrom(StreamInput in) throws IOException {
         failedShards = in.readVInt();
         int size = in.readVInt();
         if (size > 0) {
-            shardFailures = new ShardOperationFailedException[size];
+            shardFailures = new DefaultShardOperationFailedException[size];
             for (int i = 0; i < size; i++) {
                 shardFailures[i] = readShardOperationFailed(in);
             }
@@ -123,7 +116,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeVInt(successfulShards);
         out.writeVInt(failedShards);
         out.writeVInt(shardFailures.length);
-        for (ShardOperationFailedException exp : shardFailures) {
+        for (DefaultShardOperationFailedException exp : shardFailures) {
             exp.writeTo(out);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
index 3ef967472a597..b6eaa5163c865 100644
--- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
@@ -24,7 +24,6 @@
 import org.elasticsearch.action.FailedNodeException;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.NoShardAvailableActionException;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.HandledTransportAction;
@@ -131,7 +130,7 @@ private Response newResponse(
         int totalShards = 0;
         int successfulShards = 0;
         List<ShardOperationResult> broadcastByNodeResponses = new ArrayList<>();
-        List<ShardOperationFailedException> exceptions = new ArrayList<>();
+        List<DefaultShardOperationFailedException> exceptions = new ArrayList<>();
         for (int i = 0; i < responses.length(); i++) {
             if (responses.get(i) instanceof FailedNodeException) {
                 FailedNodeException exception = (FailedNodeException) responses.get(i);
@@ -176,7 +175,7 @@ private Response newResponse(
      * @param clusterState the cluster state
      * @return the response
      */
-    protected abstract Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List<ShardOperationResult> results, List<ShardOperationFailedException> shardFailures, ClusterState clusterState);
+    protected abstract Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List<ShardOperationResult> results, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState);
 
     /**
      * Deserialize a request from an input stream
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java
index 8193cf77cebef..4cad1c211700d 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java
@@ -22,7 +22,6 @@
 import com.carrotsearch.hppc.cursors.IntObjectCursor;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.HandledTransportAction;
@@ -76,7 +75,7 @@ protected final void doExecute(final Request request, final ActionListener<Response> listener) {
         final ClusterState clusterState = clusterService.state();
         List<ShardId> shards = shards(request, clusterState);
-        final CopyOnWriteArrayList<ShardResponse> shardsResponses = new CopyOnWriteArrayList();
+        final CopyOnWriteArrayList<ShardResponse> shardsResponses = new CopyOnWriteArrayList<>();
         if (shards.size() == 0) {
             finishAndNotifyListener(listener, shardsResponses);
         }
@@ -148,7 +147,7 @@ private void finishAndNotifyListener(ActionListener listener, CopyOnWriteArrayLi
         int successfulShards = 0;
         int failedShards = 0;
         int totalNumCopies = 0;
-        List<ShardOperationFailedException> shardFailures = null;
+        List<DefaultShardOperationFailedException> shardFailures = null;
         for (int i = 0; i < shardsResponses.size(); i++) {
             ReplicationResponse shardResponse = shardsResponses.get(i);
             if (shardResponse == null) {
@@ -168,5 +167,6 @@ private void finishAndNotifyListener(ActionListener listener, CopyOnWriteArrayLi
         listener.onResponse(newResponse(successfulShards, failedShards, totalNumCopies, shardFailures));
     }
 
-    protected abstract BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List<ShardOperationFailedException> shardFailures);
+    protected abstract BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies,
+                                                     List<DefaultShardOperationFailedException> shardFailures);
 }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java
index be84a8880641f..26785d2c8706c 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java
@@ -20,8 +20,8 @@
 package org.elasticsearch.action.admin.indices.stats;
 
 import org.elasticsearch.action.ActionFuture;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
@@ -31,7 +31,6 @@
 import org.elasticsearch.index.engine.SegmentsStats;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.test.ESSingleNodeTestCase;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 
 import java.util.List;
 import java.util.concurrent.TimeUnit;
@@ -158,7 +157,7 @@ public void testRefreshListeners() throws Exception {
      * Gives access to package private IndicesStatsResponse constructor for test purpose.
     **/
     public static IndicesStatsResponse newIndicesStatsResponse(ShardStats[] shards, int totalShards, int successfulShards,
-                                                               int failedShards, List<ShardOperationFailedException> shardFailures) {
+                                                               int failedShards, List<DefaultShardOperationFailedException> shardFailures) {
         return new IndicesStatsResponse(shards, totalShards, successfulShards, failedShards, shardFailures);
     }
diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java
index 470da323043ae..6a7d443553888 100644
--- a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java
@@ -22,8 +22,8 @@
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.IndicesRequest;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.action.support.broadcast.BroadcastRequest;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
@@ -109,7 +109,7 @@ public static class Response extends BroadcastResponse {
         public Response() {
         }
 
-        public Response(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+        public Response(int totalShards, int successfulShards, int failedShards, List<DefaultShardOperationFailedException> shardFailures) {
             super(totalShards, successfulShards, failedShards, shardFailures);
         }
     }
@@ -127,7 +127,7 @@ protected EmptyResult readShardResult(StreamInput in) throws IOException {
         }
 
         @Override
-        protected Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List<EmptyResult> emptyResults, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+        protected Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List<EmptyResult> emptyResults, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
             return new Response(totalShards, successfulShards, failedShards, shardFailures);
         }
diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java
index 3aeab0fa5fb5b..15d7f6d7c5992 100644
--- a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java
+++ b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java
@@ -21,12 +21,12 @@
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.NoShardAvailableActionException;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.UnavailableShardsException;
 import org.elasticsearch.action.admin.indices.flush.FlushRequest;
 import org.elasticsearch.action.admin.indices.flush.FlushResponse;
 import org.elasticsearch.action.admin.indices.flush.TransportFlushAction;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastRequest;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.cluster.ClusterState;
@@ -221,7 +221,7 @@ protected BasicReplicationRequest newShardRequest(DummyBroadcastRequest request,
 
         @Override
         protected BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies,
-                                                List<ShardOperationFailedException> shardFailures) {
+                                                List<DefaultShardOperationFailedException> shardFailures) {
             return new BroadcastResponse(totalNumCopies, successfulShards, failedShards, shardFailures);
         }
diff --git a/server/src/test/java/org/elasticsearch/get/GetActionIT.java b/server/src/test/java/org/elasticsearch/get/GetActionIT.java
index d468d58212d16..911e26528c9ad 100644
--- a/server/src/test/java/org/elasticsearch/get/GetActionIT.java
+++ b/server/src/test/java/org/elasticsearch/get/GetActionIT.java
@@ -21,7 +21,6 @@
 
 import org.elasticsearch.Version;
 import org.elasticsearch.action.DocWriteResponse;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.admin.indices.flush.FlushResponse;
 import org.elasticsearch.action.delete.DeleteResponse;
@@ -30,6 +29,7 @@
 import org.elasticsearch.action.get.MultiGetRequest;
 import org.elasticsearch.action.get.MultiGetRequestBuilder;
 import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -748,7 +748,7 @@ public void testGetFieldsComplexField() throws Exception {
         if (flushResponse.getSuccessfulShards() == 0) {
             StringBuilder sb = new StringBuilder("failed to flush at least one shard. total shards [")
                 .append(flushResponse.getTotalShards()).append("], failed shards: [").append(flushResponse.getFailedShards()).append("]");
-            for (ShardOperationFailedException failure: flushResponse.getShardFailures()) {
+            for (DefaultShardOperationFailedException failure: flushResponse.getShardFailures()) {
                 sb.append("\nShard failure: ").append(failure);
             }
             fail(sb.toString());
diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java
index dd4635d30f24d..f25a9234698b2 100644
--- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java
@@ -22,7 +22,6 @@
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.DocWriteResponse;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.admin.indices.stats.CommonStats;
@@ -37,6 +36,7 @@
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
@@ -1113,7 +1113,8 @@ public void testConcurrentIndexingAndStatsRequests() throws BrokenBarrierExcepti
         final CountDownLatch latch = new CountDownLatch(1);
         final AtomicBoolean failed = new AtomicBoolean();
-        final AtomicReference<List<ShardOperationFailedException>> shardFailures = new AtomicReference<>(new CopyOnWriteArrayList<>());
+        final AtomicReference<List<DefaultShardOperationFailedException>> shardFailures =
+            new AtomicReference<>(new CopyOnWriteArrayList<>());
         final AtomicReference<List<Exception>> executionFailures = new AtomicReference<>(new CopyOnWriteArrayList<>());
 
         // increasing the number of shards increases the number of chances any one stats request will hit a race
@@ -1191,7 +1192,7 @@ public void testConcurrentIndexingAndStatsRequests() throws BrokenBarrierExcepti
             thread.join();
         }
 
-        assertThat(shardFailures.get(), emptyCollectionOf(ShardOperationFailedException.class));
+        assertThat(shardFailures.get(), emptyCollectionOf(DefaultShardOperationFailedException.class));
         assertThat(executionFailures.get(), emptyCollectionOf(Exception.class));
     }
diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java
index 148af7f7d875f..ffebd804c609c 100644
--- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java
+++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java
@@ -19,8 +19,8 @@
 
 package org.elasticsearch.rest.action.cat;
 
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RecoverySource;
 import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
@@ -110,7 +110,7 @@ public void testRestRecoveryAction() {
         Randomness.shuffle(shuffle);
         shardRecoveryStates.put("index", shuffle);
 
-        final List<ShardOperationFailedException> shardFailures = new ArrayList<>();
+        final List<DefaultShardOperationFailedException> shardFailures = new ArrayList<>();
         final RecoveryResponse response = new RecoveryResponse(
                 totalShards,
                 successfulShards,
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index e633f5adb70af..0097621e06292 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -32,7 +32,6 @@
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.DocWriteResponse;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
@@ -62,6 +61,7 @@
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.search.ClearScrollResponse;
 import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.AdminClient;
 import org.elasticsearch.client.Client;
@@ -1275,7 +1275,7 @@ protected final void flushAndRefresh(String... indices) {
     protected final FlushResponse flush(String... indices) {
         waitForRelocation();
         FlushResponse actionGet = client().admin().indices().prepareFlush(indices).execute().actionGet();
-        for (ShardOperationFailedException failure : actionGet.getShardFailures()) {
+        for (DefaultShardOperationFailedException failure : actionGet.getShardFailures()) {
            assertThat("unexpected flush failure " + failure.reason(), failure.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
         }
         return actionGet;
diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
index 4eaaa96df7649..ff31240169ef7 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
@@ -27,7 +27,6 @@
 import org.elasticsearch.action.ActionFuture;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse;
@@ -41,6 +40,7 @@
 import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
@@ -163,7 +163,7 @@ public static void assertBlocked(ActionRequestBuilder builder) {
      * */
    public static void assertBlocked(BroadcastResponse replicatedBroadcastResponse) {
         assertThat("all shard requests should have failed", replicatedBroadcastResponse.getFailedShards(), Matchers.equalTo(replicatedBroadcastResponse.getTotalShards()));
-        for (ShardOperationFailedException exception : replicatedBroadcastResponse.getShardFailures()) {
+        for (DefaultShardOperationFailedException exception : replicatedBroadcastResponse.getShardFailures()) {
             ClusterBlockException clusterBlockException = (ClusterBlockException) ExceptionsHelper.unwrap(exception.getCause(), ClusterBlockException.class);
             assertNotNull("expected the cause of failure to be a ClusterBlockException but got " + exception.getCause().getMessage(), clusterBlockException);
             assertThat(clusterBlockException.blocks().size(), greaterThan(0));
@@ -203,7 +203,7 @@ public static String formatShardStatus(BroadcastResponse response) {
         msg.append(" Total shards: ").append(response.getTotalShards())
            .append(" Successful shards: ").append(response.getSuccessfulShards())
            .append(" & ").append(response.getFailedShards()).append(" shard failures:");
-        for (ShardOperationFailedException failure : response.getShardFailures()) {
+        for (DefaultShardOperationFailedException failure : response.getShardFailures()) {
             msg.append("\n ").append(failure);
         }
         return msg.toString();

From ba5b5832039b591cfb00b8587966bd1f4ac28c40 Mon Sep 17 00:00:00 2001
From: Lee Hinman
Date: Mon, 22 Jan 2018 11:55:54 -0700
Subject: [PATCH 82/94] Notify affixMap settings when any under the registered prefix matches (#28317)

* Notify affixMap settings when any under the registered prefix matches

Previously, if an affixMap setting was registered and a completely different
setting was then applied, the affixMap update consumer was notified with an
empty map. This unset previously-set values in the local state of any
consumer that assumed it would only be called when the affixMap setting
itself had changed.

This commit changes the behavior: if a prefix `foo.` is registered, the
update consumer is notified of changes to settings starting with `foo.`,
and only of such changes.

Resolves #28316

* Add unit test

* Address feedback
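In short, the behaviour being fixed (a minimal sketch condensed from the unit
test added below; imports and test scaffolding are omitted and the setting
names are only illustrative):

    // Two dynamic affix settings are registered, but a map update consumer
    // is attached only to the "eggplant." prefix.
    Setting.AffixSetting<Integer> prefixSetting = Setting.prefixKeySetting("eggplant.",
        (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope));
    Setting.AffixSetting<Integer> otherSetting = Setting.prefixKeySetting("other.",
        (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope));
    AbstractScopedSettings service =
        new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(prefixSetting, otherSetting)));

    // A consumer like the one described above: it replaces its local state
    // with whatever map it is handed.
    Map<String, Integer> localState = new HashMap<>();
    service.addAffixMapUpdateConsumer(prefixSetting, (map) -> {
        localState.clear();
        localState.putAll(map);
    }, (s, k) -> {}, false);

    service.applySettings(Settings.builder().put("eggplant._name", 2).build());
    // localState is now {_name=2}

    // Previously, this update to an unrelated setting re-notified the consumer
    // with an empty map, wiping localState; with this change the consumer's
    // state survives, since nothing under "eggplant." changed.
    service.applySettings(Settings.builder()
        .put("eggplant._name", 2)
        .put("other.thing", 3)
        .build());
    // localState is still {_name=2}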
for [" + filterSetting.getKey() + ipKey + "]", e.getMessage()); } + + public void testTransientSettingsStillApplied() throws Exception { + List nodes = internalCluster().startNodes(6); + Set excludeNodes = new HashSet<>(nodes.subList(0, 3)); + Set includeNodes = new HashSet<>(nodes.subList(3, 6)); + logger.info("--> exclude: [{}], include: [{}]", + Strings.collectionToCommaDelimitedString(excludeNodes), + Strings.collectionToCommaDelimitedString(includeNodes)); + ensureStableCluster(6); + client().admin().indices().prepareCreate("test").get(); + ensureGreen("test"); + + Settings exclude = Settings.builder().put("cluster.routing.allocation.exclude._name", + Strings.collectionToCommaDelimitedString(excludeNodes)).build(); + + logger.info("--> updating settings"); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(exclude).get(); + + logger.info("--> waiting for relocation"); + waitForRelocation(ClusterHealthStatus.GREEN); + + ClusterState state = client().admin().cluster().prepareState().get().getState(); + + for (ShardRouting shard : state.getRoutingTable().shardsWithState(ShardRoutingState.STARTED)) { + String node = state.getRoutingNodes().node(shard.currentNodeId()).node().getName(); + logger.info("--> shard on {} - {}", node, shard); + assertTrue("shard on " + node + " but should only be on the include node list: " + + Strings.collectionToCommaDelimitedString(includeNodes), + includeNodes.contains(node)); + } + + Settings other = Settings.builder().put("cluster.info.update.interval", "45s").build(); + + logger.info("--> updating settings with random persistent setting"); + client().admin().cluster().prepareUpdateSettings() + .setPersistentSettings(other).setTransientSettings(exclude).get(); + + logger.info("--> waiting for relocation"); + waitForRelocation(ClusterHealthStatus.GREEN); + + state = client().admin().cluster().prepareState().get().getState(); + + // The transient settings still exist in the state + assertThat(state.metaData().transientSettings(), equalTo(exclude)); + + for (ShardRouting shard : state.getRoutingTable().shardsWithState(ShardRoutingState.STARTED)) { + String node = state.getRoutingNodes().node(shard.currentNodeId()).node().getName(); + logger.info("--> shard on {} - {}", node, shard); + assertTrue("shard on " + node + " but should only be on the include node list: " + + Strings.collectionToCommaDelimitedString(includeNodes), + includeNodes.contains(node)); + } + } } diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 29c7a2b161403..0f4d0cf66346a 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -261,6 +261,21 @@ public void testAddConsumerAffixMap() { assertEquals(2, listResults.size()); assertEquals(2, intResults.size()); + service.applySettings(Settings.builder() + .put("foo.test.bar", 2) + .put("foo.test_1.bar", 7) + .putList("foo.test_list.list", "16", "17") + .putList("foo.test_list_1.list", "18", "19", "20") + .build()); + + assertEquals(2, intResults.get("test").intValue()); + assertEquals(7, intResults.get("test_1").intValue()); + assertEquals(Arrays.asList(16, 17), listResults.get("test_list")); + assertEquals(Arrays.asList(18, 19, 20), listResults.get("test_list_1")); + assertEquals(2, listResults.size()); + assertEquals(2, intResults.size()); + + listResults.clear(); 
intResults.clear(); @@ -286,6 +301,35 @@ public void testAddConsumerAffixMap() { } + public void testAffixMapConsumerNotCalledWithNull() { + Setting.AffixSetting prefixSetting = Setting.prefixKeySetting("eggplant.", + (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope)); + Setting.AffixSetting otherSetting = Setting.prefixKeySetting("other.", + (k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope)); + AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY,new HashSet<>(Arrays.asList(prefixSetting, otherSetting))); + Map affixResults = new HashMap<>(); + + Consumer> consumer = (map) -> { + logger.info("--> consuming settings {}", map); + affixResults.clear(); + affixResults.putAll(map); + }; + service.addAffixMapUpdateConsumer(prefixSetting, consumer, (s, k) -> {}, randomBoolean()); + assertEquals(0, affixResults.size()); + service.applySettings(Settings.builder() + .put("eggplant._name", 2) + .build()); + assertThat(affixResults.size(), equalTo(1)); + assertThat(affixResults.get("_name"), equalTo(2)); + + service.applySettings(Settings.builder() + .put("eggplant._name", 2) + .put("other.thing", 3) + .build()); + + assertThat(affixResults.get("_name"), equalTo(2)); + } + public void testApply() { Setting testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope); From ef5c0418198215c1f932990c3f3db1c05087dc9a Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 22 Jan 2018 13:01:13 -0800 Subject: [PATCH 83/94] Painless: Replace Painless Type with Java Class during Casts (#27847) This is the first step in a series to replace Painless Type with Java Class for any casting done during compilation. There should be no behavioural change. 
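Concretely, the shape of the change, shown as a before/after sketch distilled
from the AnalyzerCaster diff below (one representative branch, not the
complete method; `definition` is the surrounding Definition instance):

    // Before: cast decisions compared Painless Type wrappers held by Definition.
    if (actual.dynamic) {
        if (expected.clazz == byte.class) {
            return Cast.unboxTo(definition.DefType, definition.ByteType, explicit, definition.byteType);
        }
        // ... remaining target types handled the same way
    }

    // After: the same decisions are keyed directly off java.lang.Class values,
    // with the dynamic type represented by the marker class Definition.def.
    if (actual == def.class) {
        if (expected == byte.class) {
            return Cast.unboxTo(def.class, Byte.class, explicit, byte.class);
        }
        // ... remaining target types handled the same way
    }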
---
 .../painless/AnalyzerCaster.java              | 835 +++++++++---------
 .../elasticsearch/painless/Definition.java    | 119 ++-
 .../elasticsearch/painless/MethodWriter.java  |  91 +-
 .../elasticsearch/painless/node/ECast.java    |   3 +-
 .../painless/AnalyzerCasterTests.java         |   8 +-
 .../painless/node/NodeToStringTests.java      |   4 +-
 6 files changed, 597 insertions(+), 463 deletions(-)

diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java
index 89f358d17e0c0..7bae2c7fcad69 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java
@@ -21,6 +21,7 @@
 
 import org.elasticsearch.painless.Definition.Cast;
 import org.elasticsearch.painless.Definition.Type;
+import org.elasticsearch.painless.Definition.def;
 
 import java.util.Objects;
 
@@ -36,447 +37,459 @@ public AnalyzerCaster(Definition definition) {
         this.definition = definition;
     }
 
-    public Cast getLegalCast(Location location, Type actual, Type expected, boolean explicit, boolean internal) {
-        Objects.requireNonNull(actual);
-        Objects.requireNonNull(expected);
+    public Cast getLegalCast(Location location, Type actualType, Type expectedType, boolean explicit, boolean internal) {
+        Objects.requireNonNull(actualType);
+        Objects.requireNonNull(expectedType);
 
-        if (actual.equals(expected)) {
+        Class<?> actual = actualType.clazz;
+        Class<?> expected = expectedType.clazz;
+
+        if (actualType.dynamic) {
+            actual = Definition.ObjectClassTodefClass(actual);
+        }
+
+        if (expectedType.dynamic) {
+            expected = Definition.ObjectClassTodefClass(expected);
+        }
+
+        if (actual == expected) {
             return null;
         }
 
-        if (actual.dynamic) {
-            if (expected.clazz == boolean.class) {
-                return Cast.unboxTo(definition.DefType, definition.BooleanType, explicit, definition.booleanType);
-            } else if (expected.clazz == byte.class) {
-                return Cast.unboxTo(definition.DefType, definition.ByteType, explicit, definition.byteType);
-            } else if (expected.clazz == short.class) {
-                return Cast.unboxTo(definition.DefType, definition.ShortType, explicit, definition.shortType);
-            } else if (expected.clazz == char.class) {
-                return Cast.unboxTo(definition.DefType, definition.CharacterType, explicit, definition.charType);
-            } else if (expected.clazz == int.class) {
-                return Cast.unboxTo(definition.DefType, definition.IntegerType, explicit, definition.intType);
-            } else if (expected.clazz == long.class) {
-                return Cast.unboxTo(definition.DefType, definition.LongType, explicit, definition.longType);
-            } else if (expected.clazz == float.class) {
-                return Cast.unboxTo(definition.DefType, definition.FloatType, explicit, definition.floatType);
-            } else if (expected.clazz == double.class) {
-                return Cast.unboxTo(definition.DefType, definition.DoubleType, explicit, definition.doubleType);
+        if (actual == def.class) {
+            if (expected == boolean.class) {
+                return Cast.unboxTo(def.class, Boolean.class, explicit, boolean.class);
+            } else if (expected == byte.class) {
+                return Cast.unboxTo(def.class, Byte.class, explicit, byte.class);
+            } else if (expected == short.class) {
+                return Cast.unboxTo(def.class, Short.class, explicit, short.class);
+            } else if (expected == char.class) {
+                return Cast.unboxTo(def.class, Character.class, explicit, char.class);
+            } else if (expected == int.class) {
+                return Cast.unboxTo(def.class, Integer.class, explicit, int.class);
+            } else if (expected == long.class) {
+                return Cast.unboxTo(def.class, Long.class, explicit, long.class);
+            } else if (expected == float.class) {
+                return Cast.unboxTo(def.class, Float.class, explicit, float.class);
+            } else if (expected == double.class) {
+                return Cast.unboxTo(def.class, Double.class, explicit, double.class);
             }
-        } else if (actual.clazz == Object.class) {
-            if (expected.clazz == byte.class && explicit && internal) {
-                return Cast.unboxTo(definition.ObjectType, definition.ByteType, true, definition.byteType);
-            } else if (expected.clazz == short.class && explicit && internal) {
-                return Cast.unboxTo(definition.ObjectType, definition.ShortType, true, definition.shortType);
-            } else if (expected.clazz == char.class && explicit && internal) {
-                return Cast.unboxTo(definition.ObjectType, definition.CharacterType, true, definition.charType);
-            } else if (expected.clazz == int.class && explicit && internal) {
-                return Cast.unboxTo(definition.ObjectType, definition.IntegerType, true, definition.intType);
-            } else if (expected.clazz == long.class && explicit && internal) {
-                return Cast.unboxTo(definition.ObjectType, definition.LongType, true, definition.longType);
-            } else if (expected.clazz == float.class && explicit && internal) {
-                return Cast.unboxTo(definition.ObjectType, definition.FloatType, true, definition.floatType);
-            } else if (expected.clazz == double.class && explicit && internal) {
-                return Cast.unboxTo(definition.ObjectType, definition.DoubleType, true, definition.doubleType);
+        } else if (actual == Object.class) {
+            if (expected == byte.class && explicit && internal) {
+                return Cast.unboxTo(Object.class, Byte.class, true, byte.class);
+            } else if (expected == short.class && explicit && internal) {
+                return Cast.unboxTo(Object.class, Short.class, true, short.class);
+            } else if (expected == char.class && explicit && internal) {
+                return Cast.unboxTo(Object.class, Character.class, true, char.class);
+            } else if (expected == int.class && explicit && internal) {
+                return Cast.unboxTo(Object.class, Integer.class, true, int.class);
+            } else if (expected == long.class && explicit && internal) {
+                return Cast.unboxTo(Object.class, Long.class, true, long.class);
+            } else if (expected == float.class && explicit && internal) {
+                return Cast.unboxTo(Object.class, Float.class, true, float.class);
+            } else if (expected == double.class && explicit && internal) {
+                return Cast.unboxTo(Object.class, Double.class, true, double.class);
             }
-        } else if (actual.clazz == Number.class) {
-            if (expected.clazz == byte.class && explicit && internal) {
-                return Cast.unboxTo(definition.NumberType, definition.ByteType, true, definition.byteType);
-            } else if (expected.clazz == short.class && explicit && internal) {
-                return Cast.unboxTo(definition.NumberType, definition.ShortType, true, definition.shortType);
-            } else if (expected.clazz == char.class && explicit && internal) {
-                return Cast.unboxTo(definition.NumberType, definition.CharacterType, true, definition.charType);
-            } else if (expected.clazz == int.class && explicit && internal) {
-                return Cast.unboxTo(definition.NumberType, definition.IntegerType, true, definition.intType);
-            } else if (expected.clazz == long.class && explicit && internal) {
-                return Cast.unboxTo(definition.NumberType, definition.LongType, true, definition.longType);
-            } else if (expected.clazz == float.class && explicit && internal) {
-                return Cast.unboxTo(definition.NumberType, definition.FloatType, true, definition.floatType);
-            } else if (expected.clazz == double.class && explicit && internal) {
-                return Cast.unboxTo(definition.NumberType, definition.DoubleType, true, definition.doubleType);
+        } else if (actual == Number.class) {
+            if (expected == byte.class && explicit && internal) {
+                return Cast.unboxTo(Number.class, Byte.class, true, byte.class);
+            } else if (expected == short.class && explicit && internal) {
+                return Cast.unboxTo(Number.class, Short.class, true, short.class);
+            } else if (expected == char.class && explicit && internal) {
+                return Cast.unboxTo(Number.class, Character.class, true, char.class);
+            } else if (expected == int.class && explicit && internal) {
+                return Cast.unboxTo(Number.class, Integer.class, true, int.class);
+            } else if (expected == long.class && explicit && internal) {
+                return Cast.unboxTo(Number.class, Long.class, true, long.class);
+            } else if (expected == float.class && explicit && internal) {
+                return Cast.unboxTo(Number.class, Float.class, true, float.class);
+            } else if (expected == double.class && explicit && internal) {
+                return Cast.unboxTo(Number.class, Double.class, true, double.class);
             }
-        } else if (actual.clazz == String.class) {
-            if (expected.clazz == char.class && explicit) {
-                return Cast.standard(definition.StringType, definition.charType, true);
+        } else if (actual == String.class) {
+            if (expected == char.class && explicit) {
+                return Cast.standard(String.class, char.class, true);
             }
-        } else if (actual.clazz == boolean.class) {
-            if (expected.dynamic) {
-                return Cast.boxFrom(definition.BooleanType, definition.DefType, explicit, definition.booleanType);
-            } else if (expected.clazz == Object.class && internal) {
-                return Cast.boxFrom(definition.BooleanType, definition.ObjectType, explicit, definition.booleanType);
-            } else if (expected.clazz == Boolean.class && internal) {
-                return Cast.boxTo(definition.booleanType, definition.booleanType, explicit, definition.booleanType);
+        } else if (actual == boolean.class) {
+            if (expected == def.class) {
+                return Cast.boxFrom(Boolean.class, def.class, explicit, boolean.class);
+            } else if (expected == Object.class && internal) {
+                return Cast.boxFrom(Boolean.class, Object.class, explicit, boolean.class);
+            } else if (expected == Boolean.class && internal) {
+                return Cast.boxTo(boolean.class, boolean.class, explicit, boolean.class);
             }
-        } else if (actual.clazz == byte.class) {
-            if (expected.dynamic) {
-                return Cast.boxFrom(definition.ByteType, definition.DefType, explicit, definition.byteType);
-            } else if (expected.clazz == Object.class && internal) {
-                return Cast.boxFrom(definition.ByteType, definition.ObjectType, explicit, definition.byteType);
-            } else if (expected.clazz == Number.class && internal) {
-                return Cast.boxFrom(definition.ByteType, definition.NumberType, explicit, definition.byteType);
-            } else if (expected.clazz == short.class) {
-                return Cast.standard(definition.byteType, definition.shortType, explicit);
-            } else if (expected.clazz == char.class && explicit) {
-                return Cast.standard(definition.byteType, definition.charType, true);
-            } else if (expected.clazz == int.class) {
-                return Cast.standard(definition.byteType, definition.intType, explicit);
-            } else if (expected.clazz == long.class) {
-                return Cast.standard(definition.byteType, definition.longType, explicit);
-            } else if (expected.clazz == float.class) {
-                return Cast.standard(definition.byteType, definition.floatType, explicit);
-            } else if (expected.clazz == double.class) {
-                return Cast.standard(definition.byteType, definition.doubleType, explicit);
-            } else if (expected.clazz == Byte.class && internal) {
-                return Cast.boxTo(definition.byteType, definition.byteType, explicit, definition.byteType);
-            } else if (expected.clazz == Short.class && internal) {
-                return Cast.boxTo(definition.byteType, definition.shortType, explicit, definition.shortType);
-            } else if (expected.clazz == Character.class && explicit && internal) {
-                return Cast.boxTo(definition.byteType, definition.charType, true, definition.charType);
-            } else if (expected.clazz == Integer.class && internal) {
-                return Cast.boxTo(definition.byteType, definition.intType, explicit, definition.intType);
-            } else if (expected.clazz == Long.class && internal) {
-                return Cast.boxTo(definition.byteType, definition.longType, explicit, definition.longType);
-            } else if (expected.clazz == Float.class && internal) {
-                return Cast.boxTo(definition.byteType, definition.floatType, explicit, definition.floatType);
-            } else if (expected.clazz == Double.class && internal) {
-                return Cast.boxTo(definition.byteType, definition.doubleType, explicit, definition.doubleType);
+        } else if (actual == byte.class) {
+            if (expected == def.class) {
+                return Cast.boxFrom(Byte.class, def.class, explicit, byte.class);
+            } else if (expected == Object.class && internal) {
+                return Cast.boxFrom(Byte.class, Object.class, explicit, byte.class);
+            } else if (expected == Number.class && internal) {
+                return Cast.boxFrom(Byte.class, Number.class, explicit, byte.class);
+            } else if (expected == short.class) {
+                return Cast.standard(byte.class, short.class, explicit);
+            } else if (expected == char.class && explicit) {
+                return Cast.standard(byte.class, char.class, true);
+            } else if (expected == int.class) {
+                return Cast.standard(byte.class, int.class, explicit);
+            } else if (expected == long.class) {
+                return Cast.standard(byte.class, long.class, explicit);
+            } else if (expected == float.class) {
+                return Cast.standard(byte.class, float.class, explicit);
+            } else if (expected == double.class) {
+                return Cast.standard(byte.class, double.class, explicit);
+            } else if (expected == Byte.class && internal) {
+                return Cast.boxTo(byte.class, byte.class, explicit, byte.class);
+            } else if (expected == Short.class && internal) {
+                return Cast.boxTo(byte.class, short.class, explicit, short.class);
+            } else if (expected == Character.class && explicit && internal) {
+                return Cast.boxTo(byte.class, char.class, true, char.class);
+            } else if (expected == Integer.class && internal) {
+                return Cast.boxTo(byte.class, int.class, explicit, int.class);
+            } else if (expected == Long.class && internal) {
+                return Cast.boxTo(byte.class, long.class, explicit, long.class);
+            } else if (expected == Float.class && internal) {
+                return Cast.boxTo(byte.class, float.class, explicit, float.class);
+            } else if (expected == Double.class && internal) {
+                return Cast.boxTo(byte.class, double.class, explicit, double.class);
             }
-        } else if (actual.clazz == short.class) {
-            if (expected.dynamic) {
-                return Cast.boxFrom(definition.ShortType, definition.DefType, explicit, definition.shortType);
-            } else if (expected.clazz == Object.class && internal) {
-                return Cast.boxFrom(definition.ShortType, definition.ObjectType, explicit, definition.shortType);
-            } else if (expected.clazz == Number.class && internal) {
-                return Cast.boxFrom(definition.ShortType, definition.NumberType, explicit, definition.shortType);
-            } else if (expected.clazz == byte.class && explicit) {
-                return Cast.standard(definition.shortType, definition.byteType, true);
-            } else if (expected.clazz == char.class && explicit) {
-                return Cast.standard(definition.shortType, definition.charType, true);
-            } else if (expected.clazz == int.class) {
-                return Cast.standard(definition.shortType, definition.intType, explicit);
-            } else if (expected.clazz == long.class) {
-                return Cast.standard(definition.shortType, definition.longType, explicit);
-            } else if (expected.clazz == float.class) {
-                return Cast.standard(definition.shortType, definition.floatType, explicit);
-            } else if (expected.clazz == double.class) {
-                return Cast.standard(definition.shortType, definition.doubleType, explicit);
-            } else if (expected.clazz == Byte.class && explicit && internal) {
-                return Cast.boxTo(definition.shortType, definition.byteType, true, definition.byteType);
-            } else if (expected.clazz == Short.class && internal) {
-                return Cast.boxTo(definition.shortType, definition.shortType, explicit, definition.shortType);
-            } else if (expected.clazz == Character.class && explicit && internal) {
-                return Cast.boxTo(definition.shortType, definition.charType, true, definition.charType);
-            } else if (expected.clazz == Integer.class && internal) {
-                return Cast.boxTo(definition.shortType, definition.intType, explicit, definition.intType);
-            } else if (expected.clazz == Long.class && internal) {
-                return Cast.boxTo(definition.shortType, definition.longType, explicit, definition.longType);
-            } else if (expected.clazz == Float.class && internal) {
-                return Cast.boxTo(definition.shortType, definition.floatType, explicit, definition.floatType);
-            } else if (expected.clazz == Double.class && internal) {
-                return Cast.boxTo(definition.shortType, definition.doubleType, explicit, definition.doubleType);
+        } else if (actual == short.class) {
+            if (expected == def.class) {
+                return Cast.boxFrom(Short.class, def.class, explicit, short.class);
+            } else if (expected == Object.class && internal) {
+                return Cast.boxFrom(Short.class, Object.class, explicit, short.class);
+            } else if (expected == Number.class && internal) {
+                return Cast.boxFrom(Short.class, Number.class, explicit, short.class);
+            } else if (expected == byte.class && explicit) {
+                return Cast.standard(short.class, byte.class, true);
+            } else if (expected == char.class && explicit) {
+                return Cast.standard(short.class, char.class, true);
+            } else if (expected == int.class) {
+                return Cast.standard(short.class, int.class, explicit);
+            } else if (expected == long.class) {
+                return Cast.standard(short.class, long.class, explicit);
+            } else if (expected == float.class) {
+                return Cast.standard(short.class, float.class, explicit);
+            } else if (expected == double.class) {
+                return Cast.standard(short.class, double.class, explicit);
+            } else if (expected == Byte.class && explicit && internal) {
+                return Cast.boxTo(short.class, byte.class, true, byte.class);
+            } else if (expected == Short.class && internal) {
+                return Cast.boxTo(short.class, short.class, explicit, short.class);
+            } else if (expected == Character.class && explicit && internal) {
+                return Cast.boxTo(short.class, char.class, true, char.class);
+            } else if (expected == Integer.class && internal) {
+                return Cast.boxTo(short.class, int.class, explicit, int.class);
+            } else if (expected == Long.class && internal) {
+                return Cast.boxTo(short.class, long.class, explicit, long.class);
+            } else if (expected == Float.class && internal) {
+                return Cast.boxTo(short.class, float.class, explicit, float.class);
+            } else if (expected == Double.class && internal) {
+                return Cast.boxTo(short.class, double.class, explicit, double.class);
            }
-        } else if (actual.clazz == char.class) {
-            if (expected.dynamic) {
-                return Cast.boxFrom(definition.CharacterType, definition.DefType, explicit, definition.charType);
-            } else if (expected.clazz == Object.class && internal) {
-                return Cast.boxFrom(definition.CharacterType, definition.ObjectType, explicit, definition.charType);
-            } else if (expected.clazz == Number.class && internal) {
-                return Cast.boxFrom(definition.CharacterType, definition.NumberType, explicit, definition.charType);
-            } else if (expected.clazz == String.class) {
-                return Cast.standard(definition.charType, definition.StringType, explicit);
-            } else if (expected.clazz == byte.class && explicit) {
-                return Cast.standard(definition.charType, definition.byteType, true);
-            } else if (expected.clazz == short.class && explicit) {
-                return Cast.standard(definition.charType, definition.shortType, true);
-            } else if (expected.clazz == int.class) {
-                return Cast.standard(definition.charType, definition.intType, explicit);
-            } else if (expected.clazz == long.class) {
-                return Cast.standard(definition.charType, definition.longType, explicit);
-            } else if (expected.clazz == float.class) {
-                return Cast.standard(definition.charType, definition.floatType, explicit);
-            } else if (expected.clazz == double.class) {
-                return Cast.standard(definition.charType, definition.doubleType, explicit);
-            } else if (expected.clazz == Byte.class && explicit && internal) {
-                return Cast.boxTo(definition.charType, definition.byteType, true, definition.byteType);
-            } else if (expected.clazz == Short.class && internal) {
-                return Cast.boxTo(definition.charType, definition.shortType, explicit, definition.shortType);
-            } else if (expected.clazz == Character.class && internal) {
-                return Cast.boxTo(definition.charType, definition.charType, true, definition.charType);
-            } else if (expected.clazz == Integer.class && internal) {
-                return Cast.boxTo(definition.charType, definition.intType, explicit, definition.intType);
-            } else if (expected.clazz == Long.class && internal) {
-                return Cast.boxTo(definition.charType, definition.longType, explicit, definition.longType);
-            } else if (expected.clazz == Float.class && internal) {
-                return Cast.boxTo(definition.charType, definition.floatType, explicit, definition.floatType);
-            } else if (expected.clazz == Double.class && internal) {
-                return Cast.boxTo(definition.charType, definition.doubleType, explicit, definition.doubleType);
+        } else if (actual == char.class) {
+            if (expected == def.class) {
+                return Cast.boxFrom(Character.class, def.class, explicit, char.class);
+            } else if (expected == Object.class && internal) {
+                return Cast.boxFrom(Character.class, Object.class, explicit, char.class);
+            } else if (expected == Number.class && internal) {
+                return Cast.boxFrom(Character.class, Number.class, explicit, char.class);
+            } else if (expected == String.class) {
+                return Cast.standard(char.class, String.class, explicit);
+            } else if (expected == byte.class && explicit) {
+                return Cast.standard(char.class, byte.class, true);
+            } else if (expected == short.class && explicit) {
+                return Cast.standard(char.class, short.class, true);
+            } else if (expected == int.class) {
+                return Cast.standard(char.class, int.class, explicit);
+            } else if (expected == long.class) {
+                return Cast.standard(char.class, long.class, explicit);
+            } else if (expected == float.class) {
+                return Cast.standard(char.class, float.class, explicit);
+            } else if (expected == double.class) {
+                return Cast.standard(char.class, double.class, explicit);
+            } else if (expected == Byte.class && explicit && internal) {
+                return Cast.boxTo(char.class, byte.class, true, byte.class);
+            } else if (expected == Short.class && internal) {
+                return Cast.boxTo(char.class, short.class, explicit, short.class);
+            } else if (expected == Character.class && internal) {
+                return Cast.boxTo(char.class, char.class, true, char.class);
+            } else if (expected == Integer.class && internal) {
+                return Cast.boxTo(char.class, int.class, explicit, int.class);
+            } else if (expected == Long.class && internal) {
+                return Cast.boxTo(char.class, long.class, explicit, long.class);
+            } else if (expected == Float.class && internal) {
+                return Cast.boxTo(char.class, float.class, explicit, float.class);
+            } else if (expected == Double.class && internal) {
+                return Cast.boxTo(char.class, double.class, explicit, double.class);
             }
-        } else if (actual.clazz == int.class) {
-            if (expected.dynamic) {
-                return Cast.boxFrom(definition.IntegerType, definition.DefType, explicit, definition.intType);
-            } else if (expected.clazz == Object.class && internal) {
-                return Cast.boxFrom(definition.IntegerType, definition.ObjectType, explicit, definition.intType);
-            } else if (expected.clazz == Number.class && internal) {
-                return Cast.boxFrom(definition.IntegerType, definition.NumberType, explicit, definition.intType);
-            } else if (expected.clazz == byte.class && explicit) {
-                return Cast.standard(definition.intType, definition.byteType, true);
-            } else if (expected.clazz == char.class && explicit) {
-                return Cast.standard(definition.intType, definition.charType, true);
-            } else if (expected.clazz == short.class && explicit) {
-                return Cast.standard(definition.intType, definition.shortType, true);
-            } else if (expected.clazz == long.class) {
-                return Cast.standard(definition.intType, definition.longType, explicit);
-            } else if (expected.clazz == float.class) {
-                return Cast.standard(definition.intType, definition.floatType, explicit);
-            } else if (expected.clazz == double.class) {
-                return Cast.standard(definition.intType, definition.doubleType, explicit);
-            } else if (expected.clazz == Byte.class && explicit && internal) {
-                return Cast.boxTo(definition.intType, definition.byteType, true, definition.byteType);
-            } else if (expected.clazz == Short.class && explicit && internal) {
-                return Cast.boxTo(definition.intType, definition.shortType, true, definition.shortType);
-            } else if (expected.clazz == Character.class && explicit && internal) {
-                return Cast.boxTo(definition.intType, definition.charType, true, definition.charType);
-            } else if (expected.clazz == Integer.class && internal) {
-                return Cast.boxTo(definition.intType, definition.intType, explicit, definition.intType);
-            } else if (expected.clazz == Long.class && internal) {
-                return Cast.boxTo(definition.intType, definition.longType, explicit, definition.longType);
-            } else if (expected.clazz == Float.class && internal) {
-                return Cast.boxTo(definition.intType, definition.floatType, explicit, definition.floatType);
-            } else if (expected.clazz == Double.class && internal) {
-                return Cast.boxTo(definition.intType, definition.doubleType, explicit, definition.doubleType);
+        } else if (actual == int.class) {
+            if (expected == def.class) {
+                return Cast.boxFrom(Integer.class, def.class, explicit, int.class);
+            } else if (expected == Object.class && internal) {
+                return Cast.boxFrom(Integer.class, Object.class, explicit, int.class);
+            } else if (expected == Number.class && internal) {
+                return Cast.boxFrom(Integer.class, Number.class, explicit, int.class);
+            } else if
(expected == byte.class && explicit) { + return Cast.standard(int.class, byte.class, true); + } else if (expected == char.class && explicit) { + return Cast.standard(int.class, char.class, true); + } else if (expected == short.class && explicit) { + return Cast.standard(int.class, short.class, true); + } else if (expected == long.class) { + return Cast.standard(int.class, long.class, explicit); + } else if (expected == float.class) { + return Cast.standard(int.class, float.class, explicit); + } else if (expected == double.class) { + return Cast.standard(int.class, double.class, explicit); + } else if (expected == Byte.class && explicit && internal) { + return Cast.boxTo(int.class, byte.class, true, byte.class); + } else if (expected == Short.class && explicit && internal) { + return Cast.boxTo(int.class, short.class, true, short.class); + } else if (expected == Character.class && explicit && internal) { + return Cast.boxTo(int.class, char.class, true, char.class); + } else if (expected == Integer.class && internal) { + return Cast.boxTo(int.class, int.class, explicit, int.class); + } else if (expected == Long.class && internal) { + return Cast.boxTo(int.class, long.class, explicit, long.class); + } else if (expected == Float.class && internal) { + return Cast.boxTo(int.class, float.class, explicit, float.class); + } else if (expected == Double.class && internal) { + return Cast.boxTo(int.class, double.class, explicit, double.class); } - } else if (actual.clazz == long.class) { - if (expected.dynamic) { - return Cast.boxFrom(definition.LongType, definition.DefType, explicit, definition.longType); - } else if (expected.clazz == Object.class && internal) { - return Cast.boxFrom(definition.LongType, definition.ObjectType, explicit, definition.longType); - } else if (expected.clazz == Number.class && internal) { - return Cast.boxFrom(definition.LongType, definition.NumberType, explicit, definition.longType); - } else if (expected.clazz == byte.class && explicit) { - return Cast.standard(definition.longType, definition.byteType, true); - } else if (expected.clazz == char.class && explicit) { - return Cast.standard(definition.longType, definition.charType, true); - } else if (expected.clazz == short.class && explicit) { - return Cast.standard(definition.longType, definition.shortType, true); - } else if (expected.clazz == int.class && explicit) { - return Cast.standard(definition.longType, definition.intType, true); - } else if (expected.clazz == float.class) { - return Cast.standard(definition.longType, definition.floatType, explicit); - } else if (expected.clazz == double.class) { - return Cast.standard(definition.longType, definition.doubleType, explicit); - } else if (expected.clazz == Byte.class && explicit && internal) { - return Cast.boxTo(definition.longType, definition.byteType, true, definition.byteType); - } else if (expected.clazz == Short.class && explicit && internal) { - return Cast.boxTo(definition.longType, definition.shortType, true, definition.shortType); - } else if (expected.clazz == Character.class && explicit && internal) { - return Cast.boxTo(definition.longType, definition.charType, true, definition.charType); - } else if (expected.clazz == Integer.class && explicit && internal) { - return Cast.boxTo(definition.longType, definition.intType, true, definition.intType); - } else if (expected.clazz == Long.class && internal) { - return Cast.boxTo(definition.longType, definition.longType, explicit, definition.longType); - } else if (expected.clazz == Float.class && internal) { 
- return Cast.boxTo(definition.longType, definition.floatType, explicit, definition.floatType); - } else if (expected.clazz == Double.class && internal) { - return Cast.boxTo(definition.longType, definition.doubleType, explicit, definition.doubleType); + } else if (actual == long.class) { + if (expected == def.class) { + return Cast.boxFrom(Long.class, def.class, explicit, long.class); + } else if (expected == Object.class && internal) { + return Cast.boxFrom(Long.class, Object.class, explicit, long.class); + } else if (expected == Number.class && internal) { + return Cast.boxFrom(Long.class, Number.class, explicit, long.class); + } else if (expected == byte.class && explicit) { + return Cast.standard(long.class, byte.class, true); + } else if (expected == char.class && explicit) { + return Cast.standard(long.class, char.class, true); + } else if (expected == short.class && explicit) { + return Cast.standard(long.class, short.class, true); + } else if (expected == int.class && explicit) { + return Cast.standard(long.class, int.class, true); + } else if (expected == float.class) { + return Cast.standard(long.class, float.class, explicit); + } else if (expected == double.class) { + return Cast.standard(long.class, double.class, explicit); + } else if (expected == Byte.class && explicit && internal) { + return Cast.boxTo(long.class, byte.class, true, byte.class); + } else if (expected == Short.class && explicit && internal) { + return Cast.boxTo(long.class, short.class, true, short.class); + } else if (expected == Character.class && explicit && internal) { + return Cast.boxTo(long.class, char.class, true, char.class); + } else if (expected == Integer.class && explicit && internal) { + return Cast.boxTo(long.class, int.class, true, int.class); + } else if (expected == Long.class && internal) { + return Cast.boxTo(long.class, long.class, explicit, long.class); + } else if (expected == Float.class && internal) { + return Cast.boxTo(long.class, float.class, explicit, float.class); + } else if (expected == Double.class && internal) { + return Cast.boxTo(long.class, double.class, explicit, double.class); } - } else if (actual.clazz == float.class) { - if (expected.dynamic) { - return Cast.boxFrom(definition.FloatType, definition.DefType, explicit, definition.floatType); - } else if (expected.clazz == Object.class && internal) { - return Cast.boxFrom(definition.FloatType, definition.ObjectType, explicit, definition.floatType); - } else if (expected.clazz == Number.class && internal) { - return Cast.boxFrom(definition.FloatType, definition.NumberType, explicit, definition.floatType); - } else if (expected.clazz == byte.class && explicit) { - return Cast.standard(definition.floatType, definition.byteType, true); - } else if (expected.clazz == char.class && explicit) { - return Cast.standard(definition.floatType, definition.charType, true); - } else if (expected.clazz == short.class && explicit) { - return Cast.standard(definition.floatType, definition.shortType, true); - } else if (expected.clazz == int.class && explicit) { - return Cast.standard(definition.floatType, definition.intType, true); - } else if (expected.clazz == long.class && explicit) { - return Cast.standard(definition.floatType, definition.longType, true); - } else if (expected.clazz == double.class) { - return Cast.standard(definition.floatType, definition.doubleType, explicit); - } else if (expected.clazz == Byte.class && explicit && internal) { - return Cast.boxTo(definition.floatType, definition.byteType, true, 
definition.byteType); - } else if (expected.clazz == Short.class && explicit && internal) { - return Cast.boxTo(definition.floatType, definition.shortType, true, definition.shortType); - } else if (expected.clazz == Character.class && explicit && internal) { - return Cast.boxTo(definition.floatType, definition.charType, true, definition.charType); - } else if (expected.clazz == Integer.class && explicit && internal) { - return Cast.boxTo(definition.floatType, definition.intType, true, definition.intType); - } else if (expected.clazz == Long.class && explicit && internal) { - return Cast.boxTo(definition.floatType, definition.longType, true, definition.longType); - } else if (expected.clazz == Float.class && internal) { - return Cast.boxTo(definition.floatType, definition.floatType, explicit, definition.floatType); - } else if (expected.clazz == Double.class && internal) { - return Cast.boxTo(definition.floatType, definition.doubleType, explicit, definition.doubleType); + } else if (actual == float.class) { + if (expected == def.class) { + return Cast.boxFrom(Float.class, def.class, explicit, float.class); + } else if (expected == Object.class && internal) { + return Cast.boxFrom(Float.class, Object.class, explicit, float.class); + } else if (expected == Number.class && internal) { + return Cast.boxFrom(Float.class, Number.class, explicit, float.class); + } else if (expected == byte.class && explicit) { + return Cast.standard(float.class, byte.class, true); + } else if (expected == char.class && explicit) { + return Cast.standard(float.class, char.class, true); + } else if (expected == short.class && explicit) { + return Cast.standard(float.class, short.class, true); + } else if (expected == int.class && explicit) { + return Cast.standard(float.class, int.class, true); + } else if (expected == long.class && explicit) { + return Cast.standard(float.class, long.class, true); + } else if (expected == double.class) { + return Cast.standard(float.class, double.class, explicit); + } else if (expected == Byte.class && explicit && internal) { + return Cast.boxTo(float.class, byte.class, true, byte.class); + } else if (expected == Short.class && explicit && internal) { + return Cast.boxTo(float.class, short.class, true, short.class); + } else if (expected == Character.class && explicit && internal) { + return Cast.boxTo(float.class, char.class, true, char.class); + } else if (expected == Integer.class && explicit && internal) { + return Cast.boxTo(float.class, int.class, true, int.class); + } else if (expected == Long.class && explicit && internal) { + return Cast.boxTo(float.class, long.class, true, long.class); + } else if (expected == Float.class && internal) { + return Cast.boxTo(float.class, float.class, explicit, float.class); + } else if (expected == Double.class && internal) { + return Cast.boxTo(float.class, double.class, explicit, double.class); } - } else if (actual.clazz == double.class) { - if (expected.dynamic) { - return Cast.boxFrom(definition.DoubleType, definition.DefType, explicit, definition.doubleType); - } else if (expected.clazz == Object.class && internal) { - return Cast.boxFrom(definition.DoubleType, definition.ObjectType, explicit, definition.doubleType); - } else if (expected.clazz == Number.class && internal) { - return Cast.boxFrom(definition.DoubleType, definition.NumberType, explicit, definition.doubleType); - } else if (expected.clazz == byte.class && explicit) { - return Cast.standard(definition.doubleType, definition.byteType, true); - } else if (expected.clazz == 
char.class && explicit) { - return Cast.standard(definition.doubleType, definition.charType, true); - } else if (expected.clazz == short.class && explicit) { - return Cast.standard(definition.doubleType, definition.shortType, true); - } else if (expected.clazz == int.class && explicit) { - return Cast.standard(definition.doubleType, definition.intType, true); - } else if (expected.clazz == long.class && explicit) { - return Cast.standard(definition.doubleType, definition.longType, true); - } else if (expected.clazz == float.class && explicit) { - return Cast.standard(definition.doubleType, definition.floatType, true); - } else if (expected.clazz == Byte.class && explicit && internal) { - return Cast.boxTo(definition.doubleType, definition.byteType, true, definition.byteType); - } else if (expected.clazz == Short.class && explicit && internal) { - return Cast.boxTo(definition.doubleType, definition.shortType, true, definition.shortType); - } else if (expected.clazz == Character.class && explicit && internal) { - return Cast.boxTo(definition.doubleType, definition.charType, true, definition.charType); - } else if (expected.clazz == Integer.class && explicit && internal) { - return Cast.boxTo(definition.doubleType, definition.intType, true, definition.intType); - } else if (expected.clazz == Long.class && explicit && internal) { - return Cast.boxTo(definition.doubleType, definition.longType, true, definition.longType); - } else if (expected.clazz == Float.class && explicit && internal) { - return Cast.boxTo(definition.doubleType, definition.floatType, true, definition.floatType); - } else if (expected.clazz == Double.class && internal) { - return Cast.boxTo(definition.doubleType, definition.doubleType, explicit, definition.doubleType); + } else if (actual == double.class) { + if (expected == def.class) { + return Cast.boxFrom(Double.class, def.class, explicit, double.class); + } else if (expected == Object.class && internal) { + return Cast.boxFrom(Double.class, Object.class, explicit, double.class); + } else if (expected == Number.class && internal) { + return Cast.boxFrom(Double.class, Number.class, explicit, double.class); + } else if (expected == byte.class && explicit) { + return Cast.standard(double.class, byte.class, true); + } else if (expected == char.class && explicit) { + return Cast.standard(double.class, char.class, true); + } else if (expected == short.class && explicit) { + return Cast.standard(double.class, short.class, true); + } else if (expected == int.class && explicit) { + return Cast.standard(double.class, int.class, true); + } else if (expected == long.class && explicit) { + return Cast.standard(double.class, long.class, true); + } else if (expected == float.class && explicit) { + return Cast.standard(double.class, float.class, true); + } else if (expected == Byte.class && explicit && internal) { + return Cast.boxTo(double.class, byte.class, true, byte.class); + } else if (expected == Short.class && explicit && internal) { + return Cast.boxTo(double.class, short.class, true, short.class); + } else if (expected == Character.class && explicit && internal) { + return Cast.boxTo(double.class, char.class, true, char.class); + } else if (expected == Integer.class && explicit && internal) { + return Cast.boxTo(double.class, int.class, true, int.class); + } else if (expected == Long.class && explicit && internal) { + return Cast.boxTo(double.class, long.class, true, long.class); + } else if (expected == Float.class && explicit && internal) { + return Cast.boxTo(double.class, 
float.class, true, float.class); + } else if (expected == Double.class && internal) { + return Cast.boxTo(double.class, double.class, explicit, double.class); } - } else if (actual.clazz == Boolean.class) { - if (expected.clazz == boolean.class && internal) { - return Cast.unboxFrom(definition.booleanType, definition.booleanType, explicit, definition.booleanType); + } else if (actual == Boolean.class) { + if (expected == boolean.class && internal) { + return Cast.unboxFrom(boolean.class, boolean.class, explicit, boolean.class); } - } else if (actual.clazz == Byte.class) { - if (expected.clazz == byte.class && internal) { - return Cast.unboxFrom(definition.byteType, definition.byteType, explicit, definition.byteType); - } else if (expected.clazz == short.class && internal) { - return Cast.unboxFrom(definition.byteType, definition.shortType, explicit, definition.byteType); - } else if (expected.clazz == char.class && explicit && internal) { - return Cast.unboxFrom(definition.byteType, definition.charType, true, definition.byteType); - } else if (expected.clazz == int.class && internal) { - return Cast.unboxFrom(definition.byteType, definition.intType, explicit, definition.byteType); - } else if (expected.clazz == long.class && internal) { - return Cast.unboxFrom(definition.byteType, definition.longType, explicit, definition.byteType); - } else if (expected.clazz == float.class && internal) { - return Cast.unboxFrom(definition.byteType, definition.floatType, explicit, definition.byteType); - } else if (expected.clazz == double.class && internal) { - return Cast.unboxFrom(definition.byteType, definition.doubleType, explicit, definition.byteType); + } else if (actual == Byte.class) { + if (expected == byte.class && internal) { + return Cast.unboxFrom(byte.class, byte.class, explicit, byte.class); + } else if (expected == short.class && internal) { + return Cast.unboxFrom(byte.class, short.class, explicit, byte.class); + } else if (expected == char.class && explicit && internal) { + return Cast.unboxFrom(byte.class, char.class, true, byte.class); + } else if (expected == int.class && internal) { + return Cast.unboxFrom(byte.class, int.class, explicit, byte.class); + } else if (expected == long.class && internal) { + return Cast.unboxFrom(byte.class, long.class, explicit, byte.class); + } else if (expected == float.class && internal) { + return Cast.unboxFrom(byte.class, float.class, explicit, byte.class); + } else if (expected == double.class && internal) { + return Cast.unboxFrom(byte.class, double.class, explicit, byte.class); } - } else if (actual.clazz == Short.class) { - if (expected.clazz == byte.class && explicit && internal) { - return Cast.unboxFrom(definition.shortType, definition.byteType, true, definition.shortType); - } else if (expected.clazz == short.class && internal) { - return Cast.unboxFrom(definition.shortType, definition.shortType, explicit, definition.shortType); - } else if (expected.clazz == char.class && explicit && internal) { - return Cast.unboxFrom(definition.shortType, definition.charType, true, definition.shortType); - } else if (expected.clazz == int.class && internal) { - return Cast.unboxFrom(definition.shortType, definition.intType, explicit, definition.shortType); - } else if (expected.clazz == long.class && internal) { - return Cast.unboxFrom(definition.shortType, definition.longType, explicit, definition.shortType); - } else if (expected.clazz == float.class && internal) { - return Cast.unboxFrom(definition.shortType, definition.floatType, explicit, 
definition.shortType); - } else if (expected.clazz == double.class && internal) { - return Cast.unboxFrom(definition.shortType, definition.doubleType, explicit, definition.shortType); + } else if (actual == Short.class) { + if (expected == byte.class && explicit && internal) { + return Cast.unboxFrom(short.class, byte.class, true, short.class); + } else if (expected == short.class && internal) { + return Cast.unboxFrom(short.class, short.class, explicit, short.class); + } else if (expected == char.class && explicit && internal) { + return Cast.unboxFrom(short.class, char.class, true, short.class); + } else if (expected == int.class && internal) { + return Cast.unboxFrom(short.class, int.class, explicit, short.class); + } else if (expected == long.class && internal) { + return Cast.unboxFrom(short.class, long.class, explicit, short.class); + } else if (expected == float.class && internal) { + return Cast.unboxFrom(short.class, float.class, explicit, short.class); + } else if (expected == double.class && internal) { + return Cast.unboxFrom(short.class, double.class, explicit, short.class); } - } else if (actual.clazz == Character.class) { - if (expected.clazz == byte.class && explicit && internal) { - return Cast.unboxFrom(definition.charType, definition.byteType, true, definition.charType); - } else if (expected.clazz == short.class && explicit && internal) { - return Cast.unboxFrom(definition.charType, definition.shortType, true, definition.charType); - } else if (expected.clazz == char.class && internal) { - return Cast.unboxFrom(definition.charType, definition.charType, explicit, definition.charType); - } else if (expected.clazz == int.class && internal) { - return Cast.unboxFrom(definition.charType, definition.intType, explicit, definition.charType); - } else if (expected.clazz == long.class && internal) { - return Cast.unboxFrom(definition.charType, definition.longType, explicit, definition.charType); - } else if (expected.clazz == float.class && internal) { - return Cast.unboxFrom(definition.charType, definition.floatType, explicit, definition.charType); - } else if (expected.clazz == double.class && internal) { - return Cast.unboxFrom(definition.charType, definition.doubleType, explicit, definition.charType); + } else if (actual == Character.class) { + if (expected == byte.class && explicit && internal) { + return Cast.unboxFrom(char.class, byte.class, true, char.class); + } else if (expected == short.class && explicit && internal) { + return Cast.unboxFrom(char.class, short.class, true, char.class); + } else if (expected == char.class && internal) { + return Cast.unboxFrom(char.class, char.class, explicit, char.class); + } else if (expected == int.class && internal) { + return Cast.unboxFrom(char.class, int.class, explicit, char.class); + } else if (expected == long.class && internal) { + return Cast.unboxFrom(char.class, long.class, explicit, char.class); + } else if (expected == float.class && internal) { + return Cast.unboxFrom(char.class, float.class, explicit, char.class); + } else if (expected == double.class && internal) { + return Cast.unboxFrom(char.class, double.class, explicit, char.class); } - } else if (actual.clazz == Integer.class) { - if (expected.clazz == byte.class && explicit && internal) { - return Cast.unboxFrom(definition.intType, definition.byteType, true, definition.intType); - } else if (expected.clazz == short.class && explicit && internal) { - return Cast.unboxFrom(definition.intType, definition.shortType, true, definition.intType); - } else if 
(expected.clazz == char.class && explicit && internal) { - return Cast.unboxFrom(definition.intType, definition.charType, true, definition.intType); - } else if (expected.clazz == int.class && internal) { - return Cast.unboxFrom(definition.intType, definition.intType, explicit, definition.intType); - } else if (expected.clazz == long.class && internal) { - return Cast.unboxFrom(definition.intType, definition.longType, explicit, definition.intType); - } else if (expected.clazz == float.class && internal) { - return Cast.unboxFrom(definition.intType, definition.floatType, explicit, definition.intType); - } else if (expected.clazz == double.class && internal) { - return Cast.unboxFrom(definition.intType, definition.doubleType, explicit, definition.intType); + } else if (actual == Integer.class) { + if (expected == byte.class && explicit && internal) { + return Cast.unboxFrom(int.class, byte.class, true, int.class); + } else if (expected == short.class && explicit && internal) { + return Cast.unboxFrom(int.class, short.class, true, int.class); + } else if (expected == char.class && explicit && internal) { + return Cast.unboxFrom(int.class, char.class, true, int.class); + } else if (expected == int.class && internal) { + return Cast.unboxFrom(int.class, int.class, explicit, int.class); + } else if (expected == long.class && internal) { + return Cast.unboxFrom(int.class, long.class, explicit, int.class); + } else if (expected == float.class && internal) { + return Cast.unboxFrom(int.class, float.class, explicit, int.class); + } else if (expected == double.class && internal) { + return Cast.unboxFrom(int.class, double.class, explicit, int.class); } - } else if (actual.clazz == Long.class) { - if (expected.clazz == byte.class && explicit && internal) { - return Cast.unboxFrom(definition.longType, definition.byteType, true, definition.longType); - } else if (expected.clazz == short.class && explicit && internal) { - return Cast.unboxFrom(definition.longType, definition.shortType, true, definition.longType); - } else if (expected.clazz == char.class && explicit && internal) { - return Cast.unboxFrom(definition.longType, definition.charType, true, definition.longType); - } else if (expected.clazz == int.class && explicit && internal) { - return Cast.unboxFrom(definition.longType, definition.intType, true, definition.longType); - } else if (expected.clazz == long.class && internal) { - return Cast.unboxFrom(definition.longType, definition.longType, explicit, definition.longType); - } else if (expected.clazz == float.class && internal) { - return Cast.unboxFrom(definition.longType, definition.floatType, explicit, definition.longType); - } else if (expected.clazz == double.class && internal) { - return Cast.unboxFrom(definition.longType, definition.doubleType, explicit, definition.longType); + } else if (actual == Long.class) { + if (expected == byte.class && explicit && internal) { + return Cast.unboxFrom(long.class, byte.class, true, long.class); + } else if (expected == short.class && explicit && internal) { + return Cast.unboxFrom(long.class, short.class, true, long.class); + } else if (expected == char.class && explicit && internal) { + return Cast.unboxFrom(long.class, char.class, true, long.class); + } else if (expected == int.class && explicit && internal) { + return Cast.unboxFrom(long.class, int.class, true, long.class); + } else if (expected == long.class && internal) { + return Cast.unboxFrom(long.class, long.class, explicit, long.class); + } else if (expected == float.class && internal) { + 
return Cast.unboxFrom(long.class, float.class, explicit, long.class); + } else if (expected == double.class && internal) { + return Cast.unboxFrom(long.class, double.class, explicit, long.class); } - } else if (actual.clazz == Float.class) { - if (expected.clazz == byte.class && explicit && internal) { - return Cast.unboxFrom(definition.floatType, definition.byteType, true, definition.floatType); - } else if (expected.clazz == short.class && explicit && internal) { - return Cast.unboxFrom(definition.floatType, definition.shortType, true, definition.floatType); - } else if (expected.clazz == char.class && explicit && internal) { - return Cast.unboxFrom(definition.floatType, definition.charType, true, definition.floatType); - } else if (expected.clazz == int.class && explicit && internal) { - return Cast.unboxFrom(definition.floatType, definition.intType, true, definition.floatType); - } else if (expected.clazz == long.class && explicit && internal) { - return Cast.unboxFrom(definition.floatType, definition.longType, true, definition.floatType); - } else if (expected.clazz == float.class && internal) { - return Cast.unboxFrom(definition.floatType, definition.floatType, explicit, definition.floatType); - } else if (expected.clazz == double.class && internal) { - return Cast.unboxFrom(definition.floatType, definition.doubleType, explicit, definition.floatType); + } else if (actual == Float.class) { + if (expected == byte.class && explicit && internal) { + return Cast.unboxFrom(float.class, byte.class, true, float.class); + } else if (expected == short.class && explicit && internal) { + return Cast.unboxFrom(float.class, short.class, true, float.class); + } else if (expected == char.class && explicit && internal) { + return Cast.unboxFrom(float.class, char.class, true, float.class); + } else if (expected == int.class && explicit && internal) { + return Cast.unboxFrom(float.class, int.class, true, float.class); + } else if (expected == long.class && explicit && internal) { + return Cast.unboxFrom(float.class, long.class, true, float.class); + } else if (expected == float.class && internal) { + return Cast.unboxFrom(float.class, float.class, explicit, float.class); + } else if (expected == double.class && internal) { + return Cast.unboxFrom(float.class, double.class, explicit, float.class); } - } else if (actual.clazz == Double.class) { - if (expected.clazz == byte.class && explicit && internal) { - return Cast.unboxFrom(definition.doubleType, definition.byteType, true, definition.doubleType); - } else if (expected.clazz == short.class && explicit && internal) { - return Cast.unboxFrom(definition.doubleType, definition.shortType, true, definition.doubleType); - } else if (expected.clazz == char.class && explicit && internal) { - return Cast.unboxFrom(definition.doubleType, definition.charType, true, definition.doubleType); - } else if (expected.clazz == int.class && explicit && internal) { - return Cast.unboxFrom(definition.doubleType, definition.intType, true, definition.doubleType); - } else if (expected.clazz == long.class && explicit && internal) { - return Cast.unboxFrom(definition.doubleType, definition.longType, true, definition.doubleType); - } else if (expected.clazz == float.class && explicit && internal) { - return Cast.unboxFrom(definition.doubleType, definition.floatType, true, definition.doubleType); - } else if (expected.clazz == double.class && internal) { - return Cast.unboxFrom(definition.doubleType, definition.doubleType, explicit, definition.doubleType); + } else if (actual == 
Double.class) { + if (expected == byte.class && explicit && internal) { + return Cast.unboxFrom(double.class, byte.class, true, double.class); + } else if (expected == short.class && explicit && internal) { + return Cast.unboxFrom(double.class, short.class, true, double.class); + } else if (expected == char.class && explicit && internal) { + return Cast.unboxFrom(double.class, char.class, true, double.class); + } else if (expected == int.class && explicit && internal) { + return Cast.unboxFrom(double.class, int.class, true, double.class); + } else if (expected == long.class && explicit && internal) { + return Cast.unboxFrom(double.class, long.class, true, double.class); + } else if (expected == float.class && explicit && internal) { + return Cast.unboxFrom(double.class, float.class, true, double.class); + } else if (expected == double.class && internal) { + return Cast.unboxFrom(double.class, double.class, explicit, double.class); } } - if ( actual.dynamic || - (actual.clazz != void.class && expected.dynamic) || - expected.clazz.isAssignableFrom(actual.clazz) || - (actual.clazz.isAssignableFrom(expected.clazz) && explicit)) { + if ( actual == def.class || + (actual != void.class && expected == def.class) || + expected.isAssignableFrom(actual) || + (actual.isAssignableFrom(expected) && explicit)) { return Cast.standard(actual, expected, explicit); } else { - throw location.createError(new ClassCastException("Cannot cast from [" + actual.name + "] to [" + expected.name + "].")); + throw location.createError(new ClassCastException( + "Cannot cast from [" + Definition.ClassToName(actual) + "] to [" + Definition.ClassToName(expected) + "].")); } } public Object constCast(Location location, final Object constant, final Cast cast) { - Class fsort = cast.from.clazz; - Class tsort = cast.to.clazz; + Class fsort = cast.from; + Class tsort = cast.to; if (fsort == tsort) { return constant; @@ -502,11 +515,11 @@ public Object constCast(Location location, final Object constant, final Cast cas else if (tsort == double.class) return number.doubleValue(); else { throw location.createError(new IllegalStateException("Cannot cast from " + - "[" + cast.from.clazz.getCanonicalName() + "] to [" + cast.to.clazz.getCanonicalName() + "].")); + "[" + cast.from.getCanonicalName() + "] to [" + cast.to.getCanonicalName() + "].")); } } else { throw location.createError(new IllegalStateException("Cannot cast from " + - "[" + cast.from.clazz.getCanonicalName() + "] to [" + cast.to.clazz.getCanonicalName() + "].")); + "[" + cast.from.getCanonicalName() + "] to [" + cast.to.getCanonicalName() + "].")); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java index 7729c5319ea81..52f0c2c63302d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java @@ -27,6 +27,7 @@ import java.lang.invoke.MethodType; import java.lang.reflect.Modifier; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -76,6 +77,13 @@ public final class Definition { public final Type ArrayListType; public final Type HashMapType; + /** Marker class for def type to be used during type analysis. 
*/ + public static final class def { + private def() { + + } + } + public static final class Type { public final String name; public final int dimensions; @@ -365,40 +373,41 @@ public Method getFunctionalMethod() { } public static class Cast { + /** Create a standard cast with no boxing/unboxing. */ - public static Cast standard(Type from, Type to, boolean explicit) { + public static Cast standard(Class from, Class to, boolean explicit) { return new Cast(from, to, explicit, null, null, null, null); } /** Create a cast where the from type will be unboxed, and then the cast will be performed. */ - public static Cast unboxFrom(Type from, Type to, boolean explicit, Type unboxFrom) { + public static Cast unboxFrom(Class from, Class to, boolean explicit, Class unboxFrom) { return new Cast(from, to, explicit, unboxFrom, null, null, null); } /** Create a cast where the to type will be unboxed, and then the cast will be performed. */ - public static Cast unboxTo(Type from, Type to, boolean explicit, Type unboxTo) { + public static Cast unboxTo(Class from, Class to, boolean explicit, Class unboxTo) { return new Cast(from, to, explicit, null, unboxTo, null, null); } /** Create a cast where the from type will be boxed, and then the cast will be performed. */ - public static Cast boxFrom(Type from, Type to, boolean explicit, Type boxFrom) { + public static Cast boxFrom(Class from, Class to, boolean explicit, Class boxFrom) { return new Cast(from, to, explicit, null, null, boxFrom, null); } /** Create a cast where the to type will be boxed, and then the cast will be performed. */ - public static Cast boxTo(Type from, Type to, boolean explicit, Type boxTo) { + public static Cast boxTo(Class from, Class to, boolean explicit, Class boxTo) { return new Cast(from, to, explicit, null, null, null, boxTo); } - public final Type from; - public final Type to; + public final Class from; + public final Class to; public final boolean explicit; - public final Type unboxFrom; - public final Type unboxTo; - public final Type boxFrom; - public final Type boxTo; + public final Class unboxFrom; + public final Class unboxTo; + public final Class boxFrom; + public final Class boxTo; - private Cast(Type from, Type to, boolean explicit, Type unboxFrom, Type unboxTo, Type boxFrom, Type boxTo) { + private Cast(Class from, Class to, boolean explicit, Class unboxFrom, Class unboxTo, Class boxFrom, Class boxTo) { this.from = from; this.to = to; this.explicit = explicit; @@ -499,6 +508,92 @@ public static boolean isConstantType(Type constant) { constant.clazz == String.class; } + public static Class ObjectClassTodefClass(Class clazz) { + if (clazz.isArray()) { + Class component = clazz.getComponentType(); + int dimensions = 1; + + while (component.isArray()) { + component = component.getComponentType(); + ++dimensions; + } + + if (component == Object.class) { + char[] braces = new char[dimensions]; + Arrays.fill(braces, '['); + + String descriptor = new String(braces) + org.objectweb.asm.Type.getType(def.class).getDescriptor(); + org.objectweb.asm.Type type = org.objectweb.asm.Type.getType(descriptor); + + try { + return Class.forName(type.getInternalName().replace('/', '.')); + } catch (ClassNotFoundException exception) { + throw new IllegalStateException("internal error", exception); + } + } + } else if (clazz == Object.class) { + return def.class; + } + + return clazz; + } + + public static Class defClassToObjectClass(Class clazz) { + if (clazz.isArray()) { + Class component = clazz.getComponentType(); + int dimensions = 1; + + 
while (component.isArray()) { + component = component.getComponentType(); + ++dimensions; + } + + if (component == def.class) { + char[] braces = new char[dimensions]; + Arrays.fill(braces, '['); + + String descriptor = new String(braces) + org.objectweb.asm.Type.getType(Object.class).getDescriptor(); + org.objectweb.asm.Type type = org.objectweb.asm.Type.getType(descriptor); + + try { + return Class.forName(type.getInternalName().replace('/', '.')); + } catch (ClassNotFoundException exception) { + throw new IllegalStateException("internal error", exception); + } + } + } else if (clazz == def.class) { + return Object.class; + } + + return clazz; + } + + public static String ClassToName(Class clazz) { + if (clazz.isArray()) { + Class component = clazz.getComponentType(); + int dimensions = 1; + + while (component.isArray()) { + component = component.getComponentType(); + ++dimensions; + } + + if (component == def.class) { + StringBuilder builder = new StringBuilder("def"); + + for (int dimension = 0; dimension < dimensions; ++dimension) { + builder.append("[]"); + } + + return builder.toString(); + } + } else if (clazz == def.class) { + return "def"; + } + + return clazz.getCanonicalName(); + } + public RuntimeClass getRuntimeClass(Class clazz) { return runtimeMap.get(clazz); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java index b0c15abbfb0d5..7925856656e15 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java @@ -20,15 +20,17 @@ package org.elasticsearch.painless; import org.elasticsearch.painless.Definition.Cast; -import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.Definition.def; import org.objectweb.asm.ClassVisitor; import org.objectweb.asm.Label; import org.objectweb.asm.Opcodes; +import org.objectweb.asm.Type; import org.objectweb.asm.commons.GeneratorAdapter; import org.objectweb.asm.commons.Method; import java.util.ArrayDeque; import java.util.ArrayList; +import java.util.Arrays; import java.util.BitSet; import java.util.Deque; import java.util.List; @@ -128,68 +130,68 @@ public void writeLoopCounter(int slot, int count, Location location) { mark(end); } - public void writeCast(final Cast cast) { + public void writeCast(Cast cast) { if (cast != null) { - if (cast.from.clazz == char.class && cast.to.clazz == String.class) { + if (cast.from == char.class && cast.to == String.class) { invokeStatic(UTILITY_TYPE, CHAR_TO_STRING); - } else if (cast.from.clazz == String.class && cast.to.clazz == char.class) { + } else if (cast.from == String.class && cast.to == char.class) { invokeStatic(UTILITY_TYPE, STRING_TO_CHAR); } else if (cast.unboxFrom != null) { - unbox(cast.unboxFrom.type); + unbox(getType(cast.unboxFrom)); writeCast(cast.from, cast.to); } else if (cast.unboxTo != null) { - if (cast.from.dynamic) { + if (cast.from == def.class) { if (cast.explicit) { - if (cast.to.clazz == Boolean.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BOOLEAN); - else if (cast.to.clazz == Byte.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BYTE_EXPLICIT); - else if (cast.to.clazz == Short.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_SHORT_EXPLICIT); - else if (cast.to.clazz == Character.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_CHAR_EXPLICIT); - else if (cast.to.clazz == Integer.class) invokeStatic(DEF_UTIL_TYPE, 
DEF_TO_INT_EXPLICIT); - else if (cast.to.clazz == Long.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_LONG_EXPLICIT); - else if (cast.to.clazz == Float.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_FLOAT_EXPLICIT); - else if (cast.to.clazz == Double.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_DOUBLE_EXPLICIT); + if (cast.to == Boolean.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BOOLEAN); + else if (cast.to == Byte.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BYTE_EXPLICIT); + else if (cast.to == Short.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_SHORT_EXPLICIT); + else if (cast.to == Character.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_CHAR_EXPLICIT); + else if (cast.to == Integer.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_INT_EXPLICIT); + else if (cast.to == Long.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_LONG_EXPLICIT); + else if (cast.to == Float.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_FLOAT_EXPLICIT); + else if (cast.to == Double.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_DOUBLE_EXPLICIT); else { throw new IllegalStateException("Illegal tree structure."); } } else { - if (cast.to.clazz == Boolean.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BOOLEAN); - else if (cast.to.clazz == Byte.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BYTE_IMPLICIT); - else if (cast.to.clazz == Short.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_SHORT_IMPLICIT); - else if (cast.to.clazz == Character.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_CHAR_IMPLICIT); - else if (cast.to.clazz == Integer.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_INT_IMPLICIT); - else if (cast.to.clazz == Long.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_LONG_IMPLICIT); - else if (cast.to.clazz == Float.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_FLOAT_IMPLICIT); - else if (cast.to.clazz == Double.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_DOUBLE_IMPLICIT); + if (cast.to == Boolean.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BOOLEAN); + else if (cast.to == Byte.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_BYTE_IMPLICIT); + else if (cast.to == Short.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_SHORT_IMPLICIT); + else if (cast.to == Character.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_CHAR_IMPLICIT); + else if (cast.to == Integer.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_INT_IMPLICIT); + else if (cast.to == Long.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_LONG_IMPLICIT); + else if (cast.to == Float.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_FLOAT_IMPLICIT); + else if (cast.to == Double.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_DOUBLE_IMPLICIT); else { throw new IllegalStateException("Illegal tree structure."); } } } else { writeCast(cast.from, cast.to); - unbox(cast.unboxTo.type); + unbox(getType(cast.unboxTo)); } } else if (cast.boxFrom != null) { - box(cast.boxFrom.type); + box(getType(cast.boxFrom)); writeCast(cast.from, cast.to); } else if (cast.boxTo != null) { writeCast(cast.from, cast.to); - box(cast.boxTo.type); + box(getType(cast.boxTo)); } else { writeCast(cast.from, cast.to); } } } - private void writeCast(final Type from, final Type to) { + private void writeCast(Class from, Class to) { if (from.equals(to)) { return; } - if (from.clazz != boolean.class && from.clazz.isPrimitive() && to.clazz != boolean.class && to.clazz.isPrimitive()) { - cast(from.type, to.type); + if (from != boolean.class && from.isPrimitive() && to != boolean.class && to.isPrimitive()) { + cast(getType(from), getType(to)); } else { - if (!to.clazz.isAssignableFrom(from.clazz)) { - checkCast(to.type); + if (!to.isAssignableFrom(from)) { + checkCast(getType(to)); } } } @@ -202,6 +204,29 @@ public void 
box(org.objectweb.asm.Type type) { valueOf(type); } + public static Type getType(Class clazz) { + if (clazz.isArray()) { + Class component = clazz.getComponentType(); + int dimensions = 1; + + while (component.isArray()) { + component = component.getComponentType(); + ++dimensions; + } + + if (component == def.class) { + char[] braces = new char[dimensions]; + Arrays.fill(braces, '['); + + return Type.getType(new String(braces) + Type.getType(Object.class).getDescriptor()); + } + } else if (clazz == def.class) { + return Type.getType(Object.class); + } + + return Type.getType(clazz); + } + public void writeBranch(final Label tru, final Label fals) { if (tru != null) { visitJumpInsn(Opcodes.IFNE, tru); @@ -227,7 +252,7 @@ public int writeNewStrings() { } } - public void writeAppendStrings(final Type type) { + public void writeAppendStrings(final Definition.Type type) { if (INDY_STRING_CONCAT_BOOTSTRAP_HANDLE != null) { // Java 9+: record type information stringConcatArgs.peek().add(type.type); @@ -267,7 +292,7 @@ public void writeToStrings() { } /** Writes a dynamic binary instruction: returnType, lhs, and rhs can be different */ - public void writeDynamicBinaryInstruction(Location location, Type returnType, Type lhs, Type rhs, + public void writeDynamicBinaryInstruction(Location location, Definition.Type returnType, Definition.Type lhs, Definition.Type rhs, Operation operation, int flags) { org.objectweb.asm.Type methodType = org.objectweb.asm.Type.getMethodType(returnType.type, lhs.type, rhs.type); @@ -318,7 +343,7 @@ public void writeDynamicBinaryInstruction(Location location, Type returnType, Ty } /** Writes a static binary instruction */ - public void writeBinaryInstruction(Location location, Type type, Operation operation) { + public void writeBinaryInstruction(Location location, Definition.Type type, Operation operation) { if ((type.clazz == float.class || type.clazz == double.class) && (operation == Operation.LSH || operation == Operation.USH || operation == Operation.RSH || operation == Operation.BWAND || diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java index 5c2a149876139..42ec197c7f5f3 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java @@ -19,6 +19,7 @@ package org.elasticsearch.painless.node; +import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Definition.Cast; import java.util.Objects; @@ -63,6 +64,6 @@ void write(MethodWriter writer, Globals globals) { @Override public String toString() { - return singleLineToString(cast.to, child); + return singleLineToString(Definition.ClassToName(cast.to), child); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java index 919b0881c0794..b8fe248601764 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java @@ -39,8 +39,8 @@ private static void assertCast(Type actual, Type expected, boolean mustBeExplici } Cast cast = definition.caster.getLegalCast(location, actual, expected, true, false); - assertEquals(actual, cast.from); - assertEquals(expected, cast.to); + assertEquals(actual.clazz, 
cast.from); + assertEquals(expected.clazz, cast.to); if (mustBeExplicit) { ClassCastException error = expectThrows(ClassCastException.class, @@ -48,8 +48,8 @@ private static void assertCast(Type actual, Type expected, boolean mustBeExplici assertTrue(error.getMessage().startsWith("Cannot cast")); } else { cast = definition.caster.getLegalCast(location, actual, expected, false, false); - assertEquals(actual, cast.from); - assertEquals(expected, cast.to); + assertEquals(actual.clazz, cast.from); + assertEquals(expected.clazz, cast.to); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java index 424b0c286ecff..fb1a004e3cd40 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java @@ -162,12 +162,12 @@ public void testECapturingFunctionRef() { public void testECast() { Location l = new Location(getTestName(), 0); AExpression child = new EConstant(l, "test"); - Cast cast = Cast.standard(definition.StringType, definition.IntegerType, true); + Cast cast = Cast.standard(String.class, Integer.class, true); assertEquals("(ECast java.lang.Integer (EConstant String 'test'))", new ECast(l, child, cast).toString()); l = new Location(getTestName(), 1); child = new EBinary(l, Operation.ADD, new EConstant(l, "test"), new EConstant(l, 12)); - cast = Cast.standard(definition.IntegerType, definition.BooleanType, true); + cast = Cast.standard(Integer.class, Boolean.class, true); assertEquals("(ECast java.lang.Boolean (EBinary (EConstant String 'test') + (EConstant Integer 12)))", new ECast(l, child, cast).toString()); } From 4ef341a0c3925d28ad4ab007b42a3b79cb34af2c Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 23 Jan 2018 09:06:02 +0100 Subject: [PATCH 84/94] Revert change that does not return all indices if a specific alias is requested via get alias api. 
(#28294) Reopens #27763 --- .../cluster/metadata/MetaData.java | 5 +---- .../elasticsearch/aliases/IndexAliasesIT.java | 18 ++++++++++++++---- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 98afe41c59697..23ed28569d28d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -276,11 +276,8 @@ public ImmutableOpenMap> findAliases(final String[] if (!filteredValues.isEmpty()) { // Make the list order deterministic CollectionUtil.timSort(filteredValues, Comparator.comparing(AliasMetaData::alias)); - mapBuilder.put(index, Collections.unmodifiableList(filteredValues)); - } else if (matchAllAliases) { - // in case all aliases are requested then it is desired to return the concrete index with no aliases (#25114): - mapBuilder.put(index, Collections.emptyList()); } + mapBuilder.put(index, Collections.unmodifiableList(filteredValues)); } return mapBuilder.build(); } diff --git a/server/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java b/server/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java index dae421db97f31..8bf074be551b1 100644 --- a/server/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/server/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -570,20 +570,24 @@ public void testIndicesGetAliases() throws Exception { logger.info("--> getting alias1"); GetAliasesResponse getResponse = admin().indices().prepareGetAliases("alias1").get(); assertThat(getResponse, notNullValue()); - assertThat(getResponse.getAliases().size(), equalTo(1)); + assertThat(getResponse.getAliases().size(), equalTo(5)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("alias1")); assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); + assertTrue(getResponse.getAliases().get("test").isEmpty()); + assertTrue(getResponse.getAliases().get("test123").isEmpty()); + assertTrue(getResponse.getAliases().get("foobarbaz").isEmpty()); + assertTrue(getResponse.getAliases().get("bazbar").isEmpty()); AliasesExistResponse existsResponse = admin().indices().prepareAliasesExist("alias1").get(); assertThat(existsResponse.exists(), equalTo(true)); logger.info("--> getting all aliases that start with alias*"); getResponse = admin().indices().prepareGetAliases("alias*").get(); assertThat(getResponse, notNullValue()); - assertThat(getResponse.getAliases().size(), equalTo(1)); + assertThat(getResponse.getAliases().size(), equalTo(5)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(2)); assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("alias1")); @@ -595,6 +599,10 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(1).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(1).getIndexRouting(), nullValue()); 
assertThat(getResponse.getAliases().get("foobar").get(1).getSearchRouting(), nullValue()); + assertTrue(getResponse.getAliases().get("test").isEmpty()); + assertTrue(getResponse.getAliases().get("test123").isEmpty()); + assertTrue(getResponse.getAliases().get("foobarbaz").isEmpty()); + assertTrue(getResponse.getAliases().get("bazbar").isEmpty()); existsResponse = admin().indices().prepareAliasesExist("alias*").get(); assertThat(existsResponse.exists(), equalTo(true)); @@ -679,12 +687,13 @@ public void testIndicesGetAliases() throws Exception { logger.info("--> getting f* for index *bar"); getResponse = admin().indices().prepareGetAliases("f*").addIndices("*bar").get(); assertThat(getResponse, notNullValue()); - assertThat(getResponse.getAliases().size(), equalTo(1)); + assertThat(getResponse.getAliases().size(), equalTo(2)); assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo")); assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); + assertTrue(getResponse.getAliases().get("bazbar").isEmpty()); existsResponse = admin().indices().prepareAliasesExist("f*") .addIndices("*bar").get(); assertThat(existsResponse.exists(), equalTo(true)); @@ -693,13 +702,14 @@ public void testIndicesGetAliases() throws Exception { logger.info("--> getting f* for index *bac"); getResponse = admin().indices().prepareGetAliases("foo").addIndices("*bac").get(); assertThat(getResponse, notNullValue()); - assertThat(getResponse.getAliases().size(), equalTo(1)); + assertThat(getResponse.getAliases().size(), equalTo(2)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo")); assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); + assertTrue(getResponse.getAliases().get("bazbar").isEmpty()); existsResponse = admin().indices().prepareAliasesExist("foo") .addIndices("*bac").get(); assertThat(existsResponse.exists(), equalTo(true)); From cf61d792b2ed27d69272cd3b5fd6e93200c90379 Mon Sep 17 00:00:00 2001 From: Catalin Ursachi Date: Tue, 23 Jan 2018 10:03:32 +0000 Subject: [PATCH 85/94] Added Put Mapping API to high-level Rest client (#27869) Relates to #27205 --- .../elasticsearch/client/IndicesClient.java | 25 +++++ .../org/elasticsearch/client/Request.java | 21 ++++ .../elasticsearch/client/IndicesClientIT.java | 31 ++++++ .../elasticsearch/client/RequestTests.java | 34 +++++++ .../IndicesClientDocumentationIT.java | 96 +++++++++++++++++-- docs/java-rest/high-level/apis/index.asciidoc | 2 + .../high-level/apis/putmapping.asciidoc | 71 ++++++++++++++ .../high-level/supported-apis.asciidoc | 1 + .../mapping/put/PutMappingRequest.java | 13 ++- .../mapping/put/PutMappingResponse.java | 25 ++++- .../create/CreateIndexRequestTests.java | 4 +- .../mapping/put/PutMappingRequestTests.java | 84 ++++++++++++++++ .../mapping/put/PutMappingResponseTests.java | 85 ++++++++++++++++ 13 files changed, 481 insertions(+), 11 deletions(-) create mode 100644 
docs/java-rest/high-level/apis/putmapping.asciidoc create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 2dd130fc6342e..d17f0bf94e3b7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -27,6 +27,8 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; @@ -89,6 +91,29 @@ public void createAsync(CreateIndexRequest createIndexRequest, ActionListener + * See + * Put Mapping API on elastic.co + */ + public PutMappingResponse putMapping(PutMappingRequest putMappingRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, Request::putMapping, PutMappingResponse::fromXContent, + Collections.emptySet(), headers); + } + + /** + * Asynchronously updates the mappings on an index using the Put Mapping API + *

+ * See + * Put Mapping API on elastic.co + */ + public void putMappingAsync(PutMappingRequest putMappingRequest, ActionListener listener, + Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, Request::putMapping, PutMappingResponse::fromXContent, + listener, Collections.emptySet(), headers); + } + /** * Opens an index using the Open Index API *

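A minimal usage sketch of the two entry points added above (not part of the patch; it assumes an already-initialized `RestHighLevelClient` named `client`, and the listener body is illustrative — it mirrors the documentation tests later in this commit):

// Editorial sketch: calling the new synchronous and asynchronous put-mapping methods.
PutMappingRequest request = new PutMappingRequest("twitter").type("tweet");
request.source("{\"properties\":{\"message\":{\"type\":\"text\"}}}", XContentType.JSON);

// Synchronous variant: blocks until the cluster responds; throws IOException on transport problems.
PutMappingResponse response = client.indices().putMapping(request);
boolean acknowledged = response.isAcknowledged();

// Asynchronous variant: completion and failure are delivered to the listener.
client.indices().putMappingAsync(request, new ActionListener<PutMappingResponse>() {
    @Override
    public void onResponse(PutMappingResponse putMappingResponse) {
        // e.g. check putMappingResponse.isAcknowledged()
    }

    @Override
    public void onFailure(Exception e) {
        // e.g. log and retry
    }
});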
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index cc3b0deff52c6..229e45498aa95 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -32,6 +32,7 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; @@ -178,6 +179,22 @@ static Request createIndex(CreateIndexRequest createIndexRequest) throws IOExcep return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity); } + static Request putMapping(PutMappingRequest putMappingRequest) throws IOException { + // The concreteIndex is an internal concept, not applicable to requests made over the REST API. + if (putMappingRequest.getConcreteIndex() != null) { + throw new IllegalArgumentException("concreteIndex cannot be set on PutMapping requests made over the REST API"); + } + + String endpoint = endpoint(putMappingRequest.indices(), "_mapping", putMappingRequest.type()); + + Params parameters = Params.builder(); + parameters.withTimeout(putMappingRequest.timeout()); + parameters.withMasterTimeout(putMappingRequest.masterNodeTimeout()); + + HttpEntity entity = createEntity(putMappingRequest, REQUEST_BODY_CONTENT_TYPE); + return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity); + } + static Request info() { return new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null); } @@ -454,6 +471,10 @@ static String endpoint(String[] indices, String[] types, String endpoint) { return endpoint(String.join(",", indices), String.join(",", types), endpoint); } + static String endpoint(String[] indices, String endpoint, String type) { + return endpoint(String.join(",", indices), endpoint, type); + } + /** * Utility method to build request's endpoint. 
*/ diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 5f8702807fb30..2f81479f93a64 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -27,6 +27,8 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -108,6 +110,35 @@ public void testCreateIndex() throws IOException { } } + @SuppressWarnings("unchecked") + public void testPutMapping() throws IOException { + { + // Add mappings to index + String indexName = "mapping_index"; + createIndex(indexName); + + PutMappingRequest putMappingRequest = new PutMappingRequest(indexName); + putMappingRequest.type("type_name"); + XContentBuilder mappingBuilder = JsonXContent.contentBuilder(); + mappingBuilder.startObject().startObject("properties").startObject("field"); + mappingBuilder.field("type", "text"); + mappingBuilder.endObject().endObject().endObject(); + putMappingRequest.source(mappingBuilder); + + PutMappingResponse putMappingResponse = + execute(putMappingRequest, highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync); + assertTrue(putMappingResponse.isAcknowledged()); + + Map indexMetaData = getIndexMetadata(indexName); + Map mappingsData = (Map) indexMetaData.get("mappings"); + Map typeData = (Map) mappingsData.get("type_name"); + Map properties = (Map) typeData.get("properties"); + Map field = (Map) properties.get("field"); + + assertEquals("text", field.get("type")); + } + } + public void testDeleteIndex() throws IOException { { // Delete index if exists diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index 49667a3dee289..0ddaf1de1ca52 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkShardRequest; @@ -317,6 +318,39 @@ public void testCreateIndex() throws IOException { assertToXContentBody(createIndexRequest, request.getEntity()); } + public void testPutMapping() throws IOException { + PutMappingRequest putMappingRequest = new PutMappingRequest(); + + int numIndices = randomIntBetween(0, 5); + String[] indices = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indices[i] = "index-" + 
randomAlphaOfLengthBetween(2, 5); + } + putMappingRequest.indices(indices); + + String type = randomAlphaOfLengthBetween(3, 10); + putMappingRequest.type(type); + + Map expectedParams = new HashMap<>(); + + setRandomTimeout(putMappingRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + setRandomMasterTimeout(putMappingRequest, expectedParams); + + Request request = Request.putMapping(putMappingRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + endpoint.add("_mapping"); + endpoint.add(type); + assertEquals(endpoint.toString(), request.getEndpoint()); + + assertEquals(expectedParams, request.getParameters()); + assertEquals("PUT", request.getMethod()); + assertToXContentBody(putMappingRequest, request.getEntity()); + } + public void testDeleteIndex() { String[] indices = randomIndicesNames(0, 5); DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indices); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index bc3b1698f9679..23029c7c6b007 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -28,6 +28,8 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; @@ -157,15 +159,15 @@ public void testCreateIndex() throws IOException { // tag::create-index-request-mappings request.mapping("tweet", // <1> - " {\n" + - " \"tweet\": {\n" + - " \"properties\": {\n" + - " \"message\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + + "{\n" + + " \"tweet\": {\n" + + " \"properties\": {\n" + + " \"message\": {\n" + + " \"type\": \"text\"\n" + " }\n" + " }\n" + - " }", // <2> + " }\n" + + "}", // <2> XContentType.JSON); // end::create-index-request-mappings @@ -228,6 +230,86 @@ public void onFailure(Exception e) { } } + public void testPutMapping() throws IOException { + RestHighLevelClient client = highLevelClient(); + + { + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter")); + assertTrue(createIndexResponse.isAcknowledged()); + } + + { + // tag::put-mapping-request + PutMappingRequest request = new PutMappingRequest("twitter"); // <1> + request.type("tweet"); // <2> + // end::put-mapping-request + + // tag::put-mapping-request-source + request.source( + "{\n" + + " \"tweet\": {\n" + + " \"properties\": {\n" + + " \"message\": {\n" + + " \"type\": \"text\"\n" + + " }\n" + + " }\n" + + " }\n" + + "}", // <1> + XContentType.JSON); + // end::put-mapping-request-source + + // tag::put-mapping-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // 
end::put-mapping-request-timeout + // tag::put-mapping-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::put-mapping-request-masterTimeout + + // tag::put-mapping-execute + PutMappingResponse putMappingResponse = client.indices().putMapping(request); + // end::put-mapping-execute + + // tag::put-mapping-response + boolean acknowledged = putMappingResponse.isAcknowledged(); // <1> + // end::put-mapping-response + assertTrue(acknowledged); + } + } + + public void testPutMappingAsync() throws Exception { + final RestHighLevelClient client = highLevelClient(); + + { + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter")); + assertTrue(createIndexResponse.isAcknowledged()); + } + + { + PutMappingRequest request = new PutMappingRequest("twitter").type("tweet"); + // tag::put-mapping-execute-async + client.indices().putMappingAsync(request, new ActionListener() { + @Override + public void onResponse(PutMappingResponse putMappingResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }); + // end::put-mapping-execute-async + + assertBusy(() -> { + // TODO Use Indices Exist API instead once it exists + Response response = client.getLowLevelClient().performRequest("HEAD", "twitter"); + assertTrue(RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode()); + }); + } + } + public void testOpenIndex() throws IOException { RestHighLevelClient client = highLevelClient(); diff --git a/docs/java-rest/high-level/apis/index.asciidoc b/docs/java-rest/high-level/apis/index.asciidoc index f6da998a8476f..f7367b6e8c26d 100644 --- a/docs/java-rest/high-level/apis/index.asciidoc +++ b/docs/java-rest/high-level/apis/index.asciidoc @@ -6,6 +6,8 @@ include::open_index.asciidoc[] include::close_index.asciidoc[] +include::putmapping.asciidoc[] + include::_index.asciidoc[] include::get.asciidoc[] diff --git a/docs/java-rest/high-level/apis/putmapping.asciidoc b/docs/java-rest/high-level/apis/putmapping.asciidoc new file mode 100644 index 0000000000000..57b8ec8964a9a --- /dev/null +++ b/docs/java-rest/high-level/apis/putmapping.asciidoc @@ -0,0 +1,71 @@ +[[java-rest-high-put-mapping]] +=== Put Mapping API + +[[java-rest-high-put-mapping-request]] +==== Put Mapping Request + +A `PutMappingRequest` requires an `index` argument, and a type: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-request] +-------------------------------------------------- +<1> The index to add the mapping to +<2> The type to create (or update) + +==== Mapping source +A description of the fields to create on the mapping; if not defined, the mapping will default to empty. 
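To make the "defaults to empty" remark concrete, a short sketch grounded in the request tests added later in this commit (index, type, and field names are illustrative):

// Sketch: with no source set, the request body serializes to an empty object;
// with a source set, the body is exactly the mapping JSON.
PutMappingRequest request = new PutMappingRequest("foo").type("my_type");
Strings.toString(request);  // -> "{}"  (mapping defaults to empty)
request.source("{\"properties\":{\"message\":{\"type\":\"text\"}}}", XContentType.JSON);
Strings.toString(request);  // -> the mapping JSON itself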
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-request-source] +-------------------------------------------------- +<1> The mapping source + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for the all the nodes to acknowledge the index creation as a `TimeValue` +<2> Timeout to wait for the all the nodes to acknowledge the index creation as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +[[java-rest-high-put-mapping-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-execute] +-------------------------------------------------- + +[[java-rest-high-put-mapping-async]] +==== Asynchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-execute-async] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. 
The raised exception is provided as an argument + +[[java-rest-high-put-mapping-response]] +==== Put Mapping Response + +The returned `PutMappingResponse` allows to retrieve information about the executed + operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-mapping-response] +-------------------------------------------------- +<1> Indicates whether all of the nodes have acknowledged the request diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index fa71b62d64e70..aede4789f4dec 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -8,6 +8,7 @@ Indices APIs:: * <> * <> * <> +* <> Single document APIs:: * <> diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 7b6c8f6eb6f40..03c1308e1d9fd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; @@ -57,7 +58,7 @@ * @see org.elasticsearch.client.IndicesAdminClient#putMapping(PutMappingRequest) * @see PutMappingResponse */ -public class PutMappingRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { +public class PutMappingRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable, ToXContentObject { private static ObjectHashSet RESERVED_FIELDS = ObjectHashSet.from( "_uid", "_id", "_type", "_source", "_all", "_analyzer", "_parent", "_routing", "_index", @@ -318,4 +319,14 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalWriteable(concreteIndex); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (source != null) { + builder.rawValue(new BytesArray(source), XContentType.JSON); + } else { + builder.startObject().endObject(); + } + return builder; + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java index 64b3c77f05067..f427a316c2e81 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java @@ -22,13 +22,24 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import 
org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; /** * The response of put mapping operation. */ -public class PutMappingResponse extends AcknowledgedResponse { +public class PutMappingResponse extends AcknowledgedResponse implements ToXContentObject { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("put_mapping", + true, args -> new PutMappingResponse((boolean) args[0])); + + static { + declareAcknowledgedField(PARSER); + } protected PutMappingResponse() { @@ -49,4 +60,16 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeAcknowledged(out); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + addAcknowledgedField(builder); + builder.endObject(); + return builder; + } + + public static PutMappingResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java index 41691f70c06f3..d7553ebf07cda 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java @@ -115,7 +115,7 @@ public void testToAndFromXContent() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); BytesReference originalBytes = toShuffledXContent(createIndexRequest, xContentType, EMPTY_PARAMS, humanReadable); - CreateIndexRequest parsedCreateIndexRequest = new CreateIndexRequest(createIndexRequest.index()); + CreateIndexRequest parsedCreateIndexRequest = new CreateIndexRequest(); parsedCreateIndexRequest.source(originalBytes, xContentType); assertMappingsEqual(createIndexRequest.mappings(), parsedCreateIndexRequest.mappings()); @@ -201,7 +201,7 @@ private static XContentBuilder randomMapping(String type) throws IOException { return builder; } - private static void randomMappingFields(XContentBuilder builder, boolean allowObjectField) throws IOException { + public static void randomMappingFields(XContentBuilder builder, boolean allowObjectField) throws IOException { builder.startObject("properties"); int fieldsNo = randomIntBetween(0, 5); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java index 96dcef700a956..902dc1870934c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java @@ -21,17 +21,26 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestTests; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import 
org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.yaml.YamlXContent; import org.elasticsearch.index.Index; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; + public class PutMappingRequestTests extends ESTestCase { public void testValidation() { @@ -94,4 +103,79 @@ public void testPutMappingRequestSerialization() throws IOException { } } } + + public void testToXContent() throws IOException { + PutMappingRequest request = new PutMappingRequest("foo"); + request.type("my_type"); + + XContentBuilder mapping = JsonXContent.contentBuilder().startObject(); + mapping.startObject("properties"); + mapping.startObject("email"); + mapping.field("type", "text"); + mapping.endObject(); + mapping.endObject(); + mapping.endObject(); + request.source(mapping); + + String actualRequestBody = Strings.toString(request); + String expectedRequestBody = "{\"properties\":{\"email\":{\"type\":\"text\"}}}"; + assertEquals(expectedRequestBody, actualRequestBody); + } + + public void testToXContentWithEmptySource() throws IOException { + PutMappingRequest request = new PutMappingRequest("foo"); + request.type("my_type"); + + String actualRequestBody = Strings.toString(request); + String expectedRequestBody = "{}"; + assertEquals(expectedRequestBody, actualRequestBody); + } + + public void testToAndFromXContent() throws IOException { + + final PutMappingRequest putMappingRequest = createTestItem(); + + boolean humanReadable = randomBoolean(); + final XContentType xContentType = randomFrom(XContentType.values()); + BytesReference originalBytes = toShuffledXContent(putMappingRequest, xContentType, EMPTY_PARAMS, humanReadable); + + PutMappingRequest parsedPutMappingRequest = new PutMappingRequest(); + parsedPutMappingRequest.source(originalBytes, xContentType); + + assertMappingsEqual(putMappingRequest.source(), parsedPutMappingRequest.source()); + } + + private void assertMappingsEqual(String expected, String actual) throws IOException { + + XContentParser expectedJson = createParser(XContentType.JSON.xContent(), expected); + XContentParser actualJson = createParser(XContentType.JSON.xContent(), actual); + assertEquals(expectedJson.mapOrdered(), actualJson.mapOrdered()); + } + + /** + * Returns a random {@link PutMappingRequest}. 
+ */ + private static PutMappingRequest createTestItem() throws IOException { + String index = randomAlphaOfLength(5); + + PutMappingRequest request = new PutMappingRequest(index); + + String type = randomAlphaOfLength(5); + request.type(type); + request.source(randomMapping()); + + return request; + } + + private static XContentBuilder randomMapping() throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + + if (randomBoolean()) { + CreateIndexRequestTests.randomMappingFields(builder, true); + } + + builder.endObject(); + return builder; + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponseTests.java new file mode 100644 index 0000000000000..a52969c628106 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponseTests.java @@ -0,0 +1,85 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.mapping.put; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; + +public class PutMappingResponseTests extends ESTestCase { + + public void testToXContent() { + PutMappingResponse response = new PutMappingResponse(true); + String output = Strings.toString(response); + assertEquals("{\"acknowledged\":true}", output); + } + + public void testToAndFromXContent() throws IOException { + doFromXContentTestWithRandomFields(false); + } + + /** + * This test adds random fields and objects to the xContent rendered out to + * ensure we can parse it back to be forward compatible with additions to + * the xContent + */ + public void testFromXContentWithRandomFields() throws IOException { + doFromXContentTestWithRandomFields(true); + } + + private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { + + final PutMappingResponse putMappingResponse = createTestItem(); + + boolean humanReadable = randomBoolean(); + final XContentType xContentType = randomFrom(XContentType.values()); + BytesReference originalBytes = toShuffledXContent(putMappingResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + + BytesReference mutated; + if (addRandomFields) { + mutated = insertRandomFields(xContentType, originalBytes, null, random()); + } else { + mutated = originalBytes; + } + PutMappingResponse parsedPutMappingResponse; + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { + parsedPutMappingResponse = PutMappingResponse.fromXContent(parser); + assertNull(parser.nextToken()); + } + + assertEquals(putMappingResponse.isAcknowledged(), parsedPutMappingResponse.isAcknowledged()); + } + + /** + * Returns a random {@link PutMappingResponse}. + */ + private static PutMappingResponse createTestItem() throws IOException { + boolean acknowledged = randomBoolean(); + + return new PutMappingResponse(acknowledged); + } +} From ba9e2e44cb32f1181dc47b3a6cc5cacac75600a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 23 Jan 2018 13:08:54 +0100 Subject: [PATCH 86/94] [Test] Re-Add integer_range and date_range field types for query builder tests (#28171) The tests for those field types were removed in #26549 because the range mapper was moved to a module, but later this mapper was moved back to core in #27854. This change adds back those two field types like before to the general setup in AbstractQueryTestCase and adds some specifics to the RangeQueryBuilder and TermsQueryBuilder tests. Also adding back an integration test in SearchQueryIT that has been removed before but that can be kept with the mapper back in core now. 
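For context, a sketch of the query shape these re-added tests now cover (it mirrors the SearchQueryIT test in the diff below, from which the index and field names are taken):

// Sketch: a range query against a range-typed field. The `relation` controls how
// the query range must overlap the indexed range (INTERSECTS, CONTAINS, or WITHIN).
RangeQueryBuilder range = new RangeQueryBuilder("int_range")
        .relation("intersects")
        .from(10)
        .to(20);
SearchResponse searchResponse = client().prepareSearch("test").setQuery(range).get();
assertHitCount(searchResponse, 1); // assuming one stored range overlapping [10, 20]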
Relates to #28147 --- .../index/query/RangeQueryBuilderTests.java | 14 +++++++++++--- .../index/query/TermsQueryBuilderTests.java | 7 +++---- .../elasticsearch/search/query/SearchQueryIT.java | 14 ++++++++++++++ .../elasticsearch/test/AbstractQueryTestCase.java | 12 ++++++++---- 4 files changed, 36 insertions(+), 11 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index 2230436b18ef4..3668c7dec17a0 100644 --- a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -65,13 +65,13 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() { switch (randomIntBetween(0, 2)) { case 0: // use mapped integer field for numeric range queries - query = new RangeQueryBuilder(INT_FIELD_NAME); + query = new RangeQueryBuilder(randomBoolean() ? INT_FIELD_NAME : INT_RANGE_FIELD_NAME); query.from(randomIntBetween(1, 100)); query.to(randomIntBetween(101, 200)); break; case 1: // use mapped date field, using date string representation - query = new RangeQueryBuilder(DATE_FIELD_NAME); + query = new RangeQueryBuilder(randomBoolean() ? DATE_FIELD_NAME : DATE_RANGE_FIELD_NAME); query.from(new DateTime(System.currentTimeMillis() - randomIntBetween(0, 1000000), DateTimeZone.UTC).toString()); query.to(new DateTime(System.currentTimeMillis() + randomIntBetween(0, 1000000), DateTimeZone.UTC).toString()); // Create timestamp option only then we have a date mapper, @@ -99,6 +99,10 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() { if (randomBoolean()) { query.to(null); } + if (query.fieldName().equals(INT_RANGE_FIELD_NAME) || query.fieldName().equals(DATE_RANGE_FIELD_NAME)) { + query.relation( + randomFrom(ShapeRelation.CONTAINS.toString(), ShapeRelation.INTERSECTS.toString(), ShapeRelation.WITHIN.toString())); + } return query; } @@ -143,7 +147,9 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, } else if (getCurrentTypes().length == 0 || (queryBuilder.fieldName().equals(DATE_FIELD_NAME) == false - && queryBuilder.fieldName().equals(INT_FIELD_NAME) == false)) { + && queryBuilder.fieldName().equals(INT_FIELD_NAME) == false + && queryBuilder.fieldName().equals(DATE_RANGE_FIELD_NAME) == false + && queryBuilder.fieldName().equals(INT_RANGE_FIELD_NAME) == false)) { assertThat(query, instanceOf(TermRangeQuery.class)); TermRangeQuery termRangeQuery = (TermRangeQuery) query; assertThat(termRangeQuery.getField(), equalTo(queryBuilder.fieldName())); @@ -219,6 +225,8 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, maxInt--; } } + } else if (queryBuilder.fieldName().equals(DATE_RANGE_FIELD_NAME) || queryBuilder.fieldName().equals(INT_RANGE_FIELD_NAME)) { + // todo can't check RangeFieldQuery because its currently package private (this will change) } else { throw new UnsupportedOperationException(); } diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java index 79f9af61408b2..c945e595213fd 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java @@ -19,12 +19,12 @@ package org.elasticsearch.index.query; -import org.apache.lucene.search.TermInSetQuery; import 
org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PointInSetQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.get.GetRequest; @@ -77,9 +77,8 @@ protected TermsQueryBuilder doCreateTestQueryBuilder() { if (randomBoolean()) { // make between 0 and 5 different values of the same type String fieldName; - do { - fieldName = getRandomFieldName(); - } while (fieldName.equals(GEO_POINT_FIELD_NAME) || fieldName.equals(GEO_SHAPE_FIELD_NAME)); + fieldName = randomValueOtherThanMany(choice -> choice.equals(GEO_POINT_FIELD_NAME) || choice.equals(GEO_SHAPE_FIELD_NAME) + || choice.equals(INT_RANGE_FIELD_NAME) || choice.equals(DATE_RANGE_FIELD_NAME), () -> getRandomFieldName()); Object[] values = new Object[randomInt(5)]; for (int i = 0; i < values.length; i++) { values[i] = getRandomValueForFieldName(fieldName); diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 4b56d2bc9e1fe..c3f1da82c7984 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -36,6 +36,7 @@ import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.query.WrapperQueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders; @@ -1893,4 +1894,17 @@ public void testQueryStringParserCache() throws Exception { } } + public void testRangeQueryRangeFields_24744() throws Exception { + assertAcked(prepareCreate("test").addMapping("type1", "int_range", "type=integer_range")); + + client().prepareIndex("test", "type1", "1") + .setSource(jsonBuilder().startObject().startObject("int_range").field("gte", 10).field("lte", 20).endObject().endObject()) + .get(); + refresh(); + + RangeQueryBuilder range = new RangeQueryBuilder("int_range").relation("intersects").from(Integer.MIN_VALUE).to(Integer.MAX_VALUE); + SearchResponse searchResponse = client().prepareSearch("test").setQuery(range).get(); + assertHitCount(searchResponse, 1); + } + } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 4d30bddb3a45f..f8b1572fa09cb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -138,17 +138,19 @@ public abstract class AbstractQueryTestCase> public static final String STRING_FIELD_NAME = "mapped_string"; protected static final String STRING_FIELD_NAME_2 = "mapped_string_2"; protected static final String INT_FIELD_NAME = "mapped_int"; + protected static final String INT_RANGE_FIELD_NAME = "mapped_int_range"; protected static final String DOUBLE_FIELD_NAME = "mapped_double"; protected static final String BOOLEAN_FIELD_NAME = "mapped_boolean"; protected static final String DATE_FIELD_NAME = "mapped_date"; + protected static final String 
DATE_RANGE_FIELD_NAME = "mapped_date_range"; protected static final String OBJECT_FIELD_NAME = "mapped_object"; protected static final String GEO_POINT_FIELD_NAME = "mapped_geo_point"; protected static final String GEO_SHAPE_FIELD_NAME = "mapped_geo_shape"; - protected static final String[] MAPPED_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, - DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, + protected static final String[] MAPPED_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, INT_RANGE_FIELD_NAME, + DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, GEO_SHAPE_FIELD_NAME}; - private static final String[] MAPPED_LEAF_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, - DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, GEO_POINT_FIELD_NAME, }; + private static final String[] MAPPED_LEAF_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, INT_RANGE_FIELD_NAME, + DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, GEO_POINT_FIELD_NAME, }; private static final int NUMBER_OF_TESTQUERIES = 20; protected static Version indexVersionCreated; @@ -1077,9 +1079,11 @@ public void onRemoval(ShardId shardId, Accountable accountable) { STRING_FIELD_NAME, "type=text", STRING_FIELD_NAME_2, "type=keyword", INT_FIELD_NAME, "type=integer", + INT_RANGE_FIELD_NAME, "type=integer_range", DOUBLE_FIELD_NAME, "type=double", BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", + DATE_RANGE_FIELD_NAME, "type=date_range", OBJECT_FIELD_NAME, "type=object", GEO_POINT_FIELD_NAME, "type=geo_point", GEO_SHAPE_FIELD_NAME, "type=geo_shape" From d31e964a869432dc506578d744f2a4d3465f0ea3 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 23 Jan 2018 14:50:02 +0100 Subject: [PATCH 87/94] Provide a better error message for the case when all shards failed (#28333) Today we don't specify a cause, which can make debugging very tricky. This change makes a best effort to supply at least one cause for the failure. --- .../action/search/AbstractSearchAsyncAction.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 4632ef63174a2..b9e9f1ec483d2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -122,14 +122,14 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha * at least one successful operation left and if so we move to the next phase. If not we immediately fail the * search phase as "all shards failed"*/ if (successfulOps.get() == 0) { // we have 0 successful results that means we shortcut stuff and return a failure + final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures()); + Throwable cause = shardSearchFailures.length == 0 ? null : + ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; if (logger.isDebugEnabled()) { - final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures()); - Throwable cause = shardSearchFailures.length == 0 ?
null : - ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; logger.debug((Supplier) () -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), cause); } - onPhaseFailure(currentPhase, "all shards failed", null); + onPhaseFailure(currentPhase, "all shards failed", cause); } else { if (logger.isTraceEnabled()) { final String resultsFrom = results.getSuccessfulResults() From 19cfc258734e633889c130e7b13961c827af6249 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Tue, 23 Jan 2018 15:14:49 +0100 Subject: [PATCH 88/94] Adds the ability to specify a format on composite date_histogram source (#28310) This commit adds the ability to specify a date format on the `date_histogram` composite source. If the format is defined, the key for the source is returned as a formatted date. Closes #27923 --- .../bucket/composite-aggregation.asciidoc | 36 +++++++- .../test/search.aggregation/230_composite.yml | 83 ++++++++++++++++- .../CompositeAggregationBuilder.java | 6 +- .../CompositeAggregationFactory.java | 7 +- .../bucket/composite/CompositeAggregator.java | 17 ++-- .../composite/CompositeValuesComparator.java | 2 +- .../composite/CompositeValuesSource.java | 18 +++- .../CompositeValuesSourceBuilder.java | 36 +++++++- .../CompositeValuesSourceConfig.java | 22 ++++- .../DateHistogramValuesSourceBuilder.java | 13 ++- .../HistogramValuesSourceBuilder.java | 4 +- .../bucket/composite/InternalComposite.java | 88 +++++++++++++++---- .../composite/TermsValuesSourceBuilder.java | 2 +- .../composite/CompositeAggregatorTests.java | 87 ++++++++++++++++++ .../composite/InternalCompositeTests.java | 56 +++++++----- 15 files changed, 401 insertions(+), 76 deletions(-) diff --git a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc index 2e4b9a1101108..438eb5afc0162 100644 --- a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc @@ -225,7 +225,41 @@ Note that fractional time values are not supported, but you can address this by time unit (e.g., `1.5h` could instead be specified as `90m`). [float] -===== Time Zone +====== Format + +Internally, a date is represented as a 64 bit number representing a timestamp in milliseconds-since-the-epoch. +These timestamps are returned as the bucket keys. It is possible to return a formatted date string instead using +the format specified with the format parameter: + +[source,js] +-------------------------------------------------- +GET /_search +{ + "aggs" : { + "my_buckets": { + "composite" : { + "sources" : [ + { + "date": { + "date_histogram" : { + "field": "timestamp", + "interval": "1d", + "format": "yyyy-MM-dd" <1> + } + } + } + ] + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> Supports expressive date <> + +[float] +====== Time Zone Date-times are stored in Elasticsearch in UTC. By default, all bucketing and rounding is also done in UTC. 
The `time_zone` parameter can be used to indicate diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index aaf277d171ba0..e094c47ff422b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -7,6 +7,8 @@ setup: mappings: doc: properties: + date: + type: date keyword: type: keyword long: @@ -40,6 +42,20 @@ setup: id: 4 body: { "keyword": "bar", "long": [1000, 0] } + - do: + index: + index: test + type: doc + id: 5 + body: { "date": "2017-10-20T03:08:45" } + + - do: + index: + index: test + type: doc + id: 6 + body: { "date": "2017-10-21T07:00:00" } + - do: indices.refresh: index: [test] @@ -66,7 +82,7 @@ setup: } ] - - match: {hits.total: 4} + - match: {hits.total: 6} - length: { aggregations.test.buckets: 2 } - match: { aggregations.test.buckets.0.key.kw: "bar" } - match: { aggregations.test.buckets.0.doc_count: 3 } @@ -104,7 +120,7 @@ setup: } ] - - match: {hits.total: 4} + - match: {hits.total: 6} - length: { aggregations.test.buckets: 5 } - match: { aggregations.test.buckets.0.key.long: 0} - match: { aggregations.test.buckets.0.key.kw: "bar" } @@ -154,7 +170,7 @@ setup: ] after: { "long": 20, "kw": "foo" } - - match: {hits.total: 4} + - match: {hits.total: 6} - length: { aggregations.test.buckets: 2 } - match: { aggregations.test.buckets.0.key.long: 100 } - match: { aggregations.test.buckets.0.key.kw: "bar" } @@ -188,7 +204,7 @@ setup: ] after: { "kw": "delta" } - - match: {hits.total: 4} + - match: {hits.total: 6} - length: { aggregations.test.buckets: 1 } - match: { aggregations.test.buckets.0.key.kw: "foo" } - match: { aggregations.test.buckets.0.doc_count: 2 } @@ -220,3 +236,62 @@ setup: } } ] + +--- +"Composite aggregation with format": + - skip: + version: " - 6.99.99" + reason: this uses a new option (format) added in 7.0.0 + + - do: + search: + index: test + body: + aggregations: + test: + composite: + sources: [ + { + "date": { + "date_histogram": { + "field": "date", + "interval": "1d", + "format": "yyyy-MM-dd" + } + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.test.buckets: 2 } + - match: { aggregations.test.buckets.0.key.date: "2017-10-20" } + - match: { aggregations.test.buckets.0.doc_count: 1 } + - match: { aggregations.test.buckets.1.key.date: "2017-10-21" } + - match: { aggregations.test.buckets.1.doc_count: 1 } + + - do: + search: + index: test + body: + aggregations: + test: + composite: + after: { + date: "2017-10-20" + } + sources: [ + { + "date": { + "date_histogram": { + "field": "date", + "interval": "1d", + "format": "yyyy-MM-dd" + } + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.test.buckets: 1 } + - match: { aggregations.test.buckets.0.key.date: "2017-10-21" } + - match: { aggregations.test.buckets.0.doc_count: 1 } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index 5b36063e17ac0..58a15bbb36684 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -147,17 +147,15 @@ 
protected AggregatorFactory doBuild(SearchContext context, AggregatorFactory< Sort sort = indexSortConfig.buildIndexSort(shardContext::fieldMapper, shardContext::getForField); System.arraycopy(sort.getSort(), 0, sortFields, 0, sortFields.length); } - List sourceNames = new ArrayList<>(); for (int i = 0; i < configs.length; i++) { configs[i] = sources.get(i).build(context, i, configs.length, sortFields[i]); - sourceNames.add(sources.get(i).name()); if (configs[i].valuesSource().needsScores()) { throw new IllegalArgumentException("[sources] cannot access _score"); } } final CompositeKey afterKey; if (after != null) { - if (after.size() != sources.size()) { + if (after.size() != configs.length) { throw new IllegalArgumentException("[after] has " + after.size() + " value(s) but [sources] has " + sources.size()); } @@ -179,7 +177,7 @@ protected AggregatorFactory doBuild(SearchContext context, AggregatorFactory< } else { afterKey = null; } - return new CompositeAggregationFactory(name, context, parent, subfactoriesBuilder, metaData, size, configs, sourceNames, afterKey); + return new CompositeAggregationFactory(name, context, parent, subfactoriesBuilder, metaData, size, configs, afterKey); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java index c0aeb5304a580..2b2fa4fb7e3eb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java @@ -32,17 +32,14 @@ class CompositeAggregationFactory extends AggregatorFactory { private final int size; private final CompositeValuesSourceConfig[] sources; - private final List sourceNames; private final CompositeKey afterKey; CompositeAggregationFactory(String name, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData, - int size, CompositeValuesSourceConfig[] sources, - List sourceNames, CompositeKey afterKey) throws IOException { + int size, CompositeValuesSourceConfig[] sources, CompositeKey afterKey) throws IOException { super(name, context, parent, subFactoriesBuilder, metaData); this.size = size; this.sources = sources; - this.sourceNames = sourceNames; this.afterKey = afterKey; } @@ -50,6 +47,6 @@ class CompositeAggregationFactory extends AggregatorFactory pipelineAggregators, Map metaData) throws IOException { return new CompositeAggregator(name, factories, context, parent, pipelineAggregators, metaData, - size, sources, sourceNames, afterKey); + size, sources, afterKey); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index 3467aaf318baf..e822480f9150d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.RoaringDocIdSet; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; import 
org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -43,11 +44,13 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; +import java.util.stream.Collectors; final class CompositeAggregator extends BucketsAggregator { private final int size; private final CompositeValuesSourceConfig[] sources; private final List sourceNames; + private final List formats; private final boolean canEarlyTerminate; private final TreeMap keys; @@ -59,12 +62,12 @@ final class CompositeAggregator extends BucketsAggregator { CompositeAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent, List pipelineAggregators, Map metaData, - int size, CompositeValuesSourceConfig[] sources, List sourceNames, - CompositeKey rawAfterKey) throws IOException { + int size, CompositeValuesSourceConfig[] sources, CompositeKey rawAfterKey) throws IOException { super(name, factories, context, parent, pipelineAggregators, metaData); this.size = size; this.sources = sources; - this.sourceNames = sourceNames; + this.sourceNames = Arrays.stream(sources).map(CompositeValuesSourceConfig::name).collect(Collectors.toList()); + this.formats = Arrays.stream(sources).map(CompositeValuesSourceConfig::format).collect(Collectors.toList()); // we use slot 0 to fill the current document (size+1). this.array = new CompositeValuesComparator(context.searcher().getIndexReader(), sources, size+1); if (rawAfterKey != null) { @@ -131,15 +134,17 @@ public InternalAggregation buildAggregation(long zeroBucket) throws IOException CompositeKey key = array.toCompositeKey(slot); InternalAggregations aggs = bucketAggregations(slot); int docCount = bucketDocCount(slot); - buckets[pos++] = new InternalComposite.InternalBucket(sourceNames, key, reverseMuls, docCount, aggs); + buckets[pos++] = new InternalComposite.InternalBucket(sourceNames, formats, key, reverseMuls, docCount, aggs); } - return new InternalComposite(name, size, sourceNames, Arrays.asList(buckets), reverseMuls, pipelineAggregators(), metaData()); + return new InternalComposite(name, size, sourceNames, formats, Arrays.asList(buckets), reverseMuls, + pipelineAggregators(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { final int[] reverseMuls = getReverseMuls(); - return new InternalComposite(name, size, sourceNames, Collections.emptyList(), reverseMuls, pipelineAggregators(), metaData()); + return new InternalComposite(name, size, sourceNames, formats, Collections.emptyList(), reverseMuls, + pipelineAggregators(), metaData()); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesComparator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesComparator.java index 849fe2c513e9b..0ce87460a5429 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesComparator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesComparator.java @@ -56,7 +56,7 @@ final class CompositeValuesComparator { if (vs.isFloatingPoint()) { arrays[i] = CompositeValuesSource.wrapDouble(vs, size, reverseMul); } else { - arrays[i] = CompositeValuesSource.wrapLong(vs, size, reverseMul); + arrays[i] = CompositeValuesSource.wrapLong(vs, sources[i].format(), size, reverseMul); } } } diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSource.java index 88d54744777e0..2d0368dfd4d28 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSource.java @@ -23,8 +23,10 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.sort.SortOrder; @@ -96,8 +98,9 @@ interface Collector { /** * Creates a {@link CompositeValuesSource} that generates long values. */ - static CompositeValuesSource wrapLong(ValuesSource.Numeric vs, int size, int reverseMul) { - return new LongValuesSource(vs, size, reverseMul); + static CompositeValuesSource wrapLong(ValuesSource.Numeric vs, DocValueFormat format, + int size, int reverseMul) { + return new LongValuesSource(vs, format, size, reverseMul); } /** @@ -273,9 +276,12 @@ Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOE */ private static class LongValuesSource extends CompositeValuesSource { private final long[] values; + // handles "format" for date histogram source + private final DocValueFormat format; - LongValuesSource(ValuesSource.Numeric vs, int size, int reverseMul) { + LongValuesSource(ValuesSource.Numeric vs, DocValueFormat format, int size, int reverseMul) { super(vs, size, reverseMul); + this.format = format; this.values = new long[size]; } @@ -304,7 +310,11 @@ void setTop(Comparable value) { if (value instanceof Number) { topValue = ((Number) value).longValue(); } else { - topValue = Long.parseLong(value.toString()); + // for date histogram source with "format", the after value is formatted + // as a string so we need to retrieve the original value in milliseconds. 
+ topValue = format.parseLong(value.toString(), false, () -> { + throw new IllegalArgumentException("now() is not supported in [after] key"); + }); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java index 2652d90f8c3e7..85d172907e013 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.SortField; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -51,6 +52,7 @@ public abstract class CompositeValuesSourceBuilder config = ValuesSourceConfig.resolve(context.getQueryShardContext(), - valueType, field, script, missing, null, null); + valueType, field, script, missing, null, format); return innerBuild(context, config, pos, numPos, sortField); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java index 4d5c1c8c84683..ee70d3f39a550 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java @@ -19,30 +19,47 @@ package org.elasticsearch.search.aggregations.bucket.composite; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.sort.SortOrder; class CompositeValuesSourceConfig { private final String name; private final ValuesSource vs; + private final DocValueFormat format; private final int reverseMul; private final boolean canEarlyTerminate; - CompositeValuesSourceConfig(String name, ValuesSource vs, SortOrder order, boolean canEarlyTerminate) { + CompositeValuesSourceConfig(String name, ValuesSource vs, DocValueFormat format, SortOrder order, boolean canEarlyTerminate) { this.name = name; this.vs = vs; + this.format = format; this.canEarlyTerminate = canEarlyTerminate; this.reverseMul = order == SortOrder.ASC ? 1 : -1; } + /** + * Returns the name associated with this configuration. + */ String name() { return name; } + /** + * Returns the {@link ValuesSource} for this configuration. + */ ValuesSource valuesSource() { return vs; } + /** + * The {@link DocValueFormat} to use for formatting the keys. + * {@link DocValueFormat#RAW} means no formatting. + */ + DocValueFormat format() { + return format; + } + /** * The sort order for the values source (e.g. -1 for descending and 1 for ascending). */ @@ -51,6 +68,9 @@ int reverseMul() { return reverseMul; } + /** + * Returns whether this {@link ValuesSource} is used to sort the index. 
+ */ boolean canEarlyTerminate() { return canEarlyTerminate; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 0094da5069fd7..b7abf82a58ea3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -30,6 +30,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.Script; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.support.FieldContext; @@ -46,8 +48,8 @@ import static org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder.DATE_FIELD_UNITS; /** - * A {@link CompositeValuesSourceBuilder} that that builds a {@link RoundingValuesSource} from a {@link Script} or - * a field name. + * A {@link CompositeValuesSourceBuilder} that builds a {@link RoundingValuesSource} from a {@link Script} or + * a field name using the provided interval. */ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuilder { static final String TYPE = "date_histogram"; @@ -55,6 +57,7 @@ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuild private static final ObjectParser PARSER; static { PARSER = new ObjectParser<>(DateHistogramValuesSourceBuilder.TYPE); + PARSER.declareString(DateHistogramValuesSourceBuilder::format, new ParseField("format")); PARSER.declareField((histogram, interval) -> { if (interval instanceof Long) { histogram.interval((long) interval); @@ -235,7 +238,11 @@ protected CompositeValuesSourceConfig innerBuild(SearchContext context, canEarlyTerminate = checkCanEarlyTerminate(context.searcher().getIndexReader(), fieldContext.field(), order() == SortOrder.ASC ? false : true, sortField); } - return new CompositeValuesSourceConfig(name, vs, order(), canEarlyTerminate); + // dates are returned as timestamp in milliseconds-since-the-epoch unless a specific date format + // is specified in the builder. + final DocValueFormat docValueFormat = format() == null ? 
DocValueFormat.RAW : config.format(); + return new CompositeValuesSourceConfig(name, vs, docValueFormat, + order(), canEarlyTerminate); } else { throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java index dd5eb1b52d04c..83ada5dbbc3c3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java @@ -37,7 +37,7 @@ import java.util.Objects; /** - * A {@link CompositeValuesSourceBuilder} that that builds a {@link HistogramValuesSource} from another numeric values source + * A {@link CompositeValuesSourceBuilder} that builds a {@link HistogramValuesSource} from another numeric values source * using the provided interval. */ public class HistogramValuesSourceBuilder extends CompositeValuesSourceBuilder { @@ -128,7 +128,7 @@ protected CompositeValuesSourceConfig innerBuild(SearchContext context, canEarlyTerminate = checkCanEarlyTerminate(context.searcher().getIndexReader(), fieldContext.field(), order() == SortOrder.ASC ? false : true, sortField); } - return new CompositeValuesSourceConfig(name, vs, order(), canEarlyTerminate); + return new CompositeValuesSourceConfig(name, vs, config.format(), order(), canEarlyTerminate); } else { throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index 824250948d740..fd9245a9c4a5b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -20,9 +20,11 @@ package org.elasticsearch.search.aggregations.bucket.composite; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; @@ -35,6 +37,7 @@ import java.util.AbstractSet; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -49,11 +52,14 @@ public class InternalComposite private final List buckets; private final int[] reverseMuls; private final List sourceNames; + private final List formats; - InternalComposite(String name, int size, List sourceNames, List buckets, int[] reverseMuls, + InternalComposite(String name, int size, List sourceNames, List formats, + List buckets, int[] reverseMuls, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); this.sourceNames = sourceNames; + this.formats = formats; this.buckets = buckets; this.size = size; this.reverseMuls = 
reverseMuls; @@ -63,14 +69,27 @@ public InternalComposite(StreamInput in) throws IOException { super(in); this.size = in.readVInt(); this.sourceNames = in.readList(StreamInput::readString); + this.formats = new ArrayList<>(sourceNames.size()); + for (int i = 0; i < sourceNames.size(); i++) { + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + formats.add(in.readNamedWriteable(DocValueFormat.class)); + } else { + formats.add(DocValueFormat.RAW); + } + } this.reverseMuls = in.readIntArray(); - this.buckets = in.readList((input) -> new InternalBucket(input, sourceNames, reverseMuls)); + this.buckets = in.readList((input) -> new InternalBucket(input, sourceNames, formats, reverseMuls)); } @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(size); out.writeStringList(sourceNames); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + for (DocValueFormat format : formats) { + out.writeNamedWriteable(format); + } + } out.writeIntArray(reverseMuls); out.writeList(buckets); } @@ -87,12 +106,13 @@ public String getWriteableName() { @Override public InternalComposite create(List buckets) { - return new InternalComposite(name, size, sourceNames, buckets, reverseMuls, pipelineAggregators(), getMetaData()); + return new InternalComposite(name, size, sourceNames, formats, buckets, reverseMuls, pipelineAggregators(), getMetaData()); } @Override public InternalBucket createBucket(InternalAggregations aggregations, InternalBucket prototype) { - return new InternalBucket(prototype.sourceNames, prototype.key, prototype.reverseMuls, prototype.docCount, aggregations); + return new InternalBucket(prototype.sourceNames, prototype.formats, prototype.key, prototype.reverseMuls, + prototype.docCount, aggregations); } public int getSize() { @@ -149,7 +169,7 @@ public InternalAggregation doReduce(List aggregations, Redu reduceContext.consumeBucketsAndMaybeBreak(1); result.add(reduceBucket); } - return new InternalComposite(name, size, sourceNames, result, reverseMuls, pipelineAggregators(), metaData); + return new InternalComposite(name, size, sourceNames, formats, result, reverseMuls, pipelineAggregators(), metaData); } @Override @@ -191,18 +211,21 @@ static class InternalBucket extends InternalMultiBucketAggregation.InternalBucke private final InternalAggregations aggregations; private final transient int[] reverseMuls; private final transient List sourceNames; + private final transient List formats; - InternalBucket(List sourceNames, CompositeKey key, int[] reverseMuls, long docCount, InternalAggregations aggregations) { + InternalBucket(List sourceNames, List formats, CompositeKey key, int[] reverseMuls, long docCount, + InternalAggregations aggregations) { this.key = key; this.docCount = docCount; this.aggregations = aggregations; this.reverseMuls = reverseMuls; this.sourceNames = sourceNames; + this.formats = formats; } @SuppressWarnings("unchecked") - InternalBucket(StreamInput in, List sourceNames, int[] reverseMuls) throws IOException { + InternalBucket(StreamInput in, List sourceNames, List formats, int[] reverseMuls) throws IOException { final Comparable[] values = new Comparable[in.readVInt()]; for (int i = 0; i < values.length; i++) { values[i] = (Comparable) in.readGenericValue(); @@ -212,6 +235,7 @@ static class InternalBucket extends InternalMultiBucketAggregation.InternalBucke this.aggregations = InternalAggregations.readAggregations(in); this.reverseMuls = reverseMuls; this.sourceNames = sourceNames; + this.formats = formats; } @Override @@ -242,9 
+266,11 @@ public boolean equals(Object obj) { @Override public Map getKey() { - return new ArrayMap(sourceNames, key.values()); + // returns the formatted key in a map + return new ArrayMap(sourceNames, formats, key.values()); } + // get the raw key (without formatting to preserve the natural order). // visible for testing CompositeKey getRawKey() { return key; @@ -260,7 +286,7 @@ public String getKeyAsString() { } builder.append(sourceNames.get(i)); builder.append('='); - builder.append(formatObject(key.get(i))); + builder.append(formatObject(key.get(i), formats.get(i))); } builder.append('}'); return builder.toString(); @@ -284,7 +310,7 @@ InternalBucket reduce(List buckets, ReduceContext reduceContext) aggregations.add(bucket.aggregations); } InternalAggregations aggs = InternalAggregations.reduce(aggregations, reduceContext); - return new InternalBucket(sourceNames, key, reverseMuls, docCount, aggs); + return new InternalBucket(sourceNames, formats, key, reverseMuls, docCount, aggs); } @Override @@ -303,26 +329,52 @@ public int compareKey(InternalBucket other) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { /** - * See {@link CompositeAggregation#bucketToXContentFragment} + * See {@link CompositeAggregation#bucketToXContent} */ throw new UnsupportedOperationException("not implemented"); } } - static Object formatObject(Object obj) { - if (obj instanceof BytesRef) { - return ((BytesRef) obj).utf8ToString(); + /** + * Format obj using the provided {@link DocValueFormat}. + * If the format is equal to {@link DocValueFormat#RAW}, the object is returned as is + * for numbers and a string for {@link BytesRef}s. + */ + static Object formatObject(Object obj, DocValueFormat format) { + if (obj.getClass() == BytesRef.class) { + BytesRef value = (BytesRef) obj; + if (format == DocValueFormat.RAW) { + return value.utf8ToString(); + } else { + return format.format((BytesRef) obj); + } + } else if (obj.getClass() == Long.class) { + Long value = (Long) obj; + if (format == DocValueFormat.RAW) { + return value; + } else { + return format.format(value); + } + } else if (obj.getClass() == Double.class) { + Double value = (Double) obj; + if (format == DocValueFormat.RAW) { + return value; + } else { + return format.format((Double) obj); + } } return obj; } private static class ArrayMap extends AbstractMap { final List keys; + final List formats; final Object[] values; - ArrayMap(List keys, Object[] values) { - assert keys.size() == values.length; + ArrayMap(List keys, List formats, Object[] values) { + assert keys.size() == values.length && keys.size() == formats.size(); this.keys = keys; + this.formats = formats; this.values = values; } @@ -335,7 +387,7 @@ public int size() { public Object get(Object key) { for (int i = 0; i < keys.size(); i++) { if (key.equals(keys.get(i))) { - return formatObject(values[i]); + return formatObject(values[i], formats.get(i)); } } return null; @@ -356,7 +408,7 @@ public boolean hasNext() { @Override public Entry next() { SimpleEntry entry = - new SimpleEntry<>(keys.get(pos), formatObject(values[pos])); + new SimpleEntry<>(keys.get(pos), formatObject(values[pos], formats.get(pos))); ++ pos; return entry; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java index 481c14a37f504..6ca5cdbcb6230 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java @@ -95,6 +95,6 @@ protected CompositeValuesSourceConfig innerBuild(SearchContext context, canEarlyTerminate = checkCanEarlyTerminate(context.searcher().getIndexReader(), fieldContext.field(), order() == SortOrder.ASC ? false : true, sortField); } - return new CompositeValuesSourceConfig(name, vs, order(), canEarlyTerminate); + return new CompositeValuesSourceConfig(name, vs, config.format(), order(), canEarlyTerminate); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index 172aebbc0e5dc..0ebf957a8ddd1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -39,6 +39,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.TestUtil; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -68,6 +69,9 @@ import java.util.function.Consumer; import java.util.function.Supplier; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; + public class CompositeAggregatorTests extends AggregatorTestCase { private static MappedFieldType[] FIELD_TYPES; @@ -761,6 +765,89 @@ public void testWithDateHistogram() throws IOException { ); } + public void testWithDateHistogramAndFormat() throws IOException { + final List>> dataset = new ArrayList<>(); + dataset.addAll( + Arrays.asList( + createDocument("date", asLong("2017-10-20T03:08:45")), + createDocument("date", asLong("2016-09-20T09:00:34")), + createDocument("date", asLong("2016-09-20T11:34:00")), + createDocument("date", asLong("2017-10-20T06:09:24")), + createDocument("date", asLong("2017-10-19T06:09:24")), + createDocument("long", 4L) + ) + ); + final Sort sort = new Sort(new SortedNumericSortField("date", SortField.Type.LONG)); + testSearchCase(new MatchAllDocsQuery(), sort, dataset, + () -> { + DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") + .field("date") + .dateHistogramInterval(DateHistogramInterval.days(1)) + .format("yyyy-MM-dd"); + return new CompositeAggregationBuilder("name", Collections.singletonList(histo)); + }, + (result) -> { + assertEquals(3, result.getBuckets().size()); + assertEquals("{date=2016-09-20}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + assertEquals("{date=2017-10-19}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertEquals("{date=2017-10-20}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(2).getDocCount()); + } + ); + + testSearchCase(new MatchAllDocsQuery(), sort, dataset, + () -> { + DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") + .field("date") + .dateHistogramInterval(DateHistogramInterval.days(1)) + .format("yyyy-MM-dd"); + return new 
CompositeAggregationBuilder("name", Collections.singletonList(histo)) + .aggregateAfter(createAfterKey("date", "2016-09-20")); + + }, (result) -> { + assertEquals(2, result.getBuckets().size()); + assertEquals("{date=2017-10-19}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + assertEquals("{date=2017-10-20}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + } + ); + } + + public void testThatDateHistogramFailsFormatAfter() throws IOException { + ElasticsearchParseException exc = expectThrows(ElasticsearchParseException.class, + () -> testSearchCase(new MatchAllDocsQuery(), null, Collections.emptyList(), + () -> { + DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") + .field("date") + .dateHistogramInterval(DateHistogramInterval.days(1)) + .format("yyyy-MM-dd"); + return new CompositeAggregationBuilder("name", Collections.singletonList(histo)) + .aggregateAfter(createAfterKey("date", "now")); + }, + (result) -> {} + )); + assertThat(exc.getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(exc.getCause().getMessage(), containsString("now() is not supported in [after] key")); + + exc = expectThrows(ElasticsearchParseException.class, + () -> testSearchCase(new MatchAllDocsQuery(), null, Collections.emptyList(), + () -> { + DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") + .field("date") + .dateHistogramInterval(DateHistogramInterval.days(1)) + .format("yyyy-MM-dd"); + return new CompositeAggregationBuilder("name", Collections.singletonList(histo)) + .aggregateAfter(createAfterKey("date", "1474329600000")); + }, + (result) -> {} + )); + assertThat(exc.getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(exc.getCause().getMessage(), containsString("Parse failure")); + } + public void testWithDateHistogramAndTimeZone() throws IOException { final List>> dataset = new ArrayList<>(); dataset.addAll( diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java index 10cc5b8016dc5..322b70cb2d971 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java @@ -21,12 +21,15 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; +import org.joda.time.DateTimeZone; import org.junit.After; import java.io.IOException; @@ -41,28 +44,45 @@ import java.util.stream.Collectors; import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLengthBetween; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomLongBetween; import static org.hamcrest.Matchers.equalTo; import static 
org.hamcrest.Matchers.lessThanOrEqualTo; public class InternalCompositeTests extends InternalMultiBucketAggregationTestCase { private List sourceNames; + private List formats; private int[] reverseMuls; - private int[] formats; + private int[] types; private int size; + private static DocValueFormat randomDocValueFormat(boolean isLong) { + if (isLong) { + // we use specific format only for date histogram on a long/date field + if (randomBoolean()) { + return new DocValueFormat.DateTime(Joda.forPattern("epoch_second"), DateTimeZone.forOffsetHours(1)); + } else { + return DocValueFormat.RAW; + } + } else { + // and the raw format for the other types + return DocValueFormat.RAW; + } + } + @Override public void setUp() throws Exception { super.setUp(); int numFields = randomIntBetween(1, 10); size = randomNumberOfBuckets(); sourceNames = new ArrayList<>(); + formats = new ArrayList<>(); reverseMuls = new int[numFields]; - formats = new int[numFields]; + types = new int[numFields]; for (int i = 0; i < numFields; i++) { sourceNames.add("field_" + i); reverseMuls[i] = randomBoolean() ? 1 : -1; - formats[i] = randomIntBetween(0, 2); + int type = randomIntBetween(0, 2); + types[i] = type; + formats.add(randomDocValueFormat(type == 0)); } } @@ -70,9 +90,10 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { super.tearDown(); - sourceNames= null; - reverseMuls = null; + sourceNames = null; formats = null; + reverseMuls = null; + types = null; } @Override @@ -93,7 +114,7 @@ protected
P parseAndAssert(final InternalAggregati private CompositeKey createCompositeKey() { Comparable[] keys = new Comparable[sourceNames.size()]; for (int j = 0; j < keys.length; j++) { - switch (formats[j]) { + switch (types[j]) { case 0: keys[j] = randomLong(); break; @@ -123,19 +144,6 @@ private Comparator getKeyComparator() { }; } - @SuppressWarnings("unchecked") - private Comparator getBucketComparator() { - return (o1, o2) -> { - for (int i = 0; i < o1.getRawKey().size(); i++) { - int cmp = ((Comparable) o1.getRawKey().get(i)).compareTo(o2.getRawKey().get(i)) * reverseMuls[i]; - if (cmp != 0) { - return cmp; - } - } - return 0; - }; - } - @Override protected InternalComposite createTestInstance(String name, List pipelineAggregators, Map metaData, InternalAggregations aggregations) { @@ -149,11 +157,11 @@ protected InternalComposite createTestInstance(String name, List o1.compareKey(o2)); - return new InternalComposite(name, size, sourceNames, buckets, reverseMuls, Collections.emptyList(), metaData); + return new InternalComposite(name, size, sourceNames, formats, buckets, reverseMuls, Collections.emptyList(), metaData); } @Override @@ -172,7 +180,7 @@ protected InternalComposite mutateInstance(InternalComposite instance) throws IO break; case 1: buckets = new ArrayList<>(buckets); - buckets.add(new InternalComposite.InternalBucket(sourceNames, createCompositeKey(), reverseMuls, + buckets.add(new InternalComposite.InternalBucket(sourceNames, formats, createCompositeKey(), reverseMuls, randomLongBetween(1, 100), InternalAggregations.EMPTY) ); break; @@ -187,7 +195,7 @@ protected InternalComposite mutateInstance(InternalComposite instance) throws IO default: throw new AssertionError("illegal branch"); } - return new InternalComposite(instance.getName(), instance.getSize(), sourceNames, buckets, reverseMuls, + return new InternalComposite(instance.getName(), instance.getSize(), sourceNames, formats, buckets, reverseMuls, instance.pipelineAggregators(), metaData); } From b2ce994be7b99b9239938c411630e690debd216d Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Tue, 23 Jan 2018 16:41:32 +0100 Subject: [PATCH 89/94] [Docs] Fix asciidoc style in composite agg docs --- .../aggregations/bucket/composite-aggregation.asciidoc | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc index 438eb5afc0162..be18689bfddc4 100644 --- a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc @@ -224,7 +224,6 @@ Time values can also be specified via abbreviations supported by < Supports expressive date <> -[float] ====== Time Zone Date-times are stored in Elasticsearch in UTC. By default, all bucketing and From 4d3f7a7695d252dcfc97242f179e934d7d043c51 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 23 Jan 2018 16:57:26 +0100 Subject: [PATCH 90/94] Ensure we protect Collections obtained from scripts from self-referencing (#28335) Self-referencing maps can cause a StackOverflowError (SOE) if they are iterated, i.e. in their toString methods. This change adds some protection to the usage of those collections. 
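For context, the protection introduced below (see `CollectionUtils#ensureNoSelfReferences` in this patch) amounts to a depth-first walk over nested maps, iterables, and arrays with an identity-based set of ancestors. The following is a minimal standalone sketch of that idea, not the patched code itself; the real version also excludes `Path` values and tracks the original reference separately:

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.Set;

public class SelfReferenceSketch {

    static void ensureNoSelfReferences(Object value) {
        doEnsure(value, Collections.newSetFromMap(new IdentityHashMap<>()));
    }

    private static void doEnsure(Object value, Set<Object> ancestors) {
        final Iterable<?> it;
        if (value instanceof Map) {
            it = ((Map<?, ?>) value).values();
        } else if (value instanceof Iterable) {
            it = (Iterable<?>) value;
        } else if (value instanceof Object[]) {
            it = Arrays.asList((Object[]) value);
        } else {
            return; // leaf value (or null): nothing to recurse into
        }
        // identity (==) check: seeing the same container again on this branch is a cycle
        if (ancestors.add(value) == false) {
            throw new IllegalArgumentException("Iterable object is self-referencing itself");
        }
        for (Object o : it) {
            doEnsure(o, ancestors);
        }
        // remove on the way out so the same object may legally appear on sibling branches
        ancestors.remove(value);
    }

    public static void main(String[] args) {
        Map<String, Object> map = new HashMap<>();
        map.put("field", map); // the map contains itself
        try {
            ensureNoSelfReferences(map);
        } catch (IllegalArgumentException e) {
            // fails fast instead of overflowing the stack in toString()/serialization
            System.out.println("caught: " + e.getMessage());
        }
    }
}
```

An identity set is used rather than an `equals()`-based one because calling `hashCode()` or `equals()` on a self-referencing map would itself overflow the stack.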
--- .../CustomReflectionObjectHandler.java | 6 +++ .../rest-api-spec/test/painless/15_update.yml | 2 +- .../rest-api-spec/test/painless/30_search.yml | 36 ++++++++++++++++++ .../common/util/CollectionUtils.java | 38 +++++++++++++++++++ .../common/xcontent/XContentBuilder.java | 36 ++---------------- .../scripted/ScriptedMetricAggregator.java | 2 + .../BucketScriptPipelineAggregator.java | 3 +- .../aggregations/support/ValuesSource.java | 5 ++- .../support/values/ScriptBytesValues.java | 2 + .../subphase/ScriptFieldsFetchSubPhase.java | 2 + .../search/sort/ScriptSortBuilder.java | 5 ++- .../common/util/CollectionUtilsTests.java | 16 ++++++++ .../common/xcontent/BaseXContentTestCase.java | 23 +++++------ 13 files changed, 128 insertions(+), 48 deletions(-) diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomReflectionObjectHandler.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomReflectionObjectHandler.java index eef9d7af8dd01..79319369489fd 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomReflectionObjectHandler.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomReflectionObjectHandler.java @@ -20,6 +20,7 @@ package org.elasticsearch.script.mustache; import com.github.mustachejava.reflect.ReflectionObjectHandler; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.iterable.Iterables; import java.lang.reflect.Array; @@ -154,4 +155,9 @@ public Iterator iterator() { } } + @Override + public String stringify(Object object) { + CollectionUtils.ensureNoSelfReferences(object); + return super.stringify(object); + } } diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/15_update.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/15_update.yml index 0e319be97bf0b..20047e7d4825d 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/15_update.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/15_update.yml @@ -137,4 +137,4 @@ - match: { error.root_cause.0.type: "remote_transport_exception" } - match: { error.type: "illegal_argument_exception" } - - match: { error.reason: "Object has already been built and is self-referencing itself" } + - match: { error.reason: "Iterable object is self-referencing itself" } diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml index 28679cb223fd1..b7be116b38695 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml @@ -406,3 +406,39 @@ - match: { hits.hits.0._score: 1.0 } - match: { aggregations.value_agg.buckets.0.key: 2 } - match: { aggregations.value_agg.buckets.0.doc_count: 1 } + +--- +"Return self-referencing map": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: "1" + + - do: + index: + index: test + type: test + id: 1 + body: { "genre": 1 } + + - do: + indices.refresh: {} + + - do: + catch: bad_request + index: test + search: + body: + aggs: + genre: + terms: + script: + lang: painless + source: "def x = [:] ; def y = [:] ; x.a = y ; y.a = x ; return x" + + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Iterable object is 
self-referencing itself" } + - match: { error.type: "search_phase_execution_exception" } + - match: { error.reason: "all shards failed" } diff --git a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java index 54a49f7e4f254..08d02cdea3172 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java @@ -19,16 +19,20 @@ package org.elasticsearch.common.util; +import java.nio.file.Path; import java.util.AbstractList; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; +import java.util.IdentityHashMap; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.RandomAccess; +import java.util.Set; import com.carrotsearch.hppc.ObjectArrayList; import org.apache.lucene.util.BytesRef; @@ -221,6 +225,40 @@ public static int[] toArray(Collection ints) { return ints.stream().mapToInt(s -> s).toArray(); } + public static void ensureNoSelfReferences(Object value) { + Iterable it = convert(value); + if (it != null) { + ensureNoSelfReferences(it, value, Collections.newSetFromMap(new IdentityHashMap<>())); + } + } + + private static Iterable convert(Object value) { + if (value == null) { + return null; + } + if (value instanceof Map) { + return ((Map) value).values(); + } else if ((value instanceof Iterable) && (value instanceof Path == false)) { + return (Iterable) value; + } else if (value instanceof Object[]) { + return Arrays.asList((Object[]) value); + } else { + return null; + } + } + + private static void ensureNoSelfReferences(final Iterable value, Object originalReference, final Set ancestors) { + if (value != null) { + if (ancestors.add(originalReference) == false) { + throw new IllegalArgumentException("Iterable object is self-referencing itself"); + } + for (Object o : value) { + ensureNoSelfReferences(convert(o), o, ancestors); + } + ancestors.remove(originalReference); + } + } + private static class RotatedList extends AbstractList implements RandomAccess { private final List in; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index 070510e13ff69..9f7603c997ea8 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.CollectionUtils; import org.joda.time.DateTimeZone; import org.joda.time.ReadableInstant; import org.joda.time.format.DateTimeFormatter; @@ -43,7 +44,6 @@ import java.util.Collections; import java.util.Date; import java.util.HashMap; -import java.util.IdentityHashMap; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -780,7 +780,6 @@ private XContentBuilder values(Object[] values, boolean ensureNoSelfReferences) if (values == null) { return nullValue(); } - return value(Arrays.asList(values), ensureNoSelfReferences); } @@ -865,7 +864,7 @@ private XContentBuilder map(Map values, boolean ensureNoSelfReference // checks that the map does not contain references to itself because // iterating over 
map entries will cause a stackoverflow error if (ensureNoSelfReferences) { - ensureNoSelfReferences(values); + CollectionUtils.ensureNoSelfReferences(values); } startObject(); @@ -894,9 +893,8 @@ private XContentBuilder value(Iterable values, boolean ensureNoSelfReferences // checks that the iterable does not contain references to itself because // iterating over entries will cause a stackoverflow error if (ensureNoSelfReferences) { - ensureNoSelfReferences(values); + CollectionUtils.ensureNoSelfReferences(values); } - startArray(); for (Object value : values) { // pass ensureNoSelfReferences=false as we already performed the check at a higher level @@ -1067,32 +1065,4 @@ static void ensureNotNull(Object value, String message) { throw new IllegalArgumentException(message); } } - - static void ensureNoSelfReferences(Object value) { - ensureNoSelfReferences(value, Collections.newSetFromMap(new IdentityHashMap<>())); - } - - private static void ensureNoSelfReferences(final Object value, final Set ancestors) { - if (value != null) { - - Iterable it; - if (value instanceof Map) { - it = ((Map) value).values(); - } else if ((value instanceof Iterable) && (value instanceof Path == false)) { - it = (Iterable) value; - } else if (value instanceof Object[]) { - it = Arrays.asList((Object[]) value); - } else { - return; - } - - if (ancestors.add(value) == false) { - throw new IllegalArgumentException("Object has already been built and is self-referencing itself"); - } - for (Object o : it) { - ensureNoSelfReferences(o, ancestors); - } - ancestors.remove(value); - } - } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java index bebe9f892b6c3..04ef595690a33 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.metrics.scripted; import org.apache.lucene.index.LeafReaderContext; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.SearchScript; @@ -77,6 +78,7 @@ public InternalAggregation buildAggregation(long owningBucketOrdinal) { Object aggregation; if (combineScript != null) { aggregation = combineScript.run(); + CollectionUtils.ensureNoSelfReferences(aggregation); } else { aggregation = params.get("_agg"); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java index 0a56ae2c1cbfa..42337fbce0f98 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java @@ -112,10 +112,11 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext } else { ExecutableScript executableScript = factory.newInstance(vars); Object returned = executableScript.run(); + // no need to check for self references since only numbers are valid if (returned == null) { newBuckets.add(bucket); } 
else { - if (!(returned instanceof Number)) { + if ((returned instanceof Number) == false) { throw new AggregationExecutionException("series_arithmetic script for reducer [" + name() + "] must return a Number"); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java index b5a109e89cbad..6dc2758fa5c25 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java @@ -30,6 +30,7 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.ScorerAware; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.fielddata.AbstractSortingNumericDocValues; import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -460,7 +461,9 @@ public boolean advanceExact(int doc) throws IOException { for (int i = 0; i < count; ++i) { final BytesRef value = bytesValues.nextValue(); script.setNextAggregationValue(value.utf8ToString()); - values[i].copyChars(script.run().toString()); + Object run = script.run(); + CollectionUtils.ensureNoSelfReferences(run); + values[i].copyChars(run.toString()); } sort(); return true; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java index 38950325daa13..662d856603e54 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java @@ -20,6 +20,7 @@ import org.apache.lucene.search.Scorer; import org.elasticsearch.common.lucene.ScorerAware; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortingBinaryDocValues; import org.elasticsearch.script.SearchScript; @@ -44,6 +45,7 @@ private void set(int i, Object o) { if (o == null) { values[i].clear(); } else { + CollectionUtils.ensureNoSelfReferences(o); values[i].copyChars(o.toString()); } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsFetchSubPhase.java index c45734108f56d..948bcc3e0b3ec 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsFetchSubPhase.java @@ -22,6 +22,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.FetchSubPhase; @@ -64,6 +65,7 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept final Object value; try { value = leafScripts[i].run(); + CollectionUtils.ensureNoSelfReferences(value); } catch (RuntimeException e) { if (scriptFields.get(i).ignoreException()) { continue; diff --git 
a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index 331988a183fa9..99668515de5b1 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -341,7 +342,9 @@ public boolean advanceExact(int doc) throws IOException { } @Override public BytesRef binaryValue() { - spare.copyChars(leafScript.run().toString()); + final Object run = leafScript.run(); + CollectionUtils.ensureNoSelfReferences(run); + spare.copyChars(run.toString()); return spare.get(); } }; diff --git a/server/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java b/server/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java index 8c192a2a35091..2ca8189a972fd 100644 --- a/server/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java @@ -25,16 +25,21 @@ import org.apache.lucene.util.Counter; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.SortedSet; import java.util.TreeSet; +import static java.util.Collections.emptyMap; import static org.elasticsearch.common.util.CollectionUtils.eagerPartition; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -176,4 +181,15 @@ public void testPerfectPartition() { eagerPartition(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), 6) ); } + + public void testEnsureNoSelfReferences() { + CollectionUtils.ensureNoSelfReferences(emptyMap()); + CollectionUtils.ensureNoSelfReferences(null); + + Map map = new HashMap<>(); + map.put("field", map); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> CollectionUtils.ensureNoSelfReferences(map)); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); + } } diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index e368163a4e95c..c7205b3200f1c 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.DistanceUnit; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matcher; @@ -854,19 +855,19 @@ public void testEnsureNotNull() { } public void testEnsureNoSelfReferences() throws IOException { - 
XContentBuilder.ensureNoSelfReferences(emptyMap()); - XContentBuilder.ensureNoSelfReferences(null); + CollectionUtils.ensureNoSelfReferences(emptyMap()); + CollectionUtils.ensureNoSelfReferences(null); Map map = new HashMap<>(); map.put("field", map); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder().map(map)); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); } /** * Test that the same map written multiple times do not trigger the self-reference check in - * {@link XContentBuilder#ensureNoSelfReferences(Object)} + * {@link CollectionUtils#ensureNoSelfReferences(Object)} */ public void testRepeatedMapsAndNoSelfReferences() throws Exception { Map mapB = singletonMap("b", "B"); @@ -899,7 +900,7 @@ public void testSelfReferencingMapsOneLevel() throws IOException { map1.put("map0", map0); // map 1 -> map 0 loop IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder().map(map0)); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); } public void testSelfReferencingMapsTwoLevels() throws IOException { @@ -917,7 +918,7 @@ public void testSelfReferencingMapsTwoLevels() throws IOException { map2.put("map0", map0); // map 2 -> map 0 loop IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder().map(map0)); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); } public void testSelfReferencingObjectsArray() throws IOException { @@ -930,13 +931,13 @@ public void testSelfReferencingObjectsArray() throws IOException { .startObject() .field("field", values) .endObject()); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); e = expectThrows(IllegalArgumentException.class, () -> builder() .startObject() .array("field", values) .endObject()); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); } public void testSelfReferencingIterable() throws IOException { @@ -949,7 +950,7 @@ public void testSelfReferencingIterable() throws IOException { .startObject() .field("field", (Iterable) values) .endObject()); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); } public void testSelfReferencingIterableOneLevel() throws IOException { @@ -964,7 +965,7 @@ public void testSelfReferencingIterableOneLevel() throws IOException { .startObject() .field("field", (Iterable) values) .endObject()); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); } public void testSelfReferencingIterableTwoLevels() throws IOException { @@ -984,7 +985,7 @@ public void testSelfReferencingIterableTwoLevels() 
throws IOException { map2.put("map0", map0); // map 2 -> map 0 loop IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> builder().map(map0)); - assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); + assertThat(e.getMessage(), containsString("Iterable object is self-referencing itself")); } public void testChecksForDuplicates() throws Exception { From 049f29710e609818d3a4558988e91a8e0bafbbc3 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 23 Jan 2018 12:14:23 -0500 Subject: [PATCH 91/94] Reindex: log more on rare test failure The test failure tracked by #26758 occurs when we cancel a running reindex request that has been sliced into many children. The main reindex response *looks* canceled but none of the children look canceled. This is super strange because for the main request to look canceled for any length of time one of the children has to be canceled. This change adds additional logging to the test so we have more to go on to debug this the next time it fails. --- .../index/reindex/CancelTests.java | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java index 3ad48d803a437..f21fb45ed7a64 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java @@ -91,6 +91,7 @@ private void testCancel(String action, AbstractBulkByScrollRequestBuilder int numDocs = getNumShards(INDEX).numPrimaries * 10 * builder.request().getSlices(); ALLOWED_OPERATIONS.release(numDocs); + logger.debug("setting up [{}] docs", numDocs); indexRandom(true, false, true, IntStream.range(0, numDocs) .mapToObj(i -> client().prepareIndex(INDEX, TYPE, String.valueOf(i)).setSource("n", i)) .collect(Collectors.toList())); @@ -102,16 +103,21 @@ private void testCancel(String action, AbstractBulkByScrollRequestBuilder // Scroll by 1 so that cancellation is easier to control builder.source().setSize(1); - /* Allow a random number of the documents less the number of workers to be modified by the reindex action. That way at least one - * worker is blocked. */ + /* Allow a random number of the documents less the number of workers + * to be modified by the reindex action. That way at least one worker + * is blocked. */ int numModifiedDocs = randomIntBetween(builder.request().getSlices() * 2, numDocs); + logger.debug("chose to modify [{}] docs", numModifiedDocs); ALLOWED_OPERATIONS.release(numModifiedDocs - builder.request().getSlices()); // Now execute the reindex action... ActionFuture future = builder.execute(); - /* ... and waits for the indexing operation listeners to block. It is important to realize that some of the workers might have - * exhausted their slice while others might have quite a bit left to work on. We can't control that. */ + /* ... and wait for the indexing operation listeners to block. It + * is important to realize that some of the workers might have + * exhausted their slice while others might have quite a bit left + * to work on. We can't control that. 
*/ + logger.debug("waiting for updates to be blocked"); awaitBusy(() -> ALLOWED_OPERATIONS.hasQueuedThreads() && ALLOWED_OPERATIONS.availablePermits() == 0); // Status should show the task running @@ -128,15 +134,19 @@ private void testCancel(String action, AbstractBulkByScrollRequestBuilder cancelTasksResponse.rethrowFailures("Cancel"); assertThat(cancelTasksResponse.getTasks(), hasSize(1)); - // The status should now show canceled. The request will still be in the list because it is (or its children are) still blocked. + /* The status should now show canceled. The request will still be in the + * list because it is (or its children are) still blocked. */ mainTask = client().admin().cluster().prepareGetTask(mainTask.getTaskId()).get().getTask().getTask(); status = (BulkByScrollTask.Status) mainTask.getStatus(); + logger.debug("asserting that parent is marked canceled {}", status); assertEquals(CancelTasksRequest.DEFAULT_REASON, status.getReasonCancelled()); + if (builder.request().getSlices() > 1) { boolean foundCancelled = false; ListTasksResponse sliceList = client().admin().cluster().prepareListTasks().setParentTaskId(mainTask.getTaskId()) .setDetailed(true).get(); sliceList.rethrowFailures("Fetch slice tasks"); + logger.debug("finding at least one canceled child among {}", sliceList.getTasks()); for (TaskInfo slice: sliceList.getTasks()) { BulkByScrollTask.Status sliceStatus = (BulkByScrollTask.Status) slice.getStatus(); if (sliceStatus.getReasonCancelled() == null) continue; @@ -146,7 +156,7 @@ private void testCancel(String action, AbstractBulkByScrollRequestBuilder assertTrue("Didn't find at least one sub task that was cancelled", foundCancelled); } - // Unblock the last operations + logger.debug("unblocking the blocked update"); ALLOWED_OPERATIONS.release(builder.request().getSlices()); // Checks that no more operations are executed From eded5bc4f3776c9841a4995972f29c6c07d81c46 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 23 Jan 2018 13:35:23 -0500 Subject: [PATCH 92/94] Reindex: Wait for deletion in test The test failure tracked by #28053 occurs because we fail to get the failure response from the reindex on the first try and on our second try the delete index API call that was supposed to trigger the failure actually deletes the index during document creation. This causes the test to fail catastrophically. This PR attempts to wait for the failure to finish before the test moves on to the second attempt. The failure doesn't reproduce locally for me so I can't be sure that this helps at all with the failure, but it certainly feels like it should help some. Here is hoping this prevents similar failures in the future. --- .../elasticsearch/index/reindex/ReindexFailureTests.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java index f101b12538289..f5e234f66ca57 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java @@ -107,6 +107,14 @@ public void testResponseOnSearchFailure() throws Exception { response.get(); logger.info("Didn't trigger a reindex failure on the {} attempt", attempt); attempt++; + /* + * In the past we've seen the delete of the source index + * actually take effect *during* the `indexDocs` call in + * the next step. 
This breaks things pretty disastrously
+                 * so we *try* and wait for the delete to be fully
+                 * complete here.
+                 */
+                assertBusy(() -> assertFalse(client().admin().indices().prepareExists("source").get().isExists()));
             } catch (ExecutionException e) {
                 logger.info("Triggered a reindex failure on the {} attempt: {}", attempt, e.getMessage());
                 assertThat(e.getMessage(),

From 7c5619a29a789c10e2b436f83a83484d1cda732b Mon Sep 17 00:00:00 2001
From: Lukas Olson
Date: Tue, 23 Jan 2018 12:29:11 -0700
Subject: [PATCH 93/94] Fix spelling error

---
 docs/reference/query-dsl/query-string-syntax.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/query-dsl/query-string-syntax.asciidoc b/docs/reference/query-dsl/query-string-syntax.asciidoc
index 8a7b394b2e870..c73543c99a1d9 100644
--- a/docs/reference/query-dsl/query-string-syntax.asciidoc
+++ b/docs/reference/query-dsl/query-string-syntax.asciidoc
@@ -56,7 +56,7 @@ match the query string `"a* b* c*"`.
 [WARNING]
 =======
 Pure wildcards `\*` are rewritten to <> queries for efficiency.
-As a consequence, the wildcard `"field:*"` would match documents with an emtpy value
+As a consequence, the wildcard `"field:*"` would match documents with an empty value
 like the following:
 ```
 {

From b94500693815f0eb39f8e53e4d2fc99b27fa7d38 Mon Sep 17 00:00:00 2001
From: Jack Conradson
Date: Tue, 23 Jan 2018 12:38:31 -0800
Subject: [PATCH 94/94] Completely remove Painless Type from AnalyzerCaster in
 favor of Java Class. (#28329)

Second part in a series of PRs to remove Painless Type in favor of Java
Class. This completely removes the Painless Type dependency from
AnalyzerCaster. Both casting and promotion are now based on Java Class
exclusively. This also allows AnalyzerCaster to be decoupled from
Definition and makes cast checks static calls again.
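
To make the shape of this change concrete, here is a minimal caller-side
sketch. Both lines are lifted from the AExpression hunk in the diff below;
the surrounding Locals/Definition plumbing is elided, so treat this as an
illustration rather than a compilable method:

    // Before: cast checks went through the AnalyzerCaster instance held by
    // the Definition and operated on Painless Type objects.
    Cast cast = locals.getDefinition().caster.getLegalCast(location, actual, expected, explicit, internal);

    // After: AnalyzerCaster is static and operates on java.lang.Class;
    // Painless Types are converted at the call site via Definition.TypeToClass.
    Cast cast = AnalyzerCaster.getLegalCast(
        location, Definition.TypeToClass(actual), Definition.TypeToClass(expected), explicit, internal);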
--- .../painless/AnalyzerCaster.java | 239 ++++++++---------- .../elasticsearch/painless/Definition.java | 38 ++- .../painless/node/AExpression.java | 5 +- .../painless/node/EAssignment.java | 43 ++-- .../elasticsearch/painless/node/EBinary.java | 37 ++- .../painless/node/ECapturingFunctionRef.java | 5 +- .../elasticsearch/painless/node/EComp.java | 24 +- .../painless/node/EConditional.java | 3 +- .../elasticsearch/painless/node/EElvis.java | 4 +- .../painless/node/EFunctionRef.java | 5 +- .../elasticsearch/painless/node/ELambda.java | 5 +- .../elasticsearch/painless/node/EUnary.java | 6 +- .../painless/node/SSubEachArray.java | 11 +- .../painless/node/SSubEachIterable.java | 3 +- .../painless/AnalyzerCasterTests.java | 93 ++++--- 15 files changed, 272 insertions(+), 249 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java index 7bae2c7fcad69..abba62de39c19 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java @@ -20,7 +20,6 @@ package org.elasticsearch.painless; import org.elasticsearch.painless.Definition.Cast; -import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Definition.def; import java.util.Objects; @@ -31,26 +30,9 @@ */ public final class AnalyzerCaster { - private Definition definition; - - public AnalyzerCaster(Definition definition) { - this.definition = definition; - } - - public Cast getLegalCast(Location location, Type actualType, Type expectedType, boolean explicit, boolean internal) { - Objects.requireNonNull(actualType); - Objects.requireNonNull(expectedType); - - Class actual = actualType.clazz; - Class expected = expectedType.clazz; - - if (actualType.dynamic) { - actual = Definition.ObjectClassTodefClass(actual); - } - - if (expectedType.dynamic) { - expected = Definition.ObjectClassTodefClass(expected); - } + public static Cast getLegalCast(Location location, Class actual, Class expected, boolean explicit, boolean internal) { + Objects.requireNonNull(actual); + Objects.requireNonNull(expected); if (actual == expected) { return null; @@ -487,7 +469,7 @@ public Cast getLegalCast(Location location, Type actualType, Type expectedType, } } - public Object constCast(Location location, final Object constant, final Cast cast) { + public static Object constCast(Location location, Object constant, Cast cast) { Class fsort = cast.from; Class tsort = cast.to; @@ -498,7 +480,7 @@ public Object constCast(Location location, final Object constant, final Cast cas } else if (fsort == char.class && tsort == String.class) { return Utility.charToString((char)constant); } else if (fsort.isPrimitive() && fsort != boolean.class && tsort.isPrimitive() && tsort != boolean.class) { - final Number number; + Number number; if (fsort == char.class) { number = (int)(char)constant; @@ -523,224 +505,201 @@ public Object constCast(Location location, final Object constant, final Cast cas } } - public Type promoteNumeric(Type from, boolean decimal) { - Class sort = from.clazz; - - if (from.dynamic) { - return definition.DefType; - } else if ((sort == double.class) && decimal) { - return definition.doubleType; - } else if ((sort == float.class) && decimal) { - return definition.floatType; - } else if (sort == long.class) { - return definition.longType; - } else if (sort == int.class || sort == char.class 
|| sort == short.class || sort == byte.class) { - return definition.intType; + public static Class promoteNumeric(Class from, boolean decimal) { + if (from == def.class || from == double.class && decimal || from == float.class && decimal || from == long.class) { + return from; + } else if (from == int.class || from == char.class || from == short.class || from == byte.class) { + return int.class; } return null; } - public Type promoteNumeric(Type from0, Type from1, boolean decimal) { - Class sort0 = from0.clazz; - Class sort1 = from1.clazz; - - if (from0.dynamic || from1.dynamic) { - return definition.DefType; + public static Class promoteNumeric(Class from0, Class from1, boolean decimal) { + if (from0 == def.class || from1 == def.class) { + return def.class; } if (decimal) { - if (sort0 == double.class || sort1 == double.class) { - return definition.doubleType; - } else if (sort0 == float.class || sort1 == float.class) { - return definition.floatType; + if (from0 == double.class || from1 == double.class) { + return double.class; + } else if (from0 == float.class || from1 == float.class) { + return float.class; } } - if (sort0 == long.class || sort1 == long.class) { - return definition.longType; - } else if (sort0 == int.class || sort1 == int.class || - sort0 == char.class || sort1 == char.class || - sort0 == short.class || sort1 == short.class || - sort0 == byte.class || sort1 == byte.class) { - return definition.intType; + if (from0 == long.class || from1 == long.class) { + return long.class; + } else if (from0 == int.class || from1 == int.class || + from0 == char.class || from1 == char.class || + from0 == short.class || from1 == short.class || + from0 == byte.class || from1 == byte.class) { + return int.class; } return null; } - public Type promoteAdd(Type from0, Type from1) { - Class sort0 = from0.clazz; - Class sort1 = from1.clazz; - - if (sort0 == String.class || sort1 == String.class) { - return definition.StringType; + public static Class promoteAdd(Class from0, Class from1) { + if (from0 == String.class || from1 == String.class) { + return String.class; } return promoteNumeric(from0, from1, true); } - public Type promoteXor(Type from0, Type from1) { - Class sort0 = from0.clazz; - Class sort1 = from1.clazz; - - if (from0.dynamic || from1.dynamic) { - return definition.DefType; + public static Class promoteXor(Class from0, Class from1) { + if (from0 == def.class || from1 == def.class) { + return def.class; } - if (sort0 == boolean.class || sort1 == boolean.class) { - return definition.booleanType; + if (from0 == boolean.class || from1 == boolean.class) { + return boolean.class; } return promoteNumeric(from0, from1, false); } - public Type promoteEquality(Type from0, Type from1) { - Class sort0 = from0.clazz; - Class sort1 = from1.clazz; - - if (from0.dynamic || from1.dynamic) { - return definition.DefType; + public static Class promoteEquality(Class from0, Class from1) { + if (from0 == def.class || from1 == def.class) { + return def.class; } - if (sort0.isPrimitive() && sort1.isPrimitive()) { - if (sort0 == boolean.class && sort1 == boolean.class) { - return definition.booleanType; + if (from0.isPrimitive() && from1.isPrimitive()) { + if (from0 == boolean.class && from1 == boolean.class) { + return boolean.class; } return promoteNumeric(from0, from1, true); } - return definition.ObjectType; + return Object.class; } - public Type promoteConditional(Type from0, Type from1, Object const0, Object const1) { - if (from0.equals(from1)) { + public static Class promoteConditional(Class from0, 
Class from1, Object const0, Object const1) { + if (from0 == from1) { return from0; } - Class sort0 = from0.clazz; - Class sort1 = from1.clazz; - - if (from0.dynamic || from1.dynamic) { - return definition.DefType; + if (from0 == def.class || from1 == def.class) { + return def.class; } - if (sort0.isPrimitive() && sort1.isPrimitive()) { - if (sort0 == boolean.class && sort1 == boolean.class) { - return definition.booleanType; + if (from0.isPrimitive() && from1.isPrimitive()) { + if (from0 == boolean.class && from1 == boolean.class) { + return boolean.class; } - if (sort0 == double.class || sort1 == double.class) { - return definition.doubleType; - } else if (sort0 == float.class || sort1 == float.class) { - return definition.floatType; - } else if (sort0 == long.class || sort1 == long.class) { - return definition.longType; + if (from0 == double.class || from1 == double.class) { + return double.class; + } else if (from0 == float.class || from1 == float.class) { + return float.class; + } else if (from0 == long.class || from1 == long.class) { + return long.class; } else { - if (sort0 == byte.class) { - if (sort1 == byte.class) { - return definition.byteType; - } else if (sort1 == short.class) { + if (from0 == byte.class) { + if (from1 == byte.class) { + return byte.class; + } else if (from1 == short.class) { if (const1 != null) { final short constant = (short)const1; if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { - return definition.byteType; + return byte.class; } } - return definition.shortType; - } else if (sort1 == char.class) { - return definition.intType; - } else if (sort1 == int.class) { + return short.class; + } else if (from1 == char.class) { + return int.class; + } else if (from1 == int.class) { if (const1 != null) { final int constant = (int)const1; if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { - return definition.byteType; + return byte.class; } } - return definition.intType; + return int.class; } - } else if (sort0 == short.class) { - if (sort1 == byte.class) { + } else if (from0 == short.class) { + if (from1 == byte.class) { if (const0 != null) { final short constant = (short)const0; if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { - return definition.byteType; + return byte.class; } } - return definition.shortType; - } else if (sort1 == short.class) { - return definition.shortType; - } else if (sort1 == char.class) { - return definition.intType; - } else if (sort1 == int.class) { + return short.class; + } else if (from1 == short.class) { + return short.class; + } else if (from1 == char.class) { + return int.class; + } else if (from1 == int.class) { if (const1 != null) { final int constant = (int)const1; if (constant <= Short.MAX_VALUE && constant >= Short.MIN_VALUE) { - return definition.shortType; + return short.class; } } - return definition.intType; + return int.class; } - } else if (sort0 == char.class) { - if (sort1 == byte.class) { - return definition.intType; - } else if (sort1 == short.class) { - return definition.intType; - } else if (sort1 == char.class) { - return definition.charType; - } else if (sort1 == int.class) { + } else if (from0 == char.class) { + if (from1 == byte.class) { + return int.class; + } else if (from1 == short.class) { + return int.class; + } else if (from1 == char.class) { + return char.class; + } else if (from1 == int.class) { if (const1 != null) { final int constant = (int)const1; if (constant <= Character.MAX_VALUE && constant >= Character.MIN_VALUE) { - return definition.byteType; + return 
byte.class; } } - return definition.intType; + return int.class; } - } else if (sort0 == int.class) { - if (sort1 == byte.class) { + } else if (from0 == int.class) { + if (from1 == byte.class) { if (const0 != null) { final int constant = (int)const0; if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { - return definition.byteType; + return byte.class; } } - return definition.intType; - } else if (sort1 == short.class) { + return int.class; + } else if (from1 == short.class) { if (const0 != null) { final int constant = (int)const0; if (constant <= Short.MAX_VALUE && constant >= Short.MIN_VALUE) { - return definition.byteType; + return byte.class; } } - return definition.intType; - } else if (sort1 == char.class) { + return int.class; + } else if (from1 == char.class) { if (const0 != null) { final int constant = (int)const0; if (constant <= Character.MAX_VALUE && constant >= Character.MIN_VALUE) { - return definition.byteType; + return byte.class; } } - return definition.intType; - } else if (sort1 == int.class) { - return definition.intType; + return int.class; + } else if (from1 == int.class) { + return int.class; } } } @@ -750,6 +709,10 @@ public Type promoteConditional(Type from0, Type from1, Object const0, Object con // TODO: to calculate the highest upper bound for the two types and return that. // TODO: However, for now we just return objectType that may require an extra cast. - return definition.ObjectType; + return Object.class; + } + + private AnalyzerCaster() { + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java index 52f0c2c63302d..36c072570ec14 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java @@ -591,7 +591,39 @@ public static String ClassToName(Class clazz) { return "def"; } - return clazz.getCanonicalName(); + return clazz.getCanonicalName().replace('$', '.'); + } + + public Type ClassToType(Class clazz) { + if (clazz == null) { + return null; + } else if (clazz.isArray()) { + Class component = clazz.getComponentType(); + int dimensions = 1; + + while (component.isArray()) { + component = component.getComponentType(); + ++dimensions; + } + + if (clazz == def.class) { + return getType(structsMap.get("def"), dimensions); + } else { + return getType(runtimeMap.get(clazz).struct, dimensions); + } + } else if (clazz == def.class) { + return getType(structsMap.get("def"), 0); + } + + return getType(structsMap.get(ClassToName(clazz)), 0); + } + + public static Class TypeToClass (Type type) { + if (type.dynamic) { + return ObjectClassTodefClass(type.clazz); + } + + return type.clazz; } public RuntimeClass getRuntimeClass(Class clazz) { @@ -631,8 +663,6 @@ private static String buildFieldCacheKey(String structName, String fieldName, St private final Map structsMap; private final Map simpleTypesMap; - public AnalyzerCaster caster; - public Definition(List whitelists) { structsMap = new HashMap<>(); simpleTypesMap = new HashMap<>(); @@ -814,8 +844,6 @@ public Definition(List whitelists) { IteratorType = getType("Iterator"); ArrayListType = getType("ArrayList"); HashMapType = getType("HashMap"); - - caster = new AnalyzerCaster(this); } private void addStruct(ClassLoader whitelistClassLoader, Whitelist.Struct whitelistStruct) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java index 2ca0b265430f9..eaa13ea9a8b17 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java @@ -119,7 +119,8 @@ public abstract class AExpression extends ANode { * @return The new child node for the parent node calling this method. */ AExpression cast(Locals locals) { - Cast cast = locals.getDefinition().caster.getLegalCast(location, actual, expected, explicit, internal); + Cast cast = + AnalyzerCaster.getLegalCast(location, Definition.TypeToClass(actual), Definition.TypeToClass(expected), explicit, internal); if (cast == null) { if (constant == null || this instanceof EConstant) { @@ -167,7 +168,7 @@ AExpression cast(Locals locals) { // from this node because the output data for the EConstant // will already be the same. - constant = locals.getDefinition().caster.constCast(location, constant, cast); + constant = AnalyzerCaster.constCast(location, constant, cast); EConstant econstant = new EConstant(location, constant); econstant.analyze(locals); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java index 873f109e72d47..45ca4601e963d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java @@ -19,7 +19,10 @@ package org.elasticsearch.painless.node; + +import org.elasticsearch.painless.AnalyzerCaster; import org.elasticsearch.painless.DefBootstrap; +import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Definition.Cast; import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Globals; @@ -139,33 +142,41 @@ private void analyzeCompound(Locals locals) { boolean shift = false; if (operation == Operation.MUL) { - promote = locals.getDefinition().caster.promoteNumeric(lhs.actual, rhs.actual, true); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual), true)); } else if (operation == Operation.DIV) { - promote = locals.getDefinition().caster.promoteNumeric(lhs.actual, rhs.actual, true); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual), true)); } else if (operation == Operation.REM) { - promote = locals.getDefinition().caster.promoteNumeric(lhs.actual, rhs.actual, true); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual), true)); } else if (operation == Operation.ADD) { - promote = locals.getDefinition().caster.promoteAdd(lhs.actual, rhs.actual); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteAdd(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual))); } else if (operation == Operation.SUB) { - promote = locals.getDefinition().caster.promoteNumeric(lhs.actual, rhs.actual, true); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual), true)); } else if (operation == Operation.LSH) { - promote = 
locals.getDefinition().caster.promoteNumeric(lhs.actual, false); - shiftDistance = locals.getDefinition().caster.promoteNumeric(rhs.actual, false); + promote = locals.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(lhs.actual), false)); + shiftDistance = locals.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(rhs.actual), false)); shift = true; } else if (operation == Operation.RSH) { - promote = locals.getDefinition().caster.promoteNumeric(lhs.actual, false); - shiftDistance = locals.getDefinition().caster.promoteNumeric(rhs.actual, false); + promote = locals.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(lhs.actual), false)); + shiftDistance = locals.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(rhs.actual), false)); shift = true; } else if (operation == Operation.USH) { - promote = locals.getDefinition().caster.promoteNumeric(lhs.actual, false); - shiftDistance = locals.getDefinition().caster.promoteNumeric(rhs.actual, false); + promote = locals.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(lhs.actual), false)); + shiftDistance = locals.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(rhs.actual), false)); shift = true; } else if (operation == Operation.BWAND) { - promote = locals.getDefinition().caster.promoteXor(lhs.actual, rhs.actual); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteXor(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual))); } else if (operation == Operation.XOR) { - promote = locals.getDefinition().caster.promoteXor(lhs.actual, rhs.actual); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteXor(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual))); } else if (operation == Operation.BWOR) { - promote = locals.getDefinition().caster.promoteXor(lhs.actual, rhs.actual); + promote = locals.getDefinition().ClassToType( + AnalyzerCaster.promoteXor(Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual))); } else { throw createError(new IllegalStateException("Illegal tree structure.")); } @@ -199,8 +210,8 @@ private void analyzeCompound(Locals locals) { rhs = rhs.cast(locals); - there = locals.getDefinition().caster.getLegalCast(location, lhs.actual, promote, false, false); - back = locals.getDefinition().caster.getLegalCast(location, promote, lhs.actual, true, false); + there = AnalyzerCaster.getLegalCast(location, Definition.TypeToClass(lhs.actual), Definition.TypeToClass(promote), false, false); + back = AnalyzerCaster.getLegalCast(location, Definition.TypeToClass(promote), Definition.TypeToClass(lhs.actual), true, false); this.statement = true; this.actual = read ? 
lhs.actual : locals.getDefinition().voidType; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java index df92d72a3c0c5..55c2145acd8cd 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java @@ -19,6 +19,7 @@ package org.elasticsearch.painless.node; +import org.elasticsearch.painless.AnalyzerCaster; import org.elasticsearch.painless.DefBootstrap; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Definition.Type; @@ -101,7 +102,8 @@ private void analyzeMul(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true)); if (promote == null) { throw createError(new ClassCastException("Cannot apply multiply [*] to types " + @@ -145,7 +147,8 @@ private void analyzeDiv(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true)); if (promote == null) { throw createError(new ClassCastException("Cannot apply divide [/] to types " + @@ -194,7 +197,8 @@ private void analyzeRem(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true)); if (promote == null) { throw createError(new ClassCastException("Cannot apply remainder [%] to types " + @@ -243,7 +247,8 @@ private void analyzeAdd(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteAdd(left.actual, right.actual); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteAdd(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual))); if (promote == null) { throw createError(new ClassCastException("Cannot apply add [+] to types " + @@ -303,7 +308,8 @@ private void analyzeSub(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true)); if (promote == null) { throw createError(new ClassCastException("Cannot apply subtract [-] to types " + @@ -362,8 +368,8 @@ private void analyzeLSH(Locals variables) { left.analyze(variables); right.analyze(variables); - Type lhspromote = variables.getDefinition().caster.promoteNumeric(left.actual, false); - Type rhspromote = variables.getDefinition().caster.promoteNumeric(right.actual, false); + Type lhspromote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), false)); + Type rhspromote = 
variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(right.actual), false)); if (lhspromote == null || rhspromote == null) { throw createError(new ClassCastException("Cannot apply left shift [<<] to types " + @@ -411,8 +417,8 @@ private void analyzeRSH(Locals variables) { left.analyze(variables); right.analyze(variables); - Type lhspromote = variables.getDefinition().caster.promoteNumeric(left.actual, false); - Type rhspromote = variables.getDefinition().caster.promoteNumeric(right.actual, false); + Type lhspromote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), false)); + Type rhspromote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(right.actual), false)); if (lhspromote == null || rhspromote == null) { throw createError(new ClassCastException("Cannot apply right shift [>>] to types " + @@ -460,8 +466,8 @@ private void analyzeUSH(Locals variables) { left.analyze(variables); right.analyze(variables); - Type lhspromote = variables.getDefinition().caster.promoteNumeric(left.actual, false); - Type rhspromote = variables.getDefinition().caster.promoteNumeric(right.actual, false); + Type lhspromote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), false)); + Type rhspromote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(right.actual), false)); actual = promote = lhspromote; shiftDistance = rhspromote; @@ -509,7 +515,8 @@ private void analyzeBWAnd(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, false); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), false)); if (promote == null) { throw createError(new ClassCastException("Cannot apply and [&] to types " + @@ -550,7 +557,8 @@ private void analyzeXor(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteXor(left.actual, right.actual); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteXor(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual))); if (promote == null) { throw createError(new ClassCastException("Cannot apply xor [^] to types " + @@ -592,7 +600,8 @@ private void analyzeBWOr(Locals variables) { left.analyze(variables); right.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, false); + promote = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), false)); if (promote == null) { throw createError(new ClassCastException("Cannot apply or [|] to types " + diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java index 564fcef8eef9f..e736b2779f932 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java @@ -82,11 +82,12 @@ void analyze(Locals locals) { for (int i = 0; i < ref.interfaceMethod.arguments.size(); ++i) { Definition.Type 
from = ref.interfaceMethod.arguments.get(i); Definition.Type to = ref.delegateMethod.arguments.get(i); - locals.getDefinition().caster.getLegalCast(location, from, to, false, true); + AnalyzerCaster.getLegalCast(location, Definition.TypeToClass(from), Definition.TypeToClass(to), false, true); } if (ref.interfaceMethod.rtn.equals(locals.getDefinition().voidType) == false) { - locals.getDefinition().caster.getLegalCast(location, ref.delegateMethod.rtn, ref.interfaceMethod.rtn, false, true); + AnalyzerCaster.getLegalCast(location, + Definition.TypeToClass(ref.delegateMethod.rtn), Definition.TypeToClass(ref.interfaceMethod.rtn), false, true); } } catch (IllegalArgumentException e) { throw createError(e); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java index 020ea48cd4c1b..a7bb57a1a35a4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java @@ -89,7 +89,8 @@ private void analyzeEq(Locals variables) { left.analyze(variables); right.analyze(variables); - promotedType = variables.getDefinition().caster.promoteEquality(left.actual, right.actual); + promotedType = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteEquality(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual))); if (promotedType == null) { throw createError(new ClassCastException("Cannot apply equals [==] to types " + @@ -140,7 +141,8 @@ private void analyzeEqR(Locals variables) { left.analyze(variables); right.analyze(variables); - promotedType = variables.getDefinition().caster.promoteEquality(left.actual, right.actual); + promotedType = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteEquality(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual))); if (promotedType == null) { throw createError(new ClassCastException("Cannot apply reference equals [===] to types " + @@ -182,7 +184,8 @@ private void analyzeNE(Locals variables) { left.analyze(variables); right.analyze(variables); - promotedType = variables.getDefinition().caster.promoteEquality(left.actual, right.actual); + promotedType = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteEquality(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual))); if (promotedType == null) { throw createError(new ClassCastException("Cannot apply not equals [!=] to types " + @@ -233,7 +236,8 @@ private void analyzeNER(Locals variables) { left.analyze(variables); right.analyze(variables); - promotedType = variables.getDefinition().caster.promoteEquality(left.actual, right.actual); + promotedType = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteEquality(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual))); if (promotedType == null) { throw createError(new ClassCastException("Cannot apply reference not equals [!==] to types " + @@ -275,7 +279,8 @@ private void analyzeGTE(Locals variables) { left.analyze(variables); right.analyze(variables); - promotedType = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true); + promotedType = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true)); if (promotedType == null) { throw createError(new ClassCastException("Cannot apply greater than or equals [>=] to 
types " + @@ -316,7 +321,8 @@ private void analyzeGT(Locals variables) { left.analyze(variables); right.analyze(variables); - promotedType = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true); + promotedType = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true)); if (promotedType == null) { throw createError(new ClassCastException("Cannot apply greater than [>] to types " + @@ -357,7 +363,8 @@ private void analyzeLTE(Locals variables) { left.analyze(variables); right.analyze(variables); - promotedType = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true); + promotedType = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true)); if (promotedType == null) { throw createError(new ClassCastException("Cannot apply less than or equals [<=] to types " + @@ -398,7 +405,8 @@ private void analyzeLT(Locals variables) { left.analyze(variables); right.analyze(variables); - promotedType = variables.getDefinition().caster.promoteNumeric(left.actual, right.actual, true); + promotedType = variables.getDefinition().ClassToType( + AnalyzerCaster.promoteNumeric(Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), true)); if (promotedType == null) { throw createError(new ClassCastException("Cannot apply less than [>=] to types " + diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java index 571e57cad24db..30a3d0d773f23 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java @@ -79,7 +79,8 @@ void analyze(Locals locals) { right.analyze(locals); if (expected == null) { - final Type promote = locals.getDefinition().caster.promoteConditional(left.actual, right.actual, left.constant, right.constant); + Type promote = locals.getDefinition().ClassToType(AnalyzerCaster.promoteConditional( + Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), left.constant, right.constant)); left.expected = promote; right.expected = promote; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EElvis.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EElvis.java index e9816c524bf3b..6005a326fe92a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EElvis.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EElvis.java @@ -20,6 +20,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.AnalyzerCaster; +import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; @@ -81,7 +82,8 @@ void analyze(Locals locals) { } if (expected == null) { - final Type promote = locals.getDefinition().caster.promoteConditional(lhs.actual, rhs.actual, lhs.constant, rhs.constant); + Type promote = locals.getDefinition().ClassToType(AnalyzerCaster.promoteConditional( + Definition.TypeToClass(lhs.actual), Definition.TypeToClass(rhs.actual), lhs.constant, rhs.constant)); lhs.expected = promote; rhs.expected = promote; diff --git 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java index ffbb344f29cb9..13289809e49da 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java @@ -82,11 +82,12 @@ void analyze(Locals locals) { for (int i = 0; i < interfaceMethod.arguments.size(); ++i) { Definition.Type from = interfaceMethod.arguments.get(i); Definition.Type to = delegateMethod.arguments.get(i); - locals.getDefinition().caster.getLegalCast(location, from, to, false, true); + AnalyzerCaster.getLegalCast(location, Definition.TypeToClass(from), Definition.TypeToClass(to), false, true); } if (interfaceMethod.rtn.equals(locals.getDefinition().voidType) == false) { - locals.getDefinition().caster.getLegalCast(location, delegateMethod.rtn, interfaceMethod.rtn, false, true); + AnalyzerCaster.getLegalCast( + location, Definition.TypeToClass(delegateMethod.rtn), Definition.TypeToClass(interfaceMethod.rtn), false, true); } } else { // whitelist lookup diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java index 07de9138e7ca4..68950f5ea2a8b 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java @@ -192,11 +192,12 @@ void analyze(Locals locals) { for (int i = 0; i < interfaceMethod.arguments.size(); ++i) { Type from = interfaceMethod.arguments.get(i); Type to = desugared.parameters.get(i + captures.size()).type; - locals.getDefinition().caster.getLegalCast(location, from, to, false, true); + AnalyzerCaster.getLegalCast(location, Definition.TypeToClass(from), Definition.TypeToClass(to), false, true); } if (interfaceMethod.rtn.equals(locals.getDefinition().voidType) == false) { - locals.getDefinition().caster.getLegalCast(location, desugared.rtnType, interfaceMethod.rtn, false, true); + AnalyzerCaster.getLegalCast( + location, Definition.TypeToClass(desugared.rtnType), Definition.TypeToClass(interfaceMethod.rtn), false, true); } actual = expected; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java index e9971b538f5af..aa81407819eb9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java @@ -90,7 +90,7 @@ void analyzeNot(Locals variables) { void analyzeBWNot(Locals variables) { child.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(child.actual, false); + promote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(child.actual), false)); if (promote == null) { throw createError(new ClassCastException("Cannot apply not [~] to type [" + child.actual.name + "].")); @@ -121,7 +121,7 @@ void analyzeBWNot(Locals variables) { void analyzerAdd(Locals variables) { child.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(child.actual, true); + promote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(child.actual), true)); if (promote == null) { throw createError(new 
ClassCastException("Cannot apply positive [+] to type [" + child.actual.name + "].")); @@ -156,7 +156,7 @@ void analyzerAdd(Locals variables) { void analyzerSub(Locals variables) { child.analyze(variables); - promote = variables.getDefinition().caster.promoteNumeric(child.actual, true); + promote = variables.getDefinition().ClassToType(AnalyzerCaster.promoteNumeric(Definition.TypeToClass(child.actual), true)); if (promote == null) { throw createError(new ClassCastException("Cannot apply negative [-] to type [" + child.actual.name + "].")); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java index 09c73c525bec0..a4c2eb8cd22cf 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java @@ -64,13 +64,10 @@ void extractVariables(Set variables) { void analyze(Locals locals) { // We must store the array and index as variables for securing slots on the stack, and // also add the location offset to make the names unique in case of nested for each loops. - array = locals.addVariable(location, expression.actual, "#array" + location.getOffset(), - true); - index = locals.addVariable(location, locals.getDefinition().intType, "#index" + location.getOffset(), - true); - indexed = locals.getDefinition().getType(expression.actual.struct, - expression.actual.dimensions - 1); - cast = locals.getDefinition().caster.getLegalCast(location, indexed, variable.type, true, true); + array = locals.addVariable(location, expression.actual, "#array" + location.getOffset(), true); + index = locals.addVariable(location, locals.getDefinition().intType, "#index" + location.getOffset(), true); + indexed = locals.getDefinition().getType(expression.actual.struct, expression.actual.dimensions - 1); + cast = AnalyzerCaster.getLegalCast(location, Definition.TypeToClass(indexed), Definition.TypeToClass(variable.type), true, true); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java index a51a459f0f3f8..26fb4a2f8459a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java @@ -25,6 +25,7 @@ import org.elasticsearch.painless.Definition.Cast; import org.elasticsearch.painless.Definition.Method; import org.elasticsearch.painless.Definition.MethodKey; +import org.elasticsearch.painless.Definition.def; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Locals.Variable; @@ -85,7 +86,7 @@ void analyze(Locals locals) { } } - cast = locals.getDefinition().caster.getLegalCast(location, locals.getDefinition().DefType, variable.type, true, true); + cast = AnalyzerCaster.getLegalCast(location, def.class, Definition.TypeToClass(variable.type), true, true); } @Override diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java index b8fe248601764..69abc3481a188 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java +++ 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.painless; import org.elasticsearch.painless.Definition.Cast; -import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.test.ESTestCase; @@ -28,73 +27,73 @@ public class AnalyzerCasterTests extends ESTestCase { private static final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); - private static void assertCast(Type actual, Type expected, boolean mustBeExplicit) { + private static void assertCast(Class actual, Class expected, boolean mustBeExplicit) { Location location = new Location("dummy", 0); if (actual.equals(expected)) { assertFalse(mustBeExplicit); - assertNull(definition.caster.getLegalCast(location, actual, expected, false, false)); - assertNull(definition.caster.getLegalCast(location, actual, expected, true, false)); + assertNull(AnalyzerCaster.getLegalCast(location, actual, expected, false, false)); + assertNull(AnalyzerCaster.getLegalCast(location, actual, expected, true, false)); return; } - Cast cast = definition.caster.getLegalCast(location, actual, expected, true, false); - assertEquals(actual.clazz, cast.from); - assertEquals(expected.clazz, cast.to); + Cast cast = AnalyzerCaster.getLegalCast(location, actual, expected, true, false); + assertEquals(actual, cast.from); + assertEquals(expected, cast.to); if (mustBeExplicit) { ClassCastException error = expectThrows(ClassCastException.class, - () -> definition.caster.getLegalCast(location, actual, expected, false, false)); + () -> AnalyzerCaster.getLegalCast(location, actual, expected, false, false)); assertTrue(error.getMessage().startsWith("Cannot cast")); } else { - cast = definition.caster.getLegalCast(location, actual, expected, false, false); - assertEquals(actual.clazz, cast.from); - assertEquals(expected.clazz, cast.to); + cast = AnalyzerCaster.getLegalCast(location, actual, expected, false, false); + assertEquals(actual, cast.from); + assertEquals(expected, cast.to); } } public void testNumericCasts() { - assertCast(definition.byteType, definition.byteType, false); - assertCast(definition.byteType, definition.shortType, false); - assertCast(definition.byteType, definition.intType, false); - assertCast(definition.byteType, definition.longType, false); - assertCast(definition.byteType, definition.floatType, false); - assertCast(definition.byteType, definition.doubleType, false); + assertCast(byte.class, byte.class, false); + assertCast(byte.class, short.class, false); + assertCast(byte.class, int.class, false); + assertCast(byte.class, long.class, false); + assertCast(byte.class, float.class, false); + assertCast(byte.class, double.class, false); - assertCast(definition.shortType, definition.byteType, true); - assertCast(definition.shortType, definition.shortType, false); - assertCast(definition.shortType, definition.intType, false); - assertCast(definition.shortType, definition.longType, false); - assertCast(definition.shortType, definition.floatType, false); - assertCast(definition.shortType, definition.doubleType, false); + assertCast(short.class, byte.class, true); + assertCast(short.class, short.class, false); + assertCast(short.class, int.class, false); + assertCast(short.class, long.class, false); + assertCast(short.class, float.class, false); + assertCast(short.class, double.class, false); - assertCast(definition.intType, definition.byteType, true); - assertCast(definition.intType, 
definition.shortType, true); - assertCast(definition.intType, definition.intType, false); - assertCast(definition.intType, definition.longType, false); - assertCast(definition.intType, definition.floatType, false); - assertCast(definition.intType, definition.doubleType, false); + assertCast(int.class, byte.class, true); + assertCast(int.class, short.class, true); + assertCast(int.class, int.class, false); + assertCast(int.class, long.class, false); + assertCast(int.class, float.class, false); + assertCast(int.class, double.class, false); - assertCast(definition.longType, definition.byteType, true); - assertCast(definition.longType, definition.shortType, true); - assertCast(definition.longType, definition.intType, true); - assertCast(definition.longType, definition.longType, false); - assertCast(definition.longType, definition.floatType, false); - assertCast(definition.longType, definition.doubleType, false); + assertCast(long.class, byte.class, true); + assertCast(long.class, short.class, true); + assertCast(long.class, int.class, true); + assertCast(long.class, long.class, false); + assertCast(long.class, float.class, false); + assertCast(long.class, double.class, false); - assertCast(definition.floatType, definition.byteType, true); - assertCast(definition.floatType, definition.shortType, true); - assertCast(definition.floatType, definition.intType, true); - assertCast(definition.floatType, definition.longType, true); - assertCast(definition.floatType, definition.floatType, false); - assertCast(definition.floatType, definition.doubleType, false); + assertCast(float.class, byte.class, true); + assertCast(float.class, short.class, true); + assertCast(float.class, int.class, true); + assertCast(float.class, long.class, true); + assertCast(float.class, float.class, false); + assertCast(float.class, double.class, false); - assertCast(definition.doubleType, definition.byteType, true); - assertCast(definition.doubleType, definition.shortType, true); - assertCast(definition.doubleType, definition.intType, true); - assertCast(definition.doubleType, definition.longType, true); - assertCast(definition.doubleType, definition.floatType, true); - assertCast(definition.doubleType, definition.doubleType, false); + assertCast(double.class, byte.class, true); + assertCast(double.class, short.class, true); + assertCast(double.class, int.class, true); + assertCast(double.class, long.class, true); + assertCast(double.class, float.class, true); + assertCast(double.class, double.class, false); } }
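
A closing note on the table above: the expectations mirror Java's own
primitive conversion rules, where widening conversions are implicit and
narrowing conversions require an explicit cast. A self-contained plain-Java
sketch of a few of the same pairs (not part of this patch) behaves
identically:

    public class CastDemo {
        public static void main(String[] args) {
            byte b = 42;
            int i = b;            // byte -> int widens implicitly (mustBeExplicit == false)
            double d = i;         // int -> double widens implicitly
            long l = (long) d;    // double -> long narrows, so the cast must be explicit
            short s = (short) l;  // long -> short likewise requires an explicit cast
            System.out.println(i + " " + d + " " + l + " " + s);
        }
    }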